Fix pedantic compiler warnings

Allows building the library with the gcc -pedantic option, for improved
portability. In particular, this commit removes usage of C99/C++ style
single-line comments and dynamic struct initializers. This is a
continuation of the work done in commit 97b766a46, which removed most
of these warnings for decode only builds.

Change-Id: Id453d9c1d9f44cc0381b10c3869fabb0184d5966
This commit is contained in:
John Koleszar 2012-05-21 14:30:56 -07:00
parent 30fb976e3e
commit 0164a1cc5b
50 changed files with 2725 additions and 2447 deletions

View File

@ -1,16 +1,16 @@
// Copyright (c) 2010 The WebM project authors. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MKV_DEFS_HPP
#define MKV_DEFS_HPP 1
//Commenting out values not available in webm, but available in matroska
/* Commenting out values not available in webm, but available in matroska */
enum mkv
{
@ -22,7 +22,7 @@ enum mkv
DocType = 0x4282,
DocTypeVersion = 0x4287,
DocTypeReadVersion = 0x4285,
// CRC_32 = 0xBF,
/* CRC_32 = 0xBF, */
Void = 0xEC,
SignatureSlot = 0x1B538667,
SignatureAlgo = 0x7E8A,
@ -32,61 +32,61 @@ enum mkv
SignatureElements = 0x7E5B,
SignatureElementList = 0x7E7B,
SignedElement = 0x6532,
//segment
/* segment */
Segment = 0x18538067,
//Meta Seek Information
/* Meta Seek Information */
SeekHead = 0x114D9B74,
Seek = 0x4DBB,
SeekID = 0x53AB,
SeekPosition = 0x53AC,
//Segment Information
/* Segment Information */
Info = 0x1549A966,
// SegmentUID = 0x73A4,
// SegmentFilename = 0x7384,
// PrevUID = 0x3CB923,
// PrevFilename = 0x3C83AB,
// NextUID = 0x3EB923,
// NextFilename = 0x3E83BB,
// SegmentFamily = 0x4444,
// ChapterTranslate = 0x6924,
// ChapterTranslateEditionUID = 0x69FC,
// ChapterTranslateCodec = 0x69BF,
// ChapterTranslateID = 0x69A5,
/* SegmentUID = 0x73A4, */
/* SegmentFilename = 0x7384, */
/* PrevUID = 0x3CB923, */
/* PrevFilename = 0x3C83AB, */
/* NextUID = 0x3EB923, */
/* NextFilename = 0x3E83BB, */
/* SegmentFamily = 0x4444, */
/* ChapterTranslate = 0x6924, */
/* ChapterTranslateEditionUID = 0x69FC, */
/* ChapterTranslateCodec = 0x69BF, */
/* ChapterTranslateID = 0x69A5, */
TimecodeScale = 0x2AD7B1,
Segment_Duration = 0x4489,
DateUTC = 0x4461,
// Title = 0x7BA9,
/* Title = 0x7BA9, */
MuxingApp = 0x4D80,
WritingApp = 0x5741,
//Cluster
/* Cluster */
Cluster = 0x1F43B675,
Timecode = 0xE7,
// SilentTracks = 0x5854,
// SilentTrackNumber = 0x58D7,
// Position = 0xA7,
/* SilentTracks = 0x5854, */
/* SilentTrackNumber = 0x58D7, */
/* Position = 0xA7, */
PrevSize = 0xAB,
BlockGroup = 0xA0,
Block = 0xA1,
// BlockVirtual = 0xA2,
// BlockAdditions = 0x75A1,
// BlockMore = 0xA6,
// BlockAddID = 0xEE,
// BlockAdditional = 0xA5,
/* BlockVirtual = 0xA2, */
/* BlockAdditions = 0x75A1, */
/* BlockMore = 0xA6, */
/* BlockAddID = 0xEE, */
/* BlockAdditional = 0xA5, */
BlockDuration = 0x9B,
// ReferencePriority = 0xFA,
/* ReferencePriority = 0xFA, */
ReferenceBlock = 0xFB,
// ReferenceVirtual = 0xFD,
// CodecState = 0xA4,
// Slices = 0x8E,
// TimeSlice = 0xE8,
/* ReferenceVirtual = 0xFD, */
/* CodecState = 0xA4, */
/* Slices = 0x8E, */
/* TimeSlice = 0xE8, */
LaceNumber = 0xCC,
// FrameNumber = 0xCD,
// BlockAdditionID = 0xCB,
// MkvDelay = 0xCE,
// Cluster_Duration = 0xCF,
/* FrameNumber = 0xCD, */
/* BlockAdditionID = 0xCB, */
/* MkvDelay = 0xCE, */
/* Cluster_Duration = 0xCF, */
SimpleBlock = 0xA3,
// EncryptedBlock = 0xAF,
//Track
/* EncryptedBlock = 0xAF, */
/* Track */
Tracks = 0x1654AE6B,
TrackEntry = 0xAE,
TrackNumber = 0xD7,
@ -96,28 +96,28 @@ enum mkv
FlagDefault = 0x88,
FlagForced = 0x55AA,
FlagLacing = 0x9C,
// MinCache = 0x6DE7,
// MaxCache = 0x6DF8,
/* MinCache = 0x6DE7, */
/* MaxCache = 0x6DF8, */
DefaultDuration = 0x23E383,
// TrackTimecodeScale = 0x23314F,
// TrackOffset = 0x537F,
// MaxBlockAdditionID = 0x55EE,
/* TrackTimecodeScale = 0x23314F, */
/* TrackOffset = 0x537F, */
/* MaxBlockAdditionID = 0x55EE, */
Name = 0x536E,
Language = 0x22B59C,
CodecID = 0x86,
CodecPrivate = 0x63A2,
CodecName = 0x258688,
// AttachmentLink = 0x7446,
// CodecSettings = 0x3A9697,
// CodecInfoURL = 0x3B4040,
// CodecDownloadURL = 0x26B240,
// CodecDecodeAll = 0xAA,
// TrackOverlay = 0x6FAB,
// TrackTranslate = 0x6624,
// TrackTranslateEditionUID = 0x66FC,
// TrackTranslateCodec = 0x66BF,
// TrackTranslateTrackID = 0x66A5,
//video
/* AttachmentLink = 0x7446, */
/* CodecSettings = 0x3A9697, */
/* CodecInfoURL = 0x3B4040, */
/* CodecDownloadURL = 0x26B240, */
/* CodecDecodeAll = 0xAA, */
/* TrackOverlay = 0x6FAB, */
/* TrackTranslate = 0x6624, */
/* TrackTranslateEditionUID = 0x66FC, */
/* TrackTranslateCodec = 0x66BF, */
/* TrackTranslateTrackID = 0x66A5, */
/* video */
Video = 0xE0,
FlagInterlaced = 0x9A,
StereoMode = 0x53B8,
@ -131,101 +131,101 @@ enum mkv
DisplayHeight = 0x54BA,
DisplayUnit = 0x54B2,
AspectRatioType = 0x54B3,
// ColourSpace = 0x2EB524,
// GammaValue = 0x2FB523,
/* ColourSpace = 0x2EB524, */
/* GammaValue = 0x2FB523, */
FrameRate = 0x2383E3,
//end video
//audio
/* end video */
/* audio */
Audio = 0xE1,
SamplingFrequency = 0xB5,
OutputSamplingFrequency = 0x78B5,
Channels = 0x9F,
// ChannelPositions = 0x7D7B,
/* ChannelPositions = 0x7D7B, */
BitDepth = 0x6264,
//end audio
//content encoding
// ContentEncodings = 0x6d80,
// ContentEncoding = 0x6240,
// ContentEncodingOrder = 0x5031,
// ContentEncodingScope = 0x5032,
// ContentEncodingType = 0x5033,
// ContentCompression = 0x5034,
// ContentCompAlgo = 0x4254,
// ContentCompSettings = 0x4255,
// ContentEncryption = 0x5035,
// ContentEncAlgo = 0x47e1,
// ContentEncKeyID = 0x47e2,
// ContentSignature = 0x47e3,
// ContentSigKeyID = 0x47e4,
// ContentSigAlgo = 0x47e5,
// ContentSigHashAlgo = 0x47e6,
//end content encoding
//Cueing Data
/* end audio */
/* content encoding */
/* ContentEncodings = 0x6d80, */
/* ContentEncoding = 0x6240, */
/* ContentEncodingOrder = 0x5031, */
/* ContentEncodingScope = 0x5032, */
/* ContentEncodingType = 0x5033, */
/* ContentCompression = 0x5034, */
/* ContentCompAlgo = 0x4254, */
/* ContentCompSettings = 0x4255, */
/* ContentEncryption = 0x5035, */
/* ContentEncAlgo = 0x47e1, */
/* ContentEncKeyID = 0x47e2, */
/* ContentSignature = 0x47e3, */
/* ContentSigKeyID = 0x47e4, */
/* ContentSigAlgo = 0x47e5, */
/* ContentSigHashAlgo = 0x47e6, */
/* end content encoding */
/* Cueing Data */
Cues = 0x1C53BB6B,
CuePoint = 0xBB,
CueTime = 0xB3,
CueTrackPositions = 0xB7,
CueTrack = 0xF7,
CueClusterPosition = 0xF1,
CueBlockNumber = 0x5378,
// CueCodecState = 0xEA,
// CueReference = 0xDB,
// CueRefTime = 0x96,
// CueRefCluster = 0x97,
// CueRefNumber = 0x535F,
// CueRefCodecState = 0xEB,
//Attachment
// Attachments = 0x1941A469,
// AttachedFile = 0x61A7,
// FileDescription = 0x467E,
// FileName = 0x466E,
// FileMimeType = 0x4660,
// FileData = 0x465C,
// FileUID = 0x46AE,
// FileReferral = 0x4675,
//Chapters
// Chapters = 0x1043A770,
// EditionEntry = 0x45B9,
// EditionUID = 0x45BC,
// EditionFlagHidden = 0x45BD,
// EditionFlagDefault = 0x45DB,
// EditionFlagOrdered = 0x45DD,
// ChapterAtom = 0xB6,
// ChapterUID = 0x73C4,
// ChapterTimeStart = 0x91,
// ChapterTimeEnd = 0x92,
// ChapterFlagHidden = 0x98,
// ChapterFlagEnabled = 0x4598,
// ChapterSegmentUID = 0x6E67,
// ChapterSegmentEditionUID = 0x6EBC,
// ChapterPhysicalEquiv = 0x63C3,
// ChapterTrack = 0x8F,
// ChapterTrackNumber = 0x89,
// ChapterDisplay = 0x80,
// ChapString = 0x85,
// ChapLanguage = 0x437C,
// ChapCountry = 0x437E,
// ChapProcess = 0x6944,
// ChapProcessCodecID = 0x6955,
// ChapProcessPrivate = 0x450D,
// ChapProcessCommand = 0x6911,
// ChapProcessTime = 0x6922,
// ChapProcessData = 0x6933,
//Tagging
// Tags = 0x1254C367,
// Tag = 0x7373,
// Targets = 0x63C0,
// TargetTypeValue = 0x68CA,
// TargetType = 0x63CA,
// Tagging_TrackUID = 0x63C5,
// Tagging_EditionUID = 0x63C9,
// Tagging_ChapterUID = 0x63C4,
// AttachmentUID = 0x63C6,
// SimpleTag = 0x67C8,
// TagName = 0x45A3,
// TagLanguage = 0x447A,
// TagDefault = 0x4484,
// TagString = 0x4487,
// TagBinary = 0x4485,
CueBlockNumber = 0x5378
/* CueCodecState = 0xEA, */
/* CueReference = 0xDB, */
/* CueRefTime = 0x96, */
/* CueRefCluster = 0x97, */
/* CueRefNumber = 0x535F, */
/* CueRefCodecState = 0xEB, */
/* Attachment */
/* Attachments = 0x1941A469, */
/* AttachedFile = 0x61A7, */
/* FileDescription = 0x467E, */
/* FileName = 0x466E, */
/* FileMimeType = 0x4660, */
/* FileData = 0x465C, */
/* FileUID = 0x46AE, */
/* FileReferral = 0x4675, */
/* Chapters */
/* Chapters = 0x1043A770, */
/* EditionEntry = 0x45B9, */
/* EditionUID = 0x45BC, */
/* EditionFlagHidden = 0x45BD, */
/* EditionFlagDefault = 0x45DB, */
/* EditionFlagOrdered = 0x45DD, */
/* ChapterAtom = 0xB6, */
/* ChapterUID = 0x73C4, */
/* ChapterTimeStart = 0x91, */
/* ChapterTimeEnd = 0x92, */
/* ChapterFlagHidden = 0x98, */
/* ChapterFlagEnabled = 0x4598, */
/* ChapterSegmentUID = 0x6E67, */
/* ChapterSegmentEditionUID = 0x6EBC, */
/* ChapterPhysicalEquiv = 0x63C3, */
/* ChapterTrack = 0x8F, */
/* ChapterTrackNumber = 0x89, */
/* ChapterDisplay = 0x80, */
/* ChapString = 0x85, */
/* ChapLanguage = 0x437C, */
/* ChapCountry = 0x437E, */
/* ChapProcess = 0x6944, */
/* ChapProcessCodecID = 0x6955, */
/* ChapProcessPrivate = 0x450D, */
/* ChapProcessCommand = 0x6911, */
/* ChapProcessTime = 0x6922, */
/* ChapProcessData = 0x6933, */
/* Tagging */
/* Tags = 0x1254C367, */
/* Tag = 0x7373, */
/* Targets = 0x63C0, */
/* TargetTypeValue = 0x68CA, */
/* TargetType = 0x63CA, */
/* Tagging_TrackUID = 0x63C5, */
/* Tagging_EditionUID = 0x63C9, */
/* Tagging_ChapterUID = 0x63C4, */
/* AttachmentUID = 0x63C6, */
/* SimpleTag = 0x67C8, */
/* TagName = 0x45A3, */
/* TagLanguage = 0x447A, */
/* TagDefault = 0x4484, */
/* TagString = 0x4487, */
/* TagBinary = 0x4485, */
};
#endif

View File

@ -1,12 +1,12 @@
// Copyright (c) 2010 The WebM project authors. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "EbmlWriter.h"
#include <stdlib.h>
#include <wchar.h>
@ -18,11 +18,13 @@
#define LITERALU64(n) n##LLU
#endif
void Ebml_WriteLen(EbmlGlobal *glob, long long val)
void Ebml_WriteLen(EbmlGlobal *glob, int64_t val)
{
//TODO check and make sure we are not > than 0x0100000000000000LLU
unsigned char size = 8; //size in bytes to output
unsigned long long minVal = LITERALU64(0x00000000000000ff); //mask to compare for byte size
/* TODO check and make sure we are not > than 0x0100000000000000LLU */
unsigned char size = 8; /* size in bytes to output */
/* mask to compare for byte size */
uint64_t minVal = 0xff;
for (size = 1; size < 8; size ++)
{
@ -32,7 +34,7 @@ void Ebml_WriteLen(EbmlGlobal *glob, long long val)
minVal = (minVal << 7);
}
val |= (LITERALU64(0x000000000000080) << ((size - 1) * 7));
val |= (((uint64_t)0x80) << ((size - 1) * 7));
Ebml_Serialize(glob, (void *) &val, sizeof(val), size);
}
@ -40,10 +42,11 @@ void Ebml_WriteLen(EbmlGlobal *glob, long long val)
void Ebml_WriteString(EbmlGlobal *glob, const char *str)
{
const size_t size_ = strlen(str);
const unsigned long long size = size_;
const uint64_t size = size_;
Ebml_WriteLen(glob, size);
//TODO: it's not clear from the spec whether the nul terminator
//should be serialized too. For now we omit the null terminator.
/* TODO: it's not clear from the spec whether the nul terminator
* should be serialized too. For now we omit the null terminator.
*/
Ebml_Write(glob, str, size);
}
@ -51,9 +54,10 @@ void Ebml_WriteUTF8(EbmlGlobal *glob, const wchar_t *wstr)
{
const size_t strlen = wcslen(wstr);
//TODO: it's not clear from the spec whether the nul terminator
//should be serialized too. For now we include it.
const unsigned long long size = strlen;
/* TODO: it's not clear from the spec whether the nul terminator
* should be serialized too. For now we include it.
*/
const uint64_t size = strlen;
Ebml_WriteLen(glob, size);
Ebml_Write(glob, wstr, size);
@ -85,12 +89,12 @@ void Ebml_SerializeUnsigned64(EbmlGlobal *glob, unsigned long class_id, uint64_t
void Ebml_SerializeUnsigned(EbmlGlobal *glob, unsigned long class_id, unsigned long ui)
{
unsigned char size = 8; //size in bytes to output
unsigned char size = 8; /* size in bytes to output */
unsigned char sizeSerialized = 0;
unsigned long minVal;
Ebml_WriteID(glob, class_id);
minVal = 0x7fLU; //mask to compare for byte size
minVal = 0x7fLU; /* mask to compare for byte size */
for (size = 1; size < 4; size ++)
{
@ -106,7 +110,7 @@ void Ebml_SerializeUnsigned(EbmlGlobal *glob, unsigned long class_id, unsigned l
Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
Ebml_Serialize(glob, &ui, sizeof(ui), size);
}
//TODO: perhaps this is a poor name for this id serializer helper function
/* TODO: perhaps this is a poor name for this id serializer helper function */
void Ebml_SerializeBinary(EbmlGlobal *glob, unsigned long class_id, unsigned long bin)
{
int size;
@ -168,4 +172,4 @@ void Ebml_WriteVoid(EbmlGlobal *glob, unsigned long vSize)
}
}
//TODO Serialize Date
/* TODO Serialize Date */

View File

@ -1,26 +1,30 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef EBMLWRITER_HPP
#define EBMLWRITER_HPP
// Copyright (c) 2010 The WebM project authors. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
//note: you must define write and serialize functions as well as your own EBML_GLOBAL
//These functions MUST be implemented
#include <stddef.h>
#include "vpx/vpx_integer.h"
/* note: you must define write and serialize functions as well as your own
* EBML_GLOBAL
*
* These functions MUST be implemented
*/
typedef struct EbmlGlobal EbmlGlobal;
void Ebml_Serialize(EbmlGlobal *glob, const void *, int, unsigned long);
void Ebml_Write(EbmlGlobal *glob, const void *, unsigned long);
/////
/*****/
void Ebml_WriteLen(EbmlGlobal *glob, long long val);
void Ebml_WriteLen(EbmlGlobal *glob, int64_t val);
void Ebml_WriteString(EbmlGlobal *glob, const char *str);
void Ebml_WriteUTF8(EbmlGlobal *glob, const wchar_t *wstr);
void Ebml_WriteID(EbmlGlobal *glob, unsigned long class_id);
@ -28,11 +32,11 @@ void Ebml_SerializeUnsigned64(EbmlGlobal *glob, unsigned long class_id, uint64_t
void Ebml_SerializeUnsigned(EbmlGlobal *glob, unsigned long class_id, unsigned long ui);
void Ebml_SerializeBinary(EbmlGlobal *glob, unsigned long class_id, unsigned long ui);
void Ebml_SerializeFloat(EbmlGlobal *glob, unsigned long class_id, double d);
//TODO make this more generic to signed
/* TODO make this more generic to signed */
void Ebml_WriteSigned16(EbmlGlobal *glob, short val);
void Ebml_SerializeString(EbmlGlobal *glob, unsigned long class_id, const char *s);
void Ebml_SerializeUTF8(EbmlGlobal *glob, unsigned long class_id, wchar_t *s);
void Ebml_SerializeData(EbmlGlobal *glob, unsigned long class_id, unsigned char *data, unsigned long data_length);
void Ebml_WriteVoid(EbmlGlobal *glob, unsigned long vSize);
//TODO need date function
/* TODO need date function */
#endif

View File

@ -46,7 +46,7 @@
vst1.32 {d2[1]}, [r3], r12
vst1.32 {d4[0]}, [r3], r12
vst1.32 {d4[1]}, [r3]
bx lr
ENDP

View File

@ -174,8 +174,7 @@ typedef struct
MB_PREDICTION_MODE mode;
MV_REFERENCE_FRAME ref_frame;
int_mv mv;
//union b_mode_info bmi[16];
int dissim; // dissimilarity level of the macroblock
int dissim; /* dissimilarity level of the macroblock */
} LOWER_RES_MB_INFO;
/* The frame-level information needed to be stored for higher-resolution

View File

@ -101,7 +101,7 @@ const vp8_tree_index vp8_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
/* vp8_coef_encodings generated with:
vp8_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree);
*/
const vp8_token vp8_coef_encodings[MAX_ENTROPY_TOKENS] =
vp8_token vp8_coef_encodings[MAX_ENTROPY_TOKENS] =
{
{2, 2},
{6, 3},

View File

@ -24,11 +24,11 @@ typedef enum
SUBMVREF_LEFT_ABOVE_ZED
} sumvfref_t;
typedef const int vp8_mbsplit[16];
typedef int vp8_mbsplit[16];
#define VP8_NUMMBSPLITS 4
extern vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS];
extern const vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS];
extern const int vp8_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */

View File

@ -116,7 +116,7 @@ void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
// If the side is not touching the bounder then don't extend.
/* If the side is not touching the bounder then don't extend. */
if (srcy)
et = 0;
if (srcx)
@ -157,7 +157,10 @@ void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
/* note the extension is only for the last row, for intra prediction purpose */
void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, unsigned char *UPtr, unsigned char *VPtr)
void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf,
unsigned char *YPtr,
unsigned char *UPtr,
unsigned char *VPtr)
{
int i;

View File

@ -19,4 +19,4 @@
extern const short vp8_bilinear_filters[8][2];
extern const short vp8_sub_pel_filters[8][6];
#endif //FILTER_H
#endif

View File

@ -94,83 +94,101 @@ extern "C"
typedef struct
{
int Version; // 4 versions of bitstream defined 0 best quality/slowest decode, 3 lowest quality/fastest decode
int Width; // width of data passed to the compressor
int Height; // height of data passed to the compressor
/* 4 versions of bitstream defined:
* 0 best quality/slowest decode, 3 lowest quality/fastest decode
*/
int Version;
int Width;
int Height;
struct vpx_rational timebase;
unsigned int target_bandwidth; // bandwidth to be used in kilobits per second
unsigned int target_bandwidth; /* kilobits per second */
int noise_sensitivity; // parameter used for applying pre processing blur: recommendation 0
int Sharpness; // parameter used for sharpening output: recommendation 0:
/* parameter used for applying pre processing blur: recommendation 0 */
int noise_sensitivity;
/* parameter used for sharpening output: recommendation 0: */
int Sharpness;
int cpu_used;
unsigned int rc_max_intra_bitrate_pct;
// mode ->
//(0)=Realtime/Live Encoding. This mode is optimized for realtim encoding (for example, capturing
// a television signal or feed from a live camera). ( speed setting controls how fast )
//(1)=Good Quality Fast Encoding. The encoder balances quality with the amount of time it takes to
// encode the output. ( speed setting controls how fast )
//(2)=One Pass - Best Quality. The encoder places priority on the quality of the output over encoding
// speed. The output is compressed at the highest possible quality. This option takes the longest
// amount of time to encode. ( speed setting ignored )
//(3)=Two Pass - First Pass. The encoder generates a file of statistics for use in the second encoding
// pass. ( speed setting controls how fast )
//(4)=Two Pass - Second Pass. The encoder uses the statistics that were generated in the first encoding
// pass to create the compressed output. ( speed setting controls how fast )
//(5)=Two Pass - Second Pass Best. The encoder uses the statistics that were generated in the first
// encoding pass to create the compressed output using the highest possible quality, and taking a
// longer amount of time to encode.. ( speed setting ignored )
int Mode; //
/* mode ->
*(0)=Realtime/Live Encoding. This mode is optimized for realtim
* encoding (for example, capturing a television signal or feed
* from a live camera). ( speed setting controls how fast )
*(1)=Good Quality Fast Encoding. The encoder balances quality with
* the amount of time it takes to encode the output. ( speed
* setting controls how fast )
*(2)=One Pass - Best Quality. The encoder places priority on the
* quality of the output over encoding speed. The output is
* compressed at the highest possible quality. This option takes
* the longest amount of time to encode. ( speed setting ignored
* )
*(3)=Two Pass - First Pass. The encoder generates a file of
* statistics for use in the second encoding pass. ( speed
* setting controls how fast )
*(4)=Two Pass - Second Pass. The encoder uses the statistics that
* were generated in the first encoding pass to create the
* compressed output. ( speed setting controls how fast )
*(5)=Two Pass - Second Pass Best. The encoder uses the statistics
* that were generated in the first encoding pass to create the
* compressed output using the highest possible quality, and
* taking a longer amount of time to encode.. ( speed setting
* ignored )
*/
int Mode;
// Key Framing Operations
int auto_key; // automatically detect cut scenes and set the keyframes
int key_freq; // maximum distance to key frame.
/* Key Framing Operations */
int auto_key; /* automatically detect cut scenes */
int key_freq; /* maximum distance to key frame. */
int allow_lag; // allow lagged compression (if 0 lagin frames is ignored)
int lag_in_frames; // how many frames lag before we start encoding
/* lagged compression (if allow_lag == 0 lag_in_frames is ignored) */
int allow_lag;
int lag_in_frames; /* how many frames lag before we start encoding */
//----------------------------------------------------------------
// DATARATE CONTROL OPTIONS
/*
* DATARATE CONTROL OPTIONS
*/
int end_usage; // vbr or cbr
int end_usage; /* vbr or cbr */
// buffer targeting aggressiveness
/* buffer targeting aggressiveness */
int under_shoot_pct;
int over_shoot_pct;
// buffering parameters
int64_t starting_buffer_level; // in bytes
/* buffering parameters */
int64_t starting_buffer_level;
int64_t optimal_buffer_level;
int64_t maximum_buffer_size;
int64_t starting_buffer_level_in_ms; // in milli-seconds
int64_t starting_buffer_level_in_ms;
int64_t optimal_buffer_level_in_ms;
int64_t maximum_buffer_size_in_ms;
// controlling quality
/* controlling quality */
int fixed_q;
int worst_allowed_q;
int best_allowed_q;
int cq_level;
// allow internal resizing ( currently disabled in the build !!!!!)
/* allow internal resizing */
int allow_spatial_resampling;
int resample_down_water_mark;
int resample_up_water_mark;
// allow internal frame rate alterations
/* allow internal frame rate alterations */
int allow_df;
int drop_frames_water_mark;
// two pass datarate control
int two_pass_vbrbias; // two pass datarate control tweaks
/* two pass datarate control */
int two_pass_vbrbias;
int two_pass_vbrmin_section;
int two_pass_vbrmax_section;
// END DATARATE CONTROL OPTIONS
//----------------------------------------------------------------
/*
* END DATARATE CONTROL OPTIONS
*/
// these parameters aren't to be used in final build don't use!!!
/* these parameters aren't to be used in final build don't use!!! */
int play_alternate;
int alt_freq;
int alt_q;
@ -178,26 +196,28 @@ extern "C"
int gold_q;
int multi_threaded; // how many threads to run the encoder on
int token_partitions; // how many token partitions to create for multi core decoding
int encode_breakout; // early breakout encode threshold : for video conf recommend 800
int multi_threaded; /* how many threads to run the encoder on */
int token_partitions; /* how many token partitions to create */
unsigned int error_resilient_mode; // Bitfield defining the error
// resiliency features to enable. Can provide
// decodable frames after losses in previous
// frames and decodable partitions after
// losses in the same frame.
/* early breakout threshold: for video conf recommend 800 */
int encode_breakout;
/* Bitfield defining the error resiliency features to enable.
* Can provide decodable frames after losses in previous
* frames and decodable partitions after losses in the same frame.
*/
unsigned int error_resilient_mode;
int arnr_max_frames;
int arnr_strength ;
int arnr_type ;
int arnr_strength;
int arnr_type;
struct vpx_fixed_buf two_pass_stats_in;
struct vpx_fixed_buf two_pass_stats_in;
struct vpx_codec_pkt_list *output_pkt_list;
vp8e_tuning tuning;
// Temporal scaling parameters
/* Temporal scaling parameters */
unsigned int number_of_layers;
unsigned int target_bitrate[VPX_TS_MAX_PERIODICITY];
unsigned int rate_decimator[VPX_TS_MAX_PERIODICITY];
@ -228,8 +248,6 @@ extern "C"
void vp8_init_config(struct VP8_COMP* onyx, VP8_CONFIG *oxcf);
void vp8_change_config(struct VP8_COMP* onyx, VP8_CONFIG *oxcf);
// receive a frames worth of data caller can assume that a copy of this frame is made
// and not just a copy of the pointer..
int vp8_receive_raw_frame(struct VP8_COMP* comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
int vp8_get_compressed_data(struct VP8_COMP* comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, unsigned char *dest_end, int64_t *time_stamp, int64_t *time_end, int flush);
int vp8_get_preview_raw_frame(struct VP8_COMP* comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);

View File

@ -240,8 +240,9 @@ void vp8_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int co
for (i = -8; i<0; i++)
s[i]=s[0];
// 17 avoids valgrind warning - we buffer values in c in d
// and only write them when we've read 8 ahead...
/* 17 avoids valgrind warning - we buffer values in c in d
* and only write them when we've read 8 ahead...
*/
for (i = cols; i<cols+17; i++)
s[i]=s[cols-1];
@ -275,9 +276,6 @@ void vp8_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int co
}
void vp8_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit)
{
int r, c, i;
@ -294,8 +292,9 @@ void vp8_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, i
for (i = -8; i < 0; i++)
s[i*pitch]=s[0];
// 17 avoids valgrind warning - we buffer values in c in d
// and only write them when we've read 8 ahead...
/* 17 avoids valgrind warning - we buffer values in c in d
* and only write them when we've read 8 ahead...
*/
for (i = rows; i < rows+17; i++)
s[i*pitch]=s[(rows-1)*pitch];
@ -731,8 +730,9 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t
oci->post_proc_buffer_int_used = 1;
// insure that postproc is set to all 0's so that post proc
// doesn't pull random data in from edge
/* insure that postproc is set to all 0's so that post proc
* doesn't pull random data in from edge
*/
vpx_memset((&oci->post_proc_buffer_int)->buffer_alloc,128,(&oci->post_proc_buffer)->frame_size);
}

View File

@ -205,14 +205,14 @@ static void var_filter_block2d_bil_first_pass
{
for (j = 0; j < output_width; j++)
{
// Apply bilinear filter
/* Apply bilinear filter */
output_ptr[j] = (((int)src_ptr[0] * vp8_filter[0]) +
((int)src_ptr[pixel_step] * vp8_filter[1]) +
(VP8_FILTER_WEIGHT / 2)) >> VP8_FILTER_SHIFT;
src_ptr++;
}
// Next row...
/* Next row... */
src_ptr += src_pixels_per_line - output_width;
output_ptr += output_width;
}
@ -264,15 +264,15 @@ static void var_filter_block2d_bil_second_pass
{
for (j = 0; j < output_width; j++)
{
// Apply filter
Temp = ((int)src_ptr[0] * vp8_filter[0]) +
/* Apply filter */
Temp = ((int)src_ptr[0] * vp8_filter[0]) +
((int)src_ptr[pixel_step] * vp8_filter[1]) +
(VP8_FILTER_WEIGHT / 2);
output_ptr[j] = (unsigned int)(Temp >> VP8_FILTER_SHIFT);
src_ptr++;
}
// Next row...
/* Next row... */
src_ptr += src_pixels_per_line - output_width;
output_ptr += output_width;
}
@ -292,15 +292,15 @@ unsigned int vp8_sub_pixel_variance4x4_c
{
unsigned char temp2[20*16];
const short *HFilter, *VFilter;
unsigned short FData3[5*4]; // Temp data bufffer used in filtering
unsigned short FData3[5*4]; /* Temp data bufffer used in filtering */
HFilter = vp8_bilinear_filters[xoffset];
VFilter = vp8_bilinear_filters[yoffset];
// First filter 1d Horizontal
/* First filter 1d Horizontal */
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
// Now filter Verticaly
/* Now filter Verticaly */
var_filter_block2d_bil_second_pass(FData3, temp2, 4, 4, 4, 4, VFilter);
return vp8_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
@ -318,7 +318,7 @@ unsigned int vp8_sub_pixel_variance8x8_c
unsigned int *sse
)
{
unsigned short FData3[9*8]; // Temp data bufffer used in filtering
unsigned short FData3[9*8]; /* Temp data bufffer used in filtering */
unsigned char temp2[20*16];
const short *HFilter, *VFilter;
@ -342,7 +342,7 @@ unsigned int vp8_sub_pixel_variance16x16_c
unsigned int *sse
)
{
unsigned short FData3[17*16]; // Temp data bufffer used in filtering
unsigned short FData3[17*16]; /* Temp data bufffer used in filtering */
unsigned char temp2[20*16];
const short *HFilter, *VFilter;
@ -418,7 +418,7 @@ unsigned int vp8_sub_pixel_variance16x8_c
unsigned int *sse
)
{
unsigned short FData3[16*9]; // Temp data bufffer used in filtering
unsigned short FData3[16*9]; /* Temp data bufffer used in filtering */
unsigned char temp2[20*16];
const short *HFilter, *VFilter;
@ -442,7 +442,7 @@ unsigned int vp8_sub_pixel_variance8x16_c
unsigned int *sse
)
{
unsigned short FData3[9*16]; // Temp data bufffer used in filtering
unsigned short FData3[9*16]; /* Temp data bufffer used in filtering */
unsigned char temp2[20*16];
const short *HFilter, *VFilter;

View File

@ -18,4 +18,7 @@ extern int rand(void)
{
return __rand();
}
#else
/* ISO C forbids an empty translation unit. */
int vp8_unused;
#endif

View File

@ -332,8 +332,9 @@ unsigned int vp8_sub_pixel_variance16x16_wmt
unsigned int xxsum0, xxsum1;
// note we could avoid these if statements if the calling function
// just called the appropriate functions inside.
/* note we could avoid these if statements if the calling function
* just called the appropriate functions inside.
*/
if (xoffset == 4 && yoffset == 0)
{
vp8_half_horiz_variance16x_h_sse2(

View File

@ -79,8 +79,9 @@ unsigned int vp8_sub_pixel_variance16x16_ssse3
int xsum0;
unsigned int xxsum0;
// note we could avoid these if statements if the calling function
// just called the appropriate functions inside.
/* note we could avoid these if statements if the calling function
* just called the appropriate functions inside.
*/
if (xoffset == 4 && yoffset == 0)
{
vp8_half_horiz_variance16x_h_sse2(

View File

@ -199,8 +199,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
left_stride = dst_stride;
top_left = yabove[-1];
// vp8_intra4x4_predict (base_dst + b->offset, dst_stride, b_mode,
// base_dst + b->offset, dst_stride );
vp8_intra4x4_predict_d_c(yabove, yleft, left_stride,
b_mode,
base_dst + b->offset, dst_stride,
@ -395,7 +393,7 @@ static void decode_mb_rows(VP8D_COMP *pbi)
xd->recon_above[1] -= xd->dst.uv_stride;
xd->recon_above[2] -= xd->dst.uv_stride;
//TODO: move to outside row loop
/* TODO: move to outside row loop */
xd->recon_left_stride[0] = xd->dst.y_stride;
xd->recon_left_stride[1] = xd->dst.uv_stride;

View File

@ -53,7 +53,8 @@ static const uint8_t kZigzag[16] = {
#define NUM_PROBAS 11
#define NUM_CTX 3
typedef const uint8_t (*ProbaArray)[NUM_CTX][NUM_PROBAS]; // for const-casting
/* for const-casting */
typedef const uint8_t (*ProbaArray)[NUM_CTX][NUM_PROBAS];
static int GetSigned(BOOL_DECODER *br, int value_to_sign)
{

View File

@ -185,7 +185,7 @@ static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
/*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
if (i < 4 && pbi->common.filter_level)
yabove = xd->recon_above[0] + b->offset; //i*4;
yabove = xd->recon_above[0] + b->offset;
else
yabove = (base_dst - dst_stride) + b->offset;
@ -383,7 +383,7 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row)
xd->recon_left[1] = pbi->mt_uleft_col[mb_row];
xd->recon_left[2] = pbi->mt_vleft_col[mb_row];
//TODO: move to outside row loop
/* TODO: move to outside row loop */
xd->recon_left_stride[0] = 1;
xd->recon_left_stride[1] = 1;
}
@ -401,7 +401,7 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row)
xd->recon_above[1] -= xd->dst.uv_stride;
xd->recon_above[2] -= xd->dst.uv_stride;
//TODO: move to outside row loop
/* TODO: move to outside row loop */
xd->recon_left_stride[0] = xd->dst.y_stride;
xd->recon_left_stride[1] = xd->dst.uv_stride;
}

View File

@ -172,7 +172,7 @@ void vp8_pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
while (p < stop)
{
const int t = p->Token;
const vp8_token *a = vp8_coef_encodings + t;
vp8_token *a = vp8_coef_encodings + t;
const vp8_extra_bit_struct *b = vp8_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
@ -461,7 +461,7 @@ static void write_mv
static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACROBLOCKD *x)
{
// Encode the MB segment id.
/* Encode the MB segment id. */
if (x->segmentation_enabled && x->update_mb_segmentation_map)
{
switch (mi->segment_id)
@ -483,7 +483,7 @@ static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACRO
vp8_write(w, 1, x->mb_segment_tree_probs[2]);
break;
// TRAP.. This should not happen
/* TRAP.. This should not happen */
default:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
@ -497,7 +497,7 @@ void vp8_convert_rfct_to_prob(VP8_COMP *const cpi)
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
// Calculate the probabilities used to code the ref frame based on useage
/* Calculate the probabilities used to code the ref frame based on usage */
if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter)))
cpi->prob_intra_coded = 1;
@ -571,8 +571,10 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
MACROBLOCKD *xd = &cpi->mb.e_mbd;
// Distance of Mb to the various image edges.
// These specified to 8th pel as they are always compared to MV values that are in 1/8th pel units
/* Distance of Mb to the various image edges.
* These specified to 8th pel as they are always compared to MV
* values that are in 1/8th pel units
*/
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
@ -779,7 +781,7 @@ static void write_kfmodes(VP8_COMP *cpi)
write_uv_mode(bc, (m++)->mbmi.uv_mode, vp8_kf_uv_mode_prob);
}
m++; // skip L prediction border
m++; /* skip L prediction border */
}
}
@ -878,9 +880,6 @@ static int independent_coef_context_savings(VP8_COMP *cpi)
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
vp8_tree_probs_from_distribution(
@ -940,9 +939,6 @@ static int default_coef_context_savings(VP8_COMP *cpi)
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
vp8_tree_probs_from_distribution(
@ -1004,7 +1000,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
int new_intra, new_last, new_garf, oldtotal, newtotal;
int ref_frame_cost[MAX_REF_FRAMES];
vp8_clear_system_state(); //__asm emms;
vp8_clear_system_state();
if (cpi->common.frame_type != KEY_FRAME)
{
@ -1026,7 +1022,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];
// old costs
/* old costs */
vp8_calc_ref_frame_costs(ref_frame_cost,cpi->prob_intra_coded,
cpi->prob_last_coded,cpi->prob_gf_coded);
@ -1078,7 +1074,7 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
#endif
int savings = 0;
vp8_clear_system_state(); //__asm emms;
vp8_clear_system_state();
do
{
@ -1110,21 +1106,15 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
}
do
{
//note: use result from vp8_estimate_entropy_savings, so no need to call vp8_tree_probs_from_distribution here.
/* note: use result from vp8_estimate_entropy_savings, so no
* need to call vp8_tree_probs_from_distribution here.
*/
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
//vp8_tree_probs_from_distribution(
// MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
// new_p, branch_ct, (unsigned int *)cpi->coef_counts [i][j][k],
// 256, 1
// );
do
{
const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
@ -1295,14 +1285,16 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
#endif
// every keyframe send startcode, width, height, scale factor, clamp and color type
/* every keyframe send startcode, width, height, scale factor, clamp
* and color type
*/
if (oh.type == KEY_FRAME)
{
int v;
validate_buffer(cx_data, 7, cx_data_end, &cpi->common.error);
// Start / synch code
/* Start / synch code */
cx_data[0] = 0x9D;
cx_data[1] = 0x01;
cx_data[2] = 0x2a;
@ -1321,7 +1313,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_start_encode(bc, cx_data, cx_data_end);
// signal clr type
/* signal clr type */
vp8_write_bit(bc, pc->clr_type);
vp8_write_bit(bc, pc->clamp_type);
@ -1330,13 +1322,13 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_start_encode(bc, cx_data, cx_data_end);
// Signal whether or not Segmentation is enabled
/* Signal whether or not Segmentation is enabled */
vp8_write_bit(bc, xd->segmentation_enabled);
// Indicate which features are enabled
/* Indicate which features are enabled */
if (xd->segmentation_enabled)
{
// Signal whether or not the segmentation map is being updated.
/* Signal whether or not the segmentation map is being updated. */
vp8_write_bit(bc, xd->update_mb_segmentation_map);
vp8_write_bit(bc, xd->update_mb_segmentation_data);
@ -1346,15 +1338,15 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_write_bit(bc, xd->mb_segement_abs_delta);
// For each segmentation feature (Quant and loop filter level)
/* For each segmentation feature (Quant and loop filter level) */
for (i = 0; i < MB_LVL_MAX; i++)
{
// For each of the segments
/* For each of the segments */
for (j = 0; j < MAX_MB_SEGMENTS; j++)
{
Data = xd->segment_feature_data[i][j];
// Frame level data
/* Frame level data */
if (Data)
{
vp8_write_bit(bc, 1);
@ -1379,7 +1371,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (xd->update_mb_segmentation_map)
{
// Write the probs used to decode the segment id for each macro block.
/* Write the probs used to decode the segment id for each mb */
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
{
int Data = xd->mb_segment_tree_probs[i];
@ -1395,17 +1387,18 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
}
}
// Code to determine whether or not to update the scan order.
vp8_write_bit(bc, pc->filter_type);
vp8_write_literal(bc, pc->filter_level, 6);
vp8_write_literal(bc, pc->sharpness_level, 3);
// Write out loop filter deltas applied at the MB level based on mode or ref frame (if they are enabled).
/* Write out loop filter deltas applied at the MB level based on mode
* or ref frame (if they are enabled).
*/
vp8_write_bit(bc, xd->mode_ref_lf_delta_enabled);
if (xd->mode_ref_lf_delta_enabled)
{
// Do the deltas need to be updated
/* Do the deltas need to be updated */
int send_update = xd->mode_ref_lf_delta_update
|| cpi->oxcf.error_resilient_mode;
@ -1414,12 +1407,12 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
{
int Data;
// Send update
/* Send update */
for (i = 0; i < MAX_REF_LF_DELTAS; i++)
{
Data = xd->ref_lf_deltas[i];
// Frame level data
/* Frame level data */
if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i]
|| cpi->oxcf.error_resilient_mode)
{
@ -1429,20 +1422,20 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (Data > 0)
{
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 0); // sign
vp8_write_bit(bc, 0); /* sign */
}
else
{
Data = -Data;
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 1); // sign
vp8_write_bit(bc, 1); /* sign */
}
}
else
vp8_write_bit(bc, 0);
}
// Send update
/* Send update */
for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
{
Data = xd->mode_lf_deltas[i];
@ -1456,13 +1449,13 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (Data > 0)
{
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 0); // sign
vp8_write_bit(bc, 0); /* sign */
}
else
{
Data = -Data;
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 1); // sign
vp8_write_bit(bc, 1); /* sign */
}
}
else
@ -1471,34 +1464,42 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
}
}
//signal here is multi token partition is enabled
/* signal here if multi token partition is enabled */
vp8_write_literal(bc, pc->multi_token_partition, 2);
// Frame Qbaseline quantizer index
/* Frame Qbaseline quantizer index */
vp8_write_literal(bc, pc->base_qindex, 7);
// Transmit Dc, Second order and Uv quantizer delta information
/* Transmit Dc, Second order and Uv quantizer delta information */
put_delta_q(bc, pc->y1dc_delta_q);
put_delta_q(bc, pc->y2dc_delta_q);
put_delta_q(bc, pc->y2ac_delta_q);
put_delta_q(bc, pc->uvdc_delta_q);
put_delta_q(bc, pc->uvac_delta_q);
// When there is a key frame all reference buffers are updated using the new key frame
/* When there is a key frame all reference buffers are updated using
* the new key frame
*/
if (pc->frame_type != KEY_FRAME)
{
// Should the GF or ARF be updated using the transmitted frame or buffer
/* Should the GF or ARF be updated using the transmitted frame
* or buffer
*/
vp8_write_bit(bc, pc->refresh_golden_frame);
vp8_write_bit(bc, pc->refresh_alt_ref_frame);
// If not being updated from current frame should either GF or ARF be updated from another buffer
/* If not being updated from current frame should either GF or ARF
* be updated from another buffer
*/
if (!pc->refresh_golden_frame)
vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
if (!pc->refresh_alt_ref_frame)
vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
// Indicate reference frame sign bias for Golden and ARF frames (always 0 for last frame buffer)
/* Indicate reference frame sign bias for Golden and ARF frames
* (always 0 for last frame buffer)
*/
vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
}
@ -1527,14 +1528,14 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
#endif
vp8_clear_system_state(); //__asm emms;
vp8_clear_system_state();
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
pack_coef_probs(cpi);
#else
if (pc->refresh_entropy_probs == 0)
{
// save a copy for later refresh
/* save a copy for later refresh */
vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
}
@ -1545,7 +1546,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
active_section = 2;
#endif
// Write out the mb_no_coeff_skip flag
/* Write out the mb_no_coeff_skip flag */
vp8_write_bit(bc, pc->mb_no_coeff_skip);
if (pc->frame_type == KEY_FRAME)

View File

@ -18,7 +18,7 @@
#include "vp8/common/entropy.h"
#include "vpx_ports/mem.h"
// motion search site
/* motion search site */
typedef struct
{
MV mv;
@ -27,11 +27,11 @@ typedef struct
typedef struct block
{
// 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
/* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
short *src_diff;
short *coeff;
// 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
/* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
short *quant;
short *quant_fast;
unsigned char *quant_shift;
@ -39,7 +39,7 @@ typedef struct block
short *zrun_zbin_boost;
short *round;
// Zbin Over Quant value
/* Zbin Over Quant value */
short zbin_extra;
unsigned char **base_src;
@ -59,12 +59,12 @@ typedef struct
typedef struct macroblock
{
DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, short, src_diff[400]); /* 25 blocks Y,U,V,Y2 */
DECLARE_ALIGNED(16, short, coeff[400]); /* 25 blocks Y,U,V,Y2 */
DECLARE_ALIGNED(16, unsigned char, thismb[256]);
unsigned char *thismb_ptr;
// 16 Y blocks, 4 U blocks, 4 V blocks, 1 DC 2nd order block each with 16 entries
/* 16 Y, 4 U, 4 V, 1 DC 2nd order block */
BLOCK block[25];
YV12_BUFFER_CONFIG src;
@ -99,8 +99,9 @@ typedef struct macroblock
int (*token_costs)[COEF_BANDS][PREV_COEF_CONTEXTS]
[MAX_ENTROPY_TOKENS];
// These define limits to motion vector components to prevent
// them from extending outside the UMV borders
/* These define limits to motion vector components to prevent
* them from extending outside the UMV borders.
*/
int mv_col_min;
int mv_col_max;
int mv_row_min;
@ -110,7 +111,6 @@ typedef struct macroblock
unsigned int encode_breakout;
//char * gf_active_ptr;
signed char *gf_active_ptr;
unsigned char *active_ptr;

View File

@ -32,7 +32,7 @@ typedef struct
unsigned char *buffer_end;
struct vpx_internal_error_info *error;
// Variables used to track bit costs without outputing to the bitstream
/* Variables used to track bit costs without outputting to the bitstream */
unsigned int measure_cost;
unsigned long bit_counter;
} BOOL_CODER;

View File

@ -16,22 +16,26 @@
#include "vpx_rtcd.h"
static const unsigned int NOISE_MOTION_THRESHOLD = 25 * 25;
// SSE_DIFF_THRESHOLD is selected as ~95% confidence assuming var(noise) ~= 100.
/* SSE_DIFF_THRESHOLD is selected as ~95% confidence assuming
* var(noise) ~= 100.
*/
static const unsigned int SSE_DIFF_THRESHOLD = 16 * 16 * 20;
static const unsigned int SSE_THRESHOLD = 16 * 16 * 40;
// The filtering coefficients used for denoizing are adjusted for static
// blocks, or blocks with very small motion vectors. This is done through
// the motion magnitude parameter.
//
// There are currently 2048 possible mapping from absolute difference to
// filter coefficient depending on the motion magnitude. Each mapping is
// in a LUT table. All these tables are staticly allocated but they are only
// filled on their first use.
//
// Each entry is a pair of 16b values, the coefficient and its complement
// to 256. Each of these value should only be 8b but they are 16b wide to
// avoid slow partial register manipulations.
/*
* The filtering coefficients used for denoising are adjusted for static
* blocks, or blocks with very small motion vectors. This is done through
* the motion magnitude parameter.
*
* There are currently 2048 possible mappings from absolute difference to
* filter coefficient depending on the motion magnitude. Each mapping is
* in a LUT table. All these tables are statically allocated but they are only
* filled on their first use.
*
* Each entry is a pair of 16b values, the coefficient and its complement
* to 256. Each of these values should only be 8b but they are 16b wide to
* avoid slow partial register manipulations.
*/
enum {num_motion_magnitude_adjustments = 2048};
static union coeff_pair filter_coeff_LUT[num_motion_magnitude_adjustments][256];
@ -100,7 +104,7 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
for (r = 0; r < 16; ++r)
{
// Calculate absolute differences
/* Calculate absolute differences */
unsigned char abs_diff[16];
union coeff_pair filter_coefficient[16];
@ -112,13 +116,13 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
abs_diff[c] = absdiff;
}
// Use LUT to get filter coefficients (two 16b value; f and 256-f)
/* Use LUT to get filter coefficients (two 16b value; f and 256-f) */
for (c = 0; c < 16; ++c)
{
filter_coefficient[c] = LUT[abs_diff[c]];
}
// Filtering...
/* Filtering... */
for (c = 0; c < 16; ++c)
{
const uint16_t state = (uint16_t)(mc_running_avg_y[c]);
@ -128,10 +132,11 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
filter_coefficient[c].as_short[1] * sample + 128) >> 8;
}
// Depending on the magnitude of the difference between the signal and
// filtered version, either replace the signal by the filtered one or
// update the filter state with the signal when the change in a pixel
// isn't classified as noise.
/* Depending on the magnitude of the difference between the signal and
* filtered version, either replace the signal by the filtered one or
* update the filter state with the signal when the change in a pixel
* isn't classified as noise.
*/
for (c = 0; c < 16; ++c)
{
const int diff = sig[c] - running_avg_y[c];
@ -148,7 +153,7 @@ int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
}
}
// Update pointers for next iteration.
/* Update pointers for next iteration. */
sig += sig_stride;
filtered += 16;
mc_running_avg_y += mc_avg_y_stride;
@ -228,7 +233,6 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
enum vp8_denoiser_decision decision = FILTER_BLOCK;
// Motion compensate the running average.
if (zero_frame)
{
YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame];
@ -243,7 +247,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
saved_mbmi = *mbmi;
// Use the best MV for the compensation.
/* Use the best MV for the compensation. */
mbmi->ref_frame = x->best_reference_frame;
mbmi->mode = x->best_sse_inter_mode;
mbmi->mv = x->best_sse_mv;
@ -255,11 +259,14 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
(mv_row *mv_row + mv_col *mv_col <= NOISE_MOTION_THRESHOLD &&
sse_diff < SSE_DIFF_THRESHOLD))
{
// Handle intra blocks as referring to last frame with zero motion
// and let the absolute pixel difference affect the filter factor.
// Also consider small amount of motion as being random walk due to
// noise, if it doesn't mean that we get a much bigger error.
// Note that any changes to the mode info only affects the denoising.
/*
* Handle intra blocks as referring to last frame with zero motion
* and let the absolute pixel difference affect the filter factor.
* Also consider small amount of motion as being random walk due
* to noise, if it doesn't mean that we get a much bigger error.
* Note that any changes to the mode info only affects the
* denoising.
*/
mbmi->ref_frame =
x->best_zeromv_reference_frame;
@ -275,11 +282,11 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
saved_pre = filter_xd->pre;
saved_dst = filter_xd->dst;
// Compensate the running average.
/* Compensate the running average. */
filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset;
filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset;
filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset;
// Write the compensated running average to the destination buffer.
/* Write the compensated running average to the destination buffer. */
filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset;
filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset;
filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset;
@ -314,7 +321,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
if (decision == FILTER_BLOCK)
{
// Filter.
/* Filter. */
decision = vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
&denoiser->yv12_running_avg[LAST_FRAME],
x,
@ -323,8 +330,9 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
}
if (decision == COPY_BLOCK)
{
// No filtering of this block; it differs too much from the predictor,
// or the motion vector magnitude is considered too big.
/* No filtering of this block; it differs too much from the predictor,
* or the motion vector magnitude is considered too big.
*/
vp8_copy_mem16x16(
x->thismb, 16,
denoiser->yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset,

View File

@ -19,7 +19,7 @@
enum vp8_denoiser_decision
{
COPY_BLOCK,
FILTER_BLOCK,
FILTER_BLOCK
};
typedef struct vp8_denoiser
@ -47,4 +47,4 @@ union coeff_pair
union coeff_pair *vp8_get_filter_coeff_LUT(unsigned int motion_magnitude);
#endif // VP8_ENCODER_DENOISING_H_
#endif /* VP8_ENCODER_DENOISING_H_ */

View File

@ -77,7 +77,7 @@ static const unsigned char VP8_VAR_OFFS[16]=
};
// Original activity measure from Tim T's code.
/* Original activity measure from Tim T's code. */
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
unsigned int act;
@ -100,7 +100,7 @@ static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
return act;
}
// Stub for alternative experimental activity measures.
/* Stub for alternative experimental activity measures. */
static unsigned int alt_activity_measure( VP8_COMP *cpi,
MACROBLOCK *x, int use_dc_pred )
{
@ -108,8 +108,9 @@ static unsigned int alt_activity_measure( VP8_COMP *cpi,
}
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
/* Measure the activity of the current macroblock
* What we measure here is TBD so abstracted to this function
*/
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
int mb_row, int mb_col)
@ -120,12 +121,12 @@ static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
{
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
// Or use and alternative.
/* Or use an alternative. */
mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
}
else
{
// Original activity measure from Tim T's code.
/* Original activity measure from Tim T's code. */
mb_activity = tt_activity_measure( cpi, x );
}
@ -135,36 +136,36 @@ static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
return mb_activity;
}
// Calculate an "average" mb activity value for the frame
/* Calculate an "average" mb activity value for the frame */
#define ACT_MEDIAN 0
static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
{
#if ACT_MEDIAN
// Find median: Simple n^2 algorithm for experimentation
/* Find median: Simple n^2 algorithm for experimentation */
{
unsigned int median;
unsigned int i,j;
unsigned int * sortlist;
unsigned int tmp;
// Create a list to sort to
/* Create a list to sort to */
CHECK_MEM_ERROR(sortlist,
vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
// Copy map to sort list
/* Copy map to sort list */
vpx_memcpy( sortlist, cpi->mb_activity_map,
sizeof(unsigned int) * cpi->common.MBs );
// Ripple each value down to its correct position
/* Ripple each value down to its correct position */
for ( i = 1; i < cpi->common.MBs; i ++ )
{
for ( j = i; j > 0; j -- )
{
if ( sortlist[j] < sortlist[j-1] )
{
// Swap values
/* Swap values */
tmp = sortlist[j-1];
sortlist[j-1] = sortlist[j];
sortlist[j] = tmp;
@ -174,7 +175,7 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
}
}
// Even number MBs so estimate median as mean of two either side.
/* Even number MBs so estimate median as mean of two either side. */
median = ( 1 + sortlist[cpi->common.MBs >> 1] +
sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1;
@ -183,14 +184,14 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
vpx_free(sortlist);
}
#else
// Simple mean for now
/* Simple mean for now */
cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
#endif
if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
/* Experimental code: return fixed value normalized for several clips */
if ( ALT_ACT_MEASURE )
cpi->activity_avg = 100000;
}
@ -199,7 +200,7 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
#define OUTPUT_NORM_ACT_STATS 0
#if USE_ACT_INDEX
// Calculate and activity index for each mb
/* Calculate and activity index for each mb */
static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
{
VP8_COMMON *const cm = & cpi->common;
@ -214,19 +215,19 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
fprintf(f, "\n%12d\n", cpi->activity_avg );
#endif
// Reset pointers to start of activity map
/* Reset pointers to start of activity map */
x->mb_activity_ptr = cpi->mb_activity_map;
// Calculate normalized mb activity number.
/* Calculate normalized mb activity number. */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
// for each macroblock col in image
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
// Read activity from the map
/* Read activity from the map */
act = *(x->mb_activity_ptr);
// Calculate a normalized activity number
/* Calculate a normalized activity number */
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
@ -238,7 +239,7 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
#if OUTPUT_NORM_ACT_STATS
fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
// Increment activity map pointers
/* Increment activity map pointers */
x->mb_activity_ptr++;
}
@ -255,8 +256,9 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
}
#endif
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
/* Loop through all MBs. Note activity of each, average activity and
* calculate a normalized activity for each
*/
static void build_activity_map( VP8_COMP *cpi )
{
MACROBLOCK *const x = & cpi->mb;
@ -273,15 +275,15 @@ static void build_activity_map( VP8_COMP *cpi )
unsigned int mb_activity;
int64_t activity_sum = 0;
// for each macroblock row in image
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
#if ALT_ACT_MEASURE
// reset above block coeffs
/* reset above block coeffs */
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
// for each macroblock col in image
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
#if ALT_ACT_MEASURE
@ -289,48 +291,48 @@ static void build_activity_map( VP8_COMP *cpi )
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
#endif
//Copy current mb to a buffer
/* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// measure activity
/* measure activity */
mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
// Keep frame sum
/* Keep frame sum */
activity_sum += mb_activity;
// Store MB level activity details.
/* Store MB level activity details. */
*x->mb_activity_ptr = mb_activity;
// Increment activity map pointer
/* Increment activity map pointer */
x->mb_activity_ptr++;
// adjust to the next column of source macroblocks
/* adjust to the next column of source macroblocks */
x->src.y_buffer += 16;
}
// adjust to the next row of mbs
/* adjust to the next row of mbs */
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
#if ALT_ACT_MEASURE
//extend the recon for intra prediction
/* extend the recon for intra prediction */
vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
#endif
}
// Calculate an "average" MB activity
/* Calculate an "average" MB activity */
calc_av_activity(cpi, activity_sum);
#if USE_ACT_INDEX
// Calculate an activity index number of each mb
/* Calculate an activity index number of each mb */
calc_activity_index( cpi, x );
#endif
}
// Macroblock activity masking
/* Macroblock activity masking */
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
{
#if USE_ACT_INDEX
@ -342,7 +344,7 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
/* Apply the masking to the RD multiplier. */
a = act + (2*cpi->activity_avg);
b = (2*act) + cpi->activity_avg;
@ -351,7 +353,7 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
x->errorperbit += (x->errorperbit==0);
#endif
// Activity based Zbin adjustment
/* Activity based Zbin adjustment */
adjust_act_zbin(cpi, x);
}
@ -398,7 +400,7 @@ void encode_mb_row(VP8_COMP *cpi,
w = &cpi->bc[1];
#endif
// reset above block coeffs
/* reset above block coeffs */
xd->above_context = cm->above_context;
xd->up_available = (mb_row != 0);
@ -406,37 +408,41 @@ void encode_mb_row(VP8_COMP *cpi,
recon_uvoffset = (mb_row * recon_uv_stride * 8);
cpi->tplist[mb_row].start = *tp;
//printf("Main mb_row = %d\n", mb_row);
/* printf("Main mb_row = %d\n", mb_row); */
// Distance of Mb to the top & bottom edges, specified in 1/8th pel
// units as they are always compared to values that are in 1/8th pel units
/* Distance of Mb to the top & bottom edges, specified in 1/8th pel
* units as they are always compared to values that are in 1/8th pel units
*/
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
// Set up limit values for vertical motion vector components
// to prevent them extending beyond the UMV borders
/* Set up limit values for vertical motion vector components
* to prevent them extending beyond the UMV borders
*/
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ (VP8BORDERINPIXELS - 16);
// Set the mb activity pointer to the start of the row.
/* Set the mb activity pointer to the start of the row. */
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
// for each macroblock col in image
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
*tp = cpi->tok;
#endif
// Distance of Mb to the left & right edges, specified in
// 1/8th pel units as they are always compared to values
// that are in 1/8th pel units
/* Distance of Mb to the left & right edges, specified in
* 1/8th pel units as they are always compared to values
* that are in 1/8th pel units
*/
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
// Set up limit values for horizontal motion vector components
// to prevent them extending beyond the UMV borders
/* Set up limit values for horizontal motion vector components
* to prevent them extending beyond the UMV borders
*/
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ (VP8BORDERINPIXELS - 16);
@ -449,13 +455,13 @@ void encode_mb_row(VP8_COMP *cpi,
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
//Copy current mb to a buffer
/* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#if CONFIG_MULTITHREAD
if (cpi->b_multi_threaded != 0)
{
*current_mb_col = mb_col - 1; // set previous MB done
*current_mb_col = mb_col - 1; /* set previous MB done */
if ((mb_col & (nsync - 1)) == 0)
{
@ -471,11 +477,13 @@ void encode_mb_row(VP8_COMP *cpi,
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
// Is segmentation enabled
// MB level adjustment to quantizer
/* Is segmentation enabled */
/* MB level adjustment to quantizer */
if (xd->segmentation_enabled)
{
// Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
/* Code to set segment id in xd->mbmi.segment_id for current MB
* (with range checking)
*/
if (cpi->segmentation_map[map_index+mb_col] <= 3)
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
else
@ -484,7 +492,8 @@ void encode_mb_row(VP8_COMP *cpi,
vp8cx_mb_init_quantizer(cpi, x, 1);
}
else
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
/* Set to Segment 0 by default */
xd->mode_info_context->mbmi.segment_id = 0;
x->active_ptr = cpi->active_map + map_index + mb_col;
@ -514,21 +523,28 @@ void encode_mb_row(VP8_COMP *cpi,
#endif
// Count of last ref frame 0,0 usage
/* Count of last ref frame 0,0 usage */
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
// Special case code for cyclic refresh
// If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
// during vp8cx_encode_inter_macroblock()) back into the global segmentation map
/* Special case code for cyclic refresh
* If cyclic update enabled then copy xd->mbmi.segment_id; (which
* may have been updated based on mode during
* vp8cx_encode_inter_macroblock()) back into the global
* segmentation map
*/
if ((cpi->current_layer == 0) &&
(cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled))
{
cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
// If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
// Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
// else mark it as dirty (1).
/* If the block has been refreshed mark it as clean (the
* magnitude of the -ve influences how long it will be before
* we consider another refresh):
* Else if it was coded (last frame 0,0) and has not already
* been refreshed then mark it as a candidate for cleanup
* next time (marked 0) else mark it as dirty (1).
*/
if (xd->mode_info_context->mbmi.segment_id)
cpi->cyclic_refresh_map[map_index+mb_col] = -1;
else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
@ -551,13 +567,13 @@ void encode_mb_row(VP8_COMP *cpi,
pack_tokens(w, tp_start, tok_count);
}
#endif
// Increment pointer into gf usage flags structure.
/* Increment pointer into gf usage flags structure. */
x->gf_active_ptr++;
// Increment the activity mask pointers.
/* Increment the activity mask pointers. */
x->mb_activity_ptr++;
// adjust to the next column of macroblocks
/* adjust to the next column of macroblocks */
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
@ -565,16 +581,16 @@ void encode_mb_row(VP8_COMP *cpi,
recon_yoffset += 16;
recon_uvoffset += 8;
// Keep track of segment usage
/* Keep track of segment usage */
segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
// skip to next mb
/* skip to next mb */
xd->mode_info_context++;
x->partition_info++;
xd->above_context++;
}
//extend the recon for intra prediction
/* extend the recon for intra prediction */
vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx],
xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8,
@ -585,7 +601,7 @@ void encode_mb_row(VP8_COMP *cpi,
*current_mb_col = rightmost_col;
#endif
// this is to account for the border
/* this is to account for the border */
xd->mode_info_context++;
x->partition_info++;
}
@ -596,10 +612,10 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
// GF active flags data structure
/* GF active flags data structure */
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
// Activity map pointer
/* Activity map pointer */
x->mb_activity_ptr = cpi->mb_activity_map;
x->act_zbin_adj = 0;
@ -611,16 +627,16 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
xd->frame_type = cm->frame_type;
// reset intra mode contexts
/* reset intra mode contexts */
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
// Copy data over into macro block data structures.
/* Copy data over into macro block data structures. */
x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
// set up frame for intra coded blocks
/* set up frame for intra coded blocks */
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp8_build_block_offsets(x);
@ -643,7 +659,9 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
// Special case treatment when GF and ARF are not sensible options for reference
/* Special case treatment when GF and ARF are not sensible options
* for reference
*/
if (cpi->ref_frame_flags == VP8_LAST_FRAME)
vp8_calc_ref_frame_costs(x->ref_frame_cost,
cpi->prob_intra_coded,255,128);
@ -676,7 +694,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
int segment_counts[MAX_MB_SEGMENTS];
int totalrate;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
BOOL_CODER * bc = &cpi->bc[1]; // bc[0] is for control partition
BOOL_CODER * bc = &cpi->bc[1]; /* bc[0] is for control partition */
const int num_part = (1 << cm->multi_token_partition);
#endif
@ -691,7 +709,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
vp8_auto_select_speed(cpi);
}
// Functions setup for all frame types so we can use MC in AltRef
/* Functions setup for all frame types so we can use MC in AltRef */
if(!cm->use_bilinear_mc_filter)
{
xd->subpixel_predict = vp8_sixtap_predict4x4;
@ -707,7 +725,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
}
// Reset frame count of inter 0,0 motion vector usage.
/* Reset frame count of inter 0,0 motion vector usage. */
cpi->inter_zz_count = 0;
cpi->prediction_error = 0;
@ -716,7 +734,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
cpi->tok_count = 0;
#if 0
// Experimental code
/* Experimental code */
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
#endif
@ -736,14 +754,14 @@ void vp8_encode_frame(VP8_COMP *cpi)
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
// Initialize encode frame context.
/* Initialize encode frame context. */
init_encode_frame_mb_context(cpi);
// Build a frame level activity map
/* Build a frame level activity map */
build_activity_map(cpi);
}
// re-init encode frame context.
/* re-init encode frame context. */
init_encode_frame_mb_context(cpi);
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
@ -790,7 +808,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
// adjust to the next row of mbs
/* adjust to the next row of mbs */
x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
@ -836,7 +854,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
else
#endif
{
// for each macroblock row in image
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
vp8_zero(cm->left_context)
@ -847,7 +865,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
// adjust to the next row of mbs
/* adjust to the next row of mbs */
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
@ -872,13 +890,13 @@ void vp8_encode_frame(VP8_COMP *cpi)
}
// Work out the segment probabilities if segmentation is enabled
/* Work out the segment probabilities if segmentation is enabled */
if (xd->segmentation_enabled)
{
int tot_count;
int i;
// Set to defaults
/* Set to defaults */
vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
@ -899,7 +917,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
if (tot_count > 0)
xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
// Zero probabilities not allowed
/* Zero probabilities not allowed */
for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
{
if (xd->mb_segment_tree_probs[i] == 0)
@ -908,10 +926,10 @@ void vp8_encode_frame(VP8_COMP *cpi)
}
}
// 256 rate units to the bit
cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
/* projected_frame_size in units of BYTES */
cpi->projected_frame_size = totalrate >> 8;
// Make a note of the percentage MBs coded Intra.
/* Make a note of the percentage MBs coded Intra. */
if (cm->frame_type == KEY_FRAME)
{
cpi->this_frame_percent_intra = 100;
@ -961,9 +979,11 @@ void vp8_encode_frame(VP8_COMP *cpi)
#endif
#if ! CONFIG_REALTIME_ONLY
// Adjust the projected reference frame usage probability numbers to reflect
// what we have just seen. This may be useful when we make multiple iterations
// of the recode loop rather than continuing to use values from the previous frame.
/* Adjust the projected reference frame usage probability numbers to
* reflect what we have just seen. This may be useful when we make
* multiple iterations of the recode loop rather than continuing to use
* values from the previous frame.
*/
if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
(!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)))
{
@ -1017,16 +1037,13 @@ void vp8_build_block_offsets(MACROBLOCK *x)
vp8_build_block_doffsets(&x->e_mbd);
// y blocks
/* y blocks */
x->thismb_ptr = &x->thismb[0];
for (br = 0; br < 4; br++)
{
for (bc = 0; bc < 4; bc++)
{
BLOCK *this_block = &x->block[block];
//this_block->base_src = &x->src.y_buffer;
//this_block->src_stride = x->src.y_stride;
//this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
@ -1034,7 +1051,7 @@ void vp8_build_block_offsets(MACROBLOCK *x)
}
}
// u blocks
/* u blocks */
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
@ -1047,7 +1064,7 @@ void vp8_build_block_offsets(MACROBLOCK *x)
}
}
// v blocks
/* v blocks */
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
@ -1092,8 +1109,9 @@ static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
/* Experimental stub function to create a per MB zbin adjustment based on
* some previously calculated measure of MB activity.
*/
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
{
#if USE_ACT_INDEX
@ -1103,7 +1121,7 @@ static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
/* Apply the masking to the RD multiplier. */
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
@ -1176,7 +1194,7 @@ int vp8cx_encode_inter_macroblock
x->encode_breakout = cpi->oxcf.encode_breakout;
#if CONFIG_TEMPORAL_DENOISING
// Reset the best sse mode/mv for each macroblock.
/* Reset the best sse mode/mv for each macroblock. */
x->best_reference_frame = INTRA_FRAME;
x->best_zeromv_reference_frame = INTRA_FRAME;
x->best_sse_inter_mode = 0;
@ -1223,23 +1241,23 @@ int vp8cx_encode_inter_macroblock
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
// Adjust the zbin based on this MB rate.
/* Adjust the zbin based on this MB rate. */
adjust_act_zbin( cpi, x );
}
#if 0
// Experimental RD code
/* Experimental RD code */
cpi->frame_distortion += distortion;
cpi->last_mb_distortion = distortion;
#endif
// MB level adjutment to quantizer setup
/* MB level adjutment to quantizer setup */
if (xd->segmentation_enabled)
{
// If cyclic update enabled
/* If cyclic update enabled */
if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled)
{
// Clear segment_id back to 0 if not coded (last frame 0,0)
/* Clear segment_id back to 0 if not coded (last frame 0,0) */
if ((xd->mode_info_context->mbmi.segment_id == 1) &&
((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
{
@ -1252,8 +1270,9 @@ int vp8cx_encode_inter_macroblock
}
{
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to supress noise
/* Experimental code. Special case for gf and arf zeromv modes.
* Increase zbin size to supress noise
*/
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled)
{

View File

@ -137,10 +137,10 @@ void vp8_transform_intra_mby(MACROBLOCK *x)
&x->block[i].coeff[0], 32);
}
// build dc block from 16 y dc values
/* build dc block from 16 y dc values */
build_dcblock(x);
// do 2nd order transform on the dc block
/* do 2nd order transform on the dc block */
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
@ -157,7 +157,7 @@ static void transform_mb(MACROBLOCK *x)
&x->block[i].coeff[0], 32);
}
// build dc block from 16 y dc values
/* build dc block from 16 y dc values */
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
build_dcblock(x);
@ -167,7 +167,7 @@ static void transform_mb(MACROBLOCK *x)
&x->block[i].coeff[0], 16);
}
// do 2nd order transform on the dc block
/* do 2nd order transform on the dc block */
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
@ -185,7 +185,7 @@ static void transform_mby(MACROBLOCK *x)
&x->block[i].coeff[0], 32);
}
// build dc block from 16 y dc values
/* build dc block from 16 y dc values */
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
build_dcblock(x);
@ -208,7 +208,7 @@ struct vp8_token_state{
short qc;
};
// TODO: experiments to find optimal multiple numbers
/* TODO: experiments to find optimal multiple numbers */
#define Y1_RD_MULT 4
#define UV_RD_MULT 2
#define Y2_RD_MULT 16

View File

@ -29,15 +29,15 @@ static void encode_mvcomponent(
const vp8_prob *p = mvc->prob;
const int x = v < 0 ? -v : v;
if (x < mvnum_short) // Small
if (x < mvnum_short) /* Small */
{
vp8_write(w, 0, p [mvpis_short]);
vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, 3);
if (!x)
return; // no sign bit
return; /* no sign bit */
}
else // Large
else /* Large */
{
int i = 0;
@ -100,7 +100,7 @@ void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc
static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc)
{
const vp8_prob *p = mvc->prob;
const int x = v; //v<0? -v:v;
const int x = v;
unsigned int cost;
if (x < mvnum_short)
@ -132,12 +132,12 @@ static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc)
cost += vp8_cost_bit(p [MVPbits + 3], (x >> 3) & 1);
}
return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
return cost; /* + vp8_cost_bit( p [MVPsign], v < 0); */
}
void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2])
{
int i = 1; //-mv_max;
int i = 1;
unsigned int cost0 = 0;
unsigned int cost1 = 0;
@ -151,7 +151,6 @@ void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int m
do
{
//mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
cost0 = cost_mvcomponent(i, &mvc[0]);
mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
@ -168,7 +167,6 @@ void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int m
do
{
//mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
cost1 = cost_mvcomponent(i, &mvc[1]);
mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
@ -179,10 +177,10 @@ void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int m
}
// Motion vector probability table update depends on benefit.
// Small correction allows for the fact that an update to an MV probability
// may have benefit in subsequent frames as well as the current one.
/* Motion vector probability table update depends on benefit.
* Small correction allows for the fact that an update to an MV probability
* may have benefit in subsequent frames as well as the current one.
*/
#define MV_PROB_UPDATE_CORRECTION -1
@ -254,22 +252,22 @@ static void write_component_probs(
vp8_zero(short_bct)
//j=0
/* j=0 */
{
const int c = events [mv_max];
is_short_ct [0] += c; // Short vector
short_ct [0] += c; // Magnitude distribution
is_short_ct [0] += c; /* Short vector */
short_ct [0] += c; /* Magnitude distribution */
}
//j: 1 ~ mv_max (1023)
/* j: 1 ~ mv_max (1023) */
{
int j = 1;
do
{
const int c1 = events [mv_max + j]; //positive
const int c2 = events [mv_max - j]; //negative
const int c1 = events [mv_max + j]; /* positive */
const int c2 = events [mv_max - j]; /* negative */
const int c = c1 + c2;
int a = j;
@ -278,13 +276,13 @@ static void write_component_probs(
if (a < mvnum_short)
{
is_short_ct [0] += c; // Short vector
short_ct [a] += c; // Magnitude distribution
is_short_ct [0] += c; /* Short vector */
short_ct [a] += c; /* Magnitude distribution */
}
else
{
int k = mvlong_width - 1;
is_short_ct [1] += c; // Long vector
is_short_ct [1] += c; /* Long vector */
/* bit 3 not always encoded. */
do
@ -296,43 +294,6 @@ static void write_component_probs(
while (++j <= mv_max);
}
/*
{
int j = -mv_max;
do
{
const int c = events [mv_max + j];
int a = j;
if( j < 0)
{
sign_ct [1] += c;
a = -j;
}
else if( j)
sign_ct [0] += c;
if( a < mvnum_short)
{
is_short_ct [0] += c; // Short vector
short_ct [a] += c; // Magnitude distribution
}
else
{
int k = mvlong_width - 1;
is_short_ct [1] += c; // Long vector
// bit 3 not always encoded.
do
bit_ct [k] [(a >> k) & 1] += c;
while( --k >= 0);
}
} while( ++j <= mv_max);
}
*/
calc_prob(Pnew + mvpis_short, is_short_ct);
calc_prob(Pnew + MVPsign, sign_ct);

View File

@ -39,7 +39,7 @@ static THREAD_FUNCTION thread_loopfilter(void *p_data)
if (sem_wait(&cpi->h_event_start_lpf) == 0)
{
if (cpi->b_multi_threaded == 0) // we're shutting down
if (cpi->b_multi_threaded == 0) /* we're shutting down */
break;
vp8_loopfilter_frame(cpi, cm);
@ -60,14 +60,12 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
ENTROPY_CONTEXT_PLANES mb_row_left_context;
const int nsync = cpi->mt_sync_range;
//printf("Started thread %d\n", ithread);
while (1)
{
if (cpi->b_multi_threaded == 0)
break;
//if(WaitForSingleObject(cpi->h_event_mbrencoding[ithread], INFINITE) == WAIT_OBJECT_0)
if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0)
{
VP8_COMMON *cm = &cpi->common;
@ -83,7 +81,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
int *segment_counts = mbri->segment_counts;
int *totalrate = &mbri->totalrate;
if (cpi->b_multi_threaded == 0) // we're shutting down
if (cpi->b_multi_threaded == 0) /* we're shutting down */
break;
for (mb_row = ithread + 1; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
@ -108,7 +106,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
// reset above block coeffs
/* reset above block coeffs */
xd->above_context = cm->above_context;
xd->left_context = &mb_row_left_context;
@ -118,10 +116,10 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
recon_yoffset = (mb_row * recon_y_stride * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8);
// Set the mb activity pointer to the start of the row.
/* Set the mb activity pointer to the start of the row. */
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
// for each macroblock col in image
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
*current_mb_col = mb_col - 1;
@ -139,14 +137,18 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
tp = tp_start;
#endif
// Distance of Mb to the various image edges.
// These specified to 8th pel as they are always compared to values that are in 1/8th pel units
/* Distance of Mb to the various image edges.
* These specified to 8th pel as they are always compared
* to values that are in 1/8th pel units
*/
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
// Set up limit values for motion vectors used to prevent them extending outside the UMV borders
/* Set up limit values for motion vectors used to prevent
* them extending outside the UMV borders
*/
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
@ -160,17 +162,19 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
//Copy current mb to a buffer
/* Copy current mb to a buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
// Is segmentation enabled
// MB level adjustment to quantizer
/* Is segmentation enabled */
/* MB level adjustment to quantizer */
if (xd->segmentation_enabled)
{
// Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
/* Code to set segment id in xd->mbmi.segment_id for
* current MB (with range checking)
*/
if (cpi->segmentation_map[map_index + mb_col] <= 3)
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index + mb_col];
else
@ -179,7 +183,8 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
vp8cx_mb_init_quantizer(cpi, x, 1);
}
else
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
/* Set to Segment 0 by default */
xd->mode_info_context->mbmi.segment_id = 0;
x->active_ptr = cpi->active_map + map_index + mb_col;
@ -209,21 +214,30 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
#endif
// Count of last ref frame 0,0 usage
/* Count of last ref frame 0,0 usage */
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count++;
// Special case code for cyclic refresh
// If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
// during vp8cx_encode_inter_macroblock()) back into the global segmentation map
/* Special case code for cyclic refresh
* If cyclic update enabled then copy
* xd->mbmi.segment_id; (which may have been updated
* based on mode during
* vp8cx_encode_inter_macroblock()) back into the
* global segmentation map
*/
if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
{
const MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;
// If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
// Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
// else mark it as dirty (1).
/* If the block has been refreshed mark it as clean
* (the magnitude of the -ve influences how long it
* will be before we consider another refresh):
* Else if it was coded (last frame 0,0) and has
* not already been refreshed then mark it as a
* candidate for cleanup next time (marked 0) else
* mark it as dirty (1).
*/
if (mbmi->segment_id)
cpi->cyclic_refresh_map[map_index + mb_col] = -1;
else if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
@ -246,13 +260,13 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
#else
cpi->tplist[mb_row].stop = tp;
#endif
// Increment pointer into gf usage flags structure.
/* Increment pointer into gf usage flags structure. */
x->gf_active_ptr++;
// Increment the activity mask pointers.
/* Increment the activity mask pointers. */
x->mb_activity_ptr++;
// adjust to the next column of macroblocks
/* adjust to the next column of macroblocks */
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
@ -260,10 +274,10 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
recon_yoffset += 16;
recon_uvoffset += 8;
// Keep track of segment usage
/* Keep track of segment usage */
segment_counts[xd->mode_info_context->mbmi.segment_id]++;
// skip to next mb
/* skip to next mb */
xd->mode_info_context++;
x->partition_info++;
xd->above_context++;
@ -276,7 +290,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
*current_mb_col = mb_col + nsync;
// this is to account for the border
/* this is to account for the border */
xd->mode_info_context++;
x->partition_info++;
@ -296,7 +310,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
}
}
//printf("exit thread %d\n", ithread);
/* printf("exit thread %d\n", ithread); */
return 0;
}
@ -550,14 +564,13 @@ void vp8cx_remove_encoder_threads(VP8_COMP *cpi)
{
if (cpi->b_multi_threaded)
{
//shutdown other threads
/* shutdown other threads */
cpi->b_multi_threaded = 0;
{
int i;
for (i = 0; i < cpi->encoding_thread_count; i++)
{
//SetEvent(cpi->h_event_mbrencoding[i]);
sem_post(&cpi->h_event_start_encoding[i]);
pthread_join(cpi->h_encoding_thread[i], 0);
@ -572,7 +585,7 @@ void vp8cx_remove_encoder_threads(VP8_COMP *cpi)
sem_destroy(&cpi->h_event_end_lpf);
sem_destroy(&cpi->h_event_start_lpf);
//free thread related resources
/* free thread related resources */
vpx_free(cpi->h_event_start_encoding);
vpx_free(cpi->h_encoding_thread);
vpx_free(cpi->mb_row_ei);

File diff suppressed because it is too large Load Diff

View File

@ -118,10 +118,11 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
ctx->sz++;
buf = pop(ctx, &ctx->write_idx);
// Only do this partial copy if the following conditions are all met:
// 1. Lookahead queue has has size of 1.
// 2. Active map is provided.
// 3. This is not a key frame, golden nor altref frame.
/* Only do this partial copy if the following conditions are all met:
* 1. Lookahead queue has has size of 1.
* 2. Active map is provided.
* 3. This is not a key frame, golden nor altref frame.
*/
if (ctx->max_sz == 1 && active_map && !flags)
{
for (row = 0; row < mb_rows; ++row)
@ -130,18 +131,18 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
while (1)
{
// Find the first active macroblock in this row.
/* Find the first active macroblock in this row. */
for (; col < mb_cols; ++col)
{
if (active_map[col])
break;
}
// No more active macroblock in this row.
/* No more active macroblock in this row. */
if (col == mb_cols)
break;
// Find the end of active region in this row.
/* Find the end of active region in this row. */
active_end = col;
for (; active_end < mb_cols; ++active_end)
@ -150,13 +151,13 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
break;
}
// Only copy this active region.
/* Only copy this active region. */
vp8_copy_and_extend_frame_with_rect(src, &buf->img,
row << 4,
col << 4, 16,
(active_end - col) << 4);
// Start again from the end of this active region.
/* Start again from the end of this active region. */
col = active_end;
}

View File

@ -25,16 +25,19 @@ static int mv_mode_cts [4] [2];
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
{
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
/* MV costing is based on the distribution of vectors in the previous
* frame and as such will tend to over state the cost of vectors. In
* addition coding a new vector can have a knock on effect on the cost
* of subsequent vectors and the quality of prediction from NEAR and
* NEAREST for subsequent blocks. The "Weight" parameter allows, to a
* limited extent, for some account to be taken of these factors.
*/
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
}
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
{
// Ignore mv costing if mvcost is NULL
/* Ignore mv costing if mvcost is NULL */
if (mvcost)
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
@ -44,8 +47,8 @@ static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bi
static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
{
// Calculate sad error cost on full pixel basis.
// Ignore mv costing if mvsadcost is NULL
/* Calculate sad error cost on full pixel basis. */
/* Ignore mv costing if mvsadcost is NULL */
if (mvsadcost)
return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
@ -59,7 +62,7 @@ void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
int search_site_count = 0;
// Generate offsets for 4 search sites per step.
/* Generate offsets for 4 search sites per step. */
Len = MAX_FIRST_STEP;
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = 0;
@ -69,31 +72,31 @@ void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
while (Len > 0)
{
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = -Len;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = Len;
search_site_count++;
// Contract.
/* Contract. */
Len /= 2;
}
@ -106,7 +109,7 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
int Len;
int search_site_count = 0;
// Generate offsets for 8 search sites per step.
/* Generate offsets for 8 search sites per step. */
Len = MAX_FIRST_STEP;
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = 0;
@ -116,56 +119,56 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
while (Len > 0)
{
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = 0;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = -Len;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = 0;
x->ss[search_site_count].offset = Len;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride - Len;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = -Len;
x->ss[search_site_count].offset = -Len * stride + Len;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride - Len;
search_site_count++;
// Compute offsets for search sites.
/* Compute offsets for search sites. */
x->ss[search_site_count].mv.col = Len;
x->ss[search_site_count].mv.row = Len;
x->ss[search_site_count].offset = Len * stride + Len;
search_site_count++;
// Contract.
/* Contract. */
Len /= 2;
}
@ -182,13 +185,20 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
* 32 cols area that is enough for 16x16 macroblock. Later, for SPLITMV, we
* could reduce the area.
*/
#define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 : 0) // estimated cost of a motion vector (r,c)
#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
/* estimated cost of a motion vector (r,c) */
#define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 : 0)
/* pointer to predictor base of a motionvector */
#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset)))
/* convert motion vector component to offset for svf calc */
#define SP(x) (((x)&3)<<1)
/* returns subpixel variance error function. */
#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse)
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
/* returns distortion + motion vector cost */
#define ERR(r,c) (MVC(r,c)+DIST(r,c))
/* checks if (r,c) has better score than previous best */
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
@ -227,7 +237,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
unsigned char *y;
int buf_r1, buf_r2, buf_c1, buf_c2;
// Clamping to avoid out-of-range data access
/* Clamping to avoid out-of-range data access */
buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
@ -244,19 +254,21 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
// central mv
/* central mv */
bestmv->as_mv.row <<= 3;
bestmv->as_mv.col <<= 3;
// calculate central point error
/* calculate central point error */
besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
/* TODO: Each subsequent iteration checks at least one point in common
* with the last iteration could be 2 ( if diag selected)
*/
while (--halfiters)
{
// 1/2 pel
/* 1/2 pel */
CHECK_BETTER(left, tr, tc - 2);
CHECK_BETTER(right, tr, tc + 2);
CHECK_BETTER(up, tr - 2, tc);
@ -280,7 +292,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
// no reason to check the same one again.
/* no reason to check the same one again. */
if (tr == br && tc == bc)
break;
@ -288,8 +300,11 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
// TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
// 1/4 pel
/* TODO: Each subsequent iteration checks at least one point in common
* with the last iteration could be 2 ( if diag selected)
*/
/* 1/4 pel */
while (--quarteriters)
{
CHECK_BETTER(left, tr, tc - 1);
@ -315,7 +330,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
// no reason to check the same one again.
/* no reason to check the same one again. */
if (tr == br && tc == bc)
break;
@ -373,17 +388,17 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
y_stride = pre_stride;
#endif
// central mv
/* central mv */
bestmv->as_mv.row <<= 3;
bestmv->as_mv.col <<= 3;
startmv = *bestmv;
// calculate central point error
/* calculate central point error */
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
/* go left then right and check error */
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
@ -409,7 +424,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
// go up then down and check error
/* go up then down and check error */
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
@ -436,10 +451,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
// now check 1 more diagonal
/* now check 1 more diagonal */
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
//for(whichdir =0;whichdir<4;whichdir++)
//{
this_mv = startmv;
switch (whichdir)
@ -477,10 +490,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
// }
// time to check quarter pels.
/* time to check quarter pels. */
if (bestmv->as_mv.row < startmv.as_mv.row)
y -= y_stride;
@ -491,7 +502,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// go left then right and check error
/* go left then right and check error */
this_mv.as_mv.row = startmv.as_mv.row;
if (startmv.as_mv.col & 7)
@ -527,7 +538,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
// go up then down and check error
/* go up then down and check error */
this_mv.as_mv.col = startmv.as_mv.col;
if (startmv.as_mv.row & 7)
@ -564,11 +575,9 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
// now check 1 more diagonal
/* now check 1 more diagonal */
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
// for(whichdir=0;whichdir<4;whichdir++)
// {
this_mv = startmv;
switch (whichdir)
@ -690,17 +699,17 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
y_stride = pre_stride;
#endif
// central mv
/* central mv */
bestmv->as_mv.row <<= 3;
bestmv->as_mv.col <<= 3;
startmv = *bestmv;
// calculate central point error
/* calculate central point error */
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
/* go left then right and check error */
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
@ -726,7 +735,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
// go up then down and check error
/* go up then down and check error */
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
@ -752,7 +761,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
// now check 1 more diagonal -
/* now check 1 more diagonal - */
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
this_mv = startmv;
@ -875,12 +884,12 @@ int vp8_hex_search
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// adjust ref_mv to make sure it is within MV range
/* adjust ref_mv to make sure it is within MV range */
vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
br = ref_mv->as_mv.row;
bc = ref_mv->as_mv.col;
// Work out the start point for the search
/* Work out the start point for the search */
base_offset = (unsigned char *)(base_pre + d->offset);
this_offset = base_offset + (br * (pre_stride)) + bc;
this_mv.as_mv.row = br;
@ -901,8 +910,7 @@ int vp8_hex_search
dia_range = 8;
#endif
// hex search
//j=0
/* hex search */
CHECK_BOUNDS(2)
if(all_in)
@ -977,7 +985,7 @@ int vp8_hex_search
}
}
// check 4 1-away neighbors
/* check 4 1-away neighbors */
cal_neighbors:
for (j = 0; j < dia_range; j++)
{
@ -1066,8 +1074,11 @@ int vp8_diamond_search_sad_c
unsigned char *check_here;
int thissad;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int *mvsadcost[2];
int_mv fcenter_mv;
mvsadcost[0] = x->mvsadcost[0];
mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1078,17 +1089,19 @@ int vp8_diamond_search_sad_c
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
// Work out the start point for the search
/* Work out the start point for the search */
in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
best_address = in_what;
// Check the starting position
/* Check the starting position */
bestsad = fn_ptr->sdf(what, what_stride, in_what,
in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
// search_param determines the length of the initial step and hence the number of iterations
// 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
/* search_param determines the length of the initial step and hence
* the number of iterations 0 = initial step (MAX_FIRST_STEP) pel :
* 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
*/
ss = &x->ss[search_param * x->searches_per_step];
tot_steps = (x->ss_count / x->searches_per_step) - search_param;
@ -1098,7 +1111,7 @@ int vp8_diamond_search_sad_c
{
for (j = 0 ; j < x->searches_per_step ; j++)
{
// Trap illegal vectors
/* Trap illegal vectors */
this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
@ -1189,8 +1202,11 @@ int vp8_diamond_search_sadx4
unsigned char *check_here;
unsigned int thissad;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int *mvsadcost[2];
int_mv fcenter_mv;
mvsadcost[0] = x->mvsadcost[0];
mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1201,17 +1217,19 @@ int vp8_diamond_search_sadx4
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
// Work out the start point for the search
/* Work out the start point for the search */
in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
best_address = in_what;
// Check the starting position
/* Check the starting position */
bestsad = fn_ptr->sdf(what, what_stride,
in_what, in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
// search_param determines the length of the initial step and hence the number of iterations
// 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
/* search_param determines the length of the initial step and hence the
* number of iterations 0 = initial step (MAX_FIRST_STEP) pel : 1 =
* (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
*/
ss = &x->ss[search_param * x->searches_per_step];
tot_steps = (x->ss_count / x->searches_per_step) - search_param;
@ -1221,8 +1239,10 @@ int vp8_diamond_search_sadx4
{
int all_in = 1, t;
// To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
// checking 4 bounds for each points.
/* To know if all neighbor points are within the bounds, 4 bounds
* checking are enough instead of checking 4 bounds for each
* points.
*/
all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
@ -1263,7 +1283,7 @@ int vp8_diamond_search_sadx4
{
for (j = 0 ; j < x->searches_per_step ; j++)
{
// Trap illegal vectors
/* Trap illegal vectors */
this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
@ -1341,24 +1361,29 @@ int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int col_min = ref_col - distance;
int col_max = ref_col + distance;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int *mvsadcost[2];
int_mv fcenter_mv;
mvsadcost[0] = x->mvsadcost[0];
mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Work out the mid point for the search
/* Work out the mid point for the search */
in_what = base_pre + d->offset;
bestaddress = in_what + (ref_row * pre_stride) + ref_col;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
// Baseline value at the centre
/* Baseline value at the centre */
bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
// Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
/* Apply further limits to prevent us looking using vectors that
 * stretch beyond the UMV border
*/
if (col_min < x->mv_col_min)
col_min = x->mv_col_min;
@ -1437,24 +1462,29 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int *mvsadcost[2];
int_mv fcenter_mv;
mvsadcost[0] = x->mvsadcost[0];
mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Work out the mid point for the search
/* Work out the mid point for the search */
in_what = base_pre + d->offset;
bestaddress = in_what + (ref_row * pre_stride) + ref_col;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
// Baseline value at the centre
/* Baseline value at the centre */
bestsad = fn_ptr->sdf(what, what_stride,
bestaddress, in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
// Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
/* Apply further limits to prevent us looking using vectors that stretch
* beyond the UMV border
*/
if (col_min < x->mv_col_min)
col_min = x->mv_col_min;
@ -1570,24 +1600,29 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
unsigned int sad_array[3];
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int *mvsadcost[2];
int_mv fcenter_mv;
mvsadcost[0] = x->mvsadcost[0];
mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Work out the mid point for the search
/* Work out the mid point for the search */
in_what = base_pre + d->offset;
bestaddress = in_what + (ref_row * pre_stride) + ref_col;
best_mv->as_mv.row = ref_row;
best_mv->as_mv.col = ref_col;
// Baseline value at the centre
/* Baseline value at the centre */
bestsad = fn_ptr->sdf(what, what_stride,
bestaddress, in_what_stride, 0x7fffffff)
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
// Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
/* Apply further limits to prevent us looking using vectors that stretch
* beyond the UMV border
*/
if (col_min < x->mv_col_min)
col_min = x->mv_col_min;
@ -1721,9 +1756,11 @@ int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int *mvsadcost[2];
int_mv fcenter_mv;
mvsadcost[0] = x->mvsadcost[0];
mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1800,9 +1837,11 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int *mvsadcost[2];
int_mv fcenter_mv;
mvsadcost[0] = x->mvsadcost[0];
mvsadcost[1] = x->mvsadcost[1];
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1906,16 +1945,16 @@ void print_mode_context(void)
for (j = 0; j < 6; j++)
{
fprintf(f, " { // %d \n", j);
fprintf(f, " { /* %d */\n", j);
fprintf(f, " ");
for (i = 0; i < 4; i++)
{
int overal_prob;
int this_prob;
int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
int count;
// Overall probs
/* Overall probs */
count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
if (count)
@ -1926,7 +1965,7 @@ void print_mode_context(void)
if (overal_prob == 0)
overal_prob = 1;
// context probs
/* context probs */
count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
if (count)
@ -1938,8 +1977,6 @@ void print_mode_context(void)
this_prob = 1;
fprintf(f, "%5d, ", this_prob);
//fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
//fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
}
fprintf(f, " },\n");

View File

@ -21,9 +21,16 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#endif
#define MAX_MVSEARCH_STEPS 8 // The maximum number of steps in a step search given the largest allowed initial step
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) // Max full pel mv specified in 1 pel units
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units
/* The maximum number of steps in a step search given the largest allowed
* initial step
*/
#define MAX_MVSEARCH_STEPS 8
/* Max full pel mv specified in 1 pel units */
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1)
/* Maximum size of the first step in full pel units */
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
extern void print_mode_context(void);
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);

File diff suppressed because it is too large Load Diff

View File

@ -33,7 +33,6 @@
#include "vp8/encoder/denoising.h"
#endif
//#define SPEEDSTATS 1
#define MIN_GF_INTERVAL 4
#define DEFAULT_GF_INTERVAL 7
@ -74,7 +73,6 @@ typedef struct
int mvcosts[2][MVvals+1];
#ifdef MODE_STATS
// Stats
int y_modes[5];
int uv_modes[4];
int b_modes[10];
@ -233,11 +231,11 @@ enum
typedef struct
{
// Layer configuration
/* Layer configuration */
double frame_rate;
int target_bandwidth;
// Layer specific coding parameters
/* Layer specific coding parameters */
int starting_buffer_level;
int optimal_buffer_level;
int maximum_buffer_size;
@ -308,7 +306,7 @@ typedef struct VP8_COMP
MACROBLOCK mb;
VP8_COMMON common;
vp8_writer bc[9]; // one boolcoder for each partition
vp8_writer bc[9]; /* one boolcoder for each partition */
VP8_CONFIG oxcf;
@ -322,16 +320,20 @@ typedef struct VP8_COMP
YV12_BUFFER_CONFIG scaled_source;
YV12_BUFFER_CONFIG *last_frame_unscaled_source;
int source_alt_ref_pending; // frame in src_buffers has been identified to be encoded as an alt ref
int source_alt_ref_active; // an alt ref frame has been encoded and is usable
/* frame in src_buffers has been identified to be encoded as an alt ref */
int source_alt_ref_pending;
/* an alt ref frame has been encoded and is usable */
int source_alt_ref_active;
/* source of frame to encode is an exact copy of an alt ref frame */
int is_src_frame_alt_ref;
int is_src_frame_alt_ref; // source of frame to encode is an exact copy of an alt ref frame
/* golden frame same as last frame ( short circuit gold searches) */
int gold_is_last;
/* Alt reference frame same as last ( short circuit altref search) */
int alt_is_last;
/* don't do both alt and gold search ( just do gold). */
int gold_is_alt;
int gold_is_last; // golden frame same as last frame ( short circuit gold searches)
int alt_is_last; // Alt reference frame same as last ( short circuit altref search)
int gold_is_alt; // don't do both alt and gold search ( just do gold).
//int refresh_alt_ref_frame;
YV12_BUFFER_CONFIG pick_lf_lvl_frame;
TOKENEXTRA *tok;
@ -343,7 +345,7 @@ typedef struct VP8_COMP
unsigned int this_key_frame_forced;
unsigned int next_key_frame_forced;
// Ambient reconstruction err target for force key frames
/* Ambient reconstruction err target for force key frames */
int ambient_err;
unsigned int mode_check_freq[MAX_MODES];
@ -360,7 +362,7 @@ typedef struct VP8_COMP
CODING_CONTEXT coding_context;
// Rate targetting variables
 /* Rate targeting variables */
int64_t prediction_error;
int64_t last_prediction_error;
int64_t intra_error;
@ -368,30 +370,43 @@ typedef struct VP8_COMP
int this_frame_target;
int projected_frame_size;
int last_q[2]; // Separate values for Intra/Inter
int last_q[2]; /* Separate values for Intra/Inter */
double rate_correction_factor;
double key_frame_rate_correction_factor;
double gf_rate_correction_factor;
int frames_till_gf_update_due; // Count down till next GF
int current_gf_interval; // GF interval chosen when we coded the last GF
/* Count down till next GF */
int frames_till_gf_update_due;
int gf_overspend_bits; // Total bits overspent becasue of GF boost (cumulative)
/* GF interval chosen when we coded the last GF */
int current_gf_interval;
int non_gf_bitrate_adjustment; // Used in the few frames following a GF to recover the extra bits spent in that GF
 /* Total bits overspent because of GF boost (cumulative) */
int gf_overspend_bits;
int kf_overspend_bits; // Extra bits spent on key frames that need to be recovered on inter frames
int kf_bitrate_adjustment; // Current number of bit s to try and recover on each inter frame.
/* Used in the few frames following a GF to recover the extra bits
* spent in that GF
*/
int non_gf_bitrate_adjustment;
/* Extra bits spent on key frames that need to be recovered */
int kf_overspend_bits;
 /* Current number of bits to try and recover on each inter frame. */
int kf_bitrate_adjustment;
int max_gf_interval;
int baseline_gf_interval;
int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
int active_arnr_frames;
int64_t key_frame_count;
int prior_key_frame_distance[KEY_FRAME_CONTEXT];
int per_frame_bandwidth; // Current section per frame bandwidth target
int av_per_frame_bandwidth; // Average frame size target for clip
int min_frame_bandwidth; // Minimum allocation that should be used for any frame
/* Current section per frame bandwidth target */
int per_frame_bandwidth;
/* Average frame size target for clip */
int av_per_frame_bandwidth;
/* Minimum allocation that should be used for any frame */
int min_frame_bandwidth;
int inter_frame_target;
double output_frame_rate;
int64_t last_time_stamp_seen;
@ -425,7 +440,7 @@ typedef struct VP8_COMP
int long_rolling_actual_bits;
int64_t total_actual_bits;
int total_target_vs_actual; // debug stats
int total_target_vs_actual; /* debug stats */
int worst_quality;
int active_worst_quality;
@ -434,18 +449,16 @@ typedef struct VP8_COMP
int cq_target_quality;
int drop_frames_allowed; // Are we permitted to drop frames?
int drop_frame; // Drop this frame?
int drop_frames_allowed; /* Are we permitted to drop frames? */
int drop_frame; /* Drop this frame? */
int ymode_count [VP8_YMODES]; /* intra MB type cts this frame */
int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
//DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
//save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
char update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
@ -459,7 +472,7 @@ typedef struct VP8_COMP
struct vpx_codec_pkt_list *output_pkt_list;
#if 0
// Experimental code for lagged and one pass
/* Experimental code for lagged and one pass */
ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
int one_pass_frame_index;
#endif
@ -467,11 +480,10 @@ typedef struct VP8_COMP
int decimation_factor;
int decimation_count;
// for real time encoding
int avg_encode_time; //microsecond
int avg_pick_mode_time; //microsecond
/* for real time encoding */
int avg_encode_time; /* microsecond */
int avg_pick_mode_time; /* microsecond */
int Speed;
unsigned int cpu_freq; //Mhz
int compressor_speed;
int interquantizer;
@ -500,20 +512,25 @@ typedef struct VP8_COMP
SPEED_FEATURES sf;
int error_bins[1024];
// Data used for real time conferencing mode to help determine if it would be good to update the gf
/* Data used for real time conferencing mode to help determine if it
* would be good to update the gf
*/
int inter_zz_count;
int gf_bad_count;
int gf_update_recommended;
int skip_true_count;
unsigned char *segmentation_map;
signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; // Segment data (can be deltas or absolute values)
int segment_encode_breakout[MAX_MB_SEGMENTS]; // segment threashold for encode breakout
signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
int segment_encode_breakout[MAX_MB_SEGMENTS];
unsigned char *active_map;
unsigned int active_map_enabled;
// Video conferencing cyclic refresh mode flags etc
// This is a mode designed to clean up the background over time in live encoding scenarious. It uses segmentation
/* Video conferencing cyclic refresh mode flags. This is a mode
* designed to clean up the background over time in live encoding
 * scenarios. It uses segmentation.
*/
int cyclic_refresh_mode_enabled;
int cyclic_refresh_mode_max_mbs_perframe;
int cyclic_refresh_mode_index;
@ -521,7 +538,7 @@ typedef struct VP8_COMP
signed char *cyclic_refresh_map;
#if CONFIG_MULTITHREAD
// multithread data
/* multithread data */
int * mt_current_mb_col;
int mt_sync_range;
int b_multi_threaded;
@ -535,7 +552,7 @@ typedef struct VP8_COMP
ENCODETHREAD_DATA *en_thread_data;
LPFTHREAD_DATA lpf_thread_data;
//events
/* events */
sem_t *h_event_start_encoding;
sem_t h_event_end_encoding;
sem_t h_event_start_lpf;
@ -546,7 +563,6 @@ typedef struct VP8_COMP
unsigned int partition_sz[MAX_PARTITIONS];
unsigned char *partition_d[MAX_PARTITIONS];
unsigned char *partition_d_end[MAX_PARTITIONS];
// end of multithread data
fractional_mv_step_fp *find_fractional_mv_step;
@ -591,16 +607,16 @@ typedef struct VP8_COMP
int gf_decay_rate;
int static_scene_max_gf_interval;
int kf_bits;
int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
// Projected total bits available for a key frame group of frames
/* Remaining error from uncoded frames in a gf group. */
int gf_group_error_left;
/* Projected total bits available for a key frame group of frames */
int64_t kf_group_bits;
// Error score of frames still to be coded in kf group
/* Error score of frames still to be coded in kf group */
int64_t kf_group_error_left;
int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
int gf_bits; // Bits for the golden frame or ARF - 2 pass only
/* Projected Bits available for a group including 1 GF or ARF */
int gf_group_bits;
/* Bits for the golden frame or ARF */
int gf_bits;
int alt_extra_bits;
double est_max_qcorrection_factor;
} twopass;
@ -638,24 +654,26 @@ typedef struct VP8_COMP
#endif
int b_calculate_psnr;
// Per MB activity measurement
/* Per MB activity measurement */
unsigned int activity_avg;
unsigned int * mb_activity_map;
int * mb_norm_activity_map;
// Record of which MBs still refer to last golden frame either
// directly or through 0,0
/* Record of which MBs still refer to last golden frame either
* directly or through 0,0
*/
unsigned char *gf_active_flags;
int gf_active_count;
int output_partition;
//Store last frame's MV info for next frame MV prediction
/* Store last frame's MV info for next frame MV prediction */
int_mv *lfmv;
int *lf_ref_frame_sign_bias;
int *lf_ref_frame;
int force_next_frame_intra; /* force next frame to intra when kf_auto says so */
/* force next frame to intra when kf_auto says so */
int force_next_frame_intra;
int droppable;
@ -663,7 +681,7 @@ typedef struct VP8_COMP
VP8_DENOISER denoiser;
#endif
// Coding layer state variables
/* Coding layer state variables */
unsigned int current_layer;
LAYER_CONTEXT layer_context[VPX_TS_MAX_LAYERS];

View File

@ -143,7 +143,7 @@ static int pick_intra4x4block(
int dst_stride = x->e_mbd.dst.y_stride;
unsigned char *base_dst = x->e_mbd.dst.y_buffer;
B_PREDICTION_MODE mode;
int best_rd = INT_MAX; // 1<<30
int best_rd = INT_MAX;
int rate;
int distortion;
@ -214,8 +214,9 @@ static int pick_intra4x4mby_modes
distortion += d;
mic->bmi[i].as_mode = best_mode;
// Break out case where we have already exceeded best so far value
// that was passed in
/* Break out case where we have already exceeded best so far value
* that was passed in
*/
if (distortion > *best_dist)
break;
}
@ -408,7 +409,6 @@ void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd, int *dissim,
LOWER_RES_MB_INFO* store_mode_info
= ((LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info)->mb_info;
unsigned int parent_mb_index;
//unsigned int parent_mb_index = map_640x480_to_320x240[mb_row][mb_col];
/* Consider different down_sampling_factor. */
{
@ -440,7 +440,6 @@ void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd, int *dissim,
/* Consider different down_sampling_factor.
* The result can be rounded to be more precise, but it takes more time.
*/
//int round = cpi->oxcf.mr_down_sampling_factor.den/2;
(*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row
*cpi->oxcf.mr_down_sampling_factor.num
/cpi->oxcf.mr_down_sampling_factor.den;
@ -457,7 +456,7 @@ static void check_for_encode_breakout(unsigned int sse, MACROBLOCK* x)
{
if (sse < x->encode_breakout)
{
// Check u and v to make sure skip is ok
/* Check u and v to make sure skip is ok */
unsigned int sse2 = 0;
sse2 = VP8_UVSSE(x);
@ -513,7 +512,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
MB_PREDICTION_MODE this_mode;
int num00;
int mdcounts[4];
int best_rd = INT_MAX; // 1 << 30;
int best_rd = INT_MAX;
int best_intra_rd = INT_MAX;
int mode_index;
int rate;
@ -530,7 +529,8 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
int saddone=0;
int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
/* search range got from mv_pred(). It uses step_param levels. (0-7) */
int sr=0;
unsigned char *plane[4][3];
int ref_frame_map[4];
@ -574,15 +574,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
cpi->mbs_tested_so_far++; // Count of the number of MBs tested so far this frame
/* Count of the number of MBs tested so far this frame */
cpi->mbs_tested_so_far++;
*returnintra = INT_MAX;
x->skip = 0;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
// if we encode a new mv this is important
// find the best new motion vector
/* if we encode a new mv this is important
* find the best new motion vector
*/
for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
{
int frame_cost;
@ -613,7 +615,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
#endif
// everything but intra
/* everything but intra */
if (x->e_mbd.mode_info_context->mbmi.ref_frame)
{
x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
@ -638,7 +640,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
continue;
if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV
&& best_ref_mv.as_int==0) //&& dissim==0
&& best_ref_mv.as_int==0)
continue;
else if(vp8_mode_order[mode_index] == NEWMV && dissim==0
&& best_ref_mv.as_int==parent_ref_mv.as_int)
@ -728,7 +730,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
case SPLITMV:
// Split MV modes currently not supported when RD is nopt enabled.
/* Split MV modes currently not supported when RD is not enabled. */
break;
case DC_PRED:
@ -777,13 +779,15 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8)? 3 : 2) : 1;
// Further step/diamond searches as necessary
/* Further step/diamond searches as necessary */
step_param = cpi->sf.first_step + speed_adjust;
#if CONFIG_MULTI_RES_ENCODING
if (cpi->oxcf.mr_encoder_id)
{
// Use parent MV as predictor. Adjust search range accordingly.
/* Use parent MV as predictor. Adjust search range
* accordingly.
*/
mvp.as_int = parent_ref_mv.as_int;
mvp_full.as_mv.col = parent_ref_mv.as_mv.col>>3;
mvp_full.as_mv.row = parent_ref_mv.as_mv.row>>3;
@ -808,7 +812,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
&near_sadidx[0]);
sr += speed_adjust;
//adjust search range according to sr from mv prediction
/* adjust search range according to sr from mv prediction */
if(sr > step_param)
step_param = sr;
@ -877,10 +881,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->mvcost, &best_ref_mv);
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Further step/diamond searches as necessary
n = 0;
//further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
/* Further step/diamond searches as necessary */
n = num00;
num00 = 0;
@ -927,7 +928,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// mv cost;
/* mv cost; */
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
cpi->mb.mvcost, 128);
}
@ -965,7 +966,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (cpi->oxcf.noise_sensitivity)
{
// Store for later use by denoiser.
/* Store for later use by denoiser. */
if (this_mode == ZEROMV && sse < zero_mv_sse )
{
zero_mv_sse = sse;
@ -973,7 +974,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->e_mbd.mode_info_context->mbmi.ref_frame;
}
// Store the best NEWMV in x for later use in the denoiser.
/* Store the best NEWMV in x for later use in the denoiser. */
if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV &&
sse < best_sse)
{
@ -990,7 +991,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (this_rd < best_rd || x->skip)
{
// Note index of best mode
/* Note index of best mode */
best_mode_index = mode_index;
*returnrate = rate2;
@ -1030,7 +1031,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
break;
}
// Reduce the activation RD thresholds for the best choice mode
/* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);
@ -1062,7 +1063,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
if (x->best_sse_inter_mode == DC_PRED)
{
// No best MV found.
/* No best MV found. */
x->best_sse_inter_mode = best_mbmode.mode;
x->best_sse_mv = best_mbmode.mv;
x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
@ -1073,7 +1074,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
recon_yoffset, recon_uvoffset);
// Reevaluate ZEROMV after denoising.
/* Reevaluate ZEROMV after denoising. */
if (best_mbmode.ref_frame == INTRA_FRAME &&
x->best_zeromv_reference_frame != INTRA_FRAME)
{
@ -1083,7 +1084,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
vp8_cost_mv_ref(ZEROMV, mdcounts);
distortion2 = 0;
// set up the proper prediction buffers for the frame
/* set up the proper prediction buffers for the frame */
x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];

View File

@ -74,7 +74,9 @@ static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
src += srcoffset;
dst += dstoffset;
// Loop through the Y plane raw and reconstruction data summing (square differences)
/* Loop through the Y plane raw and reconstruction data summing
* (square differences)
*/
for (i = 0; i < linestocopy; i += 16)
{
for (j = 0; j < source->y_width; j += 16)
@ -92,7 +94,7 @@ static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
return Total;
}
// Enforce a minimum filter level based upon baseline Q
/* Enforce a minimum filter level based upon baseline Q */
static int get_min_filter_level(VP8_COMP *cpi, int base_qindex)
{
int min_filter_level;
@ -113,14 +115,15 @@ static int get_min_filter_level(VP8_COMP *cpi, int base_qindex)
return min_filter_level;
}
// Enforce a maximum filter level based upon baseline Q
/* Enforce a maximum filter level based upon baseline Q */
static int get_max_filter_level(VP8_COMP *cpi, int base_qindex)
{
// PGW August 2006: Highest filter values almost always a bad idea
/* PGW August 2006: Highest filter values almost always a bad idea */
// jbb chg: 20100118 - not so any more with this overquant stuff allow high values
// with lots of intra coming in.
int max_filter_level = MAX_LOOP_FILTER ;//* 3 / 4;
/* jbb chg: 20100118 - not so any more with this overquant stuff allow
* high values with lots of intra coming in.
*/
int max_filter_level = MAX_LOOP_FILTER;
(void)base_qindex;
if (cpi->twopass.section_intra_rating > 8)
@ -155,7 +158,9 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
cm->last_sharpness_level = cm->sharpness_level;
}
// Start the search at the previous frame filter level unless it is now out of range.
/* Start the search at the previous frame filter level unless it is
* now out of range.
*/
if (cm->filter_level < min_filter_level)
cm->filter_level = min_filter_level;
else if (cm->filter_level > max_filter_level)
@ -164,7 +169,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
filt_val = cm->filter_level;
best_filt_val = filt_val;
// Get the err using the previous frame's filter value.
/* Get the err using the previous frame's filter value. */
/* Copy the unfiltered / processed recon buffer to the new buffer */
vp8_yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
@ -174,17 +179,17 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
filt_val -= 1 + (filt_val > 10);
// Search lower filter levels
/* Search lower filter levels */
while (filt_val >= min_filter_level)
{
// Apply the loop filter
/* Apply the loop filter */
vp8_yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
// Get the err for filtered frame
/* Get the err for filtered frame */
filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
// Update the best case record or exit loop.
/* Update the best case record or exit loop. */
if (filt_err < best_err)
{
best_err = filt_err;
@ -193,32 +198,34 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
break;
// Adjust filter level
/* Adjust filter level */
filt_val -= 1 + (filt_val > 10);
}
// Search up (note that we have already done filt_val = cm->filter_level)
/* Search up (note that we have already done filt_val = cm->filter_level) */
filt_val = cm->filter_level + 1 + (filt_val > 10);
if (best_filt_val == cm->filter_level)
{
// Resist raising filter level for very small gains
/* Resist raising filter level for very small gains */
best_err -= (best_err >> 10);
while (filt_val < max_filter_level)
{
// Apply the loop filter
/* Apply the loop filter */
vp8_yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
// Get the err for filtered frame
/* Get the err for filtered frame */
filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
// Update the best case record or exit loop.
/* Update the best case record or exit loop. */
if (filt_err < best_err)
{
// Do not raise filter level if improvement is < 1 part in 4096
/* Do not raise filter level if improvement is < 1 part
* in 4096
*/
best_err = filt_err - (filt_err >> 10);
best_filt_val = filt_val;
@ -226,7 +233,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
break;
// Adjust filter level
/* Adjust filter level */
filt_val += 1 + (filt_val > 10);
}
}
@ -243,7 +250,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
cm->frame_to_show = saved_frame;
}
// Stub function for now Alt LF not used
/* Stub function for now Alt LF not used */
void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val)
{
MACROBLOCKD *mbd = &cpi->mb.e_mbd;
@ -266,12 +273,14 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
int filter_step;
int filt_high = 0;
int filt_mid = cm->filter_level; // Start search at previous frame filter level
/* Start search at previous frame filter level */
int filt_mid = cm->filter_level;
int filt_low = 0;
int filt_best;
int filt_direction = 0;
int Bias = 0; // Bias against raising loop filter and in favor of lowering it
/* Bias against raising loop filter and in favor of lowering it */
int Bias = 0;
int ss_err[MAX_LOOP_FILTER + 1];
@ -287,7 +296,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
cm->sharpness_level = cpi->oxcf.Sharpness;
// Start the search at the previous frame filter level unless it is now out of range.
/* Start the search at the previous frame filter level unless it is
* now out of range.
*/
filt_mid = cm->filter_level;
if (filt_mid < min_filter_level)
@ -295,10 +306,10 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else if (filt_mid > max_filter_level)
filt_mid = max_filter_level;
// Define the initial step size
/* Define the initial step size */
filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
// Get baseline error score
/* Get baseline error score */
/* Copy the unfiltered / processed recon buffer to the new buffer */
vp8_yv12_copy_y(saved_frame, cm->frame_to_show);
@ -314,9 +325,8 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
while (filter_step > 0)
{
Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images
Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
// jbb chg: 20100118 - in sections with lots of new material coming in don't bias as much to a low filter value
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
@ -327,7 +337,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
if(ss_err[filt_low] == 0)
{
// Get Low filter error score
/* Get Low filter error score */
vp8_yv12_copy_y(saved_frame, cm->frame_to_show);
vp8cx_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
@ -338,10 +348,12 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
filt_err = ss_err[filt_low];
// If value is close to the best so far then bias towards a lower loop filter value.
/* If value is close to the best so far then bias towards a
* lower loop filter value.
*/
if ((filt_err - Bias) < best_err)
{
// Was it actually better than the previous best?
/* Was it actually better than the previous best? */
if (filt_err < best_err)
best_err = filt_err;
@ -349,7 +361,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
}
}
// Now look at filt_high
/* Now look at filt_high */
if ((filt_direction >= 0) && (filt_high != filt_mid))
{
if(ss_err[filt_high] == 0)
@ -364,7 +376,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
else
filt_err = ss_err[filt_high];
// Was it better than the previous best?
/* Was it better than the previous best? */
if (filt_err < (best_err - Bias))
{
best_err = filt_err;
@ -372,7 +384,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
}
}
// Half the step distance if the best filter value was the same as last time
/* Half the step distance if the best filter value was the same
* as last time
*/
if (filt_best == filt_mid)
{
filter_step = filter_step / 2;

View File

@ -22,7 +22,7 @@ double vp8_mse2psnr(double Samples, double Peak, double Mse)
if ((double)Mse > 0.0)
psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
else
psnr = MAX_PSNR; // Limit to prevent / 0
psnr = MAX_PSNR; /* Limit to prevent / 0 */
if (psnr > MAX_PSNR)
psnr = MAX_PSNR;

View File

@ -44,21 +44,21 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
z = coeff_ptr[rc];
zbin = zbin_ptr[rc] ;
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
sz = (z >> 31); /* sign of z */
x = (z ^ sz) - sz; /* x = abs(z) */
if (x >= zbin)
{
x += round_ptr[rc];
y = (((x * quant_ptr[rc]) >> 16) + x)
>> quant_shift_ptr[rc]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
>> quant_shift_ptr[rc]; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
eob = i; // last nonzero coeffs
eob = i; /* last nonzero coeffs */
}
}
}
@ -84,17 +84,17 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
sz = (z >> 31); /* sign of z */
x = (z ^ sz) - sz; /* x = abs(z) */
y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
eob = i; // last nonzero coeffs
eob = i; /* last nonzero coeffs */
}
}
*d->eob = (char)(eob + 1);
@ -132,22 +132,22 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
zbin_boost_ptr ++;
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
sz = (z >> 31); /* sign of z */
x = (z ^ sz) - sz; /* x = abs(z) */
if (x >= zbin)
{
x += round_ptr[rc];
y = (((x * quant_ptr[rc]) >> 16) + x)
>> quant_shift_ptr[rc]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
>> quant_shift_ptr[rc]; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
eob = i; // last nonzero coeffs
zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
eob = i; /* last nonzero coeffs */
zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */
}
}
}
@ -240,26 +240,23 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
//if ( i == 0 )
// zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value/2;
//else
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
zbin_boost_ptr ++;
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
sz = (z >> 31); /* sign of z */
x = (z ^ sz) - sz; /* x = abs(z) */
if (x >= zbin)
{
y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
if (y)
{
eob = i; // last nonzero coeffs
zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength
eob = i; /* last nonzero coeffs */
zbin_boost_ptr = &b->zrun_zbin_boost[0]; /* reset zrl */
}
}
}
@ -441,7 +438,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
for (Q = 0; Q < QINDEX_RANGE; Q++)
{
// dc values
/* dc values */
quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
@ -469,7 +466,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
// all the ac values = ;
/* all the ac values = ; */
quant_val = vp8_ac_yquant(Q);
cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
@ -536,7 +533,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
for (Q = 0; Q < QINDEX_RANGE; Q++)
{
// dc values
/* dc values */
quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
@ -558,7 +555,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
// all the ac values = ;
/* all the ac values = ; */
for (i = 1; i < 16; i++)
{
int rc = vp8_default_zig_zag1d[i];
@ -613,18 +610,18 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
MACROBLOCKD *xd = &x->e_mbd;
int zbin_extra;
// Select the baseline MB Q index.
/* Select the baseline MB Q index. */
if (xd->segmentation_enabled)
{
// Abs Value
/* Abs Value */
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
// Delta Value
/* Delta Value */
else
{
QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
/* Clamp to valid range */
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
}
}
else
@ -657,13 +654,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
* This will also require modifications to the x86 and neon assembly.
* */
for (i = 0; i < 16; i++)
x->e_mbd.block[i].dequant = xd->dequant_y1; //cpi->common.Y1dequant[QIndex];
x->e_mbd.block[i].dequant = xd->dequant_y1;
for (i = 16; i < 24; i++)
x->e_mbd.block[i].dequant = xd->dequant_uv; //cpi->common.UVdequant[QIndex];
x->e_mbd.block[24].dequant = xd->dequant_y2; //cpi->common.Y2dequant[QIndex];
x->e_mbd.block[i].dequant = xd->dequant_uv;
x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif
// Y
/* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++)
@ -677,7 +674,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
x->block[i].zbin_extra = (short)zbin_extra;
}
// UV
/* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++)
@ -691,7 +688,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
x->block[i].zbin_extra = (short)zbin_extra;
}
// Y2
/* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
@ -716,19 +713,19 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
|| cpi->last_zbin_mode_boost != cpi->zbin_mode_boost
|| x->last_act_zbin_adj != x->act_zbin_adj)
{
// Y
/* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++)
x->block[i].zbin_extra = (short)zbin_extra;
// UV
/* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++)
x->block[i].zbin_extra = (short)zbin_extra;
// Y2
/* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].zbin_extra = (short)zbin_extra;
@ -744,19 +741,19 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
int QIndex = x->q_index;
int zbin_extra;
// Y
/* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++)
x->block[i].zbin_extra = (short)zbin_extra;
// UV
/* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++)
x->block[i].zbin_extra = (short)zbin_extra;
// Y2
/* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].zbin_extra = (short)zbin_extra;
}
@ -766,10 +763,10 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
// Clear Zbin mode boost for default case
/* Clear Zbin mode boost for default case */
cpi->zbin_mode_boost = 0;
// MB level quantizer setup
/* MB level quantizer setup */
vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
}
@ -801,7 +798,7 @@ void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
cm->y2dc_delta_q = new_delta_q;
// Set Segment specific quatizers
/* Set Segment specific quatizers */
mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];

File diff suppressed because it is too large Load Diff

View File

@ -22,7 +22,7 @@ extern int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame);
extern void vp8_adjust_key_frame_context(VP8_COMP *cpi);
extern void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit);
// return of 0 means drop frame
/* return of 0 means drop frame */
extern int vp8_pick_frame_size(VP8_COMP *cpi);
#endif

View File

@ -160,7 +160,9 @@ static void fill_token_costs(
for (j = 0; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
// check for pt=0 and band > 1 if block type 0 and 0 if blocktype 1
/* check for pt=0 and band > 1 if block type 0
* and 0 if blocktype 1
*/
if (k == 0 && j > (i == 0))
vp8_cost_tokens2(c[i][j][k], p [i][j][k], vp8_coef_tree, 2);
else
@ -228,22 +230,22 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
double capped_q = (Qvalue < 160) ? (double)Qvalue : 160.0;
double rdconst = 2.80;
vp8_clear_system_state(); //__asm emms;
vp8_clear_system_state();
// Further tests required to see if optimum is different
// for key frames, golden frames and arf frames.
// if (cpi->common.refresh_golden_frame ||
// cpi->common.refresh_alt_ref_frame)
/* Further tests required to see if optimum is different
* for key frames, golden frames and arf frames.
*/
cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));
// Extend rate multiplier along side quantizer zbin increases
/* Extend rate multiplier along side quantizer zbin increases */
if (cpi->zbin_over_quant > 0)
{
double oq_factor;
double modq;
// Experimental code using the same basic equation as used for Q above
// The units of cpi->zbin_over_quant are 1/128 of Q bin size
/* Experimental code using the same basic equation as used for Q above
* The units of cpi->zbin_over_quant are 1/128 of Q bin size
*/
oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
modq = (int)((double)capped_q * oq_factor);
cpi->RDMULT = (int)(rdconst * (modq * modq));
@ -307,7 +309,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
}
{
// build token cost array for the type of frame we have now
/* build token cost array for the type of frame we have now */
FRAME_CONTEXT *l = &cpi->lfc_n;
if(cpi->common.refresh_alt_ref_frame)
@ -326,12 +328,8 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
*/
// TODO make these mode costs depend on last,alt or gold too. (jbb)
/* TODO make these mode costs depend on last,alt or gold too. (jbb) */
vp8_init_mode_costs(cpi);
// TODO figure onnnnuut why making mv cost frame type dependent didn't help (jbb)
//vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) l->mvc, flags);
}
}
@ -356,14 +354,6 @@ void vp8_auto_select_speed(VP8_COMP *cpi)
#endif
/*
// this is done during parameter valid check
if( cpi->oxcf.cpu_used > 16)
cpi->oxcf.cpu_used = 16;
if( cpi->oxcf.cpu_used < -16)
cpi->oxcf.cpu_used = -16;
*/
if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress)
{
if (cpi->avg_pick_mode_time == 0)
@ -390,10 +380,10 @@ void vp8_auto_select_speed(VP8_COMP *cpi)
cpi->avg_pick_mode_time = 0;
cpi->avg_encode_time = 0;
// In real-time mode, cpi->speed is in [4, 16].
if (cpi->Speed < 4) //if ( cpi->Speed < 0 )
/* In real-time mode, cpi->speed is in [4, 16]. */
if (cpi->Speed < 4)
{
cpi->Speed = 4; //cpi->Speed = 0;
cpi->Speed = 4;
}
}
}
@ -549,7 +539,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a,
if (c < 16)
cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [DCT_EOB_TOKEN];
pt = (c != !type); // is eob first coefficient;
pt = (c != !type); /* is eob first coefficient; */
*a = *l = pt;
return cost;
@ -595,7 +585,7 @@ static void macro_block_yrd( MACROBLOCK *mb,
vp8_subtract_mby( mb->src_diff, *(mb->block[0].base_src),
mb->block[0].src_stride, mb->e_mbd.predictor, 16);
// Fdct and building the 2nd order block
/* Fdct and building the 2nd order block */
for (beptr = mb->block; beptr < mb->block + 16; beptr += 2)
{
mb->short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
@ -603,25 +593,25 @@ static void macro_block_yrd( MACROBLOCK *mb,
*Y2DCPtr++ = beptr->coeff[16];
}
// 2nd order fdct
/* 2nd order fdct */
mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
// Quantization
/* Quantization */
for (b = 0; b < 16; b++)
{
mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
}
// DC predication and Quantization of 2nd Order block
/* DC predication and Quantization of 2nd Order block */
mb->quantize_b(mb_y2, x_y2);
// Distortion
/* Distortion */
d = vp8_mbblock_error(mb, 1) << 2;
d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff);
*Distortion = (d >> 4);
// rate
/* rate */
*Rate = vp8_rdcost_mby(mb);
}
@ -787,7 +777,7 @@ static int rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
int this_rd;
MACROBLOCKD *xd = &x->e_mbd;
//Y Search for 16x16 intra prediction mode
/* Y Search for 16x16 intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++)
{
xd->mode_info_context->mbmi.mode = mode;
@ -984,8 +974,9 @@ static int labels2mode(
m = ABOVE4X4;
else
{
// the only time we should do costing for new motion vector or mode
// is when we are on a new label (jbb May 08, 2007)
/* the only time we should do costing for new motion vector
* or mode is when we are on a new label (jbb May 08, 2007)
*/
switch (m = this_mode)
{
case NEW4X4 :
@ -1004,7 +995,7 @@ static int labels2mode(
break;
}
if (m == ABOVE4X4) // replace above with left if same
if (m == ABOVE4X4) /* replace above with left if same */
{
int_mv left_mv;
@ -1065,9 +1056,6 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels
vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride, x->e_mbd.subpixel_predict);
vp8_subtract_b(be, bd, 16);
x->short_fdct4x4(be->src_diff, be->coeff, 32);
// set to 0 no way to account for 2nd order DC so discount
//be->coeff[0] = 0;
x->quantize_b(be, bd);
distortion += vp8_block_error(be->coeff, bd->dqcoeff);
@ -1098,8 +1086,8 @@ typedef struct
int mvthresh;
int *mdcounts;
int_mv sv_mvp[4]; // save 4 mvp from 8x8
int sv_istep[2]; // save 2 initial step_param for 16x8/8x16
int_mv sv_mvp[4]; /* save 4 mvp from 8x8 */
int sv_istep[2]; /* save 2 initial step_param for 16x8/8x16 */
} BEST_SEG_INFO;
@ -1146,13 +1134,13 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
labels = vp8_mbsplits[segmentation];
label_count = vp8_mbsplit_count[segmentation];
// 64 makes this threshold really big effectively
// making it so that we very rarely check mvs on
// segments. setting this to 1 would make mv thresh
// roughly equal to what it is for macroblocks
/* 64 makes this threshold really big effectively making it so that we
* very rarely check mvs on segments. setting this to 1 would make mv
* thresh roughly equal to what it is for macroblocks
*/
label_mv_thresh = 1 * bsi->mvthresh / label_count ;
// Segmentation method overheads
/* Segmentation method overheads */
rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts);
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
@ -1165,7 +1153,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
B_PREDICTION_MODE mode_selected = ZERO4X4;
int bestlabelyrate = 0;
// search for the best motion vector on this segment
/* search for the best motion vector on this segment */
for (this_mode = LEFT4X4; this_mode <= NEW4X4 ; this_mode ++)
{
int this_rd;
@ -1194,7 +1182,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
BLOCK *c;
BLOCKD *e;
// Is the best so far sufficiently good that we cant justify doing and new motion search.
/* Is the best so far sufficiently good that we cant justify
* doing a new motion search.
*/
if (best_label_rd < label_mv_thresh)
break;
@ -1209,7 +1199,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
step_param = bsi->sv_istep[i];
}
// use previous block's result as next block's MV predictor.
/* use previous block's result as next block's MV
* predictor.
*/
if (segmentation == BLOCK_4X4 && i>0)
{
bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int;
@ -1228,7 +1220,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
mvp_full.as_mv.row = bsi->mvp.as_mv.row >>3;
mvp_full.as_mv.col = bsi->mvp.as_mv.col >>3;
// find first label
/* find first label */
n = vp8_mbsplit_offset[segmentation][i];
c = &x->block[n];
@ -1268,7 +1260,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
sseshift = segmentation_to_sseshift[segmentation];
// Should we do a full search (best quality only)
/* Should we do a full search (best quality only) */
if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
{
/* Check if mvp_full is within the range. */
@ -1285,7 +1277,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
}
else
{
// The full search result is actually worse so re-instate the previous best vector
/* The full search result is actually worse so
* re-instate the previous best vector
*/
e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
}
}
@ -1305,7 +1299,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
bsi->ref_mv, x->mvcost);
// Trap vectors that reach beyond the UMV borders
/* Trap vectors that reach beyond the UMV borders */
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
{
@ -1357,7 +1351,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
bsi->segment_rd = this_segment_rd;
bsi->segment_num = segmentation;
// store everything needed to come back to this!!
/* store everything needed to come back to this!! */
for (i = 0; i < 16; i++)
{
bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
@ -1519,7 +1513,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
return bsi.segment_rd;
}
//The improved MV prediction
/* The improved MV prediction */
void vp8_mv_pred
(
VP8_COMP *cpi,
@ -1553,7 +1547,9 @@ void vp8_mv_pred
near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = near_mvs[7].as_int = 0;
near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = near_ref[7] = 0;
// read in 3 nearby block's MVs from current frame as prediction candidates.
/* read in 3 nearby block's MVs from current frame as prediction
* candidates.
*/
if (above->mbmi.ref_frame != INTRA_FRAME)
{
near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
@ -1576,12 +1572,12 @@ void vp8_mv_pred
}
vcnt++;
// read in 5 nearby block's MVs from last frame.
/* read in 5 nearby block's MVs from last frame. */
if(cpi->common.last_frame_type != KEY_FRAME)
{
mb_offset = (-xd->mb_to_top_edge/128 + 1) * (xd->mode_info_stride +1) + (-xd->mb_to_left_edge/128 +1) ;
// current in last frame
/* current in last frame */
if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
@ -1590,7 +1586,7 @@ void vp8_mv_pred
}
vcnt++;
// above in last frame
/* above in last frame */
if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - xd->mode_info_stride-1].as_int;
@ -1599,7 +1595,7 @@ void vp8_mv_pred
}
vcnt++;
// left in last frame
/* left in last frame */
if (cpi->lf_ref_frame[mb_offset-1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset -1].as_int;
@ -1608,7 +1604,7 @@ void vp8_mv_pred
}
vcnt++;
// right in last frame
/* right in last frame */
if (cpi->lf_ref_frame[mb_offset +1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset +1].as_int;
@ -1617,7 +1613,7 @@ void vp8_mv_pred
}
vcnt++;
// below in last frame
/* below in last frame */
if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1] != INTRA_FRAME)
{
near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + xd->mode_info_stride +1].as_int;
@ -1658,7 +1654,9 @@ void vp8_mv_pred
mv.as_mv.col = mvy[vcnt/2];
find = 1;
//sr is set to 0 to allow calling function to decide the search range.
/* sr is set to 0 to allow calling function to decide the search
* range.
*/
*sr = 0;
}
}
@ -1670,21 +1668,24 @@ void vp8_mv_pred
void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[])
{
int near_sad[8] = {0}; // 0-cf above, 1-cf left, 2-cf aboveleft, 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
/* near_sad indexes:
* 0-cf above, 1-cf left, 2-cf aboveleft,
* 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
*/
int near_sad[8] = {0};
BLOCK *b = &x->block[0];
unsigned char *src_y_ptr = *(b->base_src);
//calculate sad for current frame 3 nearby MBs.
/* calculate sad for current frame 3 nearby MBs. */
if( xd->mb_to_top_edge==0 && xd->mb_to_left_edge ==0)
{
near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
}else if(xd->mb_to_top_edge==0)
{ //only has left MB for sad calculation.
{ /* only has left MB for sad calculation. */
near_sad[0] = near_sad[2] = INT_MAX;
near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
}else if(xd->mb_to_left_edge ==0)
{ //only has left MB for sad calculation.
{ /* only has left MB for sad calculation. */
near_sad[1] = near_sad[2] = INT_MAX;
near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
}else
@ -1696,7 +1697,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
if(cpi->common.last_frame_type != KEY_FRAME)
{
//calculate sad for last frame 5 nearby MBs.
/* calculate sad for last frame 5 nearby MBs. */
unsigned char *pre_y_buffer = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset;
int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride;
@ -1787,7 +1788,7 @@ static int evaluate_inter_mode_rd(int mdcounts[4],
if ((sse - var < q2dc * q2dc >>4) ||
(sse /2 > var && sse-var < 64))
{
// Check u and v to make sure skip is ok
/* Check u and v to make sure skip is ok */
unsigned int sse2 = VP8_UVSSE(x);
if (sse2 * 2 < threshold)
{
@ -1808,17 +1809,15 @@ static int evaluate_inter_mode_rd(int mdcounts[4],
}
//intermodecost[mode_index] = vp8_cost_mv_ref(this_mode, mdcounts); // Experimental debug code
// Add in the Mv/mode cost
/* Add in the Mv/mode cost */
rd->rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
// Y cost and distortion
/* Y cost and distortion */
macro_block_yrd(x, &rd->rate_y, &distortion);
rd->rate2 += rd->rate_y;
rd->distortion2 += distortion;
// UV cost and distortion
/* UV cost and distortion */
rd_inter16x16_uv(cpi, x, &rd->rate_uv, &rd->distortion_uv,
cpi->common.full_pixel);
rd->rate2 += rd->rate_uv;
@ -1835,9 +1834,11 @@ static int calculate_final_rd_costs(int this_rd,
VP8_COMP *cpi, MACROBLOCK *x)
{
MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
// Where skip is allowable add in the default per mb cost for the no skip case.
// where we then decide to skip we have to delete this and replace it with the
// cost of signallying a skip
/* Where skip is allowable add in the default per mb cost for the no
* skip case. where we then decide to skip we have to delete this and
* replace it with the cost of signalling a skip
*/
if (cpi->common.mb_no_coeff_skip)
{
*other_cost += vp8_cost_bit(cpi->prob_skip_false, 0);
@ -1852,7 +1853,10 @@ static int calculate_final_rd_costs(int this_rd,
if (!disable_skip)
{
// Test for the condition where skip block will be activated because there are no non zero coefficients and make any necessary adjustment for rate
/* Test for the condition where skip block will be activated
* because there are no non zero coefficients and make any
* necessary adjustment for rate
*/
if (cpi->common.mb_no_coeff_skip)
{
int i;
@ -1877,10 +1881,10 @@ static int calculate_final_rd_costs(int this_rd,
if (tteob == 0)
{
rd->rate2 -= (rd->rate_y + rd->rate_uv);
//for best_yrd calculation
/* for best_yrd calculation */
rd->rate_uv = 0;
// Back out no skip flag costing and add in skip flag costing
/* Back out no skip flag costing and add in skip flag costing */
if (cpi->prob_skip_false)
{
int prob_skip_cost;
@ -1892,7 +1896,7 @@ static int calculate_final_rd_costs(int this_rd,
}
}
}
// Calculate the final RD estimate for this mode
/* Calculate the final RD estimate for this mode */
this_rd = RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2);
if (this_rd < INT_MAX && x->e_mbd.mode_info_context->mbmi.ref_frame
== INTRA_FRAME)
@ -1956,7 +1960,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int_mv mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
int saddone=0;
int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
/* search range got from mv_pred(). It uses step_param levels. (0-7) */
int sr=0;
unsigned char *plane[4][3];
int ref_frame_map[4];
@ -2002,7 +2007,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
*returnintra = INT_MAX;
cpi->mbs_tested_so_far++; // Count of the number of MBs tested so far this frame
/* Count of the number of MBs tested so far this frame */
cpi->mbs_tested_so_far++;
x->skip = 0;
@ -2013,14 +2019,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int other_cost = 0;
int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
// Test best rd so far against threshold for trying this mode.
/* Test best rd so far against threshold for trying this mode. */
if (best_mode.rd <= cpi->rd_threshes[mode_index])
continue;
if (this_ref_frame < 0)
continue;
// These variables hold are rolling total cost and distortion for this mode
/* These variables hold are rolling total cost and distortion for
* this mode
*/
rd.rate2 = 0;
rd.distortion2 = 0;
@ -2029,9 +2037,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
// unless ARNR filtering is enabled in which case we want
// an unfiltered alternative
/* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
* unless ARNR filtering is enabled in which case we want
* an unfiltered alternative
*/
if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
{
if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
@ -2053,13 +2062,17 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
}
// Check to see if the testing frequency for this mode is at its max
// If so then prevent it from being tested and increase the threshold for its testing
/* Check to see if the testing frequency for this mode is at its
* max If so then prevent it from being tested and increase the
* threshold for its testing
*/
if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
{
if (cpi->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index])
{
// Increase the threshold for coding this mode to make it less likely to be chosen
/* Increase the threshold for coding this mode to make it
* less likely to be chosen
*/
cpi->rd_thresh_mult[mode_index] += 4;
if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
@ -2071,10 +2084,15 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
}
// We have now reached the point where we are going to test the current mode so increment the counter for the number of times it has been tested
/* We have now reached the point where we are going to test the
* current mode so increment the counter for the number of times
* it has been tested
*/
cpi->mode_test_hit_counts[mode_index] ++;
// Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to supress noise
/* Experimental code. Special case for gf and arf zeromv modes.
* Increase zbin size to supress noise
*/
if (cpi->zbin_mode_boost_enabled)
{
if ( this_ref_frame == INTRA_FRAME )
@ -2121,7 +2139,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
int tmp_rd;
// Note the rate value returned here includes the cost of coding the BPRED mode : x->mbmode_cost[x->e_mbd.frame_type][BPRED];
/* Note the rate value returned here includes the cost of
* coding the BPRED mode: x->mbmode_cost[x->e_mbd.frame_type][BPRED]
*/
int distortion;
tmp_rd = rd_pick_intra4x4mby_modes(cpi, x, &rate, &rd.rate_y, &distortion, best_mode.yrd);
rd.rate2 += rate;
@ -2158,10 +2178,12 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
rd.rate2 += rate;
rd.distortion2 += distortion;
// If even the 'Y' rd value of split is higher than best so far then dont bother looking at UV
/* If even the 'Y' rd value of split is higher than best so far
* then dont bother looking at UV
*/
if (tmp_rd < best_mode.yrd)
{
// Now work out UV cost and add it in
/* Now work out UV cost and add it in */
rd_inter4x4_uv(cpi, x, &rd.rate_uv, &rd.distortion_uv, cpi->common.full_pixel);
rd.rate2 += rd.rate_uv;
rd.distortion2 += rd.distortion_uv;
@ -2233,7 +2255,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mvp_full.as_mv.col = mvp.as_mv.col>>3;
mvp_full.as_mv.row = mvp.as_mv.row>>3;
// Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
/* Get intersection of UMV window and valid MV window to
* reduce # of checks in diamond search.
*/
if (x->mv_col_min < col_min )
x->mv_col_min = col_min;
if (x->mv_col_max > col_max )
@ -2243,11 +2267,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (x->mv_row_max > row_max )
x->mv_row_max = row_max;
//adjust search range according to sr from mv prediction
/* adjust search range according to sr from mv prediction */
if(sr > step_param)
step_param = sr;
// Initial step/diamond search
/* Initial step/diamond search */
{
bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
step_param, sadpb, &num00,
@ -2255,7 +2279,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->mvcost, &best_ref_mv);
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Further step/diamond searches as necessary
/* Further step/diamond searches as necessary */
n = 0;
further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
@ -2301,11 +2325,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
int search_range;
//It seems not a good way to set search_range. Need further investigation.
//search_range = MAXF(abs((mvp.row>>3) - d->bmi.mv.as_mv.row), abs((mvp.col>>3) - d->bmi.mv.as_mv.col));
search_range = 8;
//thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb,
search_range, &cpi->fn_ptr[BLOCK_16X16],
x->mvcost, &best_ref_mv);
@ -2338,24 +2359,31 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Add the new motion vector cost to our rolling cost variable
/* Add the new motion vector cost to our rolling cost variable */
rd.rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
}
case NEARESTMV:
case NEARMV:
// Clip "next_nearest" so that it does not extend to far out of image
/* Clip "next_nearest" so that it does not extend to far out
* of image
*/
vp8_clamp_mv2(&mode_mv[this_mode], xd);
// Do not bother proceeding if the vector (from newmv,nearest or near) is 0,0 as this should then be coded using the zeromv mode.
/* Do not bother proceeding if the vector (from newmv, nearest
* or near) is 0,0 as this should then be coded using the zeromv
* mode.
*/
if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0))
continue;
case ZEROMV:
// Trap vectors that reach beyond the UMV borders
// Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
// because of the lack of break statements in the previous two cases.
/* Trap vectors that reach beyond the UMV borders
* Note that ALL New MV, Nearest MV Near MV and Zero MV code
* drops through to this point because of the lack of break
* statements in the previous two cases.
*/
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
continue;
@ -2373,7 +2401,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
disable_skip, uv_intra_tteob,
intra_rd_penalty, cpi, x);
// Keep record of best intra distortion
/* Keep record of best intra distortion */
if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
(this_rd < best_mode.intra_rd) )
{
@ -2390,7 +2418,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (sse < best_rd_sse)
best_rd_sse = sse;
// Store for later use by denoiser.
/* Store for later use by denoiser. */
if (this_mode == ZEROMV && sse < zero_mv_sse )
{
zero_mv_sse = sse;
@ -2398,7 +2426,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->e_mbd.mode_info_context->mbmi.ref_frame;
}
// Store the best NEWMV in x for later use in the denoiser.
/* Store the best NEWMV in x for later use in the denoiser. */
if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV &&
sse < best_sse)
{
@ -2415,10 +2443,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
#endif
// Did this mode help.. i.i is it the new best mode
/* Did this mode help.. i.i is it the new best mode */
if (this_rd < best_mode.rd || x->skip)
{
// Note index of best mode so far
/* Note index of best mode so far */
best_mode_index = mode_index;
*returnrate = rd.rate2;
*returndistortion = rd.distortion2;
@ -2431,12 +2459,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
update_best_mode(&best_mode, this_rd, &rd, other_cost, x);
// Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
/* Testing this mode gave rise to an improvement in best error
* score. Lower threshold a bit for next time
*/
cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
}
// If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
/* If the mode did not help improve the best error case then raise
* the threshold for testing that mode next time around.
*/
else
{
cpi->rd_thresh_mult[mode_index] += 4;
@ -2452,33 +2484,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
// Reduce the activation RD thresholds for the best choice mode
/* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
// If we chose a split mode then reset the new MV thresholds as well
/*if ( vp8_mode_order[best_mode_index] == SPLITMV )
{
best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWMV] >> 4);
cpi->rd_thresh_mult[THR_NEWMV] = (cpi->rd_thresh_mult[THR_NEWMV] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWMV]-best_adjustment: MIN_THRESHMULT;
cpi->rd_threshes[THR_NEWMV] = (cpi->rd_baseline_thresh[THR_NEWMV] >> 7) * cpi->rd_thresh_mult[THR_NEWMV];
best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWG] >> 4);
cpi->rd_thresh_mult[THR_NEWG] = (cpi->rd_thresh_mult[THR_NEWG] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWG]-best_adjustment: MIN_THRESHMULT;
cpi->rd_threshes[THR_NEWG] = (cpi->rd_baseline_thresh[THR_NEWG] >> 7) * cpi->rd_thresh_mult[THR_NEWG];
best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWA] >> 4);
cpi->rd_thresh_mult[THR_NEWA] = (cpi->rd_thresh_mult[THR_NEWA] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWA]-best_adjustment: MIN_THRESHMULT;
cpi->rd_threshes[THR_NEWA] = (cpi->rd_baseline_thresh[THR_NEWA] >> 7) * cpi->rd_thresh_mult[THR_NEWA];
}*/
}
// Note how often each mode chosen as best
/* Note how often each mode chosen as best */
cpi->mode_chosen_counts[best_mode_index] ++;
#if CONFIG_TEMPORAL_DENOISING
@ -2486,7 +2501,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
{
if (x->best_sse_inter_mode == DC_PRED)
{
// No best MV found.
/* No best MV found. */
x->best_sse_inter_mode = best_mode.mbmode.mode;
x->best_sse_mv = best_mode.mbmode.mv;
x->need_to_clamp_best_mvs = best_mode.mbmode.need_to_clamp_mvs;
@ -2497,7 +2512,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
recon_yoffset, recon_uvoffset);
// Reevaluate ZEROMV after denoising.
/* Reevaluate ZEROMV after denoising. */
if (best_mode.mbmode.ref_frame == INTRA_FRAME &&
x->best_zeromv_reference_frame != INTRA_FRAME)
{
@ -2509,7 +2524,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
vp8_cost_mv_ref(ZEROMV, mdcounts);
rd.distortion2 = 0;
// set up the proper prediction buffers for the frame
/* set up the proper prediction buffers for the frame */
x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
@ -2525,7 +2540,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
intra_rd_penalty, cpi, x);
if (this_rd < best_mode.rd || x->skip)
{
// Note index of best mode so far
/* Note index of best mode so far */
best_mode_index = mode_index;
*returnrate = rd.rate2;
*returndistortion = rd.distortion2;
@ -2550,7 +2565,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
// macroblock modes
/* macroblock modes */
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, sizeof(MB_MODE_INFO));
if (best_mode.mbmode.mode == B_PRED)

View File

@ -22,22 +22,24 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame))
{
// Reset Gf useage monitors
/* Reset Gf useage monitors */
vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
}
else
{
// for each macroblock row in image
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
// for each macroblock col in image
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
// If using golden then set GF active flag if not already set.
// If using last frame 0,0 mode then leave flag as it is
// else if using non 0,0 motion or intra modes then clear flag if it is currently set
/* If using golden then set GF active flag if not already set.
* If using last frame 0,0 mode then leave flag as it is
* else if using non 0,0 motion or intra modes then clear
* flag if it is currently set
*/
if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) || (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME))
{
if (*(x->gf_active_ptr) == 0)
@ -52,12 +54,12 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
cpi->gf_active_count--;
}
x->gf_active_ptr++; // Step onto next entry
this_mb_mode_info++; // skip to next mb
x->gf_active_ptr++; /* Step onto next entry */
this_mb_mode_info++; /* skip to next mb */
}
// this is to account for the border
/* this is to account for the border */
this_mb_mode_info++;
}
}

View File

@ -30,8 +30,8 @@
#include <math.h>
#include <limits.h>
#define ALT_REF_MC_ENABLED 1 // dis/enable MC in AltRef filtering
#define ALT_REF_SUBPEL_ENABLED 1 // dis/enable subpel in MC AltRef filtering
#define ALT_REF_MC_ENABLED 1 /* dis/enable MC in AltRef filtering */
#define ALT_REF_SUBPEL_ENABLED 1 /* dis/enable subpel in MC AltRef filtering */
#if VP8_TEMPORAL_ALT_REF
@ -50,7 +50,7 @@ static void vp8_temporal_filter_predictors_mb_c
int offset;
unsigned char *yptr, *uptr, *vptr;
// Y
/* Y */
yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
if ((mv_row | mv_col) & 7)
@ -63,7 +63,7 @@ static void vp8_temporal_filter_predictors_mb_c
vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
}
// U & V
/* U & V */
mv_row >>= 1;
mv_col >>= 1;
stride = (stride + 1) >> 1;
@ -109,9 +109,10 @@ void vp8_temporal_filter_apply_c
int pixel_value = *frame2++;
modifier = src_byte - pixel_value;
// This is an integer approximation of:
// float coeff = (3.0 * modifer * modifier) / pow(2, strength);
// modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
/* This is an integer approximation of:
* float coeff = (3.0 * modifer * modifier) / pow(2, strength);
* modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
*/
modifier *= modifier;
modifier *= 3;
modifier += 1 << (strength - 1);
@ -154,7 +155,7 @@ static int vp8_temporal_filter_find_matching_mb_c
int_mv best_ref_mv1;
int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
// Save input state
/* Save input state */
unsigned char **base_src = b->base_src;
int src = b->src;
int src_stride = b->src_stride;
@ -166,7 +167,7 @@ static int vp8_temporal_filter_find_matching_mb_c
best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >>3;
best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >>3;
// Setup frame pointers
/* Setup frame pointers */
b->base_src = &arf_frame->y_buffer;
b->src_stride = arf_frame->y_stride;
b->src = mb_offset;
@ -175,7 +176,7 @@ static int vp8_temporal_filter_find_matching_mb_c
x->e_mbd.pre.y_stride = frame_ptr->y_stride;
d->offset = mb_offset;
// Further step/diamond searches as necessary
/* Further step/diamond searches as necessary */
if (cpi->Speed < 8)
{
step_param = cpi->sf.first_step + (cpi->Speed > 5);
@ -185,21 +186,19 @@ static int vp8_temporal_filter_find_matching_mb_c
step_param = cpi->sf.first_step + 2;
}
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
// Ignore mv costing by sending NULL cost arrays
/* TODO Check that the 16x16 vf & sdf are selected here */
/* Ignore mv costing by sending NULL cost arrays */
bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv,
step_param, sadpb,
&cpi->fn_ptr[BLOCK_16X16],
NULL, NULL, &best_ref_mv1);
#if ALT_REF_SUBPEL_ENABLED
// Try sub-pixel MC?
//if (bestsme > error_thresh && bestsme < INT_MAX)
/* Try sub-pixel MC? */
{
int distortion;
unsigned int sse;
// Ignore mv costing by sending NULL cost array
/* Ignore mv costing by sending NULL cost array */
bestsme = cpi->find_fractional_mv_step(x, b, d,
&d->bmi.mv,
&best_ref_mv1,
@ -209,7 +208,7 @@ static int vp8_temporal_filter_find_matching_mb_c
}
#endif
// Save input state
/* Save input state */
b->base_src = base_src;
b->src = src;
b->src_stride = src_stride;
@ -244,7 +243,7 @@ static void vp8_temporal_filter_iterate_c
unsigned char *dst1, *dst2;
DECLARE_ALIGNED_ARRAY(16, unsigned char, predictor, 16*16 + 8*8 + 8*8);
// Save input state
/* Save input state */
unsigned char *y_buffer = mbd->pre.y_buffer;
unsigned char *u_buffer = mbd->pre.u_buffer;
unsigned char *v_buffer = mbd->pre.v_buffer;
@ -252,16 +251,17 @@ static void vp8_temporal_filter_iterate_c
for (mb_row = 0; mb_row < mb_rows; mb_row++)
{
#if ALT_REF_MC_ENABLED
// Source frames are extended to 16 pixels. This is different than
// L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
// A 6 tap filter is used for motion search. This requires 2 pixels
// before and 3 pixels after. So the largest Y mv on a border would
// then be 16 - 3. The UV blocks are half the size of the Y and
// therefore only extended by 8. The largest mv that a UV block
// can support is 8 - 3. A UV mv is half of a Y mv.
// (16 - 3) >> 1 == 6 which is greater than 8 - 3.
// To keep the mv in play for both Y and UV planes the max that it
// can be on a border is therefore 16 - 5.
/* Source frames are extended to 16 pixels. This is different than
* L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
* A 6 tap filter is used for motion search. This requires 2 pixels
* before and 3 pixels after. So the largest Y mv on a border would
* then be 16 - 3. The UV blocks are half the size of the Y and
* therefore only extended by 8. The largest mv that a UV block
* can support is 8 - 3. A UV mv is half of a Y mv.
* (16 - 3) >> 1 == 6 which is greater than 8 - 3.
* To keep the mv in play for both Y and UV planes the max that it
* can be on a border is therefore 16 - 5.
*/
cpi->mb.mv_row_min = -((mb_row * 16) + (16 - 5));
cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
+ (16 - 5);
@ -299,7 +299,7 @@ static void vp8_temporal_filter_iterate_c
#if ALT_REF_MC_ENABLED
#define THRESH_LOW 10000
#define THRESH_HIGH 20000
// Find best match in this frame by MC
/* Find best match in this frame by MC */
err = vp8_temporal_filter_find_matching_mb_c
(cpi,
cpi->frames[alt_ref_index],
@ -307,16 +307,17 @@ static void vp8_temporal_filter_iterate_c
mb_y_offset,
THRESH_LOW);
#endif
// Assign higher weight to matching MB if it's error
// score is lower. If not applying MC default behavior
// is to weight all MBs equal.
/* Assign higher weight to matching MB if it's error
* score is lower. If not applying MC default behavior
* is to weight all MBs equal.
*/
filter_weight = err<THRESH_LOW
? 2 : err<THRESH_HIGH ? 1 : 0;
}
if (filter_weight != 0)
{
// Construct the predictors
/* Construct the predictors */
vp8_temporal_filter_predictors_mb_c
(mbd,
cpi->frames[frame]->y_buffer + mb_y_offset,
@ -327,7 +328,7 @@ static void vp8_temporal_filter_iterate_c
mbd->block[0].bmi.mv.as_mv.col,
predictor);
// Apply the filter (YUV)
/* Apply the filter (YUV) */
vp8_temporal_filter_apply
(f->y_buffer + mb_y_offset,
f->y_stride,
@ -360,7 +361,7 @@ static void vp8_temporal_filter_iterate_c
}
}
// Normalize filter output to produce AltRef frame
/* Normalize filter output to produce AltRef frame */
dst1 = cpi->alt_ref_buffer.y_buffer;
stride = cpi->alt_ref_buffer.y_stride;
byte = mb_y_offset;
@ -374,7 +375,7 @@ static void vp8_temporal_filter_iterate_c
dst1[byte] = (unsigned char)pval;
// move to next pixel
/* move to next pixel */
byte++;
}
@ -391,19 +392,19 @@ static void vp8_temporal_filter_iterate_c
{
int m=k+64;
// U
/* U */
unsigned int pval = accumulator[k] + (count[k] >> 1);
pval *= cpi->fixed_divide[count[k]];
pval >>= 19;
dst1[byte] = (unsigned char)pval;
// V
/* V */
pval = accumulator[m] + (count[m] >> 1);
pval *= cpi->fixed_divide[count[m]];
pval >>= 19;
dst2[byte] = (unsigned char)pval;
// move to next pixel
/* move to next pixel */
byte++;
}
@ -418,7 +419,7 @@ static void vp8_temporal_filter_iterate_c
mb_uv_offset += 8*(f->uv_stride-mb_cols);
}
// Restore input state
/* Restore input state */
mbd->pre.y_buffer = y_buffer;
mbd->pre.u_buffer = u_buffer;
mbd->pre.v_buffer = v_buffer;
@ -452,8 +453,7 @@ void vp8_temporal_filter_prepare_c
switch (blur_type)
{
case 1:
/////////////////////////////////////////
// Backward Blur
/* Backward Blur */
frames_to_blur_backward = num_frames_backward;
@ -464,8 +464,7 @@ void vp8_temporal_filter_prepare_c
break;
case 2:
/////////////////////////////////////////
// Forward Blur
/* Forward Blur */
frames_to_blur_forward = num_frames_forward;
@ -477,8 +476,7 @@ void vp8_temporal_filter_prepare_c
case 3:
default:
/////////////////////////////////////////
// Center Blur
/* Center Blur */
frames_to_blur_forward = num_frames_forward;
frames_to_blur_backward = num_frames_backward;
@ -488,7 +486,7 @@ void vp8_temporal_filter_prepare_c
if (frames_to_blur_backward > frames_to_blur_forward)
frames_to_blur_backward = frames_to_blur_forward;
// When max_frames is even we have 1 more frame backward than forward
/* When max_frames is even we have 1 more frame backward than forward */
if (frames_to_blur_forward > (max_frames - 1) / 2)
frames_to_blur_forward = ((max_frames - 1) / 2);
@ -501,21 +499,7 @@ void vp8_temporal_filter_prepare_c
start_frame = distance + frames_to_blur_forward;
#ifdef DEBUGFWG
// DEBUG FWG
printf("max:%d FBCK:%d FFWD:%d ftb:%d ftbbck:%d ftbfwd:%d sei:%d lasei:%d start:%d"
, max_frames
, num_frames_backward
, num_frames_forward
, frames_to_blur
, frames_to_blur_backward
, frames_to_blur_forward
, cpi->source_encode_index
, cpi->last_alt_ref_sei
, start_frame);
#endif
// Setup frame pointers, NULL indicates frame not included in filter
/* Setup frame pointers, NULL indicates frame not included in filter */
vpx_memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *));
for (frame = 0; frame < frames_to_blur; frame++)
{

View File

@ -55,7 +55,7 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
const __m128i k_zero = _mm_set1_epi16(0);
const __m128i k_128 = _mm_set1_epi32(128);
// Calculate absolute differences
/* Calculate absolute differences */
DECLARE_ALIGNED_ARRAY(16,unsigned char,abs_diff,16);
DECLARE_ALIGNED_ARRAY(16,uint32_t,filter_coefficient,16);
__m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0]));
@ -66,14 +66,14 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
__m128i v_abs_diff = _mm_adds_epu8(a_minus_b, b_minus_a);
_mm_store_si128((__m128i *)(&abs_diff[0]), v_abs_diff);
// Use LUT to get filter coefficients (two 16b value; f and 256-f)
/* Use LUT to get filter coefficients (two 16b value; f and 256-f) */
for (c = 0; c < 16; ++c)
{
filter_coefficient[c] = LUT[abs_diff[c]].as_int;
}
// Filtering...
// load filter coefficients (two 16b value; f and 256-f)
/* Filtering... */
/* load filter coefficients (two 16b value; f and 256-f) */
filter_coefficient_00 = _mm_load_si128(
(__m128i *)(&filter_coefficient[ 0]));
filter_coefficient_04 = _mm_load_si128(
@ -83,18 +83,18 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
filter_coefficient_12 = _mm_load_si128(
(__m128i *)(&filter_coefficient[12]));
// expand sig from 8b to 16b
/* expand sig from 8b to 16b */
v_sig0 = _mm_unpacklo_epi8(v_sig, k_zero);
v_sig1 = _mm_unpackhi_epi8(v_sig, k_zero);
// expand mc_running_avg_y from 8b to 16b
/* expand mc_running_avg_y from 8b to 16b */
v_mc_running_avg_y0 = _mm_unpacklo_epi8(v_mc_running_avg_y, k_zero);
v_mc_running_avg_y1 = _mm_unpackhi_epi8(v_mc_running_avg_y, k_zero);
// interleave sig and mc_running_avg_y for upcoming multiply-add
/* interleave sig and mc_running_avg_y for upcoming multiply-add */
state0 = _mm_unpacklo_epi16(v_mc_running_avg_y0, v_sig0);
state1 = _mm_unpackhi_epi16(v_mc_running_avg_y0, v_sig0);
state2 = _mm_unpacklo_epi16(v_mc_running_avg_y1, v_sig1);
state3 = _mm_unpackhi_epi16(v_mc_running_avg_y1, v_sig1);
// blend values
/* blend values */
res0 = _mm_madd_epi16(filter_coefficient_00, state0);
res1 = _mm_madd_epi16(filter_coefficient_04, state1);
res2 = _mm_madd_epi16(filter_coefficient_08, state2);
@ -107,15 +107,16 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
res1 = _mm_srai_epi32(res1, 8);
res2 = _mm_srai_epi32(res2, 8);
res3 = _mm_srai_epi32(res3, 8);
// combine the 32b results into a single 8b vector
/* combine the 32b results into a single 8b vector */
res0 = _mm_packs_epi32(res0, res1);
res2 = _mm_packs_epi32(res2, res3);
v_running_avg_y = _mm_packus_epi16(res0, res2);
// Depending on the magnitude of the difference between the signal and
// filtered version, either replace the signal by the filtered one or
// update the filter state with the signal when the change in a pixel
// isn't classified as noise.
/* Depending on the magnitude of the difference between the signal and
* filtered version, either replace the signal by the filtered one or
* update the filter state with the signal when the change in a pixel
* isn't classified as noise.
*/
diff0 = _mm_sub_epi16(v_sig0, res0);
diff1 = _mm_sub_epi16(v_sig1, res2);
acc_diff = _mm_add_epi16(acc_diff, _mm_add_epi16(diff0, diff1));
@ -130,14 +131,14 @@ int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
_mm_storeu_si128((__m128i *)(&running_avg_y[0]), p2);
_mm_storeu_si128((__m128i *)(&filtered[0]), p2);
// Update pointers for next iteration.
/* Update pointers for next iteration. */
sig += sig_stride;
filtered += 16;
mc_running_avg_y += mc_avg_y_stride;
running_avg_y += avg_y_stride;
}
{
// Compute the sum of all pixel differences of this MB.
/* Compute the sum of all pixel differences of this MB. */
union sum_union s;
int sum_diff;
s.v = acc_diff;

View File

@ -88,7 +88,8 @@ struct vpx_codec_alg_priv
vpx_image_t preview_img;
unsigned int next_frame_flag;
vp8_postproc_cfg_t preview_ppcfg;
vpx_codec_pkt_list_decl(64) pkt_list; // changed to accomendate the maximum number of lagged frames allowed
/* pkt_list size depends on the maximum number of lagged frames allowed. */
vpx_codec_pkt_list_decl(64) pkt_list;
unsigned int fixed_kf_cntr;
};
@ -156,7 +157,6 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
RANGE_CHECK_HI(cfg, rc_overshoot_pct, 1000);
RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
//RANGE_CHECK_BOOL(cfg, g_delete_firstpassfile);
RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
@ -355,7 +355,6 @@ static vpx_codec_err_t set_vp8e_config(VP8_CONFIG *oxcf,
oxcf->auto_key = cfg.kf_mode == VPX_KF_AUTO
&& cfg.kf_min_dist != cfg.kf_max_dist;
//oxcf->kf_min_dist = cfg.kf_min_dis;
oxcf->key_freq = cfg.kf_max_dist;
oxcf->number_of_layers = cfg.ts_number_layers;
@ -385,9 +384,6 @@ static vpx_codec_err_t set_vp8e_config(VP8_CONFIG *oxcf,
}
#endif
//oxcf->delete_first_pass_file = cfg.g_delete_firstpassfile;
//strcpy(oxcf->first_pass_file, cfg.g_firstpass_file);
oxcf->cpu_used = vp8_cfg.cpu_used;
oxcf->encode_breakout = vp8_cfg.static_thresh;
oxcf->play_alternate = vp8_cfg.enable_auto_alt_ref;
@ -685,7 +681,7 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->uv_stride = img->stride[VPX_PLANE_U];
yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
yv12->clrtype = (img->fmt == VPX_IMG_FMT_VPXI420 || img->fmt == VPX_IMG_FMT_VPXYV12); //REG_YUV = 0
yv12->clrtype = (img->fmt == VPX_IMG_FMT_VPXI420 || img->fmt == VPX_IMG_FMT_VPXYV12);
return res;
}
@ -902,10 +898,11 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
{
pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE;
// This timestamp should be as close as possible to the
// prior PTS so that if a decoder uses pts to schedule when
// to do this, we start right after last frame was decoded.
// Invisible frames have no duration.
/* This timestamp should be as close as possible to the
* prior PTS so that if a decoder uses pts to schedule when
* to do this, we start right after last frame was decoded.
* Invisible frames have no duration.
*/
pkt.data.frame.pts = ((cpi->last_time_stamp_seen
* ctx->cfg.g_timebase.den + round)
/ ctx->cfg.g_timebase.num / 10000000) + 1;
@ -957,8 +954,6 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
cx_data += size;
cx_data_sz -= size;
}
//printf("timestamp: %lld, duration: %d\n", pkt->data.frame.pts, pkt->data.frame.duration);
}
}
}

View File

@ -129,8 +129,8 @@ int main(int argc, char **argv) {
int got_data;
int flags = 0;
int i;
int pts = 0; // PTS starts at 0
int frame_duration = 1; // 1 timebase tick per frame
int pts = 0; /* PTS starts at 0 */
int frame_duration = 1; /* 1 timebase tick per frame */
int layering_mode = 0;
int frames_in_layer[VPX_TS_MAX_LAYERS] = {0};
@ -138,7 +138,7 @@ int main(int argc, char **argv) {
int flag_periodicity;
int max_intra_size_pct;
// Check usage and arguments
/* Check usage and arguments */
if (argc < 9)
die("Usage: %s <infile> <outfile> <width> <height> <rate_num> "
" <rate_den> <mode> <Rate_0> ... <Rate_nlayers-1>\n", argv[0]);
@ -161,29 +161,29 @@ int main(int argc, char **argv) {
printf("Using %s\n",vpx_codec_iface_name(interface));
// Populate encoder configuration
/* Populate encoder configuration */
res = vpx_codec_enc_config_default(interface, &cfg, 0);
if(res) {
printf("Failed to get config: %s\n", vpx_codec_err_to_string(res));
return EXIT_FAILURE;
}
// Update the default configuration with our settings
/* Update the default configuration with our settings */
cfg.g_w = width;
cfg.g_h = height;
// Timebase format e.g. 30fps: numerator=1, demoninator=30
/* Timebase format e.g. 30fps: numerator=1, denominator=30 */
if (!sscanf (argv[5], "%d", &cfg.g_timebase.num ))
die ("Invalid timebase numerator %s", argv[5]);
if (!sscanf (argv[6], "%d", &cfg.g_timebase.den ))
die ("Invalid timebase denominator %s", argv[6]);
for (i=8; i<8+mode_to_num_layers[layering_mode]; i++)
if (!sscanf(argv[i], "%d", &cfg.ts_target_bitrate[i-8]))
if (!sscanf(argv[i], "%ud", &cfg.ts_target_bitrate[i-8]))
die ("Invalid data rate %s", argv[i]);
// Real time parameters
cfg.rc_dropframe_thresh = 0; // 30
/* Real time parameters */
cfg.rc_dropframe_thresh = 0;
cfg.rc_end_usage = VPX_CBR;
cfg.rc_resize_allowed = 0;
cfg.rc_min_quantizer = 8;
@ -194,25 +194,26 @@ int main(int argc, char **argv) {
cfg.rc_buf_optimal_sz = 600;
cfg.rc_buf_sz = 1000;
// Enable error resilient mode
/* Enable error resilient mode */
cfg.g_error_resilient = 1;
cfg.g_lag_in_frames = 0;
cfg.kf_mode = VPX_KF_DISABLED;
// Disable automatic keyframe placement
/* Disable automatic keyframe placement */
cfg.kf_min_dist = cfg.kf_max_dist = 1000;
// Temporal scaling parameters:
// NOTE: The 3 prediction frames cannot be used interchangeably due to
// differences in the way they are handled throughout the code. The
// frames should be allocated to layers in the order LAST, GF, ARF.
// Other combinations work, but may produce slightly inferior results.
/* Temporal scaling parameters: */
/* NOTE: The 3 prediction frames cannot be used interchangeably due to
* differences in the way they are handled throughout the code. The
* frames should be allocated to layers in the order LAST, GF, ARF.
* Other combinations work, but may produce slightly inferior results.
*/
switch (layering_mode)
{
case 0:
{
// 2-layers, 2-frame period
/* 2-layers, 2-frame period */
int ids[2] = {0,1};
cfg.ts_number_layers = 2;
cfg.ts_periodicity = 2;
@ -222,14 +223,14 @@ int main(int argc, char **argv) {
flag_periodicity = cfg.ts_periodicity;
#if 1
// 0=L, 1=GF, Intra-layer prediction enabled
/* 0=L, 1=GF, Intra-layer prediction enabled */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
layer_flags[1] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_REF_ARF;
#else
// 0=L, 1=GF, Intra-layer prediction disabled
/* 0=L, 1=GF, Intra-layer prediction disabled */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
@ -241,7 +242,7 @@ int main(int argc, char **argv) {
case 1:
{
// 2-layers, 3-frame period
/* 2-layers, 3-frame period */
int ids[3] = {0,1,1};
cfg.ts_number_layers = 2;
cfg.ts_periodicity = 3;
@ -251,7 +252,7 @@ int main(int argc, char **argv) {
flag_periodicity = cfg.ts_periodicity;
// 0=L, 1=GF, Intra-layer prediction enabled
/* 0=L, 1=GF, Intra-layer prediction enabled */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
@ -264,7 +265,7 @@ int main(int argc, char **argv) {
case 2:
{
// 3-layers, 6-frame period
/* 3-layers, 6-frame period */
int ids[6] = {0,2,2,1,2,2};
cfg.ts_number_layers = 3;
cfg.ts_periodicity = 6;
@ -275,7 +276,7 @@ int main(int argc, char **argv) {
flag_periodicity = cfg.ts_periodicity;
// 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled
/* 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
@ -290,7 +291,7 @@ int main(int argc, char **argv) {
case 3:
{
// 3-layers, 4-frame period
/* 3-layers, 4-frame period */
int ids[4] = {0,2,1,2};
cfg.ts_number_layers = 3;
cfg.ts_periodicity = 4;
@ -301,7 +302,7 @@ int main(int argc, char **argv) {
flag_periodicity = cfg.ts_periodicity;
// 0=L, 1=GF, 2=ARF, Intra-layer prediction disabled
/* 0=L, 1=GF, 2=ARF, Intra-layer prediction disabled */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
@ -317,7 +318,7 @@ int main(int argc, char **argv) {
case 4:
{
// 3-layers, 4-frame period
/* 3-layers, 4-frame period */
int ids[4] = {0,2,1,2};
cfg.ts_number_layers = 3;
cfg.ts_periodicity = 4;
@ -328,8 +329,9 @@ int main(int argc, char **argv) {
flag_periodicity = cfg.ts_periodicity;
// 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled in layer 1,
// disabled in layer 2
/* 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled in layer 1,
* disabled in layer 2
*/
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
@ -344,7 +346,7 @@ int main(int argc, char **argv) {
case 5:
{
// 3-layers, 4-frame period
/* 3-layers, 4-frame period */
int ids[4] = {0,2,1,2};
cfg.ts_number_layers = 3;
cfg.ts_periodicity = 4;
@ -355,7 +357,7 @@ int main(int argc, char **argv) {
flag_periodicity = cfg.ts_periodicity;
// 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled
/* 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
@ -368,9 +370,9 @@ int main(int argc, char **argv) {
case 6:
{
// NOTE: Probably of academic interest only
/* NOTE: Probably of academic interest only */
// 5-layers, 16-frame period
/* 5-layers, 16-frame period */
int ids[16] = {0,4,3,4,2,4,3,4,1,4,3,4,2,4,3,4};
cfg.ts_number_layers = 5;
cfg.ts_periodicity = 16;
@ -407,7 +409,7 @@ int main(int argc, char **argv) {
case 7:
{
// 2-layers
/* 2-layers */
int ids[2] = {0,1};
cfg.ts_number_layers = 2;
cfg.ts_periodicity = 2;
@ -417,7 +419,7 @@ int main(int argc, char **argv) {
flag_periodicity = 8;
// 0=L, 1=GF
/* 0=L, 1=GF */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
@ -440,7 +442,7 @@ int main(int argc, char **argv) {
case 8:
default:
{
// 3-layers
/* 3-layers */
int ids[4] = {0,2,1,2};
cfg.ts_number_layers = 3;
cfg.ts_periodicity = 4;
@ -451,7 +453,7 @@ int main(int argc, char **argv) {
flag_periodicity = 8;
// 0=L, 1=GF, 2=ARF
/* 0=L, 1=GF, 2=ARF */
layer_flags[0] = VPX_EFLAG_FORCE_KF |
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
@ -472,11 +474,11 @@ int main(int argc, char **argv) {
}
}
// Open input file
/* Open input file */
if(!(infile = fopen(argv[1], "rb")))
die("Failed to open %s for reading", argv[1]);
// Open an output file for each stream
/* Open an output file for each stream */
for (i=0; i<cfg.ts_number_layers; i++)
{
char file_name[512];
@ -486,11 +488,11 @@ int main(int argc, char **argv) {
write_ivf_file_header(outfile[i], &cfg, 0);
}
// Initialize codec
/* Initialize codec */
if (vpx_codec_enc_init (&codec, interface, &cfg, 0))
die_codec (&codec, "Failed to initialize encoder");
// Cap CPU & first I-frame size
/* Cap CPU & first I-frame size */
vpx_codec_control (&codec, VP8E_SET_CPUUSED, -6);
vpx_codec_control (&codec, VP8E_SET_STATIC_THRESHOLD, 800);
vpx_codec_control (&codec, VP8E_SET_NOISE_SENSITIVITY, 1);
@ -498,12 +500,10 @@ int main(int argc, char **argv) {
max_intra_size_pct = (int) (((double)cfg.rc_buf_optimal_sz * 0.5)
* ((double) cfg.g_timebase.den / cfg.g_timebase.num)
/ 10.0);
//printf ("max_intra_size_pct=%d\n", max_intra_size_pct);
/* printf ("max_intra_size_pct=%d\n", max_intra_size_pct); */
vpx_codec_control(&codec, VP8E_SET_MAX_INTRA_BITRATE_PCT,
max_intra_size_pct);
// vpx_codec_control (&codec, VP8E_SET_TOKEN_PARTITIONS,
// static_cast<vp8e_token_partitions>(_tokenPartitions));
frame_avail = 1;
while (frame_avail || got_data) {
@ -517,7 +517,7 @@ int main(int argc, char **argv) {
1, flags, VPX_DL_REALTIME))
die_codec(&codec, "Failed to encode frame");
// Reset KF flag
/* Reset KF flag */
if (layering_mode != 6)
layer_flags[0] &= ~VPX_EFLAG_FORCE_KF;
@ -552,7 +552,7 @@ int main(int argc, char **argv) {
if (vpx_codec_destroy(&codec))
die_codec (&codec, "Failed to destroy codec");
// Try to rewrite the output file headers with the actual frame count
/* Try to rewrite the output file headers with the actual frame count */
for (i=0; i<cfg.ts_number_layers; i++)
{
if (!fseek(outfile[i], 0, SEEK_SET))

View File

@ -19,11 +19,11 @@
static void assert_##name(void) {switch(0){case 0:case !!(cond):;}}
#if INLINE_ASM
#define DEFINE(sym, val) asm("\n" #sym " EQU %0" : : "i" (val));
#define DEFINE(sym, val) asm("\n" #sym " EQU %0" : : "i" (val))
#define BEGIN int main(void) {
#define END return 0; }
#else
#define DEFINE(sym, val) const int sym = val;
#define DEFINE(sym, val) const int sym = val
#define BEGIN
#define END
#endif

View File

@ -145,27 +145,27 @@ static unsigned MEM_VALUE_T mem_get_le32(const void *vmem)
#undef mem_get_sbe16
#define mem_get_sbe16 mem_ops_wrap_symbol(mem_get_sbe16)
mem_get_s_generic(be, 16);
mem_get_s_generic(be, 16)
#undef mem_get_sbe24
#define mem_get_sbe24 mem_ops_wrap_symbol(mem_get_sbe24)
mem_get_s_generic(be, 24);
mem_get_s_generic(be, 24)
#undef mem_get_sbe32
#define mem_get_sbe32 mem_ops_wrap_symbol(mem_get_sbe32)
mem_get_s_generic(be, 32);
mem_get_s_generic(be, 32)
#undef mem_get_sle16
#define mem_get_sle16 mem_ops_wrap_symbol(mem_get_sle16)
mem_get_s_generic(le, 16);
mem_get_s_generic(le, 16)
#undef mem_get_sle24
#define mem_get_sle24 mem_ops_wrap_symbol(mem_get_sle24)
mem_get_s_generic(le, 24);
mem_get_s_generic(le, 24)
#undef mem_get_sle32
#define mem_get_sle32 mem_ops_wrap_symbol(mem_get_sle32)
mem_get_s_generic(le, 32);
mem_get_s_generic(le, 32)
#undef mem_put_be16
#define mem_put_be16 mem_ops_wrap_symbol(mem_put_be16)

View File

@ -99,51 +99,51 @@
#undef mem_get_be16_aligned
#define mem_get_be16_aligned mem_ops_wrap_symbol(mem_get_be16_aligned)
mem_get_be_aligned_generic(16);
mem_get_be_aligned_generic(16)
#undef mem_get_be32_aligned
#define mem_get_be32_aligned mem_ops_wrap_symbol(mem_get_be32_aligned)
mem_get_be_aligned_generic(32);
mem_get_be_aligned_generic(32)
#undef mem_get_le16_aligned
#define mem_get_le16_aligned mem_ops_wrap_symbol(mem_get_le16_aligned)
mem_get_le_aligned_generic(16);
mem_get_le_aligned_generic(16)
#undef mem_get_le32_aligned
#define mem_get_le32_aligned mem_ops_wrap_symbol(mem_get_le32_aligned)
mem_get_le_aligned_generic(32);
mem_get_le_aligned_generic(32)
#undef mem_get_sbe16_aligned
#define mem_get_sbe16_aligned mem_ops_wrap_symbol(mem_get_sbe16_aligned)
mem_get_sbe_aligned_generic(16);
mem_get_sbe_aligned_generic(16)
#undef mem_get_sbe32_aligned
#define mem_get_sbe32_aligned mem_ops_wrap_symbol(mem_get_sbe32_aligned)
mem_get_sbe_aligned_generic(32);
mem_get_sbe_aligned_generic(32)
#undef mem_get_sle16_aligned
#define mem_get_sle16_aligned mem_ops_wrap_symbol(mem_get_sle16_aligned)
mem_get_sle_aligned_generic(16);
mem_get_sle_aligned_generic(16)
#undef mem_get_sle32_aligned
#define mem_get_sle32_aligned mem_ops_wrap_symbol(mem_get_sle32_aligned)
mem_get_sle_aligned_generic(32);
mem_get_sle_aligned_generic(32)
#undef mem_put_be16_aligned
#define mem_put_be16_aligned mem_ops_wrap_symbol(mem_put_be16_aligned)
mem_put_be_aligned_generic(16);
mem_put_be_aligned_generic(16)
#undef mem_put_be32_aligned
#define mem_put_be32_aligned mem_ops_wrap_symbol(mem_put_be32_aligned)
mem_put_be_aligned_generic(32);
mem_put_be_aligned_generic(32)
#undef mem_put_le16_aligned
#define mem_put_le16_aligned mem_ops_wrap_symbol(mem_put_le16_aligned)
mem_put_le_aligned_generic(16);
mem_put_le_aligned_generic(16)
#undef mem_put_le32_aligned
#define mem_put_le32_aligned mem_ops_wrap_symbol(mem_put_le32_aligned)
mem_put_le_aligned_generic(32);
mem_put_le_aligned_generic(32)
#undef mem_get_ne_aligned_generic
#undef mem_get_se_aligned_generic

View File

@ -52,7 +52,7 @@ static const char *exec_name;
static const struct
{
char const *name;
const vpx_codec_iface_t *iface;
vpx_codec_iface_t *iface;
unsigned int fourcc;
unsigned int fourcc_mask;
} ifaces[] =
@ -152,7 +152,8 @@ static void usage_exit()
"write to. If the\n argument does not include any escape "
"characters, the output will be\n written to a single file. "
"Otherwise, the filename will be calculated by\n expanding "
"the following escape characters:\n"
"the following escape characters:\n");
fprintf(stderr,
"\n\t%%w - Frame width"
"\n\t%%h - Frame height"
"\n\t%%<n> - Frame number, zero padded to <n> places (1..9)"
@ -580,10 +581,10 @@ file_is_webm(struct input_ctx *input,
unsigned int i, n;
int track_type = -1;
nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb,
input->infile};
nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb, 0};
nestegg_video_params params;
io.userdata = input->infile;
if(nestegg_init(&input->nestegg_ctx, io, NULL))
goto fail;
@ -647,7 +648,7 @@ void generate_filename(const char *pattern, char *out, size_t q_len,
{
size_t pat_len;
// parse the pattern
/* parse the pattern */
q[q_len - 1] = '\0';
switch(p[1])
{
@ -677,7 +678,7 @@ void generate_filename(const char *pattern, char *out, size_t q_len,
{
size_t copy_len;
// copy the next segment
/* copy the next segment */
if(!next_pat)
copy_len = strlen(p);
else
@ -922,7 +923,7 @@ int main(int argc, const char **argv_)
p = strchr(p, '%');
if(p && p[1] >= '1' && p[1] <= '9')
{
// pattern contains sequence number, so it's not unique.
/* pattern contains sequence number, so it's not unique. */
single_file = 0;
break;
}

View File

@ -73,7 +73,7 @@ static const char *exec_name;
static const struct codec_item
{
char const *name;
const vpx_codec_iface_t *iface;
vpx_codec_iface_t *iface;
unsigned int fourcc;
} codecs[] =
{
@ -597,9 +597,9 @@ static void
Ebml_StartSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc,
unsigned long class_id)
{
//todo this is always taking 8 bytes, this may need later optimization
//this is a key that says length unknown
uint64_t unknownLen = LITERALU64(0x01FFFFFFFFFFFFFF);
/* todo this is always taking 8 bytes, this may need later optimization */
/* this is a key that says length unknown */
uint64_t unknownLen = 0x01FFFFFFFFFFFFFF;
Ebml_WriteID(glob, class_id);
*ebmlLoc = ftello(glob->stream);
@ -617,7 +617,7 @@ Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc)
/* Calculate the size of this element */
size = pos - *ebmlLoc - 8;
size |= LITERALU64(0x0100000000000000);
size |= 0x0100000000000000;
/* Seek back to the beginning of the element and write the new size */
fseeko(glob->stream, *ebmlLoc, SEEK_SET);
@ -664,7 +664,7 @@ write_webm_seek_info(EbmlGlobal *ebml)
Ebml_EndSubElement(ebml, &start);
}
{
//segment info
/* segment info */
EbmlLoc startInfo;
uint64_t frame_time;
char version_string[64];
@ -704,16 +704,16 @@ write_webm_file_header(EbmlGlobal *glob,
EbmlLoc start;
Ebml_StartSubElement(glob, &start, EBML);
Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1); //EBML Read Version
Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4); //EBML Max ID Length
Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8); //EBML Max Size Length
Ebml_SerializeString(glob, DocType, "webm"); //Doc Type
Ebml_SerializeUnsigned(glob, DocTypeVersion, 2); //Doc Type Version
Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2); //Doc Type Read Version
Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1);
Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4);
Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8);
Ebml_SerializeString(glob, DocType, "webm");
Ebml_SerializeUnsigned(glob, DocTypeVersion, 2);
Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2);
Ebml_EndSubElement(glob, &start);
}
{
Ebml_StartSubElement(glob, &glob->startSegment, Segment); //segment
Ebml_StartSubElement(glob, &glob->startSegment, Segment);
glob->position_reference = ftello(glob->stream);
glob->framerate = *fps;
write_webm_seek_info(glob);
@ -731,7 +731,7 @@ write_webm_file_header(EbmlGlobal *glob,
Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
glob->track_id_pos = ftello(glob->stream);
Ebml_SerializeUnsigned32(glob, TrackUID, trackID);
Ebml_SerializeUnsigned(glob, TrackType, 1); //video is always 1
Ebml_SerializeUnsigned(glob, TrackType, 1);
Ebml_SerializeString(glob, CodecID, "V_VP8");
{
unsigned int pixelWidth = cfg->g_w;
@ -744,13 +744,13 @@ write_webm_file_header(EbmlGlobal *glob,
Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
Ebml_SerializeUnsigned(glob, StereoMode, stereo_fmt);
Ebml_SerializeFloat(glob, FrameRate, frameRate);
Ebml_EndSubElement(glob, &videoStart); //Video
Ebml_EndSubElement(glob, &videoStart);
}
Ebml_EndSubElement(glob, &start); //Track Entry
Ebml_EndSubElement(glob, &start); /* Track Entry */
}
Ebml_EndSubElement(glob, &trackStart);
}
// segment element is open
/* segment element is open */
}
}
@ -791,7 +791,7 @@ write_webm_block(EbmlGlobal *glob,
glob->cluster_open = 1;
glob->cluster_timecode = pts_ms;
glob->cluster_pos = ftello(glob->stream);
Ebml_StartSubElement(glob, &glob->startCluster, Cluster); //cluster
Ebml_StartSubElement(glob, &glob->startCluster, Cluster); /* cluster */
Ebml_SerializeUnsigned(glob, Timecode, glob->cluster_timecode);
/* Save a cue point if this is a keyframe. */
@ -865,7 +865,6 @@ write_webm_file_footer(EbmlGlobal *glob, long hash)
Ebml_SerializeUnsigned(glob, CueTrack, 1);
Ebml_SerializeUnsigned64(glob, CueClusterPosition,
cue->loc - glob->position_reference);
//Ebml_SerializeUnsigned(glob, CueBlockNumber, cue->blockNumber);
Ebml_EndSubElement(glob, &start);
}
Ebml_EndSubElement(glob, &start);
@ -942,7 +941,7 @@ static double vp8_mse2psnr(double Samples, double Peak, double Mse)
if ((double)Mse > 0.0)
psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
else
psnr = 60; // Limit to prevent / 0
psnr = 60; /* Limit to prevent / 0 */
if (psnr > 60)
psnr = 60;
@ -1225,7 +1224,7 @@ static int merge_hist_buckets(struct hist_bucket *bucket,
{
int last_bucket = buckets - 1;
// merge the small bucket with an adjacent one.
/* merge the small bucket with an adjacent one. */
if(small_bucket == 0)
merge_bucket = 1;
else if(small_bucket == last_bucket)
@ -1393,7 +1392,7 @@ static void init_rate_histogram(struct rate_hist *hist,
*/
hist->samples = cfg->rc_buf_sz * 5 / 4 * fps->num / fps->den / 1000;
// prevent division by zero
/* prevent division by zero */
if (hist->samples == 0)
hist->samples=1;
@ -2283,7 +2282,7 @@ static void get_cx_data(struct stream_state *stream,
stream->psnr_samples_total += pkt->data.psnr.samples[0];
for (i = 0; i < 4; i++)
{
fprintf(stderr, "%.3lf ", pkt->data.psnr.psnr[i]);
fprintf(stderr, "%.3f ", pkt->data.psnr.psnr[i]);
stream->psnr_totals[i] += pkt->data.psnr.psnr[i];
}
stream->psnr_count++;
@ -2308,11 +2307,11 @@ static void show_psnr(struct stream_state *stream)
fprintf(stderr, "Stream %d PSNR (Overall/Avg/Y/U/V)", stream->index);
ovpsnr = vp8_mse2psnr(stream->psnr_samples_total, 255.0,
stream->psnr_sse_total);
fprintf(stderr, " %.3lf", ovpsnr);
fprintf(stderr, " %.3f", ovpsnr);
for (i = 0; i < 4; i++)
{
fprintf(stderr, " %.3lf", stream->psnr_totals[i]/stream->psnr_count);
fprintf(stderr, " %.3f", stream->psnr_totals[i]/stream->psnr_count);
}
fprintf(stderr, "\n");
}