cleanup
Originally committed as revision 1882 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 3bb10888ff
commit d8085ea727
ffmpeg.c
@@ -680,7 +680,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
         avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0;
         fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
             (double)total_size / 1024, ti1, bitrate, avg_bitrate);
-        fprintf(fvstats,"type= %s\n", enc->coded_frame->key_frame == 1 ? "I" : "P");
+        fprintf(fvstats,"type= %s\n", av_get_pict_type_char(enc->coded_frame->pict_type));
     }
 }

@@ -15,8 +15,8 @@ extern "C" {

 #define LIBAVCODEC_VERSION_INT 0x000406
 #define LIBAVCODEC_VERSION "0.4.6"
-#define LIBAVCODEC_BUILD 4666
-#define LIBAVCODEC_BUILD_STR "4666"
+#define LIBAVCODEC_BUILD 4667
+#define LIBAVCODEC_BUILD_STR "4667"

 #define LIBAVCODEC_IDENT "FFmpeg" LIBAVCODEC_VERSION "b" LIBAVCODEC_BUILD_STR

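The build number goes from 4666 to 4667 because this commit adds a new public entry point, av_get_pict_type_char() (declared in the next hunk). A minimal sketch, not part of the commit, of how an application might gate on that bump at compile time; the HAVE_... macro name is purely illustrative:

    #include "avcodec.h"

    /* av_get_pict_type_char() first appears with LIBAVCODEC_BUILD 4667,
     * so guard its use when building against older libavcodec headers. */
    #if defined(LIBAVCODEC_BUILD) && LIBAVCODEC_BUILD >= 4667
    #define HAVE_AV_GET_PICT_TYPE_CHAR 1
    #else
    #define HAVE_AV_GET_PICT_TYPE_CHAR 0
    #endif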
@@ -1372,6 +1372,12 @@ void avcodec_register_all(void);
 void avcodec_flush_buffers(AVCodecContext *avctx);

 /* misc usefull functions */
+
+/**
+ * returns a single letter to describe the picture type
+ */
+char av_get_pict_type_char(int pict_type);
+
 /**
  * reduce a fraction.
  * this is usefull for framerate calculations
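A usage sketch, not part of the commit: printing the letter for a decoded frame. It assumes a libavcodec of this era, where an AVFrame filled in by avcodec_decode_video() carries pict_type and key_frame; report_frame() is a hypothetical helper:

    #include <stdio.h>
    #include "avcodec.h"

    /* "picture" is assumed to have been filled in by a successful
     * avcodec_decode_video() call elsewhere in the program. */
    static void report_frame(const AVFrame *picture, int frame_index)
    {
        printf("frame %d: type %c key_frame:%d\n",
               frame_index,
               av_get_pict_type_char(picture->pict_type),
               picture->key_frame);
    }

This is the same mapping the ffmpeg.c, h264.c, ratecontrol.c and svq3.c hunks below rely on for their debug output.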
@@ -494,10 +494,10 @@ static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src
         uint32_t a,b;\
         a= LD32(&src1[i*src_stride1 ]);\
         b= LD32(&src2[i*src_stride2 ]);\
-        OP(*((uint32_t*)&dst[i*dst_stride ]), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
+        OP(*((uint32_t*)&dst[i*dst_stride ]), no_rnd_avg32(a, b));\
         a= LD32(&src1[i*src_stride1+4]);\
         b= LD32(&src2[i*src_stride2+4]);\
-        OP(*((uint32_t*)&dst[i*dst_stride+4]), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
+        OP(*((uint32_t*)&dst[i*dst_stride+4]), no_rnd_avg32(a, b));\
     }\
 }\
 \
@@ -508,10 +508,10 @@ static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, cons
         uint32_t a,b;\
         a= LD32(&src1[i*src_stride1 ]);\
         b= LD32(&src2[i*src_stride2 ]);\
-        OP(*((uint32_t*)&dst[i*dst_stride ]), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
+        OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
         a= LD32(&src1[i*src_stride1+4]);\
         b= LD32(&src2[i*src_stride2+4]);\
-        OP(*((uint32_t*)&dst[i*dst_stride+4]), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
+        OP(*((uint32_t*)&dst[i*dst_stride+4]), rnd_avg32(a, b));\
     }\
 }\
 \
@@ -522,7 +522,7 @@ static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, cons
         uint32_t a,b;\
         a= LD32(&src1[i*src_stride1 ]);\
         b= LD32(&src2[i*src_stride2 ]);\
-        OP(*((uint32_t*)&dst[i*dst_stride ]), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
+        OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
     }\
 }\
 \
@@ -726,7 +726,7 @@ CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels8_x2_c
 CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels8_y2_c , 8)\
 CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels8_xy2_c, 8)\

-#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
+#define op_avg(a, b) a = rnd_avg32(a, b)
 #endif
 #define op_put(a, b) a = b

@@ -268,6 +268,18 @@ void dsputil_init(DSPContext* p, AVCodecContext *avctx);
 */
 void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);

+#define BYTE_VEC32(c) ((c)*0x01010101UL)
+
+static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
+{
+    return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
+static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
+{
+    return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
 /**
  * Empty mmx state.
  * this must be called between any dsp function and float/double code.
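rnd_avg32() and no_rnd_avg32() average four packed bytes at once: per byte they compute (x+y+1)>>1 and (x+y)>>1 respectively, and masking off the low bit of a^b before the shift keeps the carry of one byte lane from spilling into the next. A standalone check of that identity, not part of the commit (the two helpers and BYTE_VEC32 are copied verbatim from the hunk above):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BYTE_VEC32(c) ((c)*0x01010101UL)

    static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
    {
        return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1); /* per-byte (x+y+1)>>1 */
    }

    static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
    {
        return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1); /* per-byte (x+y)>>1 */
    }

    int main(void)
    {
        int i, s;
        for (i = 0; i < 1000000; i++) {
            uint32_t a = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
            uint32_t b = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
            for (s = 0; s < 32; s += 8) {
                unsigned x = (a >> s) & 0xFF, y = (b >> s) & 0xFF;
                assert(((rnd_avg32(a, b)    >> s) & 0xFF) == ((x + y + 1) >> 1));
                assert(((no_rnd_avg32(a, b) >> s) & 0xFF) == ((x + y)     >> 1));
            }
        }
        printf("packed averages match the per-byte reference\n");
        return 0;
    }

This shared pair is what the dsputil.c and sh4 hunks below switch to, instead of each file spelling out the bit trick locally.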
@@ -3015,7 +3015,7 @@ static int decode_slice_header(H264Context *h){
     if(s->avctx->debug&FF_DEBUG_PICT_INFO){
         printf("mb:%d %c pps:%d frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d\n",
                first_mb_in_slice,
-               ff_get_pict_type_char(h->slice_type),
+               av_get_pict_type_char(h->slice_type),
                pps_id, h->frame_num,
                s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1],
                h->ref_count[0], h->ref_count[1],
@@ -3265,7 +3265,7 @@ static int decode_mb(H264Context *h){
         assert(h->slice_type == I_TYPE);
 decode_intra_mb:
         if(mb_type > 25){
-            fprintf(stderr, "mb_type %d in %c slice to large at %d %d\n", mb_type, ff_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
+            fprintf(stderr, "mb_type %d in %c slice to large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
             return -1;
         }
         partition_count=0;
@@ -4105,18 +4105,6 @@ static void dct_unquantize_h263_c(MpegEncContext *s,
 }


-char ff_get_pict_type_char(int pict_type){
-    switch(pict_type){
-    case I_TYPE: return 'I';
-    case P_TYPE: return 'P';
-    case B_TYPE: return 'B';
-    case S_TYPE: return 'S';
-    case SI_TYPE:return 'i';
-    case SP_TYPE:return 'p';
-    default: return '?';
-    }
-}
-
 static const AVOption mpeg4_options[] =
 {
     AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
@@ -710,7 +710,6 @@ void ff_init_scantable(MpegEncContext *s, ScanTable *st, const uint8_t *src_scan
 void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
 void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int block_w, int block_h,
                          int src_x, int src_y, int w, int h);
-char ff_get_pict_type_char(int pict_type);
 #define END_NOT_FOUND -100
 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size);
 void ff_print_debug_info(MpegEncContext *s, Picture *pict);
@@ -668,7 +668,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)

     if(s->avctx->debug&FF_DEBUG_RC){
         printf("%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f size:%d var:%d/%d br:%d fps:%d\n",
-            ff_get_pict_type_char(pict_type), qmin, q, qmax, picture_number, (int)wanted_bits/1000, (int)s->total_bits/1000,
+            av_get_pict_type_char(pict_type), qmin, q, qmax, picture_number, (int)wanted_bits/1000, (int)s->total_bits/1000,
             br_compensation, short_term_q, s->frame_bits, pic->mb_var_sum, pic->mc_mb_var_sum, s->bit_rate/1000, (int)fps
             );
     }
@@ -26,15 +26,13 @@
 #define LP(p) *(uint32_t*)(p)


-#define BYTE_VEC(c) ((c)*0x01010101UL)
-
 #define UNPACK(ph,pl,tt0,tt1) do { \
     uint32_t t0,t1; t0=tt0;t1=tt1; \
-    ph = ( (t0 & ~BYTE_VEC(0x03))>>2) + ( (t1 & ~BYTE_VEC(0x03))>>2); \
-    pl = (t0 & BYTE_VEC(0x03)) + (t1 & BYTE_VEC(0x03)); } while(0)
+    ph = ( (t0 & ~BYTE_VEC32(0x03))>>2) + ( (t1 & ~BYTE_VEC32(0x03))>>2); \
+    pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)

-#define rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC(0x02))>>2) & BYTE_VEC(0x03))
-#define no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC(0x01))>>2) & BYTE_VEC(0x03))
+#define rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
+#define no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))

 /* little endian */
 #define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
@@ -46,18 +44,7 @@


 #define put(d,s) d = s
-#define avg(d,s) d = rnd_avg2(s,d)
-
-static inline uint32_t rnd_avg2(uint32_t a, uint32_t b)
-{
-    return (a | b) - (((a ^ b) & ~BYTE_VEC(0x01)) >> 1);
-}
-
-static inline uint32_t no_rnd_avg2(uint32_t a, uint32_t b)
-{
-    return (a & b) + (((a ^ b) & ~BYTE_VEC(0x01)) >> 1);
-}
-
+#define avg(d,s) d = rnd_avg32(s,d)

 #define OP_C4(ofs) \
     ref-=ofs; \
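For the four-way average used by these SH4 helpers, UNPACK() splits each byte of two packed words into a summed 6-bit quotient (ph) and a summed 2-bit remainder (pl), and rnd_PACK() recombines two such pairs into a per-byte (a+b+c+d+2)>>2 with no carry ever crossing a byte lane. A standalone check, not part of the commit, using the macros exactly as they read after this hunk:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define BYTE_VEC32(c) ((c)*0x01010101UL)

    #define UNPACK(ph,pl,tt0,tt1) do { \
        uint32_t t0,t1; t0=tt0;t1=tt1; \
        ph = ( (t0 & ~BYTE_VEC32(0x03))>>2) + ( (t1 & ~BYTE_VEC32(0x03))>>2); \
        pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)

    #define rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))

    int main(void)
    {
        int i, j, s;
        for (i = 0; i < 1000000; i++) {
            uint32_t v[4], ph, pl, nph, npl, packed;
            for (j = 0; j < 4; j++)
                v[j] = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
            UNPACK(ph, pl, v[0], v[1]);    /* first pair of packed words */
            UNPACK(nph, npl, v[2], v[3]);  /* second pair */
            packed = rnd_PACK(ph, pl, nph, npl);
            for (s = 0; s < 32; s += 8) {
                unsigned a = (v[0] >> s) & 0xFF, b = (v[1] >> s) & 0xFF;
                unsigned c = (v[2] >> s) & 0xFF, d = (v[3] >> s) & 0xFF;
                assert(((packed >> s) & 0xFF) == ((a + b + c + d + 2) >> 2));
            }
        }
        return 0;
    }

no_rnd_PACK() is identical except that it adds BYTE_VEC32(0x01) instead of 0x02, so the four-way average rounds down in more cases.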
@@ -9,8 +9,8 @@
 /*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),no_rnd_avg2(LD32(src1 ),LD32(src2 )) ); \
-        OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
+        OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+        OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -20,8 +20,8 @@
 static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LD32(src1 ),LD32(src2 )) ); \
-        OP(LP(dst+4),rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
+        OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+        OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -31,7 +31,7 @@ static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, cons
 static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LD32(src1 ),LD32(src2 )) ); \
+        OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -41,10 +41,10 @@ static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, cons
 static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),no_rnd_avg2(LD32(src1 ),LD32(src2 )) ); \
-        OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
-        OP(LP(dst+8),no_rnd_avg2(LD32(src1+8),LD32(src2+8)) ); \
-        OP(LP(dst+12),no_rnd_avg2(LD32(src1+12),LD32(src2+12)) ); \
+        OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+        OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
+        OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
+        OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -54,10 +54,10 @@ static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *sr
 static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LD32(src1 ),LD32(src2 )) ); \
-        OP(LP(dst+4),rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
-        OP(LP(dst+8),rnd_avg2(LD32(src1+8),LD32(src2+8)) ); \
-        OP(LP(dst+12),rnd_avg2(LD32(src1+12),LD32(src2+12)) ); \
+        OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+        OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
+        OP(LP(dst+8),rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
+        OP(LP(dst+12),rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -67,7 +67,7 @@ static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, con
 static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LP(src1 ),LP(src2 )) ); \
+        OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -77,7 +77,7 @@ static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *sr
 static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LD32(src1 ),LP(src2 )) ); \
+        OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -87,10 +87,10 @@ static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *s
 static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),no_rnd_avg2(LD32(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
-        OP(LP(dst+8),no_rnd_avg2(LD32(src1+8),LP(src2+8)) ); \
-        OP(LP(dst+12),no_rnd_avg2(LD32(src1+12),LP(src2+12)) ); \
+        OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
+        OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
+        OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -100,10 +100,10 @@ static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const ui
 static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LD32(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
-        OP(LP(dst+8),rnd_avg2(LD32(src1+8),LP(src2+8)) ); \
-        OP(LP(dst+12),rnd_avg2(LD32(src1+12),LP(src2+12)) ); \
+        OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
+        OP(LP(dst+8),rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
+        OP(LP(dst+12),rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -113,8 +113,8 @@ static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *
 static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do { /* onlye src2 aligned */\
-        OP(LP(dst ),no_rnd_avg2(LD32(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
+        OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -124,8 +124,8 @@ static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uin
 static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LD32(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
+        OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -135,8 +135,8 @@ static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *s
 static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),no_rnd_avg2(LP(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),no_rnd_avg2(LP(src1+4),LP(src2+4)) ); \
+        OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -146,8 +146,8 @@ static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint
 static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LP(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),rnd_avg2(LP(src1+4),LP(src2+4)) ); \
+        OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -157,10 +157,10 @@ static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *sr
 static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),no_rnd_avg2(LP(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),no_rnd_avg2(LP(src1+4),LP(src2+4)) ); \
-        OP(LP(dst+8),no_rnd_avg2(LP(src1+8),LP(src2+8)) ); \
-        OP(LP(dst+12),no_rnd_avg2(LP(src1+12),LP(src2+12)) ); \
+        OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
+        OP(LP(dst+8),no_rnd_avg32(LP(src1+8),LP(src2+8)) ); \
+        OP(LP(dst+12),no_rnd_avg32(LP(src1+12),LP(src2+12)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -170,10 +170,10 @@ static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uin
 static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
 {\
     do {\
-        OP(LP(dst ),rnd_avg2(LP(src1 ),LP(src2 )) ); \
-        OP(LP(dst+4),rnd_avg2(LP(src1+4),LP(src2+4)) ); \
-        OP(LP(dst+8),rnd_avg2(LP(src1+8),LP(src2+8)) ); \
-        OP(LP(dst+12),rnd_avg2(LP(src1+12),LP(src2+12)) ); \
+        OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
+        OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
+        OP(LP(dst+8),rnd_avg32(LP(src1+8),LP(src2+8)) ); \
+        OP(LP(dst+12),rnd_avg32(LP(src1+12),LP(src2+12)) ); \
         src1+=src_stride1; \
         src2+=src_stride2; \
         dst+=dst_stride; \
@@ -353,7 +353,7 @@ static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const ui
 } \
 \

-#define op_avg(a, b) a = rnd_avg2(a,b)
+#define op_avg(a, b) a = rnd_avg32(a,b)
 #define op_put(a, b) a = b

 PIXOP2(avg, op_avg)
@@ -742,7 +742,7 @@ static int svq3_decode_frame (AVCodecContext *avctx,

   if(avctx->debug&FF_DEBUG_PICT_INFO){
       printf("%c hpel:%d, tpel:%d aqp:%d qp:%d\n",
-         ff_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
+         av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
          s->adaptive_quant, s->qscale
          );
   }
@@ -640,6 +640,18 @@ void avcodec_default_free_buffers(AVCodecContext *s){
     s->internal_buffer_count=0;
 }

+char av_get_pict_type_char(int pict_type){
+    switch(pict_type){
+    case I_TYPE: return 'I';
+    case P_TYPE: return 'P';
+    case B_TYPE: return 'B';
+    case S_TYPE: return 'S';
+    case SI_TYPE:return 'i';
+    case SP_TYPE:return 'p';
+    default: return '?';
+    }
+}
+
 int av_reduce(int *dst_nom, int *dst_den, int64_t nom, int64_t den, int64_t max){
     int exact=1, sign=0;
     int64_t gcd, larger;