Merge remote-tracking branch 'qatar/master'

* qatar/master:
  Code cleanup - mpegvideo.c - 500-1000line
  rv40: NEON optimised weighted prediction
  rv40: NEON optimised chroma MC
  ARM: move NEON H264 chroma mc to a separate file
  rv34: NEON optimised inverse transform functions

Conflicts:
	libavcodec/mpegvideo.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Committed by Michael Niedermayer on 2011-12-07 02:04:00 +01:00

11 changed files with 962 additions and 560 deletions

libavcodec/arm/Makefile

@@ -55,6 +55,7 @@ NEON-OBJS-$(CONFIG_RDFT) += arm/rdft_neon.o \
 NEON-OBJS-$(CONFIG_H264DSP) += arm/h264dsp_neon.o \
                                arm/h264idct_neon.o \
+                               arm/h264cmc_neon.o \
 NEON-OBJS-$(CONFIG_H264PRED) += arm/h264pred_neon.o \
@@ -63,6 +64,15 @@ NEON-OBJS-$(CONFIG_AC3DSP) += arm/ac3dsp_neon.o
 NEON-OBJS-$(CONFIG_DCA_DECODER) += arm/dcadsp_neon.o \
                                    arm/synth_filter_neon.o \
+NEON-OBJS-$(CONFIG_RV30_DECODER) += arm/rv34dsp_init_neon.o \
+                                    arm/rv34dsp_neon.o \
+NEON-OBJS-$(CONFIG_RV40_DECODER) += arm/rv34dsp_init_neon.o \
+                                    arm/rv34dsp_neon.o \
+                                    arm/rv40dsp_init_neon.o \
+                                    arm/rv40dsp_neon.o \
+                                    arm/h264cmc_neon.o \
 NEON-OBJS-$(CONFIG_VP3_DECODER) += arm/vp3dsp_neon.o
 NEON-OBJS-$(CONFIG_VP5_DECODER) += arm/vp56dsp_neon.o \

libavcodec/arm/h264cmc_neon.S (new file)

@@ -0,0 +1,430 @@
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "asm.S"
/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc8 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
push {r4-r7, lr}
ldrd r4, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
pld [r1]
pld [r1, r2]
.ifc \codec,rv40
movrel r6, rv40bias
lsr r7, r5, #1
add r6, r6, r7, lsl #3
lsr r7, r4, #1
add r6, r6, r7, lsl #1
vld1.16 {d22[],d23[]}, [r6,:16]
.endif
A muls r7, r4, r5
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3
rsb r12, r7, r4, lsl #3
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64
beq 2f
add r5, r1, r2
vdup.8 d0, r4
lsl r4, r2, #1
vdup.8 d1, r12
vld1.8 {d4, d5}, [r1], r4
vdup.8 d2, r6
vld1.8 {d6, d7}, [r5], r4
vdup.8 d3, r7
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
1: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vld1.8 {d4, d5}, [r1], r4
vmlal.u8 q8, d6, d2
vext.8 d5, d4, d5, #1
vmlal.u8 q8, d7, d3
vmull.u8 q9, d6, d0
subs r3, r3, #2
vmlal.u8 q9, d7, d1
vmlal.u8 q9, d4, d2
vmlal.u8 q9, d5, d3
vld1.8 {d6, d7}, [r5], r4
pld [r1]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.else
vadd.u16 q8, q8, q11
vadd.u16 q9, q9, q11
vshrn.u16 d16, q8, #6
vshrn.u16 d17, q9, #6
.endif
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
vext.8 d7, d6, d7, #1
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 1b
pop {r4-r7, pc}
2: tst r6, r6
add r12, r12, r6
vdup.8 d0, r4
vdup.8 d1, r12
beq 4f
add r5, r1, r2
lsl r4, r2, #1
vld1.8 {d4}, [r1], r4
vld1.8 {d6}, [r5], r4
3: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d1
vld1.8 {d4}, [r1], r4
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d1
vld1.8 {d6}, [r5], r4
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.else
vadd.u16 q8, q8, q11
vadd.u16 q9, q9, q11
vshrn.u16 d16, q8, #6
vshrn.u16 d17, q9, #6
.endif
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
subs r3, r3, #2
pld [r1]
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 3b
pop {r4-r7, pc}
4: vld1.8 {d4, d5}, [r1], r2
vld1.8 {d6, d7}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
5: pld [r1]
subs r3, r3, #2
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vld1.8 {d4, d5}, [r1], r2
vmull.u8 q9, d6, d0
vmlal.u8 q9, d7, d1
pld [r1]
vext.8 d5, d4, d5, #1
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.else
vadd.u16 q8, q8, q11
vadd.u16 q9, q9, q11
vshrn.u16 d16, q8, #6
vshrn.u16 d17, q9, #6
.endif
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
vld1.8 {d6, d7}, [r1], r2
vext.8 d7, d6, d7, #1
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 5b
pop {r4-r7, pc}
endfunc
.endm
/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc4 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
push {r4-r7, lr}
ldrd r4, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
pld [r1]
pld [r1, r2]
.ifc \codec,rv40
movrel r6, rv40bias
lsr r7, r5, #1
add r6, r6, r7, lsl #3
lsr r7, r4, #1
add r6, r6, r7, lsl #1
vld1.16 {d22[],d23[]}, [r6,:16]
.endif
A muls r7, r4, r5
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3
rsb r12, r7, r4, lsl #3
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64
beq 2f
add r5, r1, r2
vdup.8 d0, r4
lsl r4, r2, #1
vdup.8 d1, r12
vld1.8 {d4}, [r1], r4
vdup.8 d2, r6
vld1.8 {d6}, [r5], r4
vdup.8 d3, r7
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vtrn.32 d4, d5
vtrn.32 d6, d7
vtrn.32 d0, d1
vtrn.32 d2, d3
1: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d2
vld1.8 {d4}, [r1], r4
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d2
vld1.8 {d6}, [r5], r4
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
.else
vadd.u16 q8, q8, q11
vshrn.u16 d16, q8, #6
.endif
subs r3, r3, #2
pld [r1]
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
vext.8 d7, d6, d7, #1
vtrn.32 d6, d7
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 1b
pop {r4-r7, pc}
2: tst r6, r6
add r12, r12, r6
vdup.8 d0, r4
vdup.8 d1, r12
vtrn.32 d0, d1
beq 4f
vext.32 d1, d0, d1, #1
add r5, r1, r2
lsl r4, r2, #1
vld1.32 {d4[0]}, [r1], r4
vld1.32 {d4[1]}, [r5], r4
3: pld [r5]
vmull.u8 q8, d4, d0
vld1.32 {d4[0]}, [r1], r4
vmull.u8 q9, d4, d1
vld1.32 {d4[1]}, [r5], r4
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
.else
vadd.u16 q8, q8, q11
vshrn.u16 d16, q8, #6
.endif
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
subs r3, r3, #2
pld [r1]
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 3b
pop {r4-r7, pc}
4: vld1.8 {d4}, [r1], r2
vld1.8 {d6}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vtrn.32 d4, d5
vtrn.32 d6, d7
5: vmull.u8 q8, d4, d0
vmull.u8 q9, d6, d0
subs r3, r3, #2
vld1.8 {d4}, [r1], r2
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
pld [r1]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
.else
vadd.u16 q8, q8, q11
vshrn.u16 d16, q8, #6
.endif
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
vld1.8 {d6}, [r1], r2
vext.8 d7, d6, d7, #1
vtrn.32 d6, d7
pld [r1]
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 5b
pop {r4-r7, pc}
endfunc
.endm
.macro h264_chroma_mc2 type
function ff_\type\()_h264_chroma_mc2_neon, export=1
push {r4-r6, lr}
ldr r4, [sp, #16]
ldr lr, [sp, #20]
pld [r1]
pld [r1, r2]
orrs r5, r4, lr
beq 2f
mul r5, r4, lr
rsb r6, r5, lr, lsl #3
rsb r12, r5, r4, lsl #3
sub r4, r5, r4, lsl #3
sub r4, r4, lr, lsl #3
add r4, r4, #64
vdup.8 d0, r4
vdup.8 d2, r12
vdup.8 d1, r6
vdup.8 d3, r5
vtrn.16 q0, q1
1:
vld1.32 {d4[0]}, [r1], r2
vld1.32 {d4[1]}, [r1], r2
vrev64.32 d5, d4
vld1.32 {d5[1]}, [r1]
vext.8 q3, q2, q2, #1
vtrn.16 q2, q3
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
.ifc \type,avg
vld1.16 {d18[0]}, [r0,:16], r2
vld1.16 {d18[1]}, [r0,:16]
sub r0, r0, r2
.endif
vtrn.32 d16, d17
vadd.i16 d16, d16, d17
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vrhadd.u8 d16, d16, d18
.endif
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
subs r3, r3, #2
bgt 1b
pop {r4-r6, pc}
2:
.ifc \type,put
ldrh_post r5, r1, r2
strh_post r5, r0, r2
ldrh_post r6, r1, r2
strh_post r6, r0, r2
.else
vld1.16 {d16[0]}, [r1], r2
vld1.16 {d16[1]}, [r1], r2
vld1.16 {d18[0]}, [r0,:16], r2
vld1.16 {d18[1]}, [r0,:16]
sub r0, r0, r2
vrhadd.u8 d16, d16, d18
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
.endif
subs r3, r3, #2
bgt 2b
pop {r4-r6, pc}
endfunc
.endm
#if CONFIG_H264_DECODER
h264_chroma_mc8 put
h264_chroma_mc8 avg
h264_chroma_mc4 put
h264_chroma_mc4 avg
h264_chroma_mc2 put
h264_chroma_mc2 avg
#endif
#if CONFIG_RV40_DECODER
const rv40bias
.short 0, 16, 32, 16
.short 32, 28, 32, 28
.short 0, 32, 16, 32
.short 32, 28, 32, 28
endconst
h264_chroma_mc8 put, rv40
h264_chroma_mc8 avg, rv40
h264_chroma_mc4 put, rv40
h264_chroma_mc4 avg, rv40
#endif
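For reference, every variant above computes the same eighth-pel bilinear chroma interpolation; only the final rounding differs between the two codecs. Below is a minimal C sketch of the mc8 case (the function name is illustrative, not the actual FFmpeg template; the bias table is copied from the rv40bias constant above):

#include <stdint.h>

/* Eighth-pel bilinear chroma interpolation, 8 pixels wide. x and y are
 * the fractional offsets (0..7). The NEON code above branches to cheaper
 * 1-D and copy paths when x or y is 0, but the formula is unchanged
 * because the corresponding weights vanish. */
static void chroma_mc8_ref(uint8_t *dst, const uint8_t *src, int stride,
                           int h, int x, int y, int is_rv40)
{
    /* rv40_bias[y >> 1][x >> 1], copied from the const table above */
    static const int rv40_bias[4][4] = {
        {  0, 16, 32, 16 },
        { 32, 28, 32, 28 },
        {  0, 32, 16, 32 },
        { 32, 28, 32, 28 },
    };
    const int A = (8 - x) * (8 - y);   /* top-left weight     */
    const int B =      x  * (8 - y);   /* top-right weight    */
    const int C = (8 - x) *      y;    /* bottom-left weight  */
    const int D =      x  *      y;    /* bottom-right weight */
    /* H.264 uses a fixed rounding bias of 32 with >> 6 (vrshrn #6);
     * RV40 adds a per-position bias and truncates (vshrn #6). */
    const int bias = is_rv40 ? rv40_bias[y >> 1][x >> 1] : 32;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[stride + j] + D * src[stride + j + 1] +
                      bias) >> 6;
        dst += stride;
        src += stride;
    }
}

The mc4 and mc2 variants apply the same arithmetic to narrower blocks, packing two rows per vector with vtrn transposes to keep the lanes full, and the avg versions additionally average the result with the existing contents of dst (the vrhadd instructions).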

libavcodec/arm/h264dsp_neon.S (chroma MC moved out)

@@ -21,345 +21,6 @@
#include "asm.S" #include "asm.S"
#include "neon.S" #include "neon.S"
/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc8 type
function ff_\type\()_h264_chroma_mc8_neon, export=1
push {r4-r7, lr}
ldrd r4, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
pld [r1]
pld [r1, r2]
A muls r7, r4, r5
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3
rsb r12, r7, r4, lsl #3
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64
beq 2f
add r5, r1, r2
vdup.8 d0, r4
lsl r4, r2, #1
vdup.8 d1, r12
vld1.8 {d4, d5}, [r1], r4
vdup.8 d2, r6
vld1.8 {d6, d7}, [r5], r4
vdup.8 d3, r7
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
1: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vld1.8 {d4, d5}, [r1], r4
vmlal.u8 q8, d6, d2
vext.8 d5, d4, d5, #1
vmlal.u8 q8, d7, d3
vmull.u8 q9, d6, d0
subs r3, r3, #2
vmlal.u8 q9, d7, d1
vmlal.u8 q9, d4, d2
vmlal.u8 q9, d5, d3
vrshrn.u16 d16, q8, #6
vld1.8 {d6, d7}, [r5], r4
pld [r1]
vrshrn.u16 d17, q9, #6
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
vext.8 d7, d6, d7, #1
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 1b
pop {r4-r7, pc}
2: tst r6, r6
add r12, r12, r6
vdup.8 d0, r4
vdup.8 d1, r12
beq 4f
add r5, r1, r2
lsl r4, r2, #1
vld1.8 {d4}, [r1], r4
vld1.8 {d6}, [r5], r4
3: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d1
vld1.8 {d4}, [r1], r4
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d1
vld1.8 {d6}, [r5], r4
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
subs r3, r3, #2
pld [r1]
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 3b
pop {r4-r7, pc}
4: vld1.8 {d4, d5}, [r1], r2
vld1.8 {d6, d7}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
5: pld [r1]
subs r3, r3, #2
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vld1.8 {d4, d5}, [r1], r2
vmull.u8 q9, d6, d0
vmlal.u8 q9, d7, d1
pld [r1]
vext.8 d5, d4, d5, #1
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
vld1.8 {d6, d7}, [r1], r2
vext.8 d7, d6, d7, #1
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 5b
pop {r4-r7, pc}
endfunc
.endm
/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc4 type
function ff_\type\()_h264_chroma_mc4_neon, export=1
push {r4-r7, lr}
ldrd r4, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
pld [r1]
pld [r1, r2]
A muls r7, r4, r5
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3
rsb r12, r7, r4, lsl #3
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64
beq 2f
add r5, r1, r2
vdup.8 d0, r4
lsl r4, r2, #1
vdup.8 d1, r12
vld1.8 {d4}, [r1], r4
vdup.8 d2, r6
vld1.8 {d6}, [r5], r4
vdup.8 d3, r7
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vtrn.32 d4, d5
vtrn.32 d6, d7
vtrn.32 d0, d1
vtrn.32 d2, d3
1: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d2
vld1.8 {d4}, [r1], r4
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d2
vld1.8 {d6}, [r5], r4
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
vrshrn.u16 d16, q8, #6
subs r3, r3, #2
pld [r1]
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
vext.8 d7, d6, d7, #1
vtrn.32 d6, d7
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 1b
pop {r4-r7, pc}
2: tst r6, r6
add r12, r12, r6
vdup.8 d0, r4
vdup.8 d1, r12
vtrn.32 d0, d1
beq 4f
vext.32 d1, d0, d1, #1
add r5, r1, r2
lsl r4, r2, #1
vld1.32 {d4[0]}, [r1], r4
vld1.32 {d4[1]}, [r5], r4
3: pld [r5]
vmull.u8 q8, d4, d0
vld1.32 {d4[0]}, [r1], r4
vmull.u8 q9, d4, d1
vld1.32 {d4[1]}, [r5], r4
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
subs r3, r3, #2
pld [r1]
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 3b
pop {r4-r7, pc}
4: vld1.8 {d4}, [r1], r2
vld1.8 {d6}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vtrn.32 d4, d5
vtrn.32 d6, d7
5: vmull.u8 q8, d4, d0
vmull.u8 q9, d6, d0
subs r3, r3, #2
vld1.8 {d4}, [r1], r2
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
pld [r1]
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
vld1.8 {d6}, [r1], r2
vext.8 d7, d6, d7, #1
vtrn.32 d6, d7
pld [r1]
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 5b
pop {r4-r7, pc}
endfunc
.endm
.macro h264_chroma_mc2 type
function ff_\type\()_h264_chroma_mc2_neon, export=1
push {r4-r6, lr}
ldr r4, [sp, #16]
ldr lr, [sp, #20]
pld [r1]
pld [r1, r2]
orrs r5, r4, lr
beq 2f
mul r5, r4, lr
rsb r6, r5, lr, lsl #3
rsb r12, r5, r4, lsl #3
sub r4, r5, r4, lsl #3
sub r4, r4, lr, lsl #3
add r4, r4, #64
vdup.8 d0, r4
vdup.8 d2, r12
vdup.8 d1, r6
vdup.8 d3, r5
vtrn.16 q0, q1
1:
vld1.32 {d4[0]}, [r1], r2
vld1.32 {d4[1]}, [r1], r2
vrev64.32 d5, d4
vld1.32 {d5[1]}, [r1]
vext.8 q3, q2, q2, #1
vtrn.16 q2, q3
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
.ifc \type,avg
vld1.16 {d18[0]}, [r0,:16], r2
vld1.16 {d18[1]}, [r0,:16]
sub r0, r0, r2
.endif
vtrn.32 d16, d17
vadd.i16 d16, d16, d17
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vrhadd.u8 d16, d16, d18
.endif
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
subs r3, r3, #2
bgt 1b
pop {r4-r6, pc}
2:
.ifc \type,put
ldrh_post r5, r1, r2
strh_post r5, r0, r2
ldrh_post r6, r1, r2
strh_post r6, r0, r2
.else
vld1.16 {d16[0]}, [r1], r2
vld1.16 {d16[1]}, [r1], r2
vld1.16 {d18[0]}, [r0,:16], r2
vld1.16 {d18[1]}, [r0,:16]
sub r0, r0, r2
vrhadd.u8 d16, d16, d18
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
.endif
subs r3, r3, #2
bgt 2b
pop {r4-r6, pc}
endfunc
.endm
h264_chroma_mc8 put
h264_chroma_mc8 avg
h264_chroma_mc4 put
h264_chroma_mc4 avg
h264_chroma_mc2 put
h264_chroma_mc2 avg
/* H.264 loop filter */

.macro h264_loop_filter_start

libavcodec/arm/rv34dsp_init_neon.c (new file)

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/rv34dsp.h"
void ff_rv34_inv_transform_neon(DCTELEM *block);
void ff_rv34_inv_transform_noround_neon(DCTELEM *block);
void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
{
c->rv34_inv_transform_tab[0] = ff_rv34_inv_transform_neon;
c->rv34_inv_transform_tab[1] = ff_rv34_inv_transform_noround_neon;
}

libavcodec/arm/rv34dsp_neon.S (new file)

@@ -0,0 +1,109 @@
/*
* Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "asm.S"
.macro rv34_inv_transform
mov r1, #16
vld1.16 {d28}, [r0,:64], r1 @ block[i+8*0]
vld1.16 {d29}, [r0,:64], r1 @ block[i+8*1]
vld1.16 {d30}, [r0,:64], r1 @ block[i+8*2]
vld1.16 {d31}, [r0,:64], r1 @ block[i+8*3]
vmov.s16 d0, #13
vshll.s16 q12, d29, #3
vshll.s16 q13, d29, #4
vshll.s16 q9, d31, #3
vshll.s16 q1, d31, #4
vmull.s16 q10, d28, d0
vmlal.s16 q10, d30, d0
vmull.s16 q11, d28, d0
vmlsl.s16 q11, d30, d0
vsubw.s16 q12, q12, d29 @ z2 = block[i+8*1]*7
vaddw.s16 q13, q13, d29 @ z3 = block[i+8*1]*17
vsubw.s16 q9, q9, d31
vaddw.s16 q1, q1, d31
vadd.s32 q13, q13, q9 @ z3 = 17*block[i+8*1] + 7*block[i+8*3]
vsub.s32 q12, q12, q1 @ z2 = 7*block[i+8*1] - 17*block[i+8*3]
vadd.s32 q1, q10, q13 @ z0 + z3
vadd.s32 q2, q11, q12 @ z1 + z2
vsub.s32 q8, q10, q13 @ z0 - z3
vsub.s32 q3, q11, q12 @ z1 - z2
vtrn.32 q1, q2
vtrn.32 q3, q8
vswp d3, d6
vswp d5, d16
vmov.s32 d0, #13
vadd.s32 q10, q1, q3
vsub.s32 q11, q1, q3
vshl.s32 q12, q2, #3
vshl.s32 q9, q2, #4
vmul.s32 q13, q11, d0[0]
vshl.s32 q11, q8, #4
vadd.s32 q9, q9, q2
vshl.s32 q15, q8, #3
vsub.s32 q12, q12, q2
vadd.s32 q11, q11, q8
vmul.s32 q14, q10, d0[0]
vsub.s32 q8, q15, q8
vsub.s32 q12, q12, q11
vadd.s32 q9, q9, q8
vadd.s32 q2, q13, q12 @ z1 + z2
vadd.s32 q1, q14, q9 @ z0 + z3
vsub.s32 q3, q13, q12 @ z1 - z2
vsub.s32 q15, q14, q9 @ z0 - z3
.endm
/* void ff_rv34_inv_transform_neon(DCTELEM *block); */
function ff_rv34_inv_transform_neon, export=1
mov r2, r0
rv34_inv_transform
vrshrn.s32 d1, q2, #10 @ (z1 + z2) >> 10
vrshrn.s32 d0, q1, #10 @ (z0 + z3) >> 10
vrshrn.s32 d2, q3, #10 @ (z1 - z2) >> 10
vrshrn.s32 d3, q15, #10 @ (z0 - z3) >> 10
vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
bx lr
endfunc
/* void ff_rv34_inv_transform_noround_neon(DCTELEM *block); */
function ff_rv34_inv_transform_noround_neon, export=1
mov r2, r0
rv34_inv_transform
vshl.s32 q11, q2, #1
vshl.s32 q10, q1, #1
vshl.s32 q12, q3, #1
vshl.s32 q13, q15, #1
vadd.s32 q11, q11, q2
vadd.s32 q10, q10, q1
vadd.s32 q12, q12, q3
vadd.s32 q13, q13, q15
vshrn.s32 d0, q10, #11 @ (z0 + z3)*3 >> 11
vshrn.s32 d1, q11, #11 @ (z1 + z2)*3 >> 11
vshrn.s32 d2, q12, #11 @ (z1 - z2)*3 >> 11
vshrn.s32 d3, q13, #11 @ (z0 - z3)*3 >> 11
vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
bx lr
endfunc
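In scalar terms, each 1-D pass of the transform above computes the following over a row or column of four coefficients (a sketch with an assumed helper name; the z0..z3 labels match the comments in the rv34_inv_transform macro):

/* One 1-D pass over four coefficients b[0]..b[3]; applied first to
 * rows, then to columns, exactly as the two halves of the macro above. */
static void rv34_idct_1d_ref(int *out, const int *b)
{
    const int z0 = 13 * (b[0] + b[2]);
    const int z1 = 13 * (b[0] - b[2]);
    const int z2 =  7 * b[1] - 17 * b[3];
    const int z3 = 17 * b[1] +  7 * b[3];

    out[0] = z0 + z3;
    out[1] = z1 + z2;
    out[2] = z1 - z2;
    out[3] = z0 - z3;
}

After the second pass, the rounding variant stores (v + 512) >> 10 (the vrshrn #10 above), while the noround variant stores (v * 3) >> 11 with truncation, the multiply by 3 being done with the shift-and-add sequence at the start of ff_rv34_inv_transform_noround_neon.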

libavcodec/arm/rv40dsp_init_neon.c (new file)

@@ -0,0 +1,44 @@
/*
* Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/rv34dsp.h"
void ff_put_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_put_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_rv40_weight_func_16_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
void ff_rv40_weight_func_8_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
{
c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_neon;
c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_neon;
c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon;
c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_neon;
c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_neon;
c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_neon;
}

libavcodec/arm/rv40dsp_neon.S (new file)

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "asm.S"
.macro rv40_weight
vmovl.u8 q8, d2
vmovl.u8 q9, d3
vmovl.u8 q10, d4
vmovl.u8 q11, d5
vmull.u16 q2, d16, d0[2]
vmull.u16 q3, d17, d0[2]
vmull.u16 q8, d18, d0[2]
vmull.u16 q9, d19, d0[2]
vmull.u16 q12, d20, d0[0]
vmull.u16 q13, d21, d0[0]
vmull.u16 q14, d22, d0[0]
vmull.u16 q15, d23, d0[0]
vshrn.i32 d4, q2, #9
vshrn.i32 d5, q3, #9
vshrn.i32 d6, q8, #9
vshrn.i32 d7, q9, #9
vshrn.i32 d16, q12, #9
vshrn.i32 d17, q13, #9
vshrn.i32 d18, q14, #9
vshrn.i32 d19, q15, #9
vadd.u16 q2, q2, q8
vadd.u16 q3, q3, q9
vrshrn.i16 d2, q2, #5
vrshrn.i16 d3, q3, #5
.endm
/* void ff_rv40_weight_func_16_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
int w1, int w2, int stride) */
function ff_rv40_weight_func_16_neon, export=1
ldr r12, [sp]
vmov d0, r3, r12
ldr r12, [sp, #4]
mov r3, #16
1:
vld1.8 {q1}, [r1,:128], r12
vld1.8 {q2}, [r2,:128], r12
rv40_weight
vst1.8 {q1}, [r0,:128], r12
subs r3, r3, #1
bne 1b
bx lr
endfunc
/* void ff_rv40_weight_func_8_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
int w1, int w2, int stride) */
function ff_rv40_weight_func_8_neon, export=1
ldr r12, [sp]
vmov d0, r3, r12
ldr r12, [sp, #4]
mov r3, #8
1:
vld1.8 {d2}, [r1,:64], r12
vld1.8 {d3}, [r1,:64], r12
vld1.8 {d4}, [r2,:64], r12
vld1.8 {d5}, [r2,:64], r12
rv40_weight
vst1.8 {d2}, [r0,:64], r12
vst1.8 {d3}, [r0,:64], r12
subs r3, r3, #2
bne 1b
bx lr
endfunc
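Per pixel, the two weight functions compute the following (a sketch with assumed names; the truncating >> 9 and rounding >> 5 correspond to the vshrn #9 and vrshrn #5 in rv40_weight, and src1 deliberately pairs with w2, matching the vmull operand order):

#include <stdint.h>

/* RV40 weighted B-frame averaging: each product is pre-narrowed with a
 * truncating >> 9 so it fits back in 16 bits, then the sum is rounded
 * down to 8 bits (the vrshrn #5 adds the 0x10 before shifting). */
static void rv40_weight_ref(uint8_t *dst, const uint8_t *src1,
                            const uint8_t *src2, int w1, int w2,
                            int stride, int size)
{
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++)
            dst[j] = (((w2 * src1[j]) >> 9) +
                      ((w1 * src2[j]) >> 9) + 0x10) >> 5;
        dst  += stride;
        src1 += stride;
        src2 += stride;
    }
}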

libavcodec/mpegvideo.c

@@ -567,13 +567,15 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
     s->input_picture_number = s1->input_picture_number;

     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
-    memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
+    memcpy(&s->last_picture, &s1->last_picture,
+           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

-    memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
+    memcpy(s->prev_pict_types, s1->prev_pict_types,
+           PREV_PICT_TYPES_BUFFER_SIZE);

     // Error/bug resilience
     s->next_p_frame_damaged = s1->next_p_frame_damaged;
@@ -581,7 +583,8 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
     s->padding_bug_score = s1->padding_bug_score;

     // MPEG4 timing info
-    memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
+    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
+           (char *) &s1->shape - (char *) &s1->time_increment_bits);

     // B-frame info
     s->max_b_frames = s1->max_b_frames;
@@ -592,19 +595,26 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
     s->divx_packed = s1->divx_packed;

     if (s1->bitstream_buffer) {
-        if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
-            av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
+        if (s1->bitstream_buffer_size +
+            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
+            av_fast_malloc(&s->bitstream_buffer,
+                           &s->allocated_bitstream_buffer_size,
+                           s1->allocated_bitstream_buffer_size);
         s->bitstream_buffer_size = s1->bitstream_buffer_size;
-        memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
-        memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
+               s1->bitstream_buffer_size);
+        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
+               FF_INPUT_BUFFER_PADDING_SIZE);
     }

     // MPEG2/interlacing info
-    memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
+    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
+           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

     if (!s1->first_field) {
         s->last_pict_type = s1->pict_type;
-        if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
+        if (s1->current_picture_ptr)
+            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

         if (s1->pict_type != AV_PICTURE_TYPE_B) {
             s->last_non_b_pict_type = s1->pict_type;
@@ -615,10 +625,13 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 }

 /**
- * sets the given MpegEncContext to common defaults (same for encoding and decoding).
- * the changed fields will not depend upon the prior state of the MpegEncContext.
+ * sets the given MpegEncContext to common defaults
+ * (same for encoding and decoding).
+ * the changed fields will not depend upon the
+ * prior state of the MpegEncContext.
  */
-void MPV_common_defaults(MpegEncContext *s){
+void MPV_common_defaults(MpegEncContext *s)
+{
     s->y_dc_scale_table    =
     s->c_dc_scale_table    = ff_mpeg1_dc_scale_table;
     s->chroma_qscale_table = ff_default_chroma_qscale_table;
@@ -641,9 +654,11 @@ void MPV_common_defaults(MpegEncContext *s){
 /**
  * sets the given MpegEncContext to defaults for decoding.
- * the changed fields will not depend upon the prior state of the MpegEncContext.
+ * the changed fields will not depend upon
+ * the prior state of the MpegEncContext.
  */
-void MPV_decode_defaults(MpegEncContext *s){
+void MPV_decode_defaults(MpegEncContext *s)
+{
     MPV_common_defaults(s);
 }
@@ -665,19 +680,23 @@ av_cold int MPV_common_init(MpegEncContext *s)
     s->mb_height = (s->height + 15) / 16;

     if (s->avctx->pix_fmt == PIX_FMT_NONE) {
-        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
+        av_log(s->avctx, AV_LOG_ERROR,
+               "decoding to PIX_FMT_NONE is not supported.\n");
         return -1;
     }

     if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
-        (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
+        (s->avctx->thread_count > MAX_THREADS ||
+         (s->avctx->thread_count > s->mb_height && s->mb_height))) {
         int max_threads = FFMIN(MAX_THREADS, s->mb_height);
-        av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
+        av_log(s->avctx, AV_LOG_WARNING,
+               "too many threads (%d), reducing to %d\n",
                s->avctx->thread_count, max_threads);
         threads = max_threads;
     }

-    if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
+    if ((s->width || s->height) &&
+        av_image_check_size(s->width, s->height, 0, s->avctx))
         return -1;

     ff_dct_common_init(s);
@@ -719,12 +738,11 @@ av_cold int MPV_common_init(MpegEncContext *s)
     s->avctx->coded_frame = (AVFrame*)&s->current_picture;

-    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
-    for(y=0; y<s->mb_height; y++){
-        for(x=0; x<s->mb_width; x++){
-            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
-        }
-    }
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
+    for (y = 0; y < s->mb_height; y++)
+        for (x = 0; x < s->mb_width; x++)
+            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

     s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

     if (s->encoding) {
@@ -767,7 +785,8 @@ av_cold int MPV_common_init(MpegEncContext *s)
     }

     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
-    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
+                      s->picture_count * sizeof(Picture), fail);
     for (i = 0; i < s->picture_count; i++) {
         avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
     }
@@ -796,8 +815,8 @@ av_cold int MPV_common_init(MpegEncContext *s)
         s->coded_block = s->coded_block_base + s->b8_stride + 1;

         /* cbp, ac_pred, pred_dir */
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
+        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
+        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
     }

     if (s->h263_pred || s->h263_plus || !s->encoding) {
@@ -843,10 +862,10 @@ av_cold int MPV_common_init(MpegEncContext *s)
             s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count / 2) / s->avctx->thread_count;
         }
     } else {
-        if(init_duplicate_context(s, s) < 0) goto fail;
+        if (init_duplicate_context(s, s) < 0)
+            goto fail;
         s->start_mb_y = 0;
         s->end_mb_y   = s->mb_height;
     }

     return 0;
@@ -945,7 +964,8 @@ void MPV_common_end(MpegEncContext *s)
         avcodec_default_free_buffers(s->avctx);
 }

-void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
+void init_rl(RLTable *rl,
+             uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
 {
     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
     uint8_t index_run[MAX_RUN + 1];
@@ -1042,16 +1062,17 @@ void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
     /* release non reference frames */
     for (i = 0; i < s->picture_count; i++) {
-        if (s->picture[i].f.data[0] && !s->picture[i].f.reference
-            && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
-            && (remove_current || &s->picture[i] != s->current_picture_ptr)
+        if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
+            (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
+            (remove_current || &s->picture[i] != s->current_picture_ptr)
             /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
             free_frame_buffer(s, &s->picture[i]);
         }
     }
 }

-int ff_find_unused_picture(MpegEncContext *s, int shared){
+int ff_find_unused_picture(MpegEncContext *s, int shared)
+{
     int i;

     if (shared) {
@@ -1070,7 +1091,8 @@ int ff_find_unused_picture(MpegEncContext *s, int shared){
         }
     }

-    av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
+    av_log(s->avctx, AV_LOG_FATAL,
+           "Internal error, picture buffer overflow\n");
     /* We could return -1, but the codec would crash trying to draw into a
      * non-existing frame anyway. This is safer than waiting for a random crash.
      * Also the return of this is never useful, an encoder must only allocate

libavcodec/rv34dsp.c

@@ -103,4 +103,7 @@ static void rv34_inv_transform_noround_c(DCTELEM *block){
 av_cold void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->rv34_inv_transform_tab[0] = rv34_inv_transform_c;
     c->rv34_inv_transform_tab[1] = rv34_inv_transform_noround_c;
+
+    if (HAVE_NEON)
+        ff_rv34dsp_init_neon(c, dsp);
 }

libavcodec/rv34dsp.h

@@ -56,6 +56,9 @@ void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp);
 void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp);
 void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp);

+void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext *dsp);
+
 void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp);
+void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext *dsp);

 #endif /* AVCODEC_RV34DSP_H */

libavcodec/rv40dsp.c

@@ -534,4 +534,6 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     if (HAVE_MMX)
         ff_rv40dsp_init_x86(c, dsp);
+    if (HAVE_NEON)
+        ff_rv40dsp_init_neon(c, dsp);
 }