Add arm asm code for processing.
This commit is contained in:
parent
248f324c62
commit
e7cc8c2780
@ -795,7 +795,7 @@ WELS_ASM_FUNC_BEGIN DeblockChromaEq4H_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN enc_avc_non_zero_count_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsNonZeroCount_neon
|
||||
|
||||
vld1.64 {d0-d2}, [r0]
|
||||
|
||||
@ -810,7 +810,6 @@ WELS_ASM_FUNC_BEGIN enc_avc_non_zero_count_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
#ifdef APPLE_IOS
|
||||
|
||||
.macro BS_NZC_CHECK
|
||||
vld1.8 {d0,d1}, [$0]
|
||||
/* Arrenge the input data --- TOP */
|
||||
@ -904,7 +903,6 @@ bs_mv_check_jump1:
|
||||
BS_COMPARE_MV q4, q0, q1, q2, q3, $5, $6
|
||||
.endm
|
||||
#else
|
||||
|
||||
.macro BS_NZC_CHECK arg0, arg1, arg2, arg3, arg4
|
||||
vld1.8 {d0,d1}, [\arg0]
|
||||
/* Arrenge the input data --- TOP */
|
||||
|
0
codec/common/expand_picture.S
Executable file → Normal file
0
codec/common/expand_picture.S
Executable file → Normal file
@ -533,7 +533,7 @@ WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredH_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredDC_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredDc_neon
|
||||
//stmdb sp!, { r2-r5, lr}
|
||||
//Load the left column data (8 bytes)
|
||||
sub r2, r0, #1
|
||||
|
0
codec/encoder/core/arm/intra_pred_neon.S
Executable file → Normal file
0
codec/encoder/core/arm/intra_pred_neon.S
Executable file → Normal file
10
codec/encoder/core/arm/intra_pred_sad_3_opt_neon.S
Executable file → Normal file
10
codec/encoder/core/arm/intra_pred_sad_3_opt_neon.S
Executable file → Normal file
@ -152,7 +152,7 @@
|
||||
.endm
|
||||
#endif
|
||||
|
||||
WELS_ASM_FUNC_BEGIN satd_intra_16x16_x3_opt_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsIntra16x16Combined3Satd_neon
|
||||
stmdb sp!, {r4-r7, lr}
|
||||
|
||||
//Get the top line data to 'q15'(16 bytes)
|
||||
@ -295,7 +295,7 @@ WELS_ASM_FUNC_BEGIN satd_intra_16x16_x3_opt_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN sad_intra_16x16_x3_opt_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsIntra16x16Combined3Sad_neon
|
||||
stmdb sp!, {r4-r7, lr}
|
||||
|
||||
//Get the top line data to 'q15'(16 bytes)
|
||||
@ -384,7 +384,7 @@ sad_intra_16x16_x3_opt_loop0:
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN sad_intra_8x8_x3_opt_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsIntra8x8Combined3Sad_neon
|
||||
stmdb sp!, {r4-r7, lr}
|
||||
|
||||
//Get the data from stack
|
||||
@ -533,7 +533,7 @@ sad_intra_8x8_x3_opt_loop1:
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN satd_intra_8x8_x3_opt_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsIntra8x8Combined3Satd_neon
|
||||
stmdb sp!, {r4-r7, lr}
|
||||
|
||||
//Get the data from stack
|
||||
@ -672,7 +672,7 @@ WELS_ASM_FUNC_BEGIN satd_intra_8x8_x3_opt_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN satd_intra_4x4_x3_opt_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsIntra4x4Combined3Satd_neon
|
||||
stmdb sp!, {r4-r7, lr}
|
||||
|
||||
//Get the top line data to 'd31[0~3]'(4 bytes)
|
||||
|
248
codec/encoder/core/arm/mc_neon.S
Executable file → Normal file
248
codec/encoder/core/arm/mc_neon.S
Executable file → Normal file
@ -37,221 +37,221 @@
|
||||
#ifdef APPLE_IOS
|
||||
.macro AVERAGE_TWO_8BITS
|
||||
// { // input:dst_d, src_d A and B; working: q13
|
||||
vaddl.u8 q13, $2, $1
|
||||
vrshrn.u16 $0, q13, #1
|
||||
vaddl.u8 q13, $2, $1
|
||||
vrshrn.u16 $0, q13, #1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS
|
||||
// { // input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b;
|
||||
vaddl.u8 q12, $0, $5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, $7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, $8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 $6, q12, #5
|
||||
vaddl.u8 q12, $0, $5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, $7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, $8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 $6, q12, #5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_SINGLE_TAG_8BITS // when width=17/9, used
|
||||
// { // input: src_d{Y[0][1][2][3][4][5]X, the even of working_q2},
|
||||
vrev64.8 $2, $0 // X[5][4][3][2][1][0]O
|
||||
vaddl.u8 $3, $0, $2 // each 16bits, *[50][41][32][23][14][05]*
|
||||
vmul.s16 $0, $2, $1 // 0+1*[50]-5*[41]+20[32]
|
||||
vpadd.s16 $0, $0, $0
|
||||
vpadd.s16 $0, $0, $0
|
||||
vqrshrun.s16 $0, $4, #5
|
||||
vrev64.8 $2, $0 // X[5][4][3][2][1][0]O
|
||||
vaddl.u8 $3, $0, $2 // each 16bits, *[50][41][32][23][14][05]*
|
||||
vmul.s16 $0, $2, $1 // 0+1*[50]-5*[41]+20[32]
|
||||
vpadd.s16 $0, $0, $0
|
||||
vpadd.s16 $0, $0, $0
|
||||
vqrshrun.s16 $0, $4, #5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS_AVERAGE_WITH_0
|
||||
// { // input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b;
|
||||
vaddl.u8 q12, $0, $5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, $7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, $8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 $6, q12, #5
|
||||
vaddl.u8 q13, $2, $6
|
||||
vrshrn.u16 $6, q13, #1
|
||||
vaddl.u8 q12, $0, $5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, $7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, $8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 $6, q12, #5
|
||||
vaddl.u8 q13, $2, $6
|
||||
vrshrn.u16 $6, q13, #1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS_AVERAGE_WITH_1
|
||||
// { // input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b;
|
||||
vaddl.u8 q12, $0, $5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, $7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, $8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 $6, q12, #5
|
||||
vaddl.u8 q13, $3, $6
|
||||
vrshrn.u16 $6, q13, #1
|
||||
vaddl.u8 q12, $0, $5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, $7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, $8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 $6, q12, #5
|
||||
vaddl.u8 q13, $3, $6
|
||||
vrshrn.u16 $6, q13, #1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS_TO_16BITS
|
||||
// { // input:d_src[-2], d_src[-1], d_src[0], d_src[1], d_src[2], d_src[3], dst_q,
|
||||
vaddl.u8 $6, $0, $5 //dst_q=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 $6, q13, $7 //dst_q += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 $6, q13, $8 //dst_q -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vaddl.u8 $6, $0, $5 //dst_q=src[-2]+src[3]
|
||||
vaddl.u8 q13, $2, $3 //src[0]+src[1]
|
||||
vmla.u16 $6, q13, $7 //dst_q += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, $1, $4 //src[-1]+src[2]
|
||||
vmls.s16 $6, q13, $8 //dst_q -= 5*(src[-1]+src[2]), 2 cycles
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_3_IN_16BITS_TO_8BITS
|
||||
// { // input:a, b, c, dst_d;
|
||||
vsub.s16 $0, $0, $1 //a-b
|
||||
vshr.s16 $0, $0, #2 //(a-b)/4
|
||||
vsub.s16 $0, $0, $1 //(a-b)/4-b
|
||||
vadd.s16 $0, $0, $2 //(a-b)/4-b+c
|
||||
vshr.s16 $0, $0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 $0, $0, $2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 $3, $0, #6 //(+32)>>6
|
||||
vsub.s16 $0, $0, $1 //a-b
|
||||
vshr.s16 $0, $0, #2 //(a-b)/4
|
||||
vsub.s16 $0, $0, $1 //(a-b)/4-b
|
||||
vadd.s16 $0, $0, $2 //(a-b)/4-b+c
|
||||
vshr.s16 $0, $0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 $0, $0, $2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 $3, $0, #6 //(+32)>>6
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro UNPACK_2_16BITS_TO_ABC
|
||||
// { // input:q_src[-2:5], q_src[6:13](avail 8+5)/q_src[6:**](avail 4+5), dst_a
|
||||
vext.16 $4, $0, $1, #2 //src[0]
|
||||
vext.16 $3, $0, $1, #3 //src[1]
|
||||
vadd.s16 $4, $3 //c=src[0]+src[1]
|
||||
vext.16 $4, $0, $1, #2 //src[0]
|
||||
vext.16 $3, $0, $1, #3 //src[1]
|
||||
vadd.s16 $4, $3 //c=src[0]+src[1]
|
||||
|
||||
vext.16 $3, $0, $1, #1 //src[-1]
|
||||
vext.16 $2, $0, $1, #4 //src[2]
|
||||
vadd.s16 $3, $2 //b=src[-1]+src[2]
|
||||
vext.16 $3, $0, $1, #1 //src[-1]
|
||||
vext.16 $2, $0, $1, #4 //src[2]
|
||||
vadd.s16 $3, $2 //b=src[-1]+src[2]
|
||||
|
||||
vext.16 $2, $0, $1, #5 //src[3]
|
||||
vadd.s16 $2, $0 //a=src[-2]+src[3]
|
||||
vext.16 $2, $0, $1, #5 //src[3]
|
||||
vadd.s16 $2, $0 //a=src[-2]+src[3]
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro UNPACK_1_IN_8x16BITS_TO_8BITS
|
||||
// { // each 16bits; input: d_dst, d_src[0:3] (even), d_src[4:5]+%% (odd)
|
||||
vext.16 $3, $3, $3, #7 // 0x????, [0][1][2][3][4][5],
|
||||
vrev64.16 $1, $1
|
||||
vadd.u16 $2, $1 // C[2+3],B[1+4],A[0+5],
|
||||
vshr.s64 $1, $2, #16
|
||||
vshr.s64 $0, $2, #32 // Output: C $2, B $1, A $0
|
||||
vext.16 $3, $3, $3, #7 // 0x????, [0][1][2][3][4][5],
|
||||
vrev64.16 $1, $1
|
||||
vadd.u16 $2, $1 // C[2+3],B[1+4],A[0+5],
|
||||
vshr.s64 $1, $2, #16
|
||||
vshr.s64 $0, $2, #32 // Output: C $2, B $1, A $0
|
||||
|
||||
vsub.s16 $0, $0, $1 //a-b
|
||||
vshr.s16 $0, $0, #2 //(a-b)/4
|
||||
vsub.s16 $0, $0, $1 //(a-b)/4-b
|
||||
vadd.s16 $0, $0, $2 //(a-b)/4-b+c
|
||||
vshr.s16 $0, $0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 $1, $0, $2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 $0, $3, #6 //(+32)>>6
|
||||
vsub.s16 $0, $0, $1 //a-b
|
||||
vshr.s16 $0, $0, #2 //(a-b)/4
|
||||
vsub.s16 $0, $0, $1 //(a-b)/4-b
|
||||
vadd.s16 $0, $0, $2 //(a-b)/4-b+c
|
||||
vshr.s16 $0, $0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 $1, $0, $2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 $0, $3, #6 //(+32)>>6
|
||||
// }
|
||||
.endm
|
||||
#else
|
||||
.macro AVERAGE_TWO_8BITS arg0, arg1,arg2
|
||||
// { // input:dst_d, src_d A and B; working: q13
|
||||
vaddl.u8 q13, \arg2, \arg1
|
||||
vrshrn.u16 \arg0, q13, #1
|
||||
vaddl.u8 q13, \arg2, \arg1
|
||||
vrshrn.u16 \arg0, q13, #1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS arg0, arg1,arg2, arg3, arg4,arg5, arg6, arg7,arg8
|
||||
// { // input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b
|
||||
vaddl.u8 q12, \arg0, \arg5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, \arg7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, \arg8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 \arg6, q12, #5
|
||||
vaddl.u8 q12, \arg0, \arg5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, \arg7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, \arg8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 \arg6, q12, #5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_SINGLE_TAG_8BITS arg0, arg1,arg2, arg3, arg4,arg5 // when width=17/9, used
|
||||
// { // input: src_d{Y[0][1][2][3][4][5]X, the even of working_q2}
|
||||
vrev64.8 \arg2, \arg0 // X[5][4][3][2][1][0]O
|
||||
vaddl.u8 \arg3, \arg0, \arg2 // each 16bits, *[50][41][32][23][14][05]*
|
||||
vmul.s16 \arg0, \arg2, \arg1 // 0+1*[50]-5*[41]+20[32]
|
||||
vpadd.s16 \arg0, \arg0, \arg0
|
||||
vpadd.s16 \arg0, \arg0, \arg0
|
||||
vqrshrun.s16 \arg0, \arg4, #5
|
||||
vrev64.8 \arg2, \arg0 // X[5][4][3][2][1][0]O
|
||||
vaddl.u8 \arg3, \arg0, \arg2 // each 16bits, *[50][41][32][23][14][05]*
|
||||
vmul.s16 \arg0, \arg2, \arg1 // 0+1*[50]-5*[41]+20[32]
|
||||
vpadd.s16 \arg0, \arg0, \arg0
|
||||
vpadd.s16 \arg0, \arg0, \arg0
|
||||
vqrshrun.s16 \arg0, \arg4, #5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS_AVERAGE_WITH_0 arg0, arg1,arg2, arg3, arg4,arg5, arg6, arg7,arg8
|
||||
// { // input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d
|
||||
vaddl.u8 q12, \arg0, \arg5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, \arg7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, \arg8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 \arg6, q12, #5
|
||||
vaddl.u8 q13, \arg2, \arg6
|
||||
vrshrn.u16 \arg6, q13, #1
|
||||
vaddl.u8 q12, \arg0, \arg5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, \arg7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, \arg8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 \arg6, q12, #5
|
||||
vaddl.u8 q13, \arg2, \arg6
|
||||
vrshrn.u16 \arg6, q13, #1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS_AVERAGE_WITH_1 arg0, arg1,arg2, arg3, arg4,arg5, arg6, arg7,arg8
|
||||
// { // input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d
|
||||
vaddl.u8 q12, \arg0, \arg5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, \arg7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, \arg8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 \arg6, q12, #5
|
||||
vaddl.u8 q13, \arg3, \arg6
|
||||
vrshrn.u16 \arg6, q13, #1
|
||||
vaddl.u8 q12, \arg0, \arg5 //q12=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 q12, q13, \arg7 //q12 += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 q12, q13, \arg8 //q12 -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vqrshrun.s16 \arg6, q12, #5
|
||||
vaddl.u8 q13, \arg3, \arg6
|
||||
vrshrn.u16 \arg6, q13, #1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_6TAG_8BITS_TO_16BITS arg0, arg1,arg2, arg3, arg4,arg5, arg6, arg7,arg8
|
||||
// { // input:d_src[-2], d_src[-1], d_src[0], d_src[1], d_src[2], d_src[3]
|
||||
vaddl.u8 \arg6, \arg0, \arg5 //dst_q=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 \arg6, q13, \arg7 //dst_q += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 \arg6, q13, \arg8 //dst_q -= 5*(src[-1]+src[2]), 2 cycles
|
||||
vaddl.u8 \arg6, \arg0, \arg5 //dst_q=src[-2]+src[3]
|
||||
vaddl.u8 q13, \arg2, \arg3 //src[0]+src[1]
|
||||
vmla.u16 \arg6, q13, \arg7 //dst_q += 20*(src[0]+src[1]), 2 cycles
|
||||
vaddl.u8 q13, \arg1, \arg4 //src[-1]+src[2]
|
||||
vmls.s16 \arg6, q13, \arg8 //dst_q -= 5*(src[-1]+src[2]), 2 cycles
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro FILTER_3_IN_16BITS_TO_8BITS arg0, arg1,arg2, arg3
|
||||
// { // input:a, b, c, dst_d;
|
||||
vsub.s16 \arg0, \arg0, \arg1 //a-b
|
||||
vshr.s16 \arg0, \arg0, #2 //(a-b)/4
|
||||
vsub.s16 \arg0, \arg0, \arg1 //(a-b)/4-b
|
||||
vadd.s16 \arg0, \arg0, \arg2 //(a-b)/4-b+c
|
||||
vshr.s16 \arg0, \arg0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 \arg0, \arg0, \arg2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 \arg3, \arg0, #6 //(+32)>>6
|
||||
vsub.s16 \arg0, \arg0, \arg1 //a-b
|
||||
vshr.s16 \arg0, \arg0, #2 //(a-b)/4
|
||||
vsub.s16 \arg0, \arg0, \arg1 //(a-b)/4-b
|
||||
vadd.s16 \arg0, \arg0, \arg2 //(a-b)/4-b+c
|
||||
vshr.s16 \arg0, \arg0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 \arg0, \arg0, \arg2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 \arg3, \arg0, #6 //(+32)>>6
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro UNPACK_2_16BITS_TO_ABC arg0, arg1,arg2, arg3, arg4
|
||||
// { // input:q_src[-2:5], q_src[6:13](avail 8+5)/q_src[6:**](avail 4+5)
|
||||
vext.16 \arg4, \arg0, \arg1, #2 //src[0]
|
||||
vext.16 \arg3, \arg0, \arg1, #3 //src[1]
|
||||
vadd.s16 \arg4, \arg3 //c=src[0]+src[1]
|
||||
vext.16 \arg4, \arg0, \arg1, #2 //src[0]
|
||||
vext.16 \arg3, \arg0, \arg1, #3 //src[1]
|
||||
vadd.s16 \arg4, \arg3 //c=src[0]+src[1]
|
||||
|
||||
vext.16 \arg3, \arg0, \arg1, #1 //src[-1]
|
||||
vext.16 \arg2, \arg0, \arg1, #4 //src[2]
|
||||
vadd.s16 \arg3, \arg2 //b=src[-1]+src[2]
|
||||
vext.16 \arg3, \arg0, \arg1, #1 //src[-1]
|
||||
vext.16 \arg2, \arg0, \arg1, #4 //src[2]
|
||||
vadd.s16 \arg3, \arg2 //b=src[-1]+src[2]
|
||||
|
||||
vext.16 \arg2, \arg0, \arg1, #5 //src[3]
|
||||
vadd.s16 \arg2, \arg0 //a=src[-2]+src[3]
|
||||
vext.16 \arg2, \arg0, \arg1, #5 //src[3]
|
||||
vadd.s16 \arg2, \arg0 //a=src[-2]+src[3]
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro UNPACK_1_IN_8x16BITS_TO_8BITS arg0, arg1,arg2, arg3
|
||||
// { // each 16bits; input: d_dst, d_src[0:3] (even), d_src[4:5]+%% (odd)
|
||||
vext.16 \arg3, \arg3, \arg3, #7 // 0x????, [0][1][2][3][4][5]
|
||||
vrev64.16 \arg1, \arg1
|
||||
vadd.u16 \arg2, \arg1 // C[2+3],B[1+4],A[0+5]
|
||||
vshr.s64 \arg1, \arg2, #16
|
||||
vshr.s64 \arg0, \arg2, #32 // Output: C \arg2, B \arg1, A \arg0
|
||||
vext.16 \arg3, \arg3, \arg3, #7 // 0x????, [0][1][2][3][4][5]
|
||||
vrev64.16 \arg1, \arg1
|
||||
vadd.u16 \arg2, \arg1 // C[2+3],B[1+4],A[0+5]
|
||||
vshr.s64 \arg1, \arg2, #16
|
||||
vshr.s64 \arg0, \arg2, #32 // Output: C \arg2, B \arg1, A \arg0
|
||||
|
||||
vsub.s16 \arg0, \arg0, \arg1 //a-b
|
||||
vshr.s16 \arg0, \arg0, #2 //(a-b)/4
|
||||
vsub.s16 \arg0, \arg0, \arg1 //(a-b)/4-b
|
||||
vadd.s16 \arg0, \arg0, \arg2 //(a-b)/4-b+c
|
||||
vshr.s16 \arg0, \arg0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 \arg1, \arg0, \arg2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 \arg0, \arg3, #6 //(+32)>>6
|
||||
vsub.s16 \arg0, \arg0, \arg1 //a-b
|
||||
vshr.s16 \arg0, \arg0, #2 //(a-b)/4
|
||||
vsub.s16 \arg0, \arg0, \arg1 //(a-b)/4-b
|
||||
vadd.s16 \arg0, \arg0, \arg2 //(a-b)/4-b+c
|
||||
vshr.s16 \arg0, \arg0, #2 //((a-b)/4-b+c)/4
|
||||
vadd.s16 \arg1, \arg0, \arg2 //((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
|
||||
vqrshrun.s16 \arg0, \arg3, #6 //(+32)>>6
|
||||
// }
|
||||
.endm
|
||||
#endif
|
||||
|
0
codec/encoder/core/arm/memory_neon.S
Executable file → Normal file
0
codec/encoder/core/arm/memory_neon.S
Executable file → Normal file
34
codec/encoder/core/arm/pixel_neon.S
Executable file → Normal file
34
codec/encoder/core/arm/pixel_neon.S
Executable file → Normal file
@ -220,7 +220,7 @@
|
||||
.endm
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_16x16_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSad16x16_neon
|
||||
|
||||
vld1.64 {q0}, [r0, :128], r1
|
||||
vld1.64 {q1}, [r2], r3
|
||||
@ -260,7 +260,7 @@ WELS_ASM_FUNC_BEGIN pixel_sad_16x16_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_16x8_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSad16x8_neon
|
||||
|
||||
vld1.64 {q0}, [r0, :128], r1
|
||||
vld1.64 {q1}, [r2], r3
|
||||
@ -298,7 +298,7 @@ WELS_ASM_FUNC_BEGIN pixel_sad_16x8_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_8x16_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSad8x16_neon
|
||||
|
||||
vld1.64 {d0}, [r0, :64], r1
|
||||
vld1.64 {d1}, [r2], r3
|
||||
@ -332,7 +332,7 @@ WELS_ASM_FUNC_BEGIN pixel_sad_8x16_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_8x8_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSad8x8_neon
|
||||
|
||||
vld1.64 {d0}, [r0, :64], r1
|
||||
vld1.64 {d1}, [r2], r3
|
||||
@ -364,7 +364,7 @@ WELS_ASM_FUNC_BEGIN pixel_sad_8x8_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_4x4_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSad4x4_neon
|
||||
stmdb sp!, {r4-r5, lr}
|
||||
|
||||
//Loading a horizontal line data (4 bytes)
|
||||
@ -392,7 +392,7 @@ WELS_ASM_FUNC_BEGIN pixel_sad_4x4_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_4_16x16_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSadFour16x16_neon
|
||||
|
||||
stmdb sp!, {r4-r5, lr}
|
||||
|
||||
@ -471,7 +471,7 @@ pixel_sad_4_16x16_loop_0:
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_4_16x8_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSadFour16x8_neon
|
||||
stmdb sp!, {r4-r5, lr}
|
||||
|
||||
//Generate the pix2 start addr
|
||||
@ -548,7 +548,7 @@ pixel_sad_4_16x8_loop_0:
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_4_8x16_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSadFour8x16_neon
|
||||
stmdb sp!, {r4-r5, lr}
|
||||
|
||||
//Generate the pix2 start addr
|
||||
@ -614,7 +614,7 @@ pixel_sad_4_8x16_loop_0:
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_4_8x8_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSadFour8x8_neon
|
||||
stmdb sp!, {r4-r5, lr}
|
||||
|
||||
//Generate the pix2 start addr
|
||||
@ -679,7 +679,7 @@ pixel_sad_4_8x8_loop_0:
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_sad_4_4x4_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSadFour4x4_neon
|
||||
|
||||
vld1.32 {d0[0]}, [r0], r1
|
||||
vld1.32 {d0[1]}, [r0], r1
|
||||
@ -744,7 +744,7 @@ WELS_ASM_FUNC_BEGIN pixel_sad_4_4x4_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_satd_16x16_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSatd16x16_neon
|
||||
|
||||
SATD_16x4
|
||||
vadd.u16 q15, q0, q2
|
||||
@ -769,7 +769,7 @@ WELS_ASM_FUNC_BEGIN pixel_satd_16x16_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_satd_16x8_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSatd16x8_neon
|
||||
|
||||
SATD_16x4
|
||||
vadd.u16 q15, q0, q2
|
||||
@ -786,7 +786,7 @@ WELS_ASM_FUNC_BEGIN pixel_satd_16x8_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_satd_8x16_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSatd8x16_neon
|
||||
|
||||
SATD_8x4
|
||||
vadd.u16 q15, q0, q1
|
||||
@ -811,7 +811,7 @@ WELS_ASM_FUNC_BEGIN pixel_satd_8x16_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_satd_8x8_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSatd8x8_neon
|
||||
|
||||
SATD_8x4
|
||||
vadd.u16 q15, q0, q1
|
||||
@ -828,7 +828,7 @@ WELS_ASM_FUNC_BEGIN pixel_satd_8x8_neon
|
||||
WELS_ASM_FUNC_END
|
||||
|
||||
|
||||
WELS_ASM_FUNC_BEGIN pixel_satd_4x4_neon
|
||||
WELS_ASM_FUNC_BEGIN WelsSampleSatd4x4_neon
|
||||
|
||||
//Load the pix1 data --- 16 bytes
|
||||
vld1.32 {d0[0]}, [r0], r1
|
||||
@ -868,8 +868,8 @@ WELS_ASM_FUNC_BEGIN pixel_satd_4x4_neon
|
||||
vadd.u16 q0, q15, q14
|
||||
|
||||
vrhadd.u16 d0, d1
|
||||
vpaddl.u16 d0, d0
|
||||
vpaddl.u32 d0, d0
|
||||
vpaddl.u16 d0, d0
|
||||
vpaddl.u32 d0, d0
|
||||
|
||||
vmov.u32 r0, d0[0]
|
||||
|
||||
|
604
codec/encoder/core/arm/reconstruct_neon.S
Executable file → Normal file
604
codec/encoder/core/arm/reconstruct_neon.S
Executable file → Normal file
@ -37,561 +37,561 @@
|
||||
#ifdef APPLE_IOS
|
||||
.macro LORD_ALIGNED_DATA_WITH_STRIDE
|
||||
// { // input: $0~$3, src*, src_stride
|
||||
vld1.64 {$0}, [$4,:128], $5
|
||||
vld1.64 {$1}, [$4,:128], $5
|
||||
vld1.64 {$2}, [$4,:128], $5
|
||||
vld1.64 {$3}, [$4,:128], $5
|
||||
vld1.64 {$0}, [$4,:128], $5
|
||||
vld1.64 {$1}, [$4,:128], $5
|
||||
vld1.64 {$2}, [$4,:128], $5
|
||||
vld1.64 {$3}, [$4,:128], $5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro STORE_ALIGNED_DATA_WITH_STRIDE
|
||||
// { // input: $0~$3, dst*, dst_stride
|
||||
vst1.64 {$0}, [$4,:128], $5
|
||||
vst1.64 {$1}, [$4,:128], $5
|
||||
vst1.64 {$2}, [$4,:128], $5
|
||||
vst1.64 {$3}, [$4,:128], $5
|
||||
vst1.64 {$0}, [$4,:128], $5
|
||||
vst1.64 {$1}, [$4,:128], $5
|
||||
vst1.64 {$2}, [$4,:128], $5
|
||||
vst1.64 {$3}, [$4,:128], $5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro LORD_UNALIGNED_DATA_WITH_STRIDE
|
||||
// { // input: $0~$3, src*, src_stride
|
||||
vld1.64 {$0}, [$4], $5
|
||||
vld1.64 {$1}, [$4], $5
|
||||
vld1.64 {$2}, [$4], $5
|
||||
vld1.64 {$3}, [$4], $5
|
||||
vld1.64 {$0}, [$4], $5
|
||||
vld1.64 {$1}, [$4], $5
|
||||
vld1.64 {$2}, [$4], $5
|
||||
vld1.64 {$3}, [$4], $5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro STORE_UNALIGNED_DATA_WITH_STRIDE
|
||||
// { // input: $0~$3, dst*, dst_stride
|
||||
vst1.64 {$0}, [$4], $5
|
||||
vst1.64 {$1}, [$4], $5
|
||||
vst1.64 {$2}, [$4], $5
|
||||
vst1.64 {$3}, [$4], $5
|
||||
vst1.64 {$0}, [$4], $5
|
||||
vst1.64 {$1}, [$4], $5
|
||||
vst1.64 {$2}, [$4], $5
|
||||
vst1.64 {$3}, [$4], $5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro LOAD_4x4_DATA_FOR_DCT
|
||||
// { // input: $0~$3, src1*, src1_stride, src2*, src2_stride
|
||||
vld2.16 {$0[0],$1[0]}, [$4], $5
|
||||
vld2.16 {$2[0],$3[0]}, [$6], $7
|
||||
vld2.16 {$0[1],$1[1]}, [$4], $5
|
||||
vld2.16 {$2[1],$3[1]}, [$6], $7
|
||||
vld2.16 {$0[0],$1[0]}, [$4], $5
|
||||
vld2.16 {$2[0],$3[0]}, [$6], $7
|
||||
vld2.16 {$0[1],$1[1]}, [$4], $5
|
||||
vld2.16 {$2[1],$3[1]}, [$6], $7
|
||||
|
||||
vld2.16 {$0[2],$1[2]}, [$4], $5
|
||||
vld2.16 {$2[2],$3[2]}, [$6], $7
|
||||
vld2.16 {$0[3],$1[3]}, [$4], $5
|
||||
vld2.16 {$2[3],$3[3]}, [$6], $7
|
||||
vld2.16 {$0[2],$1[2]}, [$4], $5
|
||||
vld2.16 {$2[2],$3[2]}, [$6], $7
|
||||
vld2.16 {$0[3],$1[3]}, [$4], $5
|
||||
vld2.16 {$2[3],$3[3]}, [$6], $7
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro LOAD_8x8_DATA_FOR_DCT
|
||||
// { // input: $0~$3, src1*, src2*; untouched r2:src1_stride &r4:src2_stride
|
||||
vld1.64 {$0}, [$8], r2
|
||||
vld1.64 {$4}, [$9], r4
|
||||
vld1.64 {$1}, [$8], r2
|
||||
vld1.64 {$5}, [$9], r4
|
||||
vld1.64 {$0}, [$8], r2
|
||||
vld1.64 {$4}, [$9], r4
|
||||
vld1.64 {$1}, [$8], r2
|
||||
vld1.64 {$5}, [$9], r4
|
||||
|
||||
vld1.64 {$2}, [$8], r2
|
||||
vld1.64 {$6}, [$9], r4
|
||||
vld1.64 {$3}, [$8], r2
|
||||
vld1.64 {$7}, [$9], r4
|
||||
vld1.64 {$2}, [$8], r2
|
||||
vld1.64 {$6}, [$9], r4
|
||||
vld1.64 {$3}, [$8], r2
|
||||
vld1.64 {$7}, [$9], r4
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro DCT_ROW_TRANSFORM_TOTAL_16BITS
|
||||
// { // input: src_d[0]~[3], working: [4]~[7]
|
||||
vadd.s16 $4, $0, $3 //int16 s[0] = data[i] + data[i3];
|
||||
vsub.s16 $7, $0, $3 //int16 s[3] = data[i] - data[i3];
|
||||
vadd.s16 $5, $1, $2 //int16 s[1] = data[i1] + data[i2];
|
||||
vsub.s16 $6, $1, $2 //int16 s[2] = data[i1] - data[i2];
|
||||
vadd.s16 $4, $0, $3 //int16 s[0] = data[i] + data[i3];
|
||||
vsub.s16 $7, $0, $3 //int16 s[3] = data[i] - data[i3];
|
||||
vadd.s16 $5, $1, $2 //int16 s[1] = data[i1] + data[i2];
|
||||
vsub.s16 $6, $1, $2 //int16 s[2] = data[i1] - data[i2];
|
||||
|
||||
vadd.s16 $0, $4, $5 //int16 dct[i ] = s[0] + s[1];
|
||||
vsub.s16 $2, $4, $5 //int16 dct[i2] = s[0] - s[1];
|
||||
vshl.s16 $1, $7, #1
|
||||
vshl.s16 $3, $6, #1
|
||||
vadd.s16 $1, $1, $6 //int16 dct[i1] = (s[3] << 1) + s[2];
|
||||
vsub.s16 $3, $7, $3 //int16 dct[i3] = s[3] - (s[2] << 1);
|
||||
vadd.s16 $0, $4, $5 //int16 dct[i ] = s[0] + s[1];
|
||||
vsub.s16 $2, $4, $5 //int16 dct[i2] = s[0] - s[1];
|
||||
vshl.s16 $1, $7, #1
|
||||
vshl.s16 $3, $6, #1
|
||||
vadd.s16 $1, $1, $6 //int16 dct[i1] = (s[3] << 1) + s[2];
|
||||
vsub.s16 $3, $7, $3 //int16 dct[i3] = s[3] - (s[2] << 1);
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro MATRIX_TRANSFORM_EACH_16BITS
|
||||
// { // input & output: src_d[0]~[3];[0 1 2 3]+[4 5 6 7]+[8 9 10 11]+[12 13 14 15]
|
||||
vtrn.s16 $0, $1 //[0 1 2 3]+[4 5 6 7]-->[0 4 2 6]+[1 5 3 7]
|
||||
vtrn.s16 $2, $3 //[8 9 10 11]+[12 13 14 15]-->[8 12 10 14]+[9 13 11 15]
|
||||
vtrn.32 $0, $2 //[0 4 2 6]+[8 12 10 14]-->[0 4 8 12]+[2 6 10 14]
|
||||
vtrn.32 $1, $3 //[1 5 3 7]+[9 13 11 15]-->[1 5 9 13]+[3 7 11 15]
|
||||
vtrn.s16 $0, $1 //[0 1 2 3]+[4 5 6 7]-->[0 4 2 6]+[1 5 3 7]
|
||||
vtrn.s16 $2, $3 //[8 9 10 11]+[12 13 14 15]-->[8 12 10 14]+[9 13 11 15]
|
||||
vtrn.32 $0, $2 //[0 4 2 6]+[8 12 10 14]-->[0 4 8 12]+[2 6 10 14]
|
||||
vtrn.32 $1, $3 //[1 5 3 7]+[9 13 11 15]-->[1 5 9 13]+[3 7 11 15]
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro NEWQUANT_COEF_EACH_16BITS // if coef <= 0, - coef; else , coef;
|
||||
// { // input: coef, ff (dst), ff_d0, ff_d1, mf_d0, md_d1
|
||||
veor.s16 $6, $6 // init 0 , and keep 0;
|
||||
vaba.s16 $1, $0, $6 // f + abs(coef - 0)
|
||||
vmull.s16 $7, $2, $4
|
||||
vmull.s16 $8, $3, $5
|
||||
vshr.s32 $7, #16
|
||||
vshr.s32 $8, #16
|
||||
vmovn.s32 $2, $7
|
||||
vmovn.s32 $3, $8
|
||||
veor.s16 $6, $6 // init 0 , and keep 0;
|
||||
vaba.s16 $1, $0, $6 // f + abs(coef - 0)
|
||||
vmull.s16 $7, $2, $4
|
||||
vmull.s16 $8, $3, $5
|
||||
vshr.s32 $7, #16
|
||||
vshr.s32 $8, #16
|
||||
vmovn.s32 $2, $7
|
||||
vmovn.s32 $3, $8
|
||||
|
||||
vcgt.s16 $7, $0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 $6, $1, $7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 $6, #1
|
||||
vsub.s16 $1, $1, $6 // if x > 0, -= 0; else x-= 2x
|
||||
vcgt.s16 $7, $0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 $6, $1, $7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 $6, #1
|
||||
vsub.s16 $1, $1, $6 // if x > 0, -= 0; else x-= 2x
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro NEWQUANT_COEF_EACH_16BITS_MAX // if coef <= 0, - coef; else , coef;
|
||||
// { // input: coef, ff (dst), ff_d0, ff_d1, mf_d0(max), md_d1
|
||||
veor.s16 $6, $6 // init 0 , and keep 0;
|
||||
vaba.s16 $1, $0, $6 // f + abs(coef - 0)
|
||||
vmull.s16 $7, $2, $4
|
||||
vmull.s16 $8, $3, $5
|
||||
vshr.s32 $7, #16
|
||||
vshr.s32 $8, #16
|
||||
vmovn.s32 $2, $7
|
||||
vmovn.s32 $3, $8
|
||||
veor.s16 $6, $6 // init 0 , and keep 0;
|
||||
vaba.s16 $1, $0, $6 // f + abs(coef - 0)
|
||||
vmull.s16 $7, $2, $4
|
||||
vmull.s16 $8, $3, $5
|
||||
vshr.s32 $7, #16
|
||||
vshr.s32 $8, #16
|
||||
vmovn.s32 $2, $7
|
||||
vmovn.s32 $3, $8
|
||||
|
||||
vcgt.s16 $7, $0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 $6, $1, $7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 $6, #1
|
||||
vmax.s16 $9, $2, $3
|
||||
vsub.s16 $1, $1, $6 // if x > 0, -= 0; else x-= 2x
|
||||
vcgt.s16 $7, $0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 $6, $1, $7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 $6, #1
|
||||
vmax.s16 $9, $2, $3
|
||||
vsub.s16 $1, $1, $6 // if x > 0, -= 0; else x-= 2x
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro QUANT_DUALWORD_COEF_EACH_16BITS // if coef <= 0, - coef; else , coef;
|
||||
// { // input: coef, ff (dst), mf , working_d (all 0), working_q
|
||||
vaba.s16 $1, $0, $3 // f + abs(coef - 0)
|
||||
vmull.s16 $4, $1, $2 // *= mf
|
||||
vshr.s32 $4, #16
|
||||
vmovn.s32 $1, $4 // >> 16
|
||||
vaba.s16 $1, $0, $3 // f + abs(coef - 0)
|
||||
vmull.s16 $4, $1, $2 // *= mf
|
||||
vshr.s32 $4, #16
|
||||
vmovn.s32 $1, $4 // >> 16
|
||||
|
||||
vcgt.s16 $2, $0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 $3, $1, $2 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 $3, #1
|
||||
vsub.s16 $1, $1, $3 // if x > 0, -= 0; else x-= 2x
|
||||
vcgt.s16 $2, $0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 $3, $1, $2 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 $3, #1
|
||||
vsub.s16 $1, $1, $3 // if x > 0, -= 0; else x-= 2x
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro DC_ZERO_COUNT_IN_DUALWORD
|
||||
// { // input: coef, dst_d, working_d (all 0x01)
|
||||
vceq.s16 $1, $0, #0
|
||||
vand.s16 $1, $2
|
||||
vpadd.s16 $1, $1, $1
|
||||
vpadd.s16 $1, $1, $1
|
||||
vceq.s16 $1, $0, #0
|
||||
vand.s16 $1, $2
|
||||
vpadd.s16 $1, $1, $1
|
||||
vpadd.s16 $1, $1, $1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro SELECT_MAX_IN_ABS_COEF
|
||||
// { // input: coef_0, coef_1, max_q (identy to follow two)
|
||||
vmax.s16 $2, $0, $1 // max 1st in $3 & max 2nd in $4
|
||||
vpmax.s16 $3, $3, $4 // max 1st in $3[0][1] & max 2nd in $3[2][3]
|
||||
vpmax.s16 $3, $3, $4 // max 1st in $3[0][1]
|
||||
vmax.s16 $2, $0, $1 // max 1st in $3 & max 2nd in $4
|
||||
vpmax.s16 $3, $3, $4 // max 1st in $3[0][1] & max 2nd in $3[2][3]
|
||||
vpmax.s16 $3, $3, $4 // max 1st in $3[0][1]
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro ZERO_COUNT_IN_2_QUARWORD
|
||||
// { // input: coef_0 (identy to $3 $4), coef_1(identy to $5 $6), mask_q
|
||||
vceq.s16 $0, #0
|
||||
vceq.s16 $1, #0
|
||||
vand.s16 $0, $2
|
||||
vand.s16 $1, $2
|
||||
vceq.s16 $0, #0
|
||||
vceq.s16 $1, #0
|
||||
vand.s16 $0, $2
|
||||
vand.s16 $1, $2
|
||||
|
||||
vpadd.s16 $3, $3, $5
|
||||
vpadd.s16 $4, $4, $6
|
||||
vpadd.s16 $3, $3, $4 // 8-->4
|
||||
vpadd.s16 $3, $3, $3
|
||||
vpadd.s16 $3, $3, $3
|
||||
vpadd.s16 $3, $3, $5
|
||||
vpadd.s16 $4, $4, $6
|
||||
vpadd.s16 $3, $3, $4 // 8-->4
|
||||
vpadd.s16 $3, $3, $3
|
||||
vpadd.s16 $3, $3, $3
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro HDM_QUANT_2x2_TOTAL_16BITS
|
||||
// { // input: src_d[0]~[3], working_d, dst_d
|
||||
vshr.s64 $1, $0, #32
|
||||
vadd.s16 $2, $0, $1 // [0] = rs[0] + rs[32];[1] = rs[16] + rs[48];
|
||||
vsub.s16 $1, $0, $1 // [0] = rs[0] - rs[32];[1] = rs[16] - rs[48];
|
||||
vtrn.s16 $2, $1
|
||||
vtrn.s32 $2, $1
|
||||
vshr.s64 $1, $0, #32
|
||||
vadd.s16 $2, $0, $1 // [0] = rs[0] + rs[32];[1] = rs[16] + rs[48];
|
||||
vsub.s16 $1, $0, $1 // [0] = rs[0] - rs[32];[1] = rs[16] - rs[48];
|
||||
vtrn.s16 $2, $1
|
||||
vtrn.s32 $2, $1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro IHDM_4x4_TOTAL_16BITS
|
||||
// { // input: each src_d[0]~[3](dst), working_q0, working_q1, working_q2
|
||||
vshr.s64 $1, $0, #32
|
||||
vadd.s16 $2, $0, $1 // [0] = rs[0] + rs[2];[1] = rs[1] + rs[3];
|
||||
vsub.s16 $1, $0, $1 // [0] = rs[0] - rs[2];[1] = rs[1] - rs[3];
|
||||
vtrn.s16 $2, $1
|
||||
vrev32.16 $1, $1
|
||||
vtrn.s32 $2, $1 // [0] = rs[0] + rs[2];[1] = rs[0] - rs[2];[2] = rs[1] - rs[3];[3] = rs[1] + rs[3];
|
||||
vshr.s64 $1, $0, #32
|
||||
vadd.s16 $2, $0, $1 // [0] = rs[0] + rs[2];[1] = rs[1] + rs[3];
|
||||
vsub.s16 $1, $0, $1 // [0] = rs[0] - rs[2];[1] = rs[1] - rs[3];
|
||||
vtrn.s16 $2, $1
|
||||
vrev32.16 $1, $1
|
||||
vtrn.s32 $2, $1 // [0] = rs[0] + rs[2];[1] = rs[0] - rs[2];[2] = rs[1] - rs[3];[3] = rs[1] + rs[3];
|
||||
|
||||
vrev64.16 $1, $2
|
||||
vadd.s16 $0, $2, $1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];
|
||||
vsub.s16 $1, $2, $1
|
||||
vrev32.16 $1, $1 // [0] = rs[1] - rs[2];[1] = rs[0] - rs[3];
|
||||
vtrn.s32 $0, $1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];[2] = rs[1] - rs[2];[3] = rs[0] - rs[3];
|
||||
vrev64.16 $1, $2
|
||||
vadd.s16 $0, $2, $1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];
|
||||
vsub.s16 $1, $2, $1
|
||||
vrev32.16 $1, $1 // [0] = rs[1] - rs[2];[1] = rs[0] - rs[3];
|
||||
vtrn.s32 $0, $1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];[2] = rs[1] - rs[2];[3] = rs[0] - rs[3];
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro MB_PRED_8BITS_ADD_DCT_16BITS_CLIP
|
||||
// { // input: pred_d[0]/[1](output), dct_q0/1, working_q0/1;
|
||||
vmovl.u8 $4,$0
|
||||
vmovl.u8 $5,$1
|
||||
vadd.s16 $4,$2
|
||||
vadd.s16 $5,$3
|
||||
vqmovun.s16 $0,$4
|
||||
vqmovun.s16 $1,$5
|
||||
vmovl.u8 $4,$0
|
||||
vmovl.u8 $5,$1
|
||||
vadd.s16 $4,$2
|
||||
vadd.s16 $5,$3
|
||||
vqmovun.s16 $0,$4
|
||||
vqmovun.s16 $1,$5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro ROW_TRANSFORM_1_STEP_TOTAL_16BITS
|
||||
// { // input: src_d[0]~[3], output: e_d[0]~[3];
|
||||
vadd.s16 $4, $0, $2 //int16 e[i][0] = src[0] + src[2];
|
||||
vsub.s16 $5, $0, $2 //int16 e[i][1] = src[0] - src[2];
|
||||
vshr.s16 $6, $1, #1
|
||||
vshr.s16 $7, $3, #1
|
||||
vsub.s16 $6, $6, $3 //int16 e[i][2] = (src[1]>>1)-src[3];
|
||||
vadd.s16 $7, $1, $7 //int16 e[i][3] = src[1] + (src[3]>>1);
|
||||
vadd.s16 $4, $0, $2 //int16 e[i][0] = src[0] + src[2];
|
||||
vsub.s16 $5, $0, $2 //int16 e[i][1] = src[0] - src[2];
|
||||
vshr.s16 $6, $1, #1
|
||||
vshr.s16 $7, $3, #1
|
||||
vsub.s16 $6, $6, $3 //int16 e[i][2] = (src[1]>>1)-src[3];
|
||||
vadd.s16 $7, $1, $7 //int16 e[i][3] = src[1] + (src[3]>>1);
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro TRANSFORM_TOTAL_16BITS // both row & col transform used
|
||||
// { // output: f_q[0]~[3], input: e_q[0]~[3];
|
||||
vadd.s16 $0, $4, $7 //int16 f[i][0] = e[i][0] + e[i][3];
|
||||
vadd.s16 $1, $5, $6 //int16 f[i][1] = e[i][1] + e[i][2];
|
||||
vsub.s16 $2, $5, $6 //int16 f[i][2] = e[i][1] - e[i][2];
|
||||
vsub.s16 $3, $4, $7 //int16 f[i][3] = e[i][0] - e[i][3];
|
||||
vadd.s16 $0, $4, $7 //int16 f[i][0] = e[i][0] + e[i][3];
|
||||
vadd.s16 $1, $5, $6 //int16 f[i][1] = e[i][1] + e[i][2];
|
||||
vsub.s16 $2, $5, $6 //int16 f[i][2] = e[i][1] - e[i][2];
|
||||
vsub.s16 $3, $4, $7 //int16 f[i][3] = e[i][0] - e[i][3];
|
||||
// }
|
||||
.endm
|
||||
|
||||
|
||||
.macro ROW_TRANSFORM_0_STEP
|
||||
// { // input: src_d[0]~[3], output: e_q[0]~[3];
|
||||
vaddl.s16 $4, $0, $2 //int32 e[i][0] = src[0] + src[2];
|
||||
vsubl.s16 $5, $0, $2 //int32 e[i][1] = src[0] - src[2];
|
||||
vsubl.s16 $6, $1, $3 //int32 e[i][2] = src[1] - src[3];
|
||||
vaddl.s16 $7, $1, $3 //int32 e[i][3] = src[1] + src[3];
|
||||
vaddl.s16 $4, $0, $2 //int32 e[i][0] = src[0] + src[2];
|
||||
vsubl.s16 $5, $0, $2 //int32 e[i][1] = src[0] - src[2];
|
||||
vsubl.s16 $6, $1, $3 //int32 e[i][2] = src[1] - src[3];
|
||||
vaddl.s16 $7, $1, $3 //int32 e[i][3] = src[1] + src[3];
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro ROW_TRANSFORM_1_STEP
|
||||
// { // input: src_d[0]~[3], output: e_q[0]~[3]; working: $8 $9
|
||||
vaddl.s16 $4, $0, $2 //int32 e[i][0] = src[0] + src[2];
|
||||
vsubl.s16 $5, $0, $2 //int32 e[i][1] = src[0] - src[2];
|
||||
vshr.s16 $8, $1, #1
|
||||
vshr.s16 $9, $3, #1
|
||||
vsubl.s16 $6, $8, $3 //int32 e[i][2] = (src[1]>>1)-src[3];
|
||||
vaddl.s16 $7, $1, $9 //int32 e[i][3] = src[1] + (src[3]>>1);
|
||||
vaddl.s16 $4, $0, $2 //int32 e[i][0] = src[0] + src[2];
|
||||
vsubl.s16 $5, $0, $2 //int32 e[i][1] = src[0] - src[2];
|
||||
vshr.s16 $8, $1, #1
|
||||
vshr.s16 $9, $3, #1
|
||||
vsubl.s16 $6, $8, $3 //int32 e[i][2] = (src[1]>>1)-src[3];
|
||||
vaddl.s16 $7, $1, $9 //int32 e[i][3] = src[1] + (src[3]>>1);
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro TRANSFORM_4BYTES // both row & col transform used
|
||||
// { // output: f_q[0]~[3], input: e_q[0]~[3];
|
||||
vadd.s32 $0, $4, $7 //int16 f[i][0] = e[i][0] + e[i][3];
|
||||
vadd.s32 $1, $5, $6 //int16 f[i][1] = e[i][1] + e[i][2];
|
||||
vsub.s32 $2, $5, $6 //int16 f[i][2] = e[i][1] - e[i][2];
|
||||
vsub.s32 $3, $4, $7 //int16 f[i][3] = e[i][0] - e[i][3];
|
||||
vadd.s32 $0, $4, $7 //int16 f[i][0] = e[i][0] + e[i][3];
|
||||
vadd.s32 $1, $5, $6 //int16 f[i][1] = e[i][1] + e[i][2];
|
||||
vsub.s32 $2, $5, $6 //int16 f[i][2] = e[i][1] - e[i][2];
|
||||
vsub.s32 $3, $4, $7 //int16 f[i][3] = e[i][0] - e[i][3];
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro COL_TRANSFORM_0_STEP
|
||||
// { // input: src_q[0]~[3], output: e_q[0]~[3];
|
||||
vadd.s32 $4, $0, $2 //int32 e[0][j] = f[0][j] + f[2][j];
|
||||
vsub.s32 $5, $0, $2 //int32 e[1][j] = f[0][j] - f[2][j];
|
||||
vsub.s32 $6, $1, $3 //int32 e[2][j] = (f[1][j]>>1) - f[3][j];
|
||||
vadd.s32 $7, $1, $3 //int32 e[3][j] = f[1][j] + (f[3][j]>>1);
|
||||
vadd.s32 $4, $0, $2 //int32 e[0][j] = f[0][j] + f[2][j];
|
||||
vsub.s32 $5, $0, $2 //int32 e[1][j] = f[0][j] - f[2][j];
|
||||
vsub.s32 $6, $1, $3 //int32 e[2][j] = (f[1][j]>>1) - f[3][j];
|
||||
vadd.s32 $7, $1, $3 //int32 e[3][j] = f[1][j] + (f[3][j]>>1);
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro COL_TRANSFORM_1_STEP
|
||||
// { // input: src_q[0]~[3], output: e_q[0]~[3];
|
||||
vadd.s32 $4, $0, $2 //int32 e[0][j] = f[0][j] + f[2][j];
|
||||
vsub.s32 $5, $0, $2 //int32 e[1][j] = f[0][j] - f[2][j];
|
||||
vshr.s32 $6, $1, #1
|
||||
vshr.s32 $7, $3, #1
|
||||
vsub.s32 $6, $6, $3 //int32 e[2][j] = (f[1][j]>>1) - f[3][j];
|
||||
vadd.s32 $7, $1, $7 //int32 e[3][j] = f[1][j] + (f[3][j]>>1);
|
||||
vadd.s32 $4, $0, $2 //int32 e[0][j] = f[0][j] + f[2][j];
|
||||
vsub.s32 $5, $0, $2 //int32 e[1][j] = f[0][j] - f[2][j];
|
||||
vshr.s32 $6, $1, #1
|
||||
vshr.s32 $7, $3, #1
|
||||
vsub.s32 $6, $6, $3 //int32 e[2][j] = (f[1][j]>>1) - f[3][j];
|
||||
vadd.s32 $7, $1, $7 //int32 e[3][j] = f[1][j] + (f[3][j]>>1);
|
||||
// }
|
||||
.endm
|
||||
#else
|
||||
.macro LORD_ALIGNED_DATA_WITH_STRIDE arg0, arg1, arg2, arg3, arg4, arg5
|
||||
// { // input: \arg0~\arg3, src*, src_stride
|
||||
vld1.64 {\arg0}, [\arg4,:128], \arg5
|
||||
vld1.64 {\arg1}, [\arg4,:128], \arg5
|
||||
vld1.64 {\arg2}, [\arg4,:128], \arg5
|
||||
vld1.64 {\arg3}, [\arg4,:128], \arg5
|
||||
vld1.64 {\arg0}, [\arg4,:128], \arg5
|
||||
vld1.64 {\arg1}, [\arg4,:128], \arg5
|
||||
vld1.64 {\arg2}, [\arg4,:128], \arg5
|
||||
vld1.64 {\arg3}, [\arg4,:128], \arg5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro STORE_ALIGNED_DATA_WITH_STRIDE arg0, arg1, arg2, arg3, arg4, arg5
|
||||
// { // input: \arg0~\arg3, dst*, dst_stride
|
||||
vst1.64 {\arg0}, [\arg4,:128], \arg5
|
||||
vst1.64 {\arg1}, [\arg4,:128], \arg5
|
||||
vst1.64 {\arg2}, [\arg4,:128], \arg5
|
||||
vst1.64 {\arg3}, [\arg4,:128], \arg5
|
||||
vst1.64 {\arg0}, [\arg4,:128], \arg5
|
||||
vst1.64 {\arg1}, [\arg4,:128], \arg5
|
||||
vst1.64 {\arg2}, [\arg4,:128], \arg5
|
||||
vst1.64 {\arg3}, [\arg4,:128], \arg5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro LORD_UNALIGNED_DATA_WITH_STRIDE arg0, arg1, arg2, arg3, arg4, arg5
|
||||
// { // input: \arg0~\arg3, src*, src_stride
|
||||
vld1.64 {\arg0}, [\arg4], \arg5
|
||||
vld1.64 {\arg1}, [\arg4], \arg5
|
||||
vld1.64 {\arg2}, [\arg4], \arg5
|
||||
vld1.64 {\arg3}, [\arg4], \arg5
|
||||
vld1.64 {\arg0}, [\arg4], \arg5
|
||||
vld1.64 {\arg1}, [\arg4], \arg5
|
||||
vld1.64 {\arg2}, [\arg4], \arg5
|
||||
vld1.64 {\arg3}, [\arg4], \arg5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro STORE_UNALIGNED_DATA_WITH_STRIDE arg0, arg1, arg2, arg3, arg4, arg5
|
||||
// { // input: \arg0~\arg3, dst*, dst_stride
|
||||
vst1.64 {\arg0}, [\arg4], \arg5
|
||||
vst1.64 {\arg1}, [\arg4], \arg5
|
||||
vst1.64 {\arg2}, [\arg4], \arg5
|
||||
vst1.64 {\arg3}, [\arg4], \arg5
|
||||
vst1.64 {\arg0}, [\arg4], \arg5
|
||||
vst1.64 {\arg1}, [\arg4], \arg5
|
||||
vst1.64 {\arg2}, [\arg4], \arg5
|
||||
vst1.64 {\arg3}, [\arg4], \arg5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro LOAD_4x4_DATA_FOR_DCT arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
|
||||
// { // input: \arg0~\arg3, src1*, src1_stride, src2*, src2_stride
|
||||
vld2.16 {\arg0[0],\arg1[0]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[0],\arg3[0]}, [\arg6], \arg7
|
||||
vld2.16 {\arg0[1],\arg1[1]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[1],\arg3[1]}, [\arg6], \arg7
|
||||
vld2.16 {\arg0[0],\arg1[0]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[0],\arg3[0]}, [\arg6], \arg7
|
||||
vld2.16 {\arg0[1],\arg1[1]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[1],\arg3[1]}, [\arg6], \arg7
|
||||
|
||||
vld2.16 {\arg0[2],\arg1[2]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[2],\arg3[2]}, [\arg6], \arg7
|
||||
vld2.16 {\arg0[3],\arg1[3]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[3],\arg3[3]}, [\arg6], \arg7
|
||||
vld2.16 {\arg0[2],\arg1[2]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[2],\arg3[2]}, [\arg6], \arg7
|
||||
vld2.16 {\arg0[3],\arg1[3]}, [\arg4], \arg5
|
||||
vld2.16 {\arg2[3],\arg3[3]}, [\arg6], \arg7
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro LOAD_8x8_DATA_FOR_DCT arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
|
||||
// { // input: \arg0~\arg3, src1*, src2*; untouched r2:src1_stride &r4:src2_stride
|
||||
vld1.64 {\arg0}, [\arg8], r2
|
||||
vld1.64 {\arg4}, [\arg9], r4
|
||||
vld1.64 {\arg1}, [\arg8], r2
|
||||
vld1.64 {\arg5}, [\arg9], r4
|
||||
vld1.64 {\arg0}, [\arg8], r2
|
||||
vld1.64 {\arg4}, [\arg9], r4
|
||||
vld1.64 {\arg1}, [\arg8], r2
|
||||
vld1.64 {\arg5}, [\arg9], r4
|
||||
|
||||
vld1.64 {\arg2}, [\arg8], r2
|
||||
vld1.64 {\arg6}, [\arg9], r4
|
||||
vld1.64 {\arg3}, [\arg8], r2
|
||||
vld1.64 {\arg7}, [\arg9], r4
|
||||
vld1.64 {\arg2}, [\arg8], r2
|
||||
vld1.64 {\arg6}, [\arg9], r4
|
||||
vld1.64 {\arg3}, [\arg8], r2
|
||||
vld1.64 {\arg7}, [\arg9], r4
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro DCT_ROW_TRANSFORM_TOTAL_16BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
|
||||
// { // input: src_d[0]~[3], working: [4]~[7]
|
||||
vadd.s16 \arg4, \arg0, \arg3 //int16 s[0] = data[i] + data[i3];
|
||||
vsub.s16 \arg7, \arg0, \arg3 //int16 s[3] = data[i] - data[i3];
|
||||
vadd.s16 \arg5, \arg1, \arg2 //int16 s[1] = data[i1] + data[i2];
|
||||
vsub.s16 \arg6, \arg1, \arg2 //int16 s[2] = data[i1] - data[i2];
|
||||
vadd.s16 \arg4, \arg0, \arg3 //int16 s[0] = data[i] + data[i3];
|
||||
vsub.s16 \arg7, \arg0, \arg3 //int16 s[3] = data[i] - data[i3];
|
||||
vadd.s16 \arg5, \arg1, \arg2 //int16 s[1] = data[i1] + data[i2];
|
||||
vsub.s16 \arg6, \arg1, \arg2 //int16 s[2] = data[i1] - data[i2];
|
||||
|
||||
vadd.s16 \arg0, \arg4, \arg5 //int16 dct[i ] = s[0] + s[1];
|
||||
vsub.s16 \arg2, \arg4, \arg5 //int16 dct[i2] = s[0] - s[1];
|
||||
vshl.s16 \arg1, \arg7, #1
|
||||
vshl.s16 \arg3, \arg6, #1
|
||||
vadd.s16 \arg1, \arg1, \arg6 //int16 dct[i1] = (s[3] << 1) + s[2];
|
||||
vsub.s16 \arg3, \arg7, \arg3 //int16 dct[i3] = s[3] - (s[2] << 1);
|
||||
vadd.s16 \arg0, \arg4, \arg5 //int16 dct[i ] = s[0] + s[1];
|
||||
vsub.s16 \arg2, \arg4, \arg5 //int16 dct[i2] = s[0] - s[1];
|
||||
vshl.s16 \arg1, \arg7, #1
|
||||
vshl.s16 \arg3, \arg6, #1
|
||||
vadd.s16 \arg1, \arg1, \arg6 //int16 dct[i1] = (s[3] << 1) + s[2];
|
||||
vsub.s16 \arg3, \arg7, \arg3 //int16 dct[i3] = s[3] - (s[2] << 1);
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro MATRIX_TRANSFORM_EACH_16BITS arg0, arg1, arg2, arg3
|
||||
// { // input & output: src_d[0]~[3];[0 1 2 3]+[4 5 6 7]+[8 9 10 11]+[12 13 14 15]
|
||||
vtrn.s16 \arg0, \arg1 //[0 1 2 3]+[4 5 6 7]-->[0 4 2 6]+[1 5 3 7]
|
||||
vtrn.s16 \arg2, \arg3 //[8 9 10 11]+[12 13 14 15]-->[8 12 10 14]+[9 13 11 15]
|
||||
vtrn.32 \arg0, \arg2 //[0 4 2 6]+[8 12 10 14]-->[0 4 8 12]+[2 6 10 14]
|
||||
vtrn.32 \arg1, \arg3 //[1 5 3 7]+[9 13 11 15]-->[1 5 9 13]+[3 7 11 15]
|
||||
vtrn.s16 \arg0, \arg1 //[0 1 2 3]+[4 5 6 7]-->[0 4 2 6]+[1 5 3 7]
|
||||
vtrn.s16 \arg2, \arg3 //[8 9 10 11]+[12 13 14 15]-->[8 12 10 14]+[9 13 11 15]
|
||||
vtrn.32 \arg0, \arg2 //[0 4 2 6]+[8 12 10 14]-->[0 4 8 12]+[2 6 10 14]
|
||||
vtrn.32 \arg1, \arg3 //[1 5 3 7]+[9 13 11 15]-->[1 5 9 13]+[3 7 11 15]
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro NEWQUANT_COEF_EACH_16BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
|
||||
// { // input: coef, ff (dst), ff_d0, ff_d1, mf_d0, md_d1
|
||||
veor.s16 \arg6, \arg6 // init 0 , and keep 0;
|
||||
vaba.s16 \arg1, \arg0, \arg6 // f + abs(coef - 0)
|
||||
vmull.s16 \arg7, \arg2, \arg4
|
||||
vmull.s16 \arg8, \arg3, \arg5
|
||||
vshr.s32 \arg7, #16
|
||||
vshr.s32 \arg8, #16
|
||||
vmovn.s32 \arg2, \arg7
|
||||
vmovn.s32 \arg3, \arg8
|
||||
veor.s16 \arg6, \arg6 // init 0 , and keep 0;
|
||||
vaba.s16 \arg1, \arg0, \arg6 // f + abs(coef - 0)
|
||||
vmull.s16 \arg7, \arg2, \arg4
|
||||
vmull.s16 \arg8, \arg3, \arg5
|
||||
vshr.s32 \arg7, #16
|
||||
vshr.s32 \arg8, #16
|
||||
vmovn.s32 \arg2, \arg7
|
||||
vmovn.s32 \arg3, \arg8
|
||||
|
||||
vcgt.s16 \arg7, \arg0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 \arg6, \arg1, \arg7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 \arg6, #1
|
||||
vsub.s16 \arg1, \arg1, \arg6 // if x > 0, -= 0; else x-= 2x
|
||||
vcgt.s16 \arg7, \arg0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 \arg6, \arg1, \arg7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 \arg6, #1
|
||||
vsub.s16 \arg1, \arg1, \arg6 // if x > 0, -= 0; else x-= 2x
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro NEWQUANT_COEF_EACH_16BITS_MAX arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
|
||||
// { // input: coef, ff (dst), ff_d0, ff_d1, mf_d0(max), md_d1
|
||||
veor.s16 \arg6, \arg6 // init 0 , and keep 0;
|
||||
vaba.s16 \arg1, \arg0, \arg6 // f + abs(coef - 0)
|
||||
vmull.s16 \arg7, \arg2, \arg4
|
||||
vmull.s16 \arg8, \arg3, \arg5
|
||||
vshr.s32 \arg7, #16
|
||||
vshr.s32 \arg8, #16
|
||||
vmovn.s32 \arg2, \arg7
|
||||
vmovn.s32 \arg3, \arg8
|
||||
veor.s16 \arg6, \arg6 // init 0 , and keep 0;
|
||||
vaba.s16 \arg1, \arg0, \arg6 // f + abs(coef - 0)
|
||||
vmull.s16 \arg7, \arg2, \arg4
|
||||
vmull.s16 \arg8, \arg3, \arg5
|
||||
vshr.s32 \arg7, #16
|
||||
vshr.s32 \arg8, #16
|
||||
vmovn.s32 \arg2, \arg7
|
||||
vmovn.s32 \arg3, \arg8
|
||||
|
||||
vcgt.s16 \arg7, \arg0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 \arg6, \arg1, \arg7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 \arg6, #1
|
||||
vmax.s16 \arg9, \arg2, \arg3
|
||||
vsub.s16 \arg1, \arg1, \arg6 // if x > 0, -= 0; else x-= 2x
|
||||
vcgt.s16 \arg7, \arg0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 \arg6, \arg1, \arg7 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 \arg6, #1
|
||||
vmax.s16 \arg9, \arg2, \arg3
|
||||
vsub.s16 \arg1, \arg1, \arg6 // if x > 0, -= 0; else x-= 2x
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro QUANT_DUALWORD_COEF_EACH_16BITS arg0, arg1, arg2, arg3, arg4
|
||||
// { // input: coef, ff (dst), mf , working_d (all 0), working_q
|
||||
vaba.s16 \arg1, \arg0, \arg3 // f + abs(coef - 0)
|
||||
vmull.s16 \arg4, \arg1, \arg2 // *= mf
|
||||
vshr.s32 \arg4, #16
|
||||
vmovn.s32 \arg1, \arg4 // >> 16
|
||||
vaba.s16 \arg1, \arg0, \arg3 // f + abs(coef - 0)
|
||||
vmull.s16 \arg4, \arg1, \arg2 // *= mf
|
||||
vshr.s32 \arg4, #16
|
||||
vmovn.s32 \arg1, \arg4 // >> 16
|
||||
|
||||
vcgt.s16 \arg2, \arg0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 \arg3, \arg1, \arg2 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 \arg3, #1
|
||||
vsub.s16 \arg1, \arg1, \arg3 // if x > 0, -= 0; else x-= 2x
|
||||
vcgt.s16 \arg2, \arg0, #0 // if true, location of coef == 11111111
|
||||
vbif.s16 \arg3, \arg1, \arg2 // if (x<0) reserved part; else keep 0 untouched
|
||||
vshl.s16 \arg3, #1
|
||||
vsub.s16 \arg1, \arg1, \arg3 // if x > 0, -= 0; else x-= 2x
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro DC_ZERO_COUNT_IN_DUALWORD arg0, arg1, arg2
|
||||
// { // input: coef, dst_d, working_d (all 0x01)
|
||||
vceq.s16 \arg1, \arg0, #0
|
||||
vand.s16 \arg1, \arg2
|
||||
vpadd.s16 \arg1, \arg1, \arg1
|
||||
vpadd.s16 \arg1, \arg1, \arg1
|
||||
vceq.s16 \arg1, \arg0, #0
|
||||
vand.s16 \arg1, \arg2
|
||||
vpadd.s16 \arg1, \arg1, \arg1
|
||||
vpadd.s16 \arg1, \arg1, \arg1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro SELECT_MAX_IN_ABS_COEF arg0, arg1, arg2, arg3, arg4
|
||||
// { // input: coef_0, coef_1, max_q (identy to follow two), output: max_d0, max_d1
|
||||
vmax.s16 \arg2, \arg0, \arg1 // max 1st in \arg3 & max 2nd in \arg4
|
||||
vpmax.s16 \arg3, \arg3, \arg4 // max 1st in \arg3[0][1] & max 2nd in \arg3[2][3]
|
||||
vpmax.s16 \arg3, \arg3, \arg4 // max 1st in \arg3[0][1]
|
||||
vmax.s16 \arg2, \arg0, \arg1 // max 1st in \arg3 & max 2nd in \arg4
|
||||
vpmax.s16 \arg3, \arg3, \arg4 // max 1st in \arg3[0][1] & max 2nd in \arg3[2][3]
|
||||
vpmax.s16 \arg3, \arg3, \arg4 // max 1st in \arg3[0][1]
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro ZERO_COUNT_IN_2_QUARWORD arg0, arg1, arg2, arg3, arg4, arg5, arg6
|
||||
// { // input: coef_0 (identy to \arg3 \arg4), coef_1(identy to \arg5 \arg6), mask_q
|
||||
vceq.s16 \arg0, #0
|
||||
vceq.s16 \arg1, #0
|
||||
vand.s16 \arg0, \arg2
|
||||
vand.s16 \arg1, \arg2
|
||||
vceq.s16 \arg0, #0
|
||||
vceq.s16 \arg1, #0
|
||||
vand.s16 \arg0, \arg2
|
||||
vand.s16 \arg1, \arg2
|
||||
|
||||
vpadd.s16 \arg3, \arg3, \arg5
|
||||
vpadd.s16 \arg4, \arg4, \arg6
|
||||
vpadd.s16 \arg3, \arg3, \arg4 // 8-->4
|
||||
vpadd.s16 \arg3, \arg3, \arg3
|
||||
vpadd.s16 \arg3, \arg3, \arg3
|
||||
vpadd.s16 \arg3, \arg3, \arg5
|
||||
vpadd.s16 \arg4, \arg4, \arg6
|
||||
vpadd.s16 \arg3, \arg3, \arg4 // 8-->4
|
||||
vpadd.s16 \arg3, \arg3, \arg3
|
||||
vpadd.s16 \arg3, \arg3, \arg3
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro HDM_QUANT_2x2_TOTAL_16BITS arg0, arg1, arg2
|
||||
// { // input: src_d[0]~[3], working_d, dst_d
|
||||
vshr.s64 \arg1, \arg0, #32
|
||||
vadd.s16 \arg2, \arg0, \arg1 // [0] = rs[0] + rs[32];[1] = rs[16] + rs[48];
|
||||
vsub.s16 \arg1, \arg0, \arg1 // [0] = rs[0] - rs[32];[1] = rs[16] - rs[48];
|
||||
vtrn.s16 \arg2, \arg1
|
||||
vtrn.s32 \arg2, \arg1
|
||||
vshr.s64 \arg1, \arg0, #32
|
||||
vadd.s16 \arg2, \arg0, \arg1 // [0] = rs[0] + rs[32];[1] = rs[16] + rs[48];
|
||||
vsub.s16 \arg1, \arg0, \arg1 // [0] = rs[0] - rs[32];[1] = rs[16] - rs[48];
|
||||
vtrn.s16 \arg2, \arg1
|
||||
vtrn.s32 \arg2, \arg1
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro IHDM_4x4_TOTAL_16BITS arg0, arg1, arg2
|
||||
// { // input: each src_d[0]~[3](dst), working_q0, working_q1, working_q2
|
||||
vshr.s64 \arg1, \arg0, #32
|
||||
vadd.s16 \arg2, \arg0, \arg1 // [0] = rs[0] + rs[2];[1] = rs[1] + rs[3];
|
||||
vsub.s16 \arg1, \arg0, \arg1 // [0] = rs[0] - rs[2];[1] = rs[1] - rs[3];
|
||||
vtrn.s16 \arg2, \arg1
|
||||
vrev32.16 \arg1, \arg1
|
||||
vtrn.s32 \arg2, \arg1 // [0] = rs[0] + rs[2];[1] = rs[0] - rs[2];[2] = rs[1] - rs[3];[3] = rs[1] + rs[3];
|
||||
vshr.s64 \arg1, \arg0, #32
|
||||
vadd.s16 \arg2, \arg0, \arg1 // [0] = rs[0] + rs[2];[1] = rs[1] + rs[3];
|
||||
vsub.s16 \arg1, \arg0, \arg1 // [0] = rs[0] - rs[2];[1] = rs[1] - rs[3];
|
||||
vtrn.s16 \arg2, \arg1
|
||||
vrev32.16 \arg1, \arg1
|
||||
vtrn.s32 \arg2, \arg1 // [0] = rs[0] + rs[2];[1] = rs[0] - rs[2];[2] = rs[1] - rs[3];[3] = rs[1] + rs[3];
|
||||
|
||||
vrev64.16 \arg1, \arg2
|
||||
vadd.s16 \arg0, \arg2, \arg1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];
|
||||
vsub.s16 \arg1, \arg2, \arg1
|
||||
vrev32.16 \arg1, \arg1 // [0] = rs[1] - rs[2];[1] = rs[0] - rs[3];
|
||||
vtrn.s32 \arg0, \arg1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];[2] = rs[1] - rs[2];[3] = rs[0] - rs[3];
|
||||
vrev64.16 \arg1, \arg2
|
||||
vadd.s16 \arg0, \arg2, \arg1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];
|
||||
vsub.s16 \arg1, \arg2, \arg1
|
||||
vrev32.16 \arg1, \arg1 // [0] = rs[1] - rs[2];[1] = rs[0] - rs[3];
|
||||
vtrn.s32 \arg0, \arg1 // [0] = rs[0] + rs[3];[1] = rs[1] + rs[2];[2] = rs[1] - rs[2];[3] = rs[0] - rs[3];
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro MB_PRED_8BITS_ADD_DCT_16BITS_CLIP arg0, arg1, arg2, arg3, arg4, arg5
|
||||
// { // input: pred_d[0]/[1](output), dct_q0/1, working_q0/1;
|
||||
vmovl.u8 \arg4,\arg0
|
||||
vmovl.u8 \arg5,\arg1
|
||||
vadd.s16 \arg4,\arg2
|
||||
vadd.s16 \arg5,\arg3
|
||||
vqmovun.s16 \arg0,\arg4
|
||||
vqmovun.s16 \arg1,\arg5
|
||||
vmovl.u8 \arg4,\arg0
|
||||
vmovl.u8 \arg5,\arg1
|
||||
vadd.s16 \arg4,\arg2
|
||||
vadd.s16 \arg5,\arg3
|
||||
vqmovun.s16 \arg0,\arg4
|
||||
vqmovun.s16 \arg1,\arg5
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro ROW_TRANSFORM_1_STEP_TOTAL_16BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
|
||||
// { // input: src_d[0]~[3], output: e_d[0]~[3];
|
||||
vadd.s16 \arg4, \arg0, \arg2 //int16 e[i][0] = src[0] + src[2];
|
||||
vsub.s16 \arg5, \arg0, \arg2 //int16 e[i][1] = src[0] - src[2];
|
||||
vshr.s16 \arg6, \arg1, #1
|
||||
vshr.s16 \arg7, \arg3, #1
|
||||
vsub.s16 \arg6, \arg6, \arg3 //int16 e[i][2] = (src[1]>>1)-src[3];
|
||||
vadd.s16 \arg7, \arg1, \arg7 //int16 e[i][3] = src[1] + (src[3]>>1);
|
||||
vadd.s16 \arg4, \arg0, \arg2 //int16 e[i][0] = src[0] + src[2];
|
||||
vsub.s16 \arg5, \arg0, \arg2 //int16 e[i][1] = src[0] - src[2];
|
||||
vshr.s16 \arg6, \arg1, #1
|
||||
vshr.s16 \arg7, \arg3, #1
|
||||
vsub.s16 \arg6, \arg6, \arg3 //int16 e[i][2] = (src[1]>>1)-src[3];
|
||||
vadd.s16 \arg7, \arg1, \arg7 //int16 e[i][3] = src[1] + (src[3]>>1);
|
||||
// }
|
||||
.endm
|
||||
|
||||
.macro TRANSFORM_TOTAL_16BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 // both row & col transform used
|
||||
// { // output: f_q[0]~[3], input: e_q[0]~[3];
|
||||
vadd.s16 \arg0, \arg4, \arg7 //int16 f[i][0] = e[i][0] + e[i][3];
|
||||
vadd.s16 \arg1, \arg5, \arg6 //int16 f[i][1] = e[i][1] + e[i][2];
|
||||
vsub.s16 \arg2, \arg5, \arg6 //int16 f[i][2] = e[i][1] - e[i][2];
|
||||
vsub.s16 \arg3, \arg4, \arg7 //int16 f[i][3] = e[i][0] - e[i][3];
|
||||
vadd.s16 \arg0, \arg4, \arg7 //int16 f[i][0] = e[i][0] + e[i][3];
|
||||
vadd.s16 \arg1, \arg5, \arg6 //int16 f[i][1] = e[i][1] + e[i][2];
|
||||
vsub.s16 \arg2, \arg5, \arg6 //int16 f[i][2] = e[i][1] - e[i][2];
|
||||
vsub.s16 \arg3, \arg4, \arg7 //int16 f[i][3] = e[i][0] - e[i][3];
|
||||
// }
|
||||
.endm
|
||||
|
||||
|
||||
.macro ROW_TRANSFORM_0_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
|
||||
// { // input: src_d[0]~[3], output: e_q[0]~[3];
|
||||
vaddl.s16 \arg4, \arg0, \arg2 //int32 e[i][0] = src[0] + src[2];
|
||||
vsubl.s16 \arg5, \arg0, \arg2 //int32 e[i][1] = src[0] - src[2];
|
||||
vsubl.s16 \arg6, \arg1, \arg3 //int32 e[i][2] = src[1] - src[3];
|
||||
vaddl.s16 \arg7, \arg1, \arg3 //int32 e[i][3] = src[1] + src[3];
|
||||
vaddl.s16 \arg4, \arg0, \arg2 //int32 e[i][0] = src[0] + src[2];
|
||||
vsubl.s16 \arg5, \arg0, \arg2 //int32 e[i][1] = src[0] - src[2];
|
||||
vsubl.s16 \arg6, \arg1, \arg3 //int32 e[i][2] = src[1] - src[3];
|
||||
vaddl.s16 \arg7, \arg1, \arg3 //int32 e[i][3] = src[1] + src[3];
|
||||
// }
|
||||
.endm

.macro ROW_TRANSFORM_1_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
// { // input: src_d[0]~[3], output: e_q[0]~[3]; working: \arg8 \arg9
vaddl.s16 \arg4, \arg0, \arg2 //int32 e[i][0] = src[0] + src[2];
vsubl.s16 \arg5, \arg0, \arg2 //int32 e[i][1] = src[0] - src[2];
vshr.s16 \arg8, \arg1, #1
vshr.s16 \arg9, \arg3, #1
vsubl.s16 \arg6, \arg8, \arg3 //int32 e[i][2] = (src[1]>>1) - src[3];
vaddl.s16 \arg7, \arg1, \arg9 //int32 e[i][3] = src[1] + (src[3]>>1);
// }
.endm

.macro TRANSFORM_4BYTES arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 // both row & col transform used
// { // output: f_q[0]~[3], input: e_q[0]~[3];
vadd.s32 \arg0, \arg4, \arg7 //int32 f[i][0] = e[i][0] + e[i][3];
vadd.s32 \arg1, \arg5, \arg6 //int32 f[i][1] = e[i][1] + e[i][2];
vsub.s32 \arg2, \arg5, \arg6 //int32 f[i][2] = e[i][1] - e[i][2];
vsub.s32 \arg3, \arg4, \arg7 //int32 f[i][3] = e[i][0] - e[i][3];
// }
.endm

.macro COL_TRANSFORM_0_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
// { // input: src_q[0]~[3], output: e_q[0]~[3];
vadd.s32 \arg4, \arg0, \arg2 //int32 e[0][j] = f[0][j] + f[2][j];
vsub.s32 \arg5, \arg0, \arg2 //int32 e[1][j] = f[0][j] - f[2][j];
vsub.s32 \arg6, \arg1, \arg3 //int32 e[2][j] = f[1][j] - f[3][j];
vadd.s32 \arg7, \arg1, \arg3 //int32 e[3][j] = f[1][j] + f[3][j];
// }
.endm

.macro COL_TRANSFORM_1_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
// { // input: src_q[0]~[3], output: e_q[0]~[3];
vadd.s32 \arg4, \arg0, \arg2 //int32 e[0][j] = f[0][j] + f[2][j];
vsub.s32 \arg5, \arg0, \arg2 //int32 e[1][j] = f[0][j] - f[2][j];
vshr.s32 \arg6, \arg1, #1
vshr.s32 \arg7, \arg3, #1
vsub.s32 \arg6, \arg6, \arg3 //int32 e[2][j] = (f[1][j]>>1) - f[3][j];
vadd.s32 \arg7, \arg1, \arg7 //int32 e[3][j] = f[1][j] + (f[3][j]>>1);
// }
.endm

#endif
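
Taken together, ROW_TRANSFORM_1_STEP and TRANSFORM_4BYTES (and their 16-bit _TOTAL_16BITS twins) vectorize the standard H.264 4-point inverse-transform butterfly, one row or column per pass. A minimal C sketch of the scalar computation the asm comments describe (names are illustrative, not from the source):

  #include <stdint.h>

  // One 4-point pass of the H.264 inverse core transform.
  static void InverseButterfly4 (const int16_t src[4], int32_t f[4]) {
    int32_t e[4];
    e[0] = src[0] + src[2];           // matches ROW_TRANSFORM_1_STEP
    e[1] = src[0] - src[2];
    e[2] = (src[1] >> 1) - src[3];
    e[3] = src[1] + (src[3] >> 1);
    f[0] = e[0] + e[3];               // matches TRANSFORM_4BYTES
    f[1] = e[1] + e[2];
    f[2] = e[1] - e[2];
    f[3] = e[0] - e[3];
  }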
@ -110,6 +110,33 @@ int32_t WelsIntraChroma8x8Combined3Satd_sse41 (uint8_t*, int32_t, uint8_t*, int3

#endif//X86_ASM

#if defined (HAVE_NEON)
int32_t WelsSampleSad4x4_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSad16x16_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSad16x8_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSad8x16_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSad8x8_neon (uint8_t*, int32_t, uint8_t*, int32_t);

void WelsSampleSadFour16x16_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*);
void WelsSampleSadFour16x8_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*);
void WelsSampleSadFour8x16_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*);
void WelsSampleSadFour8x8_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*);
void WelsSampleSadFour4x4_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*);

int32_t WelsSampleSatd8x8_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSatd16x8_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSatd8x16_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSatd16x16_neon (uint8_t*, int32_t, uint8_t*, int32_t);
int32_t WelsSampleSatd4x4_neon (uint8_t*, int32_t, uint8_t*, int32_t);

int32_t WelsIntra16x16Combined3Satd_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*, int32_t, uint8_t*);
int32_t WelsIntra16x16Combined3Sad_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*, int32_t, uint8_t*);
int32_t WelsIntra8x8Combined3Satd_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*, int32_t, uint8_t*, uint8_t*, uint8_t*);
int32_t WelsIntra8x8Combined3Sad_neon (uint8_t*, int32_t, uint8_t*, int32_t, int32_t*, int32_t, uint8_t*, uint8_t*, uint8_t*);
int32_t WelsIntra4x4Combined3Satd_neon (uint8_t*, int32_t, uint8_t*, int32_t, uint8_t*, int32_t*, int32_t, int32_t, int32_t);
#endif
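
For orientation: each SAD prototype above takes (current block, its stride, reference block, its stride) and returns the sum of absolute differences over the block; the Satd variants apply a 4x4 Hadamard transform to the residual before summing. A minimal scalar sketch of the 4x4 SAD case (illustrative only, not the project's reference implementation):

  #include <stdint.h>
  #include <stdlib.h>

  static int32_t SampleSad4x4_ref (uint8_t* pSrc, int32_t iSrcStride,
                                   uint8_t* pRef, int32_t iRefStride) {
    int32_t iSad = 0;
    for (int32_t y = 0; y < 4; ++y) {
      for (int32_t x = 0; x < 4; ++x)
        iSad += abs (pSrc[x] - pRef[x]);  // per-pixel absolute difference
      pSrc += iSrcStride;
      pRef += iRefStride;
    }
    return iSad;
  }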

#if defined(__cplusplus)
}
@ -482,6 +482,33 @@ void WelsInitSampleSadFunc (SWelsFuncPtrList* pFuncList, uint32_t uiCpuFlag) {

#endif //(X86_ASM)

#if defined (HAVE_NEON)
  if (uiCpuFlag & WELS_CPU_NEON) {
    pFuncList->sSampleDealingFuncs.pfSampleSad[BLOCK_4x4  ] = WelsSampleSad4x4_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSad[BLOCK_16x16] = WelsSampleSad16x16_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSad[BLOCK_16x8 ] = WelsSampleSad16x8_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSad[BLOCK_8x16 ] = WelsSampleSad8x16_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSad[BLOCK_8x8  ] = WelsSampleSad8x8_neon;

    pFuncList->sSampleDealingFuncs.pfSample4Sad[BLOCK_16x16] = WelsSampleSadFour16x16_neon;
    pFuncList->sSampleDealingFuncs.pfSample4Sad[BLOCK_16x8 ] = WelsSampleSadFour16x8_neon;
    pFuncList->sSampleDealingFuncs.pfSample4Sad[BLOCK_8x16 ] = WelsSampleSadFour8x16_neon;
    pFuncList->sSampleDealingFuncs.pfSample4Sad[BLOCK_8x8  ] = WelsSampleSadFour8x8_neon;
    pFuncList->sSampleDealingFuncs.pfSample4Sad[BLOCK_4x4  ] = WelsSampleSadFour4x4_neon;

    pFuncList->sSampleDealingFuncs.pfSampleSatd[BLOCK_4x4  ] = WelsSampleSatd4x4_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSatd[BLOCK_8x8  ] = WelsSampleSatd8x8_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSatd[BLOCK_8x16 ] = WelsSampleSatd8x16_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSatd[BLOCK_16x8 ] = WelsSampleSatd16x8_neon;
    pFuncList->sSampleDealingFuncs.pfSampleSatd[BLOCK_16x16] = WelsSampleSatd16x16_neon;

    pFuncList->sSampleDealingFuncs.pfIntra4x4Combined3Satd   = WelsIntra4x4Combined3Satd_neon;
    pFuncList->sSampleDealingFuncs.pfIntra8x8Combined3Satd   = WelsIntra8x8Combined3Satd_neon;
    pFuncList->sSampleDealingFuncs.pfIntra8x8Combined3Sad    = WelsIntra8x8Combined3Sad_neon;
    pFuncList->sSampleDealingFuncs.pfIntra16x16Combined3Satd = WelsIntra16x16Combined3Satd_neon;
    pFuncList->sSampleDealingFuncs.pfIntra16x16Combined3Sad  = WelsIntra16x16Combined3Sad_neon;
  }
#endif
}
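
Once the table is populated, call sites never name a _neon symbol directly; they index by block size and call through the pointer. A hypothetical call site, reusing the names from the snippet above (the local variables are illustrative):

  // SAD between the current 8x8 block and one candidate reference block.
  int32_t iCost = pFuncList->sSampleDealingFuncs.pfSampleSad[BLOCK_8x8] (pCur, iCurStride, pRef, iRefStride);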

} // namespace WelsSVCEnc
@ -231,6 +231,11 @@ void CAdaptiveQuantization::WelsInitVarFunc (PVarFunc& pfVar, int32_t iCpuFlag)

    pfVar = SampleVariance16x16_sse2;
  }
#endif
#ifdef HAVE_NEON
  if (iCpuFlag & WELS_CPU_NEON) {
    pfVar = SampleVariance16x16_neon;
  }
#endif
}
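
pfVar feeds the adaptive-quantization texture measure; conceptually it is the E[x^2] - E[x]^2 variance over a 16x16 luma macroblock (the real SampleVariance16x16 also takes the reference plane into account, which is why its prototype below carries pRefY). A scalar sketch of the source-only part, with illustrative names:

  #include <stdint.h>

  static uint32_t BlockVariance16x16 (uint8_t* pSrcY, int32_t iSrcStride) {
    uint32_t uiSum = 0, uiSquare = 0;
    for (int32_t y = 0; y < 16; ++y) {
      for (int32_t x = 0; x < 16; ++x) {
        uiSum    += pSrcY[x];
        uiSquare += (uint32_t) pSrcY[x] * pSrcY[x];
      }
      pSrcY += iSrcStride;
    }
    uiSum >>= 8;                            // mean over 256 samples
    return (uiSquare >> 8) - uiSum * uiSum; // E[x^2] - E[x]^2
  }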

void SampleVariance16x16_c (uint8_t* pRefY, int32_t iRefStride, uint8_t* pSrcY, int32_t iSrcStride,
@ -62,6 +62,11 @@ VarFunc SampleVariance16x16_sse2;

WELSVP_EXTERN_C_END
#endif

#ifdef HAVE_NEON
WELSVP_EXTERN_C_BEGIN
VarFunc SampleVariance16x16_neon;
WELSVP_EXTERN_C_END
#endif

class CAdaptiveQuantization : public IStrategy {
public:
2
codec/processing/src/arm/adaptive_quantization.S
Executable file → Normal file
@ -51,7 +51,7 @@

#endif

WELS_ASM_FUNC_BEGIN pixel_var_16x16_neon
WELS_ASM_FUNC_BEGIN SampleVariance16x16_neon
stmdb sp!, {r4}

vld1.8 {q15}, [r0], r1 //save the ref data (16bytes)
6
codec/processing/src/arm/down_sample_neon.S
Executable file → Normal file
@ -35,7 +35,7 @@

#include "arm_arch_common_macro.S"

WELS_ASM_FUNC_BEGIN comp_ds_bilinear_neon
WELS_ASM_FUNC_BEGIN DyadicBilinearDownsampler_neon
stmdb sp!, {r4-r8, lr}

//Get the width and height
@ -174,7 +174,7 @@ comp_ds_bilinear_w_x16_loop1:
WELS_ASM_FUNC_END

WELS_ASM_FUNC_BEGIN comp_ds_bilinear_w_x32_neon
WELS_ASM_FUNC_BEGIN DyadicBilinearDownsamplerWidthx32_neon
stmdb sp!, {r4-r7, lr}

//Get the width and height
@ -223,7 +223,7 @@ comp_ds_bilinear_w_x32_loop1:
WELS_ASM_FUNC_END

WELS_ASM_FUNC_BEGIN general_ds_bilinear_accurate_neon
WELS_ASM_FUNC_BEGIN GeneralBilinearAccurateDownsampler_neon
stmdb sp!, {r4-r12, lr}

//Get the data from stack
2
codec/processing/src/arm/pixel_sad_neon.S
Executable file → Normal file
@ -35,7 +35,7 @@

#include "arm_arch_common_macro.S"

WELS_ASM_FUNC_BEGIN pixel_sad_8x8_neon
WELS_ASM_FUNC_BEGIN WelsSampleSad8x8_neon
stmdb sp!, {lr}
//Loading a horizontal line data (8 bytes)
vld1.8 {d0}, [r0], r1
10
codec/processing/src/arm/vaa_calc_neon.S
Executable file → Normal file
@ -96,7 +96,7 @@

#endif

WELS_ASM_FUNC_BEGIN vaa_calc_sad_neon
WELS_ASM_FUNC_BEGIN VAACalcSad_neon

stmdb sp!, {r4-r8}

@ -252,7 +252,7 @@ WELS_ASM_FUNC_END
.endm
#endif

WELS_ASM_FUNC_BEGIN vaa_calc_sad_bgd_neon
WELS_ASM_FUNC_BEGIN VAACalcSadBgd_neon

stmdb sp!, {r4-r10}

@ -633,7 +633,7 @@ WELS_ASM_FUNC_END
#endif

WELS_ASM_FUNC_BEGIN vaa_calc_sad_ssd_bgd_neon
WELS_ASM_FUNC_BEGIN VAACalcSadSsdBgd_neon
stmdb sp!, {r0-r12, r14}

ldr r4, [sp, #56] //r4 keeps the pic_stride

@ -910,7 +910,7 @@ WELS_ASM_FUNC_END
#endif

WELS_ASM_FUNC_BEGIN vaa_calc_sad_var_neon
WELS_ASM_FUNC_BEGIN VAACalcSadVar_neon
stmdb sp!, {r4-r11}

ldr r4, [sp, #32] //r4 keeps the pic_stride

@ -1078,7 +1078,7 @@ WELS_ASM_FUNC_END
#endif

WELS_ASM_FUNC_BEGIN vaa_calc_sad_ssd_neon
WELS_ASM_FUNC_BEGIN VAACalcSadSsd_neon
stmdb sp!, {r4-r12}

ldr r4, [sp, #36] //r4 keeps the pic_stride
@ -75,6 +75,16 @@ void CDownsampling::InitDownsampleFuncs (SDownsampleFuncs& sDownsampleFunc, int

  }
#endif//X86_ASM

#if defined(HAVE_NEON)
  if (iCpuFlag & WELS_CPU_NEON) {
    sDownsampleFunc.pfHalfAverage[0] = DyadicBilinearDownsamplerWidthx32_neon;
    sDownsampleFunc.pfHalfAverage[1] = DyadicBilinearDownsampler_neon;
    sDownsampleFunc.pfHalfAverage[2] = DyadicBilinearDownsampler_neon;
    sDownsampleFunc.pfHalfAverage[3] = DyadicBilinearDownsampler_neon;
    sDownsampleFunc.pfGeneralRatioChroma = GeneralBilinearAccurateDownsamplerWrap_neon;
    sDownsampleFunc.pfGeneralRatioLuma   = GeneralBilinearAccurateDownsamplerWrap_neon;
  }
#endif
}
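
The pfHalfAverage kernels perform dyadic (2:1) bilinear downsampling: each output pixel is the rounded average of a 2x2 source block, with slot 0 reserved for the width-multiple-of-32 fast path. A scalar reference sketch (names illustrative; the NEON code uses pairwise rounding averages, so the exact rounding path may differ):

  #include <stdint.h>

  static void DyadicBilinearDownsample_ref (uint8_t* pDst, int32_t iDstStride,
                                            uint8_t* pSrc, int32_t iSrcStride,
                                            int32_t iSrcWidth, int32_t iSrcHeight) {
    for (int32_t y = 0; y < iSrcHeight / 2; ++y) {
      for (int32_t x = 0; x < iSrcWidth / 2; ++x) {
        uint8_t* p = pSrc + 2 * y * iSrcStride + 2 * x;
        // Rounded mean of the 2x2 block.
        pDst[y * iDstStride + x] =
            (uint8_t) ((p[0] + p[1] + p[iSrcStride] + p[iSrcStride + 1] + 2) >> 2);
      }
    }
  }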

EResult CDownsampling::Process (int32_t iType, SPixMap* pSrcPixMap, SPixMap* pDstPixMap) {
@ -103,7 +103,20 @@ void GeneralBilinearAccurateDownsampler_sse2 (uint8_t* pDst, const int32_t kiDst

WELSVP_EXTERN_C_END
#endif

#ifdef HAVE_NEON
WELSVP_EXTERN_C_BEGIN
// iSrcWidth: no limitation
HalveDownsampleFunc DyadicBilinearDownsampler_neon;
// iSrcWidth: multiple of 32 pixels
HalveDownsampleFunc DyadicBilinearDownsamplerWidthx32_neon;

GeneralDownsampleFunc GeneralBilinearAccurateDownsamplerWrap_neon;

void GeneralBilinearAccurateDownsampler_neon (uint8_t* pDst, const int32_t kiDstStride, const int32_t kiDstWidth, const int32_t kiDstHeight,
                                              uint8_t* pSrc, const int32_t kiSrcStride, const uint32_t kuiScaleX, const uint32_t kuiScaleY);

WELSVP_EXTERN_C_END
#endif

class CDownsampling : public IStrategy {
@ -229,4 +229,14 @@ void GeneralBilinearAccurateDownsampler_c (uint8_t* pDst, const int32_t kiDstStr

//}
#endif //X86_ASM

#ifdef HAVE_NEON
void GeneralBilinearAccurateDownsamplerWrap_neon (uint8_t* pDst, const int32_t kiDstStride, const int32_t kiDstWidth, const int32_t kiDstHeight,
                                                  uint8_t* pSrc, const int32_t kiSrcStride, const int32_t kiSrcWidth, const int32_t kiSrcHeight) {
  const int32_t kiScaleBit = 15;
  const uint32_t kuiScale  = (1 << kiScaleBit);
  uint32_t uiScalex = (uint32_t) ((float) kiSrcWidth / (float) kiDstWidth * kuiScale);
  uint32_t uiScaley = (uint32_t) ((float) kiSrcHeight / (float) kiDstHeight * kuiScale);
  GeneralBilinearAccurateDownsampler_neon (pDst, kiDstStride, kiDstWidth, kiDstHeight, pSrc, kiSrcStride, uiScalex, uiScaley);
}
#endif
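
The wrapper converts the source-to-destination size ratio into a Q15 fixed-point step for the assembly kernel. A quick worked check of that arithmetic (the numbers are illustrative):

  // Downscaling a 1280-pixel-wide plane to 720 pixels wide:
  //   uiScalex = (uint32_t)(1280.0f / 720.0f * 32768) = 58254
  // so each output pixel advances the source x coordinate by
  // 58254 / 32768, about 1.7778 source pixels, as expected.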

WELSVP_NAMESPACE_END
@ -130,6 +130,12 @@ void CSceneChangeDetection::InitSadFuncs (SadFuncPtr& pfSad, int32_t iCpuFlag)

    pfSad = WelsSampleSad8x8_sse21;
  }
#endif

#ifdef HAVE_NEON
  if (iCpuFlag & WELS_CPU_NEON) {
    pfSad = WelsSampleSad8x8_neon;
  }
#endif
}
@ -60,6 +60,12 @@ SadFunc WelsSampleSad8x8_sse21;

WELSVP_EXTERN_C_END
#endif

#ifdef HAVE_NEON
WELSVP_EXTERN_C_BEGIN
SadFunc WelsSampleSad8x8_neon;
WELSVP_EXTERN_C_END
#endif

WELSVP_NAMESPACE_END

#endif
@ -65,6 +65,15 @@ void CVAACalculation::InitVaaFuncs (SVaaFuncs& sVaaFuncs, int32_t iCpuFlag) {

    sVaaFuncs.pfVAACalcSadVar = VAACalcSadVar_sse2;
  }
#endif//X86_ASM
#ifdef HAVE_NEON
  if ((iCpuFlag & WELS_CPU_NEON) == WELS_CPU_NEON) {
    sVaaFuncs.pfVAACalcSad       = VAACalcSad_neon;
    sVaaFuncs.pfVAACalcSadBgd    = VAACalcSadBgd_neon;
    sVaaFuncs.pfVAACalcSadSsd    = VAACalcSadSsd_neon;
    sVaaFuncs.pfVAACalcSadSsdBgd = VAACalcSadSsdBgd_neon;
    sVaaFuncs.pfVAACalcSadVar    = VAACalcSadVar_neon;
  }
#endif//HAVE_NEON
}

EResult CVAACalculation::Process (int32_t iType, SPixMap* pSrcPixMap, SPixMap* pRefPixMap) {
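
A side note on the guard: InitVaaFuncs spells the CPU test as an exact-mask compare while the other init routines use a plain bitwise test; for a one-bit flag like WELS_CPU_NEON the two forms are equivalent:

  // Equivalent when WELS_CPU_NEON has a single bit set:
  //   (iCpuFlag & WELS_CPU_NEON) == WELS_CPU_NEON
  //   (iCpuFlag & WELS_CPU_NEON) != 0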
@ -103,6 +103,16 @@ VAACalcSadSsdFunc VAACalcSadSsd_sse2;

WELSVP_EXTERN_C_END
#endif

#ifdef HAVE_NEON
WELSVP_EXTERN_C_BEGIN
VAACalcSadBgdFunc    VAACalcSadBgd_neon;
VAACalcSadSsdBgdFunc VAACalcSadSsdBgd_neon;
VAACalcSadFunc       VAACalcSad_neon;
VAACalcSadVarFunc    VAACalcSadVar_neon;
VAACalcSadSsdFunc    VAACalcSadSsd_neon;
WELSVP_EXTERN_C_END
#endif

class CVAACalculation : public IStrategy {
public:
  CVAACalculation (int32_t iCpuFlag);