dec_neon: relocate some inline-asm defines

move simple loop filter defines closer to their use and LOAD* to a
location common with the intrinsics

Change-Id: Iaec506d27bbc9a01be20936e30b68a4b0e690ee3
Author: James Zern
Date: 2014-04-26 13:29:07 -07:00 (committed by Gerrit Code Review)
parent 4e393bb9f1
commit c8bbb636ea

@@ -29,57 +29,30 @@
#define WORK_AROUND_GCC
#endif
#define QRegs "q0", "q1", "q2", "q3", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
#define FLIP_SIGN_BIT2(a, b, s) \
"veor " #a "," #a "," #s " \n" \
"veor " #b "," #b "," #s " \n" \

#define FLIP_SIGN_BIT4(a, b, c, d, s) \
FLIP_SIGN_BIT2(a, b, s) \
FLIP_SIGN_BIT2(c, d, s) \

#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \
"vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \
"vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \
"vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \
"vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \
"vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
"vdup.8 q14, " #thresh " \n" \
"vcge.u8 " #mask ", q14, q15 \n" /* mask <= thresh */
#define GET_BASE_DELTA(p1, p0, q0, q1, o) \
"vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \
"vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (p0 - q0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (p0 - q0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (p0 - q0) */
#define DO_SIMPLE_FILTER(p0, q0, fl) \
"vmov.i8 q15, #0x03 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \
"vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \
"vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \
\
"vmov.i8 q15, #0x04 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 4 */ \
"vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \
"vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */
// Applies filter on 2 pixels (p0 and q0)
#define DO_FILTER2(p1, p0, q0, q1, thresh) \
NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \
"vmov.i8 q10, #0x80 \n" /* sign bit */ \
FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \
GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \
"vand q9, q9, q11 \n" /* apply filter mask */ \
DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \
FLIP_SIGN_BIT2(p0, q0, q10)
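// For reference, a scalar sketch of the arithmetic the macros above
// vectorize (Sat8/SimpleFilterPixel are illustrative names, not part of this
// file; abs() is from <stdlib.h>). This is the VP8 simple loop filter:
// filter when 2 * |p0 - q0| + |p1 - q1| / 2 <= thresh, with
// a = (p1 - q1) + 3 * (q0 - p0), p0 += (a + 3) >> 3, q0 -= (a + 4) >> 3,
// each signed step saturating the way vqadd.s8/vqsub.s8 do.
static WEBP_INLINE int Sat8(int v) {   // saturate to the signed 8-bit range
  return (v < -128) ? -128 : (v > 127) ? 127 : v;
}
static void SimpleFilterPixel(uint8_t* const p, int step, int thresh) {
  const int p1 = p[-2 * step], p0 = p[-step], q0 = p[0], q1 = p[step];
  // NEEDS_FILTER (the asm saturates the u8 sums; plain ints suffice here).
  if (2 * abs(p0 - q0) + abs(p1 - q1) / 2 <= thresh) {
    // FLIP_SIGN_BIT2/4: operate on sign-flipped (signed) pixel values.
    const int sp0 = p0 - 128, sq0 = q0 - 128;
    const int delta = Sat8(sq0 - sp0);                // (q0 - p0)
    int a = Sat8((p1 - 128) - (q1 - 128));            // (p1 - q1)
    a = Sat8(Sat8(Sat8(a + delta) + delta) + delta);  // GET_BASE_DELTA
    // DO_SIMPLE_FILTER: add/subtract the two rounded, shifted deltas.
    p[-step] = (uint8_t)(Sat8(sp0 + (Sat8(a + 3) >> 3)) + 128);  // p0 += filter1
    p[0] = (uint8_t)(Sat8(sq0 - (Sat8(a + 4) >> 3)) + 128);      // q0 -= filter2
  }
}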
//------------------------------------------------------------------------------
// NxM Loading functions
// Load/Store vertical edge
#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \
"vld4.8 {" #c1"[0], " #c2"[0], " #c3"[0], " #c4"[0]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[1], " #c2"[1], " #c3"[1], " #c4"[1]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[2], " #c2"[2], " #c3"[2], " #c4"[2]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[3], " #c2"[3], " #c3"[3], " #c4"[3]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[4], " #c2"[4], " #c3"[4], " #c4"[4]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[5], " #c2"[5], " #c3"[5], " #c4"[5]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n"
#define STORE8x2(c1, c2, p, stride) \
"vst2.8 {" #c1"[0], " #c2"[0]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[1], " #c2"[1]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[2], " #c2"[2]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[3], " #c2"[3]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[4], " #c2"[4]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[5], " #c2"[5]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[6], " #c2"[6]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[7], " #c2"[7]}," #p "," #stride " \n"
#if !defined(WORK_AROUND_GCC)
// This intrinsics version makes gcc-4.6.3 crash during Load4x??() compilation
@@ -417,27 +390,6 @@ static WEBP_INLINE void Store4x8x2(const uint8x16_t p1, const uint8x16_t p0,
#endif // !WORK_AROUND_GCC
// Load/Store vertical edge
#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \
"vld4.8 {" #c1"[0], " #c2"[0], " #c3"[0], " #c4"[0]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[1], " #c2"[1], " #c3"[1], " #c4"[1]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[2], " #c2"[2], " #c3"[2], " #c4"[2]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[3], " #c2"[3], " #c3"[3], " #c4"[3]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[4], " #c2"[4], " #c3"[4], " #c4"[4]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[5], " #c2"[5], " #c3"[5], " #c4"[5]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n"
#define STORE8x2(c1, c2, p, stride) \
"vst2.8 {" #c1"[0], " #c2"[0]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[1], " #c2"[1]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[2], " #c2"[2]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[3], " #c2"[3]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[4], " #c2"[4]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[5], " #c2"[5]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[6], " #c2"[6]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[7], " #c2"[7]}," #p "," #stride " \n"
// Treats 'v' as a uint8x8_t and zero-extends it to an int16x8_t.
static WEBP_INLINE int16x8_t ConvertU8ToS16(uint32x2_t v) {
return vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(v)));
}
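// A hypothetical caller (not part of this file): load two 4-pixel rows into
// the two u32 lanes, then widen all eight bytes to signed 16-bit lanes.
static WEBP_INLINE int16x8_t LoadTwoRowsAsS16(const uint8_t* src, int stride) {
  uint32x2_t in = vdup_n_u32(0);
  in = vld1_lane_u32((const uint32_t*)(const void*)(src + 0 * stride), in, 0);
  in = vld1_lane_u32((const uint32_t*)(const void*)(src + 1 * stride), in, 1);
  return ConvertU8ToS16(in);  // 2x4 pixels zero-extended to eight s16 lanes
}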
@@ -580,6 +532,54 @@ static void SimpleHFilter16(uint8_t* p, int stride, int thresh) {
#else
#define QRegs "q0", "q1", "q2", "q3", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
#define FLIP_SIGN_BIT2(a, b, s) \
"veor " #a "," #a "," #s " \n" \
"veor " #b "," #b "," #s " \n" \

#define FLIP_SIGN_BIT4(a, b, c, d, s) \
FLIP_SIGN_BIT2(a, b, s) \
FLIP_SIGN_BIT2(c, d, s) \

#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \
"vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \
"vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \
"vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \
"vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \
"vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
"vdup.8 q14, " #thresh " \n" \
"vcge.u8 " #mask ", q14, q15 \n" /* mask <= thresh */
#define GET_BASE_DELTA(p1, p0, q0, q1, o) \
"vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \
"vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (p0 - q0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (p0 - q0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (p0 - q0) */
#define DO_SIMPLE_FILTER(p0, q0, fl) \
"vmov.i8 q15, #0x03 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \
"vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \
"vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \
\
"vmov.i8 q15, #0x04 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 4 */ \
"vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \
"vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */
// Applies filter on 2 pixels (p0 and q0)
#define DO_FILTER2(p1, p0, q0, q1, thresh) \
NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \
"vmov.i8 q10, #0x80 \n" /* sign bit */ \
FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \
GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \
"vand q9, q9, q11 \n" /* apply filter mask */ \
DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \
FLIP_SIGN_BIT2(p0, q0, q10)
static void SimpleVFilter16(uint8_t* p, int stride, int thresh) {
__asm__ volatile (
"sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride