cb14764fab
Adds vp8_sub_pixel_variance16x16_armv6 function to encoder. Integrates ARMv6 optimized bilinear interpolations from vp8/common/arm/armv6 and adds new assembly file for variance16x16 calculation. - vp8_filter_block2d_bil_first_pass_armv6 (integrated) - vp8_filter_block2d_bil_second_pass_armv6 (integrated) - vp8_variance16x16_armv6 (new) - bilinearfilter_arm.h (new) Change-Id: I18a8331ce7d031ceedd6cd415ecacb0c8f3392db
148 lines
5.7 KiB
ARM assembly (armasm/RVCT syntax)
;
;  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;

EXPORT |vp8_variance16x16_armv6|
|
|
|
|
ARM
|
|
REQUIRE8
|
|
PRESERVE8
|
|
|
|
AREA ||.text||, CODE, READONLY, ALIGN=2
|
|
|
|
; r0 unsigned char *src_ptr
|
|
; r1 int source_stride
|
|
; r2 unsigned char *ref_ptr
|
|
; r3 int recon_stride
|
|
; stack unsigned int *sse
|
|
|vp8_variance16x16_armv6| PROC
|
|
|
|
stmfd sp!, {r4-r12, lr}
|
|
mov r12, #16 ; set loop counter to 16 (=block height)
|
|
mov r8, #0 ; initialize sum = 0
|
|
mov r11, #0 ; initialize sse = 0
|
|
|
|
loop
|
|
; 1st 4 pixels
|
|
ldr r4, [r0, #0x0] ; load 4 src pixels
|
|
ldr r5, [r2, #0x0] ; load 4 ref pixels
|
|
|
|
mov lr, #0 ; constant zero
|
|
|
|
usub8 r6, r4, r5 ; calculate difference
|
|
sel r7, r6, lr ; select bytes with positive difference
|
|
usub8 r9, r5, r4 ; calculate difference with reversed operands
|
|
sel r6, r9, lr ; select bytes with negative difference
|
|
|
|
; calculate partial sums
|
|
usad8 r4, r7, lr ; calculate sum of positive differences
|
|
usad8 r5, r6, lr ; calculate sum of negative differences
|
|
orr r6, r6, r7 ; differences of all 4 pixels
|
|
; calculate total sum
|
|
adds r8, r8, r4 ; add positive differences to sum
|
|
subs r8, r8, r5 ; substract negative differences from sum
|
|
|
|
; calculate sse
|
|
uxtb16 r5, r6 ; byte (two pixels) to halfwords
|
|
uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
|
|
smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
|
|
|
|
; 2nd 4 pixels
|
|
ldr r4, [r0, #0x4] ; load 4 src pixels
|
|
ldr r5, [r2, #0x4] ; load 4 ref pixels
|
|
smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
|
|
|
|
usub8 r6, r4, r5 ; calculate difference
|
|
sel r7, r6, lr ; select bytes with positive difference
|
|
usub8 r9, r5, r4 ; calculate difference with reversed operands
|
|
sel r6, r9, lr ; select bytes with negative difference
|
|
|
|
; calculate partial sums
|
|
usad8 r4, r7, lr ; calculate sum of positive differences
|
|
usad8 r5, r6, lr ; calculate sum of negative differences
|
|
orr r6, r6, r7 ; differences of all 4 pixels
|
|
|
|
; calculate total sum
|
|
add r8, r8, r4 ; add positive differences to sum
|
|
sub r8, r8, r5 ; substract negative differences from sum
|
|
|
|
; calculate sse
|
|
uxtb16 r5, r6 ; byte (two pixels) to halfwords
|
|
uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
|
|
smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
|
|
|
|
; 3rd 4 pixels
|
|
ldr r4, [r0, #0x8] ; load 4 src pixels
|
|
ldr r5, [r2, #0x8] ; load 4 ref pixels
|
|
smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
|
|
|
|
usub8 r6, r4, r5 ; calculate difference
|
|
sel r7, r6, lr ; select bytes with positive difference
|
|
usub8 r9, r5, r4 ; calculate difference with reversed operands
|
|
sel r6, r9, lr ; select bytes with negative difference
|
|
|
|
; calculate partial sums
|
|
usad8 r4, r7, lr ; calculate sum of positive differences
|
|
usad8 r5, r6, lr ; calculate sum of negative differences
|
|
orr r6, r6, r7 ; differences of all 4 pixels
|
|
|
|
; calculate total sum
|
|
add r8, r8, r4 ; add positive differences to sum
|
|
sub r8, r8, r5 ; substract negative differences from sum
|
|
|
|
; calculate sse
|
|
uxtb16 r5, r6 ; byte (two pixels) to halfwords
|
|
uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
|
|
smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
|
|
|
|
; 4th 4 pixels
|
|
ldr r4, [r0, #0xc] ; load 4 src pixels
|
|
ldr r5, [r2, #0xc] ; load 4 ref pixels
|
|
smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
|
|
|
|
usub8 r6, r4, r5 ; calculate difference
|
|
add r0, r0, r1 ; set src_ptr to next row
|
|
sel r7, r6, lr ; select bytes with positive difference
|
|
usub8 r9, r5, r4 ; calculate difference with reversed operands
|
|
add r2, r2, r3 ; set dst_ptr to next row
|
|
sel r6, r9, lr ; select bytes with negative difference
|
|
|
|
; calculate partial sums
|
|
usad8 r4, r7, lr ; calculate sum of positive differences
|
|
usad8 r5, r6, lr ; calculate sum of negative differences
|
|
orr r6, r6, r7 ; differences of all 4 pixels
|
|
|
|
; calculate total sum
|
|
add r8, r8, r4 ; add positive differences to sum
|
|
sub r8, r8, r5 ; substract negative differences from sum
|
|
|
|
; calculate sse
|
|
uxtb16 r5, r6 ; byte (two pixels) to halfwords
|
|
uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
|
|
smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
|
|
smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
|
|
|
|
|
|
subs r12, r12, #1
|
|
|
|
bne loop
|
|
|
|
; return stuff
|
|
ldr r6, [sp, #0x28] ; get address of sse
|
|
mul r0, r8, r8 ; sum * sum
|
|
str r11, [r6] ; store sse
|
|
sub r0, r11, r0, ASR #8 ; return (sse - ((sum * sum) >> 8))
|
|
|
|
ldmfd sp!, {r4-r12, pc}
|
|
|
|
ENDP
|
|
|
|
END
|