; vpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm

;
; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
    EXPORT |vp8_start_encode|
    EXPORT |vp8_encode_bool|
    EXPORT |vp8_stop_encode|
    EXPORT |vp8_encode_value|
    IMPORT |vp8_validate_buffer_arm|
    INCLUDE asm_enc_offsets.asm
    ARM
    REQUIRE8
    PRESERVE8

    AREA |.text|, CODE, READONLY
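
; Boolean (arithmetic) encoder routines for the VP8 bitstream writer.
; Writer state, accessed through the asm_enc_offsets constants above:
; lowvalue holds the pending output bits, range is the current coding
; interval (renormalized back into [128, 255]), count counts up from a
; negative value and a byte is flushed once it becomes non-negative,
; pos is the write position, and buffer/buffer_end bound the output.
; The only instruction used here beyond ARMv4 is clz (ARMv5 and later).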
; macro for validating write buffer position
; needs vp8_writer in r0
; start shall not be in r1
    MACRO
    VALIDATE_POS $start, $pos
    push {r0-r3, r12, lr} ; rest of regs are preserved by subroutine call
    ldr r2, [r0, #vp8_writer_buffer_end]
    ldr r3, [r0, #vp8_writer_error]
    mov r1, $pos
    mov r0, $start
    bl vp8_validate_buffer_arm
    pop {r0-r3, r12, lr}
    MEND
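; Note: VALIDATE_POS passes the buffer start and the new write position,
; together with w->buffer_end and w->error, to vp8_validate_buffer_arm;
; presumably that routine records an error through the writer's error
; field when the position would run past the end of the buffer.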
; r0 BOOL_CODER *br
; r1 unsigned char *source
; r2 unsigned char *source_end
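; Initializes the writer: lowvalue = value = pos = 0, range = 255 and
; count = -24 (the mvn below), i.e. 24 bits must accumulate before the
; first byte is flushed.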
|vp8_start_encode| PROC
    str r2, [r0, #vp8_writer_buffer_end]
    mov r12, #0
    mov r3, #255
    mvn r2, #23
    str r12, [r0, #vp8_writer_lowvalue]
    str r3, [r0, #vp8_writer_range]
    str r12, [r0, #vp8_writer_value]
    str r2, [r0, #vp8_writer_count]
    str r12, [r0, #vp8_writer_pos]
    str r1, [r0, #vp8_writer_buffer]
    bx lr
    ENDP
; r0 BOOL_CODER *br
; r1 int bit
; r2 int probability
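; For reference, the per-bit update below is roughly (a C-style sketch of
; the same steps, not verbatim source):
;     split = 1 + (((range - 1) * probability) >> 8);
;     if (bit) { lowvalue += split; range -= split; } else range = split;
;     shift = clz(range) - 24;            renormalize so range >= 128
;     range <<= shift;  count += shift;
;     if (count >= 0) {                   a full byte of lowvalue is ready
;         offset = shift - count;
;         if ((lowvalue << (offset - 1)) & 0x80000000)
;             propagate the carry back through trailing 0xff output bytes;
;         w->buffer[w->pos++] = lowvalue >> (24 - offset);
;         lowvalue = (lowvalue << offset) & 0xffffff;
;         shift = count;  count -= 8;
;     }
;     lowvalue <<= shift;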
|vp8_encode_bool| PROC
    push {r4-r10, lr}
    mov r4, r2
    ldr r2, [r0, #vp8_writer_lowvalue]
    ldr r5, [r0, #vp8_writer_range]
    ldr r3, [r0, #vp8_writer_count]
    sub r7, r5, #1 ; range-1
    cmp r1, #0
    mul r6, r4, r7 ; ((range-1) * probability)
    mov r7, #1
    add r4, r7, r6, lsr #8 ; 1 + (((range-1) * probability) >> 8)
    addne r2, r2, r4 ; if (bit) lowvalue += split
    subne r4, r5, r4 ; if (bit) range = range-split
    ; Counting the leading zeros is used to normalize range.
    clz r6, r4
    sub r6, r6, #24 ; shift
    ; Flag is set on the sum of count. This flag is used later
    ; to determine if count >= 0
    adds r3, r3, r6 ; count += shift
    lsl r5, r4, r6 ; range <<= shift
    bmi token_count_lt_zero ; if(count >= 0)
    sub r6, r6, r3 ; offset = shift - count
    sub r4, r6, #1 ; offset-1
    lsls r4, r2, r4 ; if((lowvalue<<(offset-1)) & 0x80000000 )
    bpl token_high_bit_not_set
    ldr r4, [r0, #vp8_writer_pos] ; x
    sub r4, r4, #1 ; x = w->pos-1
    b token_zero_while_start
token_zero_while_loop
    mov r9, #0
    strb r9, [r7, r4] ; w->buffer[x] =(unsigned char)0
    sub r4, r4, #1 ; x--
token_zero_while_start
    cmp r4, #0
    ldrge r7, [r0, #vp8_writer_buffer]
    ldrb r1, [r7, r4]
    cmpge r1, #0xff
    beq token_zero_while_loop
    ldr r7, [r0, #vp8_writer_buffer]
    ldrb r9, [r7, r4] ; w->buffer[x]
    add r9, r9, #1
    strb r9, [r7, r4] ; w->buffer[x] + 1
token_high_bit_not_set
    rsb r4, r6, #24 ; 24-offset
    ldr r9, [r0, #vp8_writer_buffer]
    lsr r7, r2, r4 ; lowvalue >> (24-offset)
    ldr r4, [r0, #vp8_writer_pos] ; w->pos
    lsl r2, r2, r6 ; lowvalue <<= offset
    mov r6, r3 ; shift = count
    add r1, r4, #1 ; w->pos++
    bic r2, r2, #0xff000000 ; lowvalue &= 0xffffff
    str r1, [r0, #vp8_writer_pos]
    sub r3, r3, #8 ; count -= 8
    VALIDATE_POS r9, r1 ; validate_buffer at pos
    strb r7, [r9, r4] ; w->buffer[w->pos++]
token_count_lt_zero
    lsl r2, r2, r6 ; lowvalue <<= shift
    str r2, [r0, #vp8_writer_lowvalue]
    str r5, [r0, #vp8_writer_range]
    str r3, [r0, #vp8_writer_count]
    pop {r4-r10, pc}
    ENDP
; r0 BOOL_CODER *br
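; Flushes the encoder: pushes 32 further bits through the coder, each
; coded with probability one half ((range-1) * 128 below), so that all
; pending bits in lowvalue reach the output buffer.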
|vp8_stop_encode| PROC
    push {r4-r10, lr}
    ldr r2, [r0, #vp8_writer_lowvalue]
    ldr r5, [r0, #vp8_writer_range]
    ldr r3, [r0, #vp8_writer_count]
    mov r10, #32
stop_encode_loop
    sub r7, r5, #1 ; range-1
    mov r4, r7, lsl #7 ; ((range-1) * 128)
    mov r7, #1
    add r4, r7, r4, lsr #8 ; 1 + (((range-1) * 128) >> 8)
    ; Counting the leading zeros is used to normalize range.
    clz r6, r4
    sub r6, r6, #24 ; shift
    ; Flag is set on the sum of count. This flag is used later
    ; to determine if count >= 0
    adds r3, r3, r6 ; count += shift
    lsl r5, r4, r6 ; range <<= shift
    bmi token_count_lt_zero_se ; if(count >= 0)
    sub r6, r6, r3 ; offset = shift - count
    sub r4, r6, #1 ; offset-1
    lsls r4, r2, r4 ; if((lowvalue<<(offset-1)) & 0x80000000 )
    bpl token_high_bit_not_set_se
    ldr r4, [r0, #vp8_writer_pos] ; x
    sub r4, r4, #1 ; x = w->pos-1
    b token_zero_while_start_se
token_zero_while_loop_se
    mov r9, #0
    strb r9, [r7, r4] ; w->buffer[x] =(unsigned char)0
    sub r4, r4, #1 ; x--
token_zero_while_start_se
    cmp r4, #0
    ldrge r7, [r0, #vp8_writer_buffer]
    ldrb r1, [r7, r4]
    cmpge r1, #0xff
    beq token_zero_while_loop_se
    ldr r7, [r0, #vp8_writer_buffer]
    ldrb r9, [r7, r4] ; w->buffer[x]
    add r9, r9, #1
    strb r9, [r7, r4] ; w->buffer[x] + 1
token_high_bit_not_set_se
    rsb r4, r6, #24 ; 24-offset
    ldr r9, [r0, #vp8_writer_buffer]
    lsr r7, r2, r4 ; lowvalue >> (24-offset)
    ldr r4, [r0, #vp8_writer_pos] ; w->pos
    lsl r2, r2, r6 ; lowvalue <<= offset
    mov r6, r3 ; shift = count
    add r1, r4, #1 ; w->pos++
    bic r2, r2, #0xff000000 ; lowvalue &= 0xffffff
    str r1, [r0, #vp8_writer_pos]
    sub r3, r3, #8 ; count -= 8
    VALIDATE_POS r9, r1 ; validate_buffer at pos
    strb r7, [r9, r4] ; w->buffer[w->pos++]
token_count_lt_zero_se
    lsl r2, r2, r6 ; lowvalue <<= shift
    subs r10, r10, #1
    bne stop_encode_loop
    str r2, [r0, #vp8_writer_lowvalue]
    str r5, [r0, #vp8_writer_range]
    str r3, [r0, #vp8_writer_count]
    pop {r4-r10, pc}
    ENDP
; r0 BOOL_CODER *br
; r1 int data
; r2 int bits
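; Writes the 'bits' least-significant bits of 'data' to the bitstream,
; most significant first, each with probability one half. The value is
; pre-shifted to the top of r1 so that each 'lsls r1, r1, #1' moves the
; next bit into the carry flag, which then drives the addcs/subcs pair
; in the loop body.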
|vp8_encode_value| PROC
    push {r4-r12, lr}
    mov r10, r2
    ldr r2, [r0, #vp8_writer_lowvalue]
    ldr r5, [r0, #vp8_writer_range]
    ldr r3, [r0, #vp8_writer_count]
    rsb r4, r10, #32 ; 32-n
    ; v is kept in r1 during the token pack loop
    lsl r1, r1, r4 ; r1 = v << 32 - n
encode_value_loop
    sub r7, r5, #1 ; range-1
    ; Decisions are made based on the bit value shifted
    ; off of v, so set a flag here based on this.
    ; This value is referred to as "bb"
    lsls r1, r1, #1 ; bit = v >> n
    mov r4, r7, lsl #7 ; ((range-1) * 128)
    mov r7, #1
    add r4, r7, r4, lsr #8 ; 1 + (((range-1) * 128) >> 8)
    addcs r2, r2, r4 ; if (bit) lowvalue += split
    subcs r4, r5, r4 ; if (bit) range = range-split
    ; Counting the leading zeros is used to normalize range.
    clz r6, r4
    sub r6, r6, #24 ; shift
    ; Flag is set on the sum of count. This flag is used later
    ; to determine if count >= 0
    adds r3, r3, r6 ; count += shift
    lsl r5, r4, r6 ; range <<= shift
    bmi token_count_lt_zero_ev ; if(count >= 0)
    sub r6, r6, r3 ; offset = shift - count
    sub r4, r6, #1 ; offset-1
    lsls r4, r2, r4 ; if((lowvalue<<(offset-1)) & 0x80000000 )
    bpl token_high_bit_not_set_ev
    ldr r4, [r0, #vp8_writer_pos] ; x
    sub r4, r4, #1 ; x = w->pos-1
    b token_zero_while_start_ev
token_zero_while_loop_ev
    mov r9, #0
    strb r9, [r7, r4] ; w->buffer[x] =(unsigned char)0
    sub r4, r4, #1 ; x--
token_zero_while_start_ev
    cmp r4, #0
    ldrge r7, [r0, #vp8_writer_buffer]
    ldrb r11, [r7, r4]
    cmpge r11, #0xff
    beq token_zero_while_loop_ev
    ldr r7, [r0, #vp8_writer_buffer]
    ldrb r9, [r7, r4] ; w->buffer[x]
    add r9, r9, #1
    strb r9, [r7, r4] ; w->buffer[x] + 1
token_high_bit_not_set_ev
    rsb r4, r6, #24 ; 24-offset
    ldr r9, [r0, #vp8_writer_buffer]
    lsr r7, r2, r4 ; lowvalue >> (24-offset)
    ldr r4, [r0, #vp8_writer_pos] ; w->pos
    lsl r2, r2, r6 ; lowvalue <<= offset
    mov r6, r3 ; shift = count
    add r11, r4, #1 ; w->pos++
    bic r2, r2, #0xff000000 ; lowvalue &= 0xffffff
    str r11, [r0, #vp8_writer_pos]
    sub r3, r3, #8 ; count -= 8
    VALIDATE_POS r9, r11 ; validate_buffer at pos
    strb r7, [r9, r4] ; w->buffer[w->pos++]
token_count_lt_zero_ev
    lsl r2, r2, r6 ; lowvalue <<= shift
    subs r10, r10, #1
    bne encode_value_loop
    str r2, [r0, #vp8_writer_lowvalue]
    str r5, [r0, #vp8_writer_range]
    str r3, [r0, #vp8_writer_count]
    pop {r4-r12, pc}
    ENDP

    END