mirror of
https://github.com/intel/isa-l.git
3b3d7cc47b
This patch adds Arm (aarch64) SVE [1] variable-length vector assembly
support to the ISA-L erasure code library.

"Arm designed the Scalable Vector Extension (SVE) as a next-generation
SIMD extension to AArch64. SVE allows flexible vector length
implementations with a range of possible values in CPU implementations.
The vector length can vary from a minimum of 128 bits up to a maximum
of 2048 bits, at 128-bit increments. The SVE design guarantees that the
same application can run on different implementations that support SVE,
without the need to recompile the code." [3]

Test method:
- This patch was tested on Fujitsu's A64FX [2], and it passed all
  erasure code related test cases, including "make checks", "make test",
  and "make perf".
- To ensure test coverage, parameters in erasure_code/erasure_code_test.c,
  erasure_code_update_test.c, and gf_vect_mad_test.c were modified to
  cover all _vect versions of the _mad_sve() / _dot_prod_sve() routines.

Performance improvements over NEON:
In general, SVE benchmark bandwidth (MB/s) is 40% ~ 100% higher than
NEON when running _cold style perfs (data uncached, pulled from
memory). This covers the dot_prod, mad, and mul routines.

Optimization points:
This patch was tuned for the best performance on A64FX. The tuning
points touched in this patch are:
1) Data prefetch into L2 cache before loading. See the _sve.S files.
2) Instruction sequence orchestration, such as interleaving every two
   'ld1b/st1b' instructions with other instructions. See the _sve.S
   files.
3) To improve dest-vector parallelism at the high level, running
   gf_4vect_dot_prod_sve() twice is better than running
   gf_8vect_dot_prod_sve() once, and also better than the _7vect +
   _vect, _6vect + _2vect, and _5vect + _3vect combinations. The same
   idea is applied to the 11 ~ 9 dest-vector dot product computations
   as well. The related change is in ec_encode_data_sve() in
   erasure_code/aarch64/ec_aarch64_highlevel_func.c; a C sketch of the
   idea follows this message.

Notes:
1) About vector length: A64FX has a vector register length of 512 bits.
   However, this patchset was written in vector-length-agnostic
   assembly, so it works automatically on aarch64 machines with any SVE
   vector length, such as SVE-128, SVE-256, etc.
2) About optimization: due to differences in microarchitecture and
   cache/memory design, achieving optimal performance on SVE-capable
   CPUs other than A64FX will likely require microarchitecture-level
   tuning on those CPUs.

[1] Introduction to SVE - Arm Developer.
    https://developer.arm.com/documentation/102476/latest/
[2] FUJITSU Processor A64FX.
    https://www.fujitsu.com/global/products/computing/servers/supercomputer/a64fx/
[3] Introducing SVE.
    https://developer.arm.com/documentation/102476/0001/Introducing-SVE

Change-Id: If49eb8a956154d799dcda0ba4c9c6d979f5064a9
Signed-off-by: Guodong Xu <guodong.xu@linaro.org>
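To illustrate optimization point 3, here is a minimal C sketch of the
two-pass dispatch idea. This is an illustration, not the actual
ec_aarch64_highlevel_func.c source: the function name
ec_encode_data_sve_sketch is hypothetical, while the
gf_4vect_dot_prod_sve() signature and the 32-bytes-per-table gftbls
layout follow ISA-L conventions.

/* assumed prototype, following the ISA-L gf_Nvect_dot_prod pattern */
void gf_4vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
                           unsigned char **src, unsigned char **dest);

/* Sketch: favor two 4-dest dot products over one 8-dest call. */
void ec_encode_data_sve_sketch(int len, int k, int rows,
                               unsigned char *g_tbls,
                               unsigned char **data, unsigned char **coding)
{
        /* each dest (parity) row consumes k * 32 bytes of GF tables */
        while (rows >= 8) {
                /* two passes of 4 dest vectors each; per the commit
                 * message this beats a single 8-dest pass on A64FX */
                gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
                gf_4vect_dot_prod_sve(len, k, g_tbls + 4 * k * 32,
                                      data, coding + 4);
                g_tbls += 8 * k * 32;
                coding += 8;
                rows -= 8;
        }
        /* 1..7 remaining rows go to the matching gf_Nvect routine */
}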
172 lines
4.7 KiB
ArmAsm
/**************************************************************
  Copyright (c) 2021 Linaro Ltd.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Huawei Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/

.text
.align 6
.arch armv8-a+sve

.global gf_3vect_mad_sve
.type gf_3vect_mad_sve, %function

/* gf_3vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
 *                  unsigned char *src, unsigned char **dest);
 */
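/*
 * Table layout (implied by the LSL #5 strides and 32-byte ldp loads
 * below): gftbls holds one 32-byte table per (dest row, src vector)
 * pair, 16 bytes for the low nibble and 16 bytes for the high nibble.
 * The pointer first advances by x_vec_i * 32 to reach the tables for
 * source vector vec_i, then by x_vec * 32 to step from one dest row's
 * tables to the next.
 */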

/* arguments */
x_len           .req    x0
x_vec           .req    x1
x_vec_i         .req    x2
x_tbl           .req    x3
x_src           .req    x4
x_dest          .req    x5

/* returns */
w_ret           .req    w0

/* local variables */
x_pos           .req    x6
x_dest2         .req    x7
x_dest3         .req    x8
x_dest1         .req    x12

/* vectors */
z_mask0f        .req    z0

z_src           .req    z1
z_src_lo        .req    z2
z_src_hi        .req    z_src

z_dest1         .req    z3

z_tmp_lo        .req    z4
z_tmp_hi        .req    z5

z_gft1_lo       .req    z6
z_gft1_hi       .req    z7
q_gft1_lo       .req    q6
q_gft1_hi       .req    q7

/* bottom 64-bit of v8..v15 must be preserved if used */
z_gft2_lo       .req    z17
z_gft2_hi       .req    z18
q_gft2_lo       .req    q17
q_gft2_hi       .req    q18

z_gft3_lo       .req    z19
z_gft3_hi       .req    z20
q_gft3_lo       .req    q19
q_gft3_hi       .req    q20

z_dest2         .req    z27
z_dest3         .req    z28
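
/*
 * Core technique: each GF(2^8) multiply by a row coefficient uses two
 * 16-entry lookup tables. For every source byte b, the product is
 *     gft_lo[b & 0x0f] ^ gft_hi[b >> 4]
 * 'tbl' performs these lookups across the whole vector, and 'eor'
 * (GF(2^8) addition) folds the result into the dest bytes.
 */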
gf_3vect_mad_sve:
        /* less than 16 bytes, return_fail */
        cmp     x_len, #16
        blt     .return_fail

        mov     z_mask0f.b, #0x0f               /* z_mask0f = 0x0F0F...0F */

        /* load table 1 */
        add     x_tbl, x_tbl, x_vec_i, LSL #5   /* x_tbl += x_vec_i * 2^5 */

        /* Load table 1 with NEON instruction ldp */
        ldp     q_gft1_lo, q_gft1_hi, [x_tbl]
        /* load table 2 */
        add     x_tbl, x_tbl, x_vec, LSL #5     /* x_tbl += x_vec * 2^5 */
        ldp     q_gft2_lo, q_gft2_hi, [x_tbl]
        /* load table 3 */
        add     x_tbl, x_tbl, x_vec, LSL #5     /* x_tbl += x_vec * 2^5 */
        ldp     q_gft3_lo, q_gft3_hi, [x_tbl]

        ldr     x_dest1, [x_dest, #8*0]         /* pointer to dest1 */
        ldr     x_dest2, [x_dest, #8*1]         /* pointer to dest2 */
        ldr     x_dest3, [x_dest, #8*2]         /* pointer to dest3 */

        mov     x_pos, #0
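        /*
         * 'whilelo' builds a byte predicate for the elements still
         * below x_len, so the same loop body handles any hardware
         * vector length and the final partial vector without a scalar
         * tail; 'incb' advances x_pos by one vector length in bytes.
         */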
        /* vector length agnostic */
.Lloopsve_vl:
        whilelo p0.b, x_pos, x_len
        b.none  .return_pass

        /* dest data prefetch */
        prfb    pldl2strm, p0, [x_dest1, x_pos]
        prfb    pldl2strm, p0, [x_dest2, x_pos]

        /* load src data, governed by p0 */
        ld1b    z_src.b, p0/z, [x_src, x_pos]

        /* split 4-bit lo; 4-bit hi */
        and     z_src_lo.d, z_src.d, z_mask0f.d
        lsr     z_src_hi.b, z_src.b, #4

        /* load dest data, governed by p0 */
        ld1b    z_dest1.b, p0/z, [x_dest1, x_pos]
        ld1b    z_dest2.b, p0/z, [x_dest2, x_pos]
        prfb    pldl2strm, p0, [x_dest3, x_pos]

        /* dest1 */
        /* table indexing, ie. gf(2^8) multiplication */
        tbl     z_tmp_lo.b, {z_gft1_lo.b}, z_src_lo.b
        tbl     z_tmp_hi.b, {z_gft1_hi.b}, z_src_hi.b
        /* exclusive or, ie. gf(2^8) add */
        eor     z_dest1.d, z_tmp_lo.d, z_dest1.d
        eor     z_dest1.d, z_tmp_hi.d, z_dest1.d

        /* dest2 */
        tbl     z_tmp_lo.b, {z_gft2_lo.b}, z_src_lo.b
        tbl     z_tmp_hi.b, {z_gft2_hi.b}, z_src_hi.b

        ld1b    z_dest3.b, p0/z, [x_dest3, x_pos]
        /* store dest data, governed by p0 */
        st1b    z_dest1.b, p0, [x_dest1, x_pos]

        eor     z_dest2.d, z_tmp_lo.d, z_dest2.d
        eor     z_dest2.d, z_tmp_hi.d, z_dest2.d

        /* dest3 */
        tbl     z_tmp_lo.b, {z_gft3_lo.b}, z_src_lo.b
        tbl     z_tmp_hi.b, {z_gft3_hi.b}, z_src_hi.b
        eor     z_dest3.d, z_tmp_lo.d, z_dest3.d
        eor     z_dest3.d, z_tmp_hi.d, z_dest3.d

        /* store dest data, governed by p0 */
        st1b    z_dest2.b, p0, [x_dest2, x_pos]
        st1b    z_dest3.b, p0, [x_dest3, x_pos]
        /* increment one vector length */
        incb    x_pos

        b       .Lloopsve_vl

.return_pass:
        mov     w_ret, #0
        ret

.return_fail:
        mov     w_ret, #1
        ret
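
Usage sketch: the routine above computes, for j = 1..3,
dest_j ^= gf_mul(coef_j, src) over len bytes. Because Reed-Solomon
codes are linear, this supports updating parity in place when a single
source buffer changes, as the erasure_code_update_test.c cases exercise.
The helper below is illustrative only (the name update_3_parities is
hypothetical); the gf_3vect_mad_sve prototype is the one given in this
file.

void gf_3vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
                      unsigned char *src, unsigned char **dest);

/* Fold a change in source buffer vec_i into 3 parity buffers.
 * delta must hold old_data XOR new_data for that source buffer. */
static void update_3_parities(int len, int k, int vec_i,
                              unsigned char *g_tbls, unsigned char *delta,
                              unsigned char **parity)
{
        /* parity[j] ^= gf_mul(coef[j][vec_i], delta) for j = 0..2 */
        gf_3vect_mad_sve(len, k, vec_i, g_tbls, delta, parity);
}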