isa-l/erasure_code/aarch64/ec_aarch64_highlevel_func.c
Guodong Xu 3b3d7cc47b Enable SVE in ISA-L erasure code for aarch64
This patch adds Arm (aarch64) SVE [1] variable-length vector assembly support
into ISA-L erasure code library. "Arm designed the Scalable Vector Extension
(SVE) as a next-generation SIMD extension to AArch64. SVE allows flexible
vector length implementations with a range of possible values in CPU
implementations. The vector length can vary from a minimum of 128 bits up to
a maximum of 2048 bits, at 128-bit increments. The SVE design guarantees
that the same application can run on different implementations that support
SVE, without the need to recompile the code. " [3]

Test method:
 - This patch was tested on Fujitsu's A64FX [2], and it passed all erasure
     code related test cases, including "make checks" , "make test", and
     "make perf".
 - To ensure code testing coverage, parameters in files (erasure_code/
     erasure_code_test.c , erasure_code_update_test.c and gf_vect_mad_test.c)
are modified to cover all _vect versions of _mad_sve() / _dot_prod_sve()
     routines.

Performance improvements over NEON:
In general, SVE benchmarks (bandwidth in MB/s) are 40% ~ 100% higher than NEON
when running _cold style (data uncached and pulled from memory) perfs. This
includes routines of dot_prod, mad, and mul.

Optimization points:
This patch was tuned for the best performance on A64FX. Tuning points being
touched in this patch include:
1) Data prefetch into L2 cache before loading. See _sve.S files.
2) Instruction sequence orchestration. Such as interleaving every two
     'ld1b/st1b' instructions with other instructions. See _sve.S files.
3) To improve dest vectors parallelism, in highlevel, running
     gf_4vect_dot_prod_sve twice is better than running gf_8vect_dot_prod_sve()
     once, and it's also better than running _7vect + _vect, _6vect + _2vect,
     and _5vect + _3vect. The similar idea is applied to improve 11 ~ 9 dest
     vectors dot product computing as well. The related change can be found
     in ec_encode_data_sve() of file:
     erasure_code/aarch64/ec_aarch64_highlevel_func.c

Notes:
1) About vector length: A64FX has a vector register length of 512bit. However,
     this patchset was written with variable-length assembly so it works
     automatically on aarch64 machines with any SVE vector length,
     such as SVE-128, SVE-256, etc.
2) About optimization: Due to differences in microarchitecture and
     cache/memory design, to achieve optimum performance on SVE capable CPUs
     other than A64FX, it is considered necessary to do microarchitecture-level
     tunings on these CPUs.

[1] Introduction to SVE - Arm Developer.
      https://developer.arm.com/documentation/102476/latest/
[2] FUJITSU Processor A64FX.
      https://www.fujitsu.com/global/products/computing/servers/supercomputer/a64fx/
[3] Introducing SVE.
      https://developer.arm.com/documentation/102476/0001/Introducing-SVE

Change-Id: If49eb8a956154d799dcda0ba4c9c6d979f5064a9
Signed-off-by: Guodong Xu <guodong.xu@linaro.org>
2022-01-04 10:54:38 -07:00

265 lines
8.9 KiB
C

/**************************************************************
Copyright (c) 2019 Huawei Technologies Co., Ltd.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Huawei Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/
#include "erasure_code.h"
/*external function*/
extern void gf_vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char *dest);
extern void gf_2vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_3vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_4vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_5vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char *dest);
extern void gf_2vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_3vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_4vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_5vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_6vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
void ec_encode_data_neon(int len, int k, int rows, unsigned char *g_tbls, unsigned char **data,
unsigned char **coding)
{
if (len < 16) {
ec_encode_data_base(len, k, rows, g_tbls, data, coding);
return;
}
while (rows > 5) {
gf_5vect_dot_prod_neon(len, k, g_tbls, data, coding);
g_tbls += 5 * k * 32;
coding += 5;
rows -= 5;
}
switch (rows) {
case 5:
gf_5vect_dot_prod_neon(len, k, g_tbls, data, coding);
break;
case 4:
gf_4vect_dot_prod_neon(len, k, g_tbls, data, coding);
break;
case 3:
gf_3vect_dot_prod_neon(len, k, g_tbls, data, coding);
break;
case 2:
gf_2vect_dot_prod_neon(len, k, g_tbls, data, coding);
break;
case 1:
gf_vect_dot_prod_neon(len, k, g_tbls, data, *coding);
break;
case 0:
break;
default:
break;
}
}
void ec_encode_data_update_neon(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding)
{
if (len < 16) {
ec_encode_data_update_base(len, k, rows, vec_i, g_tbls, data, coding);
return;
}
while (rows > 6) {
gf_6vect_mad_neon(len, k, vec_i, g_tbls, data, coding);
g_tbls += 6 * k * 32;
coding += 6;
rows -= 6;
}
switch (rows) {
case 6:
gf_6vect_mad_neon(len, k, vec_i, g_tbls, data, coding);
break;
case 5:
gf_5vect_mad_neon(len, k, vec_i, g_tbls, data, coding);
break;
case 4:
gf_4vect_mad_neon(len, k, vec_i, g_tbls, data, coding);
break;
case 3:
gf_3vect_mad_neon(len, k, vec_i, g_tbls, data, coding);
break;
case 2:
gf_2vect_mad_neon(len, k, vec_i, g_tbls, data, coding);
break;
case 1:
gf_vect_mad_neon(len, k, vec_i, g_tbls, data, *coding);
break;
case 0:
break;
}
}
/* SVE */
extern void gf_vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char *dest);
extern void gf_2vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_3vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_4vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_5vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_6vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_7vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_8vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
extern void gf_vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char *dest);
extern void gf_2vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_3vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_4vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_5vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
extern void gf_6vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
unsigned char *src, unsigned char **dest);
void ec_encode_data_sve(int len, int k, int rows, unsigned char *g_tbls, unsigned char **data,
unsigned char **coding)
{
if (len < 16) {
ec_encode_data_base(len, k, rows, g_tbls, data, coding);
return;
}
while (rows > 11) {
gf_6vect_dot_prod_sve(len, k, g_tbls, data, coding);
g_tbls += 6 * k * 32;
coding += 6;
rows -= 6;
}
switch (rows) {
case 11:
/* 7 + 4 */
gf_7vect_dot_prod_sve(len, k, g_tbls, data, coding);
g_tbls += 7 * k * 32;
coding += 7;
gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 10:
/* 6 + 4 */
gf_6vect_dot_prod_sve(len, k, g_tbls, data, coding);
g_tbls += 6 * k * 32;
coding += 6;
gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 9:
/* 5 + 4 */
gf_5vect_dot_prod_sve(len, k, g_tbls, data, coding);
g_tbls += 5 * k * 32;
coding += 5;
gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 8:
/* 4 + 4 */
gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
g_tbls += 4 * k * 32;
coding += 4;
gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 7:
gf_7vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 6:
gf_6vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 5:
gf_5vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 4:
gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 3:
gf_3vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 2:
gf_2vect_dot_prod_sve(len, k, g_tbls, data, coding);
break;
case 1:
gf_vect_dot_prod_sve(len, k, g_tbls, data, *coding);
break;
default:
break;
}
}
void ec_encode_data_update_sve(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding)
{
if (len < 16) {
ec_encode_data_update_base(len, k, rows, vec_i, g_tbls, data, coding);
return;
}
while (rows > 6) {
gf_6vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
g_tbls += 6 * k * 32;
coding += 6;
rows -= 6;
}
switch (rows) {
case 6:
gf_6vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
break;
case 5:
gf_5vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
break;
case 4:
gf_4vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
break;
case 3:
gf_3vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
break;
case 2:
gf_2vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
break;
case 1:
gf_vect_mad_sve(len, k, vec_i, g_tbls, data, *coding);
break;
default:
break;
}
}