SSE2: 15% faster alpha-processing functions

ApplyAlphaMultiply / MultARGBRow / MultRow

we now use: x / 255 = (x * 0x8081) >> (16 + 7)
and: floor(x / 255. + .5) = ((x + 128) * 0x0101) >> 16

Change-Id: I8931091316ffc8bbf65aa3402f2e7d2b800e1971
Pascal Massimino 2017-01-11 15:35:16 +01:00
parent e3b8abbc9b
commit 48b1e85fbe
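As a sanity check (not part of the commit), both identities from the commit
message can be verified exhaustively over the relevant input ranges with a
small standalone C program:

#include <assert.h>
#include <stdint.h>

int main(void) {
  uint32_t x;
  // x / 255 == (x * 0x8081) >> 23 holds for every 16-bit x.
  for (x = 0; x < 65536; ++x) {
    assert(x / 255 == (x * 0x8081u) >> 23);
  }
  // floor(x / 255. + .5) == ((x + 128) * 0x0101) >> 16 holds for
  // x = alpha * value, i.e. 0 <= x <= 255 * 255. The left-hand side is
  // computed exactly as (2 * x + 255) / 510 in integer arithmetic.
  for (x = 0; x <= 255u * 255u; ++x) {
    assert((2 * x + 255) / 510 == ((x + 128) * 0x0101u) >> 16);
  }
  return 0;
}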


@@ -151,8 +151,8 @@ static int ExtractAlpha(const uint8_t* argb, int argb_stride,
 // We can't use a 'const int' for the SHUFFLE value, because it has to be an
 // immediate in the _mm_shufflexx_epi16() instruction. We really need a macro.
-// We use: v / 255 = (v + 1 + (v >> 8)) >> 8, where v = alpha * {r,g,b} is
-// a 16bit value.
+// We use: v / 255 = (v * 0x8081) >> 23, where v = alpha * {r,g,b} is a 16bit
+// value.
 #define APPLY_ALPHA(RGBX, SHUFFLE) do {                              \
   const __m128i argb0 = _mm_loadl_epi64((const __m128i*)&(RGBX));    \
   const __m128i argb1 = _mm_unpacklo_epi8(argb0, zero);              \
   const __m128i alpha0 = _mm_or_si128(argb1, kMask);                 \
@@ -161,18 +161,16 @@ static int ExtractAlpha(const uint8_t* argb, int argb_stride,
   const __m128i alpha2 = _mm_shufflehi_epi16(alpha1, SHUFFLE);       \
   /* alpha2 = [ff a0 a0 a0][ff a1 a1 a1] */                          \
   const __m128i A0 = _mm_mullo_epi16(alpha2, argb1);                 \
-  const __m128i A1 = _mm_srli_epi16(A0, 8);                          \
-  const __m128i A2 = _mm_add_epi16(A1, A0);                          \
-  const __m128i A3 = _mm_add_epi16(A2, one);                         \
-  const __m128i A4 = _mm_srli_epi16(A3, 8);                          \
-  const __m128i A5 = _mm_packus_epi16(A4, zero);                     \
-  _mm_storel_epi64((__m128i*)&(RGBX), A5);                           \
+  const __m128i A1 = _mm_mulhi_epu16(A0, kMult);                     \
+  const __m128i A2 = _mm_srli_epi16(A1, 7);                          \
+  const __m128i A3 = _mm_packus_epi16(A2, zero);                     \
+  _mm_storel_epi64((__m128i*)&(RGBX), A3);                           \
 } while (0)
 
 static void ApplyAlphaMultiply_SSE2(uint8_t* rgba, int alpha_first,
                                     int w, int h, int stride) {
   const __m128i zero = _mm_setzero_si128();
-  const __m128i one = _mm_set1_epi16(1);
+  const __m128i kMult = _mm_set1_epi16(0x8081u);
   const __m128i kMask = _mm_set_epi16(0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0);
   const int kSpan = 2;
   while (h-- > 0) {
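Per 16-bit lane, _mm_mulhi_epu16 returns the high half of the 32-bit unsigned
product, i.e. a free >> 16; the following _mm_srli_epi16(..., 7) supplies the
remaining >> 7 of the >> 23. A scalar sketch of one lane of the rewritten
macro (hypothetical helper, for illustration only):

#include <stdint.h>

// One 16-bit lane of the new APPLY_ALPHA: v = alpha * channel fits in
// 16 bits, _mm_mulhi_epu16(v, 0x8081) computes (v * 0x8081) >> 16, and the
// extra >> 7 completes (v * 0x8081) >> 23 == v / 255.
static uint8_t ApplyAlphaLane(uint8_t alpha, uint8_t channel) {
  const uint16_t v = (uint16_t)(alpha * channel);                 // mullo
  const uint16_t hi = (uint16_t)(((uint32_t)v * 0x8081u) >> 16);  // mulhi
  return (uint8_t)(hi >> 7);                                      // srli, pack
}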
@@ -214,10 +212,11 @@ static void MultARGBRow_SSE2(uint32_t* const ptr, int width, int inverse) {
     const int kSpan = 2;
     const __m128i zero = _mm_setzero_si128();
     const __m128i k128 = _mm_set1_epi16(128);
+    const __m128i kMult = _mm_set1_epi16(0x0101);
     const __m128i kMask = _mm_set_epi16(0, 0xff, 0, 0, 0, 0xff, 0, 0);
     for (x = 0; x + kSpan <= width; x += kSpan) {
       // To compute 'result = (int)(a * x / 255. + .5)', we use:
-      // tmp = a * v + 128, result = (tmp + (tmp >> 8)) >> 8
+      // tmp = a * v + 128, result = (tmp * 0x0101u) >> 16
       const __m128i A0 = _mm_loadl_epi64((const __m128i*)&ptr[x]);
       const __m128i A1 = _mm_unpacklo_epi8(A0, zero);
       const __m128i A2 = _mm_or_si128(A1, kMask);
@@ -226,10 +225,8 @@ static void MultARGBRow_SSE2(uint32_t* const ptr, int width, int inverse) {
       // here, A4 = [ff a0 a0 a0][ff a1 a1 a1]
       const __m128i A5 = _mm_mullo_epi16(A4, A1);
       const __m128i A6 = _mm_add_epi16(A5, k128);
-      const __m128i A7 = _mm_srli_epi16(A6, 8);
-      const __m128i A8 = _mm_add_epi16(A7, A6);
-      const __m128i A9 = _mm_srli_epi16(A8, 8);
-      const __m128i A10 = _mm_packus_epi16(A9, zero);
+      const __m128i A7 = _mm_mulhi_epu16(A6, kMult);
+      const __m128i A10 = _mm_packus_epi16(A7, zero);
       _mm_storel_epi64((__m128i*)&ptr[x], A10);
     }
   }
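The replacement works because tmp * 0x0101 == (tmp << 8) + tmp, so taking the
high 16 bits of the product reproduces the old (tmp + (tmp >> 8)) >> 8 for
every 16-bit tmp, in a single _mm_mulhi_epu16. A scalar sketch of one lane
(hypothetical helper, for illustration only):

#include <stdint.h>

// One 16-bit lane of the new MultARGBRow_SSE2 body: tmp = a * v + 128
// (mullo + add), then mulhi against 0x0101 yields (tmp * 0x0101) >> 16,
// i.e. the correctly rounded a * v / 255.
static uint8_t MultLane(uint8_t a, uint8_t v) {
  const uint16_t tmp = (uint16_t)(a * v + 128);
  return (uint8_t)(((uint32_t)tmp * 0x0101u) >> 16);
}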
@@ -241,23 +238,19 @@ static void MultRow_SSE2(uint8_t* const ptr, const uint8_t* const alpha,
                          int width, int inverse) {
   int x = 0;
   if (!inverse) {
-    const int kSpan = 8;
     const __m128i zero = _mm_setzero_si128();
-    const __m128i kRound = _mm_set1_epi16(1 << 7);
-    const int w2 = width & ~(kSpan - 1);
-    for (x = 0; x < w2; x += kSpan) {
+    const __m128i k128 = _mm_set1_epi16(128);
+    const __m128i kMult = _mm_set1_epi16(0x0101);
+    for (x = 0; x + 8 <= width; x += 8) {
       const __m128i v0 = _mm_loadl_epi64((__m128i*)&ptr[x]);
+      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[x]);
       const __m128i v1 = _mm_unpacklo_epi8(v0, zero);
-      const __m128i alpha0 = _mm_loadl_epi64((const __m128i*)&alpha[x]);
-      const __m128i alpha1 = _mm_unpacklo_epi8(alpha0, zero);
-      const __m128i alpha2 = _mm_unpacklo_epi8(alpha0, alpha0);
-      const __m128i v2 = _mm_mulhi_epu16(v1, alpha2);
-      const __m128i v3 = _mm_mullo_epi16(v1, alpha1);
-      const __m128i v4 = _mm_adds_epu16(v2, v3);
-      const __m128i v5 = _mm_adds_epu16(v4, kRound);
-      const __m128i v6 = _mm_srli_epi16(v5, 8);
-      const __m128i v7 = _mm_packus_epi16(v6, zero);
-      _mm_storel_epi64((__m128i*)&ptr[x], v7);
+      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
+      const __m128i v2 = _mm_mullo_epi16(v1, a1);
+      const __m128i v3 = _mm_add_epi16(v2, k128);
+      const __m128i v4 = _mm_mulhi_epu16(v3, kMult);
+      const __m128i v5 = _mm_packus_epi16(v4, zero);
+      _mm_storel_epi64((__m128i*)&ptr[x], v5);
     }
   }
   width -= x;