SSE2/SSE41: optimize SSE_16xN loops

After several trials at re-organizing the main loop and accumulation scheme,
this is apparently the fastest variant.

Removed the SSE41 version, which is no longer faster.
For some reason, the AVX variant seems to benefit most from the change.

Change-Id: Ib11ee18dbb69596cee1a3a289af8e2b4253de7b5
Pascal Massimino 2015-07-02 20:55:04 +02:00
parent 39216e59d9
commit 0ae2c2e4b2
2 changed files with 21 additions and 72 deletions
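For context, the function being tuned computes the sum of squared differences
between two 16-pixel-wide blocks, two rows per loop iteration. A scalar sketch
of its contract (not part of the patch; BPS is libwebp's fixed prediction-buffer
stride, assumed here to be 32 as defined in src/dsp/dsp.h):

#include <stdint.h>

#define BPS 32  // assumed: libwebp's prediction-buffer stride

// Scalar reference for SSE_16xN: sum of squared differences over a
// block 16 pixels wide and (2 * num_pairs) rows tall.
static int SSE_16xN_C(const uint8_t* a, const uint8_t* b, int num_pairs) {
  int sum = 0;
  int n, x;
  for (n = 0; n < 2 * num_pairs; ++n) {
    for (x = 0; x < 16; ++x) {
      const int diff = a[x] - b[x];
      sum += diff * diff;
    }
    a += BPS;
    b += BPS;
  }
  return sum;
}

SSE16x16 calls this with num_pairs == 8 (a 16x16 block) and SSE16x8 with
num_pairs == 4 (a 16x8 block), as seen in the wrappers below.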

src/dsp/enc_sse2.c

@@ -1025,38 +1025,37 @@ static void Intra16Preds(uint8_t* dst,
 //------------------------------------------------------------------------------
 // Metric

-static WEBP_INLINE __m128i SubtractAndAccumulate(const __m128i a,
-                                                 const __m128i b) {
+static WEBP_INLINE void SubtractAndAccumulate(const __m128i a, const __m128i b,
+                                              __m128i* const sum) {
+  // take abs(a-b) in 8b
+  const __m128i a_b = _mm_subs_epu8(a, b);
+  const __m128i b_a = _mm_subs_epu8(b, a);
+  const __m128i abs_a_b = _mm_or_si128(a_b, b_a);
+  // zero-extend to 16b
   const __m128i zero = _mm_setzero_si128();
-  // convert to 16b
-  const __m128i A0 = _mm_unpacklo_epi8(a, zero);
-  const __m128i B0 = _mm_unpacklo_epi8(b, zero);
-  const __m128i A1 = _mm_unpackhi_epi8(a, zero);
-  const __m128i B1 = _mm_unpackhi_epi8(b, zero);
-  // subtract
-  const __m128i C0 = _mm_subs_epi16(A0, B0);
-  const __m128i C1 = _mm_subs_epi16(A1, B1);
+  const __m128i C0 = _mm_unpacklo_epi8(abs_a_b, zero);
+  const __m128i C1 = _mm_unpackhi_epi8(abs_a_b, zero);
   // multiply with self
-  const __m128i D0 = _mm_madd_epi16(C0, C0);
-  const __m128i D1 = _mm_madd_epi16(C1, C1);
-  // accumulate
-  const __m128i sum = _mm_add_epi32(D0, D1);
-  return sum;
+  const __m128i sum1 = _mm_madd_epi16(C0, C0);
+  const __m128i sum2 = _mm_madd_epi16(C1, C1);
+  *sum = _mm_add_epi32(sum1, sum2);
 }

-static int SSE_16xN(const uint8_t* a, const uint8_t* b, int num_pairs) {
+static WEBP_INLINE int SSE_16xN(const uint8_t* a, const uint8_t* b,
+                                int num_pairs) {
   __m128i sum = _mm_setzero_si128();
   int32_t tmp[4];
-  while (num_pairs-- > 0) {
+  int i;
+  for (i = 0; i < num_pairs; ++i) {
     const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[BPS * 0]);
-    const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[BPS * 1]);
     const __m128i b0 = _mm_loadu_si128((const __m128i*)&b[BPS * 0]);
+    const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[BPS * 1]);
     const __m128i b1 = _mm_loadu_si128((const __m128i*)&b[BPS * 1]);
-    const __m128i sum1 = SubtractAndAccumulate(a0, b0);
-    const __m128i sum2 = SubtractAndAccumulate(a1, b1);
-    const __m128i sum12 = _mm_add_epi32(sum1, sum2);
-    sum = _mm_add_epi32(sum, sum12);
+    __m128i sum1, sum2;
+    SubtractAndAccumulate(a0, b0, &sum1);
+    SubtractAndAccumulate(a1, b1, &sum2);
+    sum = _mm_add_epi32(sum, _mm_add_epi32(sum1, sum2));
     a += 2 * BPS;
     b += 2 * BPS;
   }
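The reorganized kernel rests on two identities. First, for unsigned bytes,
|a-b| can be formed as (a-b, saturated) OR (b-a, saturated): one of the two
saturated subtractions is always zero, so the OR picks up the nonzero one.
Second, since |a-b| squared equals (a-b) squared, _mm_madd_epi16 on the
zero-extended absolute differences squares and pairwise-adds in a single
instruction. A standalone check of the first identity (a hypothetical test
harness, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>  // SSE2

int main(void) {
  uint8_t a[16], b[16], abs_ab[16];
  int i, ok = 1;
  for (i = 0; i < 16; ++i) {
    a[i] = (uint8_t)(i * 17);
    b[i] = (uint8_t)(255 - i * 9);
  }
  {
    const __m128i va = _mm_loadu_si128((const __m128i*)a);
    const __m128i vb = _mm_loadu_si128((const __m128i*)b);
    const __m128i a_b = _mm_subs_epu8(va, vb);  // max(a - b, 0), per byte
    const __m128i b_a = _mm_subs_epu8(vb, va);  // max(b - a, 0), per byte
    _mm_storeu_si128((__m128i*)abs_ab, _mm_or_si128(a_b, b_a));
  }
  for (i = 0; i < 16; ++i) {
    const int ref = (a[i] > b[i]) ? a[i] - b[i] : b[i] - a[i];
    if (abs_ab[i] != ref) ok = 0;
  }
  printf("abs trick %s\n", ok ? "matches" : "MISMATCH");
  return 0;
}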

src/dsp/enc_sse41.c

@@ -60,54 +60,6 @@ static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
   VP8SetHistogramData(distribution, histo);
 }

-//------------------------------------------------------------------------------
-// Metric
-
-static WEBP_INLINE __m128i SubtractAndAccumulate(const __m128i a,
-                                                 const __m128i b) {
-  // take abs(a-b) in 8b
-  const __m128i a_b = _mm_subs_epu8(a, b);
-  const __m128i b_a = _mm_subs_epu8(b, a);
-  const __m128i abs_a_b = _mm_or_si128(a_b, b_a);
-  // zero-extend to 16b
-  const __m128i C0 = _mm_cvtepu8_epi16(abs_a_b);
-  const __m128i C1 = _mm_cvtepu8_epi16(_mm_srli_si128(abs_a_b, 8));
-  // multiply with self
-  const __m128i D0 = _mm_madd_epi16(C0, C0);
-  const __m128i D1 = _mm_madd_epi16(C1, C1);
-  // accumulate
-  const __m128i sum = _mm_add_epi32(D0, D1);
-  return sum;
-}
-
-static int SSE_16xN(const uint8_t* a, const uint8_t* b, int num_pairs) {
-  __m128i sum = _mm_setzero_si128();
-  int32_t tmp[4];
-  while (num_pairs-- > 0) {
-    const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[BPS * 0]);
-    const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[BPS * 1]);
-    const __m128i b0 = _mm_loadu_si128((const __m128i*)&b[BPS * 0]);
-    const __m128i b1 = _mm_loadu_si128((const __m128i*)&b[BPS * 1]);
-    const __m128i sum1 = SubtractAndAccumulate(a0, b0);
-    const __m128i sum2 = SubtractAndAccumulate(a1, b1);
-    const __m128i sum12 = _mm_add_epi32(sum1, sum2);
-    sum = _mm_add_epi32(sum, sum12);
-    a += 2 * BPS;
-    b += 2 * BPS;
-  }
-  _mm_storeu_si128((__m128i*)tmp, sum);
-  return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
-}
-
-static int SSE16x16(const uint8_t* a, const uint8_t* b) {
-  return SSE_16xN(a, b, 8);
-}
-
-static int SSE16x8(const uint8_t* a, const uint8_t* b) {
-  return SSE_16xN(a, b, 4);
-}
-
 //------------------------------------------------------------------------------
 // Texture distortion
 //
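The deleted SSE_16xN ends by storing the four 32-bit lanes of the accumulator
to tmp[] and summing them in scalar code; the SSE2 file keeps that same
reduction. The sum can also stay in registers; a sketch of that alternative
(an illustration, not something this patch does):

#include <emmintrin.h>  // SSE2

// Horizontal sum of the four 32-bit lanes of 'v', kept in-register.
static int HorizontalAdd32(__m128i v) {
  const __m128i hi = _mm_unpackhi_epi64(v, v);          // lanes {2,3,2,3}
  const __m128i sum2 = _mm_add_epi32(v, hi);            // lane0=v0+v2, lane1=v1+v3
  const __m128i swap = _mm_shuffle_epi32(sum2, 1);      // bring lane 1 to lane 0
  return _mm_cvtsi128_si32(_mm_add_epi32(sum2, swap));  // v0+v1+v2+v3
}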
@@ -412,8 +364,6 @@ WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
   VP8EncQuantizeBlock = QuantizeBlock;
   VP8EncQuantize2Blocks = Quantize2Blocks;
   VP8EncQuantizeBlockWHT = QuantizeBlockWHT;
-  VP8SSE16x16 = SSE16x16;
-  VP8SSE16x8 = SSE16x8;
   VP8TDisto4x4 = Disto4x4;
   VP8TDisto16x16 = Disto16x16;
 }