variance_avx2: sync variance functions with c-code
add missing int64 -> uint32 cast; quiets -Wshorten-64-to-32 warnings

Change-Id: I4850b36e18dc8b399108342be4bfe0b684aefb78
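For context, a minimal standalone sketch of the pattern being fixed (hypothetical values, not the actual libvpx sources): the variance helpers return unsigned int, but ((int64_t)sum * sum) >> shift is an int64_t, so without a cast the result is narrowed implicitly on return, which clang flags as -Wshorten-64-to-32; casting the shifted term to uint32_t makes the truncation explicit. The shift is log2 of the block area (e.g. >> 9 for 32x16, since 32 * 16 == 512 == 1 << 9).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the variance pattern touched by this change:
 * variance = SSE - (sum * sum) / (w * h), with the division done as a
 * right shift (>> 9 for a 32x16 block). */
static unsigned int variance_32x16_example(unsigned int sse, int sum) {
  /* (int64_t)sum * sum avoids 32-bit overflow of the squared sum.
   * Without the (uint32_t) cast the int64_t result would be narrowed
   * implicitly on return, which -Wshorten-64-to-32 warns about. */
  return sse - (uint32_t)(((int64_t)sum * sum) >> 9);
}

int main(void) {
  /* Made-up numbers, purely to exercise the expression. */
  printf("%u\n", variance_32x16_example(123456u, 789));
  return 0;
}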
@@ -61,7 +61,7 @@ unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride,
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
                 vpx_get32x32var_avx2, 32);
-  return *sse - (((int64_t)sum * sum) >> 9);
+  return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
 }
 
 unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
@@ -70,7 +70,7 @@ unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
                 vpx_get32x32var_avx2, 32);
-  return *sse - (((int64_t)sum * sum) >> 10);
+  return *sse - (uint32_t)(((int64_t)sum * sum) >> 10);
 }
 
 unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
@@ -79,7 +79,7 @@ unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
                 vpx_get32x32var_avx2, 32);
-  return *sse - (((int64_t)sum * sum) >> 12);
+  return *sse - (uint32_t)(((int64_t)sum * sum) >> 12);
 }
 
 unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
@@ -88,7 +88,7 @@ unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
   int sum;
   variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
                 vpx_get32x32var_avx2, 32);
-  return *sse - (((int64_t)sum * sum) >> 11);
+  return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
 }
 
 unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
@@ -115,7 +115,7 @@ unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src,
                                       dst + 32, dst_stride, 64, &sse2);
   const int se = se1 + se2;
   *sse = sse1 + sse2;
-  return *sse - (((int64_t)se * se) >> 12);
+  return *sse - (uint32_t)(((int64_t)se * se) >> 12);
 }
 
 unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
@@ -125,7 +125,7 @@ unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
                                               unsigned int *sse) {
   const int se = vpx_sub_pixel_variance32xh_avx2(
       src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse);
-  return *sse - (((int64_t)se * se) >> 10);
+  return *sse - (uint32_t)(((int64_t)se * se) >> 10);
 }
 
 unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
@@ -142,7 +142,7 @@ unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
 
   *sse = sse1 + sse2;
 
-  return *sse - (((int64_t)se * se) >> 12);
+  return *sse - (uint32_t)(((int64_t)se * se) >> 12);
 }
 
 unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
@@ -151,5 +151,5 @@ unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
   // Process 32 elements in parallel.
   const int se = vpx_sub_pixel_avg_variance32xh_avx2(
       src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse);
-  return *sse - (((int64_t)se * se) >> 10);
+  return *sse - (uint32_t)(((int64_t)se * se) >> 10);
 }