Resolve -Wshorten-64-to-32 in highbd variance.

For 8-bit the subtrahend is small enough to fit into uint32_t.
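
A quick bound check makes this concrete (a sketch, assuming the largest
block size handled by these functions is 64x64): for 8-bit input the sum
of differences satisfies |sum| <= 255 * 64 * 64, so sum * sum / (W * H)
is at most 255 * 255 * 64 * 64 = 266,342,400, well under UINT32_MAX.

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    /* Worst case for an 8-bit 64x64 block: every pixel diff is 255. */
    const int64_t max_abs_sum = 255LL * 64 * 64;
    const int64_t max_subtrahend = max_abs_sum * max_abs_sum / (64 * 64);
    assert(max_subtrahend == 266342400);   /* = 255 * 255 * 64 * 64 */
    assert(max_subtrahend <= UINT32_MAX);  /* the uint32_t cast is lossless */
    return 0;
  }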

This is the same change that was made in:
c0241664a Resolve -Wshorten-64-to-32 in variance.

For 10/12-bit, apply the approach from:
63a37d16f Prevent negative variance
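
The clamp from that change computes the variance in 64 bits and returns 0
when rounding pushes the subtrahend above sse, as the diff below does. A
minimal sketch of the pattern (the function name and parameters here are
illustrative, not from the source):

  #include <stdint.h>

  static uint32_t clamped_variance(uint32_t sse, int64_t se, int wlog2,
                                   int hlog2) {
    /* Compute in 64 bits so a negative intermediate is representable,
     * then clamp to zero before narrowing back to uint32_t. */
    const int64_t var = (int64_t)sse - ((se * se) >> (wlog2 + hlog2));
    return (var >= 0) ? (uint32_t)var : 0;
  }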

Change-Id: Iab35e3f3f269035e17c711bd6cc01272c3137e1d
Author: James Zern
Date: 2017-04-04 20:37:17 -07:00
Parent: fb60204d4c
Commit: 47b9a09120

2 changed files with 16 additions and 8 deletions

vpx_dsp/variance.c:

@@ -294,7 +294,7 @@ static void highbd_12_variance(const uint8_t *a8, int a_stride,
                                        uint32_t *sse) { \
     int sum; \
     highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
-    return *sse - (((int64_t)sum * sum) / (W * H)); \
+    return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
   } \
 \
   uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \

vpx_dsp/x86/highbd_variance_sse2.c:

@@ -135,7 +135,7 @@ HIGH_GET_VAR(8);
     highbd_8_variance_sse2( \
         src, src_stride, ref, ref_stride, w, h, sse, &sum, \
         vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
-    return *sse - (((int64_t)sum * sum) >> shift); \
+    return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
   } \
 \
   uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \
@@ -293,12 +293,13 @@ DECLS(sse2);
       } \
     } \
     *sse_ptr = sse; \
-    return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+    return sse - (uint32_t)((cast se * se) >> (wlog2 + hlog2)); \
   } \
 \
   uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
+    int64_t var; \
     uint32_t sse; \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
     uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
@@ -328,7 +329,8 @@ DECLS(sse2);
     se = ROUND_POWER_OF_TWO(se, 2); \
     sse = ROUND_POWER_OF_TWO(sse, 4); \
     *sse_ptr = sse; \
-    return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+    var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+    return (var >= 0) ? (uint32_t)var : 0; \
   } \
 \
   uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \
@@ -337,6 +339,7 @@ DECLS(sse2);
     int start_row; \
     uint32_t sse; \
     int se = 0; \
+    int64_t var; \
     uint64_t long_sse = 0; \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
     uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
@@ -375,7 +378,8 @@ DECLS(sse2);
     se = ROUND_POWER_OF_TWO(se, 4); \
     sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \
     *sse_ptr = sse; \
-    return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+    var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+    return (var >= 0) ? (uint32_t)var : 0; \
   }
 
 #define FNS(opt) \
@@ -444,13 +448,14 @@ DECLS(sse2);
       } \
     } \
     *sse_ptr = sse; \
-    return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+    return sse - (uint32_t)((cast se * se) >> (wlog2 + hlog2)); \
   } \
 \
   uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \
       const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
       const uint8_t *sec8) { \
+    int64_t var; \
     uint32_t sse; \
     uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
     uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
@@ -481,7 +486,8 @@ DECLS(sse2);
     se = ROUND_POWER_OF_TWO(se, 2); \
     sse = ROUND_POWER_OF_TWO(sse, 4); \
     *sse_ptr = sse; \
-    return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+    var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+    return (var >= 0) ? (uint32_t)var : 0; \
   } \
 \
   uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \
@@ -489,6 +495,7 @@ DECLS(sse2);
       const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
       const uint8_t *sec8) { \
     int start_row; \
+    int64_t var; \
     uint32_t sse; \
     int se = 0; \
     uint64_t long_sse = 0; \
@@ -530,7 +537,8 @@ DECLS(sse2);
     se = ROUND_POWER_OF_TWO(se, 4); \
     sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \
     *sse_ptr = sse; \
-    return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+    var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+    return (var >= 0) ? (uint32_t)var : 0; \
   }
 
 #define FNS(opt1) \