diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index e736877df..4a81122da 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -2887,7 +2887,7 @@ static uint32_t write_tiles(VP10_COMP *const cpi,
     }
   }
 #endif // CONFIG_EXT_TILE
-  return total_size;
+  return (uint32_t)total_size;
 }
 
 static void write_render_size(const VP10_COMMON *cm,
@@ -3436,7 +3436,7 @@ void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dst, size_t *size) {
   // Size of compressed header
   vpx_wb_write_literal(&wb, 0, 16);
 
-  uncompressed_header_size = vpx_wb_bytes_written(&wb);
+  uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
   data += uncompressed_header_size;
 
   vpx_clear_system_state();
diff --git a/vpx_dsp/variance.c b/vpx_dsp/variance.c
index e6be1dd73..cc99d256b 100644
--- a/vpx_dsp/variance.c
+++ b/vpx_dsp/variance.c
@@ -719,8 +719,8 @@ void masked_variance(const uint8_t *a, int a_stride,
     m += m_stride;
   }
   sum64 = (sum64 >= 0) ? sum64 : -sum64;
-  *sum = ROUND_POWER_OF_TWO(sum64, 6);
-  *sse = ROUND_POWER_OF_TWO(sse64, 12);
+  *sum = (int)ROUND_POWER_OF_TWO(sum64, 6);
+  *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 12);
 }
 
 #define MASK_VAR(W, H) \
diff --git a/vpx_dsp/x86/highbd_variance_sse4.c b/vpx_dsp/x86/highbd_variance_sse4.c
index 5c1dfe4dc..54fc609fb 100644
--- a/vpx_dsp/x86/highbd_variance_sse4.c
+++ b/vpx_dsp/x86/highbd_variance_sse4.c
@@ -76,7 +76,7 @@ uint32_t vpx_highbd_8_variance4x4_sse4_1(const uint8_t *a,
   variance4x4_64_sse4_1(a, a_stride, b, b_stride, &local_sse, &sum);
   *sse = (uint32_t)local_sse;
 
-  return *sse - ((sum * sum) >> 4);
+  return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
 uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
@@ -91,7 +91,7 @@ uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
   *sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 4);
   sum = ROUND_POWER_OF_TWO(sum, 2);
 
-  return *sse - ((sum * sum) >> 4);
+  return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
 uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
@@ -106,7 +106,7 @@ uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
   *sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 8);
   sum = ROUND_POWER_OF_TWO(sum, 4);
 
-  return *sse - ((sum * sum) >> 4);
+  return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
 // Sub-pixel
diff --git a/vpx_dsp/x86/masked_variance_intrin_ssse3.c b/vpx_dsp/x86/masked_variance_intrin_ssse3.c
index ca4f6fcff..47e2c32d8 100644
--- a/vpx_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/vpx_dsp/x86/masked_variance_intrin_ssse3.c
@@ -54,9 +54,9 @@ static INLINE int64_t hsum_epi32_si64(__m128i v_d) {
 }
 #endif // CONFIG_VP9_HIGHBITDEPTH
 
-static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
-                                       unsigned int* sse,
-                                       const int w, const int h) {
+static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
+                                            uint32_t* sse,
+                                            const int w, const int h) {
   int64_t sum64;
   uint64_t sse64;
 
@@ -71,9 +71,9 @@ static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
   sse64 = ROUND_POWER_OF_TWO(sse64, 12);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute the variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 /*****************************************************************************
@@ -497,9 +497,9 @@ static INLINE unsigned int highbd_masked_variancewxh_ssse3(
                         &sum64, &sse64);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute and return variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
@@ -523,9 +523,9 @@ static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
   sse64 = ROUND_POWER_OF_TWO(sse64, 4);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute and return variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
@@ -548,9 +548,9 @@ static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
   sse64 = ROUND_POWER_OF_TWO(sse64, 8);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute and return variance
-  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 #define HIGHBD_MASKED_VARWXH(W, H) \
@@ -1460,10 +1460,11 @@ static void highbd_sum_and_sse(const __m128i v_a_w, const __m128i v_b_w,
   *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
 }
 
-static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
-                                                 __m128i v_sse_q,
-                                                 unsigned int* sse,
-                                                 const int w, const int h) {
+static INLINE uint32_t highbd_10_calc_masked_variance(__m128i v_sum_d,
+                                                      __m128i v_sse_q,
+                                                      uint32_t* sse,
+                                                      const int w,
+                                                      const int h) {
   int64_t sum64;
   uint64_t sse64;
 
@@ -1482,14 +1483,15 @@ static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
   sse64 = ROUND_POWER_OF_TWO(sse64, 4);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute the variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
-static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
-                                                 __m128i v_sse_q,
-                                                 unsigned int* sse,
-                                                 const int w, const int h) {
+static INLINE uint32_t highbd_12_calc_masked_variance(__m128i v_sum_d,
+                                                      __m128i v_sse_q,
+                                                      uint32_t* sse,
+                                                      const int w,
+                                                      const int h) {
   int64_t sum64;
   uint64_t sse64;
 
@@ -1508,9 +1510,9 @@ static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
   sse64 = ROUND_POWER_OF_TWO(sse64, 8);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute the variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
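
Note (not part of the patch): a minimal standalone sketch of the implicit-conversion
warning these casts silence, using hypothetical names. In C, the usual arithmetic
conversions promote the subtraction operand to int64_t, so returning the result from
a uint32_t-returning function narrows implicitly; compilers flag this (e.g. MSVC
C4244, clang/gcc with -Wconversion) unless the truncation is made explicit:

    #include <stdint.h>

    /* Reduced example mirroring the variance returns above;
       'variance_like' and its parameters are illustrative only. */
    static uint32_t variance_like(uint32_t sse, int64_t sum, int w, int h) {
      /* (sum * sum) / (w * h) has type int64_t; without the cast, the
         uint32_t - int64_t subtraction yields int64_t, which then narrows
         implicitly on return. The explicit cast documents the truncation. */
      return sse - (uint32_t)((sum * sum) / (w * h));
    }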