/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_sadmxn.h"
#include "vp9/common/vp9_subpelvar.h"
#include <limits.h>

const uint8_t vp9_mbsplit_offset[4][16] = {
  { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
};
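
// If high-precision (1/8-pel) motion vectors are not in use for this vector,
// round any odd row/col component toward zero so the vector is representable
// at the lower precision.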
static void lower_mv_precision(int_mv *mv, int usehp) {
  if (!usehp || !vp9_use_nmv_hp(&mv->as_mv)) {
    if (mv->as_mv.row & 1)
      mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
    if (mv->as_mv.col & 1)
      mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
  }
}
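
// Copy the four mode-context probabilities for the given context into p[]
// and return p.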
vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc,
                           vp9_prob p[4], const int context) {
  p[0] = pc->fc.vp9_mode_contexts[context][0];
  p[1] = pc->fc.vp9_mode_contexts[context][1];
  p[2] = pc->fc.vp9_mode_contexts[context][2];
  p[3] = pc->fc.vp9_mode_contexts[context][3];
  return p;
}
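
// SP() keeps the fractional (sub-pel) part of an eighth-pel MV component and
// doubles it, giving the sub-pel offset index passed to the sub-pixel
// variance helpers below.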
#define SP(x) (((x) & 7) << 1)
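
// SAD and variance helpers for the thin strips of border pixels (2 or 3 rows
// or columns) used when scoring candidate reference MVs against the
// reconstructed border.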
unsigned int vp9_sad3x16_c(const uint8_t *src_ptr,
                           int src_stride,
                           const uint8_t *ref_ptr,
                           int ref_stride) {
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 3, 16);
}

unsigned int vp9_sad16x3_c(const uint8_t *src_ptr,
                           int src_stride,
                           const uint8_t *ref_ptr,
                           int ref_stride) {
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 3);
}

unsigned int vp9_variance2x16_c(const uint8_t *src_ptr,
                                int source_stride,
                                const uint8_t *ref_ptr,
                                int recon_stride,
                                unsigned int *sse) {
  int sum;
  variance(src_ptr, source_stride, ref_ptr, recon_stride, 2, 16, sse, &sum);
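  // 2 * 16 = 32 pixels, so sum^2 / 32 (the >> 5) is the mean correction term.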
  return (*sse - (((unsigned int)sum * sum) >> 5));
}

unsigned int vp9_variance16x2_c(const uint8_t *src_ptr,
                                int source_stride,
                                const uint8_t *ref_ptr,
                                int recon_stride,
                                unsigned int *sse) {
  int sum;
  variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 2, sse, &sum);
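  // As above: 16 * 2 = 32 pixels, hence the >> 5 mean correction.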
  return (*sse - (((unsigned int)sum * sum) >> 5));
}

unsigned int vp9_sub_pixel_variance16x2_c(const uint8_t *src_ptr,
                                          int src_pixels_per_line,
                                          int xoffset,
                                          int yoffset,
                                          const uint8_t *dst_ptr,
                                          int dst_pixels_per_line,
                                          unsigned int *sse) {
  uint16_t FData3[16 * 3];  // Temp data buffer used in filtering
  uint8_t temp2[2 * 16];
  const int16_t *HFilter, *VFilter;

  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
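
  // Two-pass bilinear interpolation: filter horizontally into a 3-row by
  // 16-column intermediate buffer, then vertically down to the 2x16 strip
  // whose variance against dst_ptr is returned.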
  var_filter_block2d_bil_first_pass(src_ptr, FData3,
                                    src_pixels_per_line, 1, 3, 16, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 2, 16, VFilter);

  return vp9_variance16x2_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}

unsigned int vp9_sub_pixel_variance2x16_c(const uint8_t *src_ptr,
                                          int src_pixels_per_line,
                                          int xoffset,
                                          int yoffset,
                                          const uint8_t *dst_ptr,
                                          int dst_pixels_per_line,
                                          unsigned int *sse) {
  uint16_t FData3[2 * 17];  // Temp data buffer used in filtering
  uint8_t temp2[2 * 16];
  const int16_t *HFilter, *VFilter;

  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
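
  // Same two-pass scheme for the tall strip: a 17-row by 2-column horizontal
  // pass into the intermediate buffer, then a vertical pass down to the
  // 2x16 column strip.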
  var_filter_block2d_bil_first_pass(src_ptr, FData3,
                                    src_pixels_per_line, 1, 17, 2, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 2, 2, 16, 2, VFilter);

  return vp9_variance2x16_c(temp2, 2, dst_ptr, dst_pixels_per_line, sse);
}

/* Check a list of candidate motion vectors by scoring each against a few
 * rows of pixels above and a few columns of pixels to the left of the block,
 * and select the one with the best score to use as the reference motion
 * vector.
 */
void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
                           uint8_t *ref_y_buffer,
                           int ref_y_stride,
                           int_mv *mvlist,
                           int_mv *nearest,
                           int_mv *near) {
  int i, j;
  uint8_t *above_src;
  uint8_t *above_ref;
#if !CONFIG_ABOVESPREFMV
  uint8_t *left_src;
  uint8_t *left_ref;
#endif
  unsigned int score;
  unsigned int sse;
  unsigned int ref_scores[MAX_MV_REF_CANDIDATES] = {0};
  int_mv sorted_mvs[MAX_MV_REF_CANDIDATES];
  int zero_seen = FALSE;

  if (ref_y_buffer) {

    // Default all to 0,0 if nothing else available
    nearest->as_int = near->as_int = 0;
    vpx_memset(sorted_mvs, 0, sizeof(sorted_mvs));
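
    // Work from the two rows of reconstructed pixels immediately above the
    // block and, when CONFIG_ABOVESPREFMV is not set, the two columns
    // immediately to its left, in both the current and the reference frame.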
    above_src = xd->dst.y_buffer - xd->dst.y_stride * 2;
    above_ref = ref_y_buffer - ref_y_stride * 2;
#if CONFIG_ABOVESPREFMV
    above_src -= 4;
    above_ref -= 4;
#else
    left_src = xd->dst.y_buffer - 2;
    left_ref = ref_y_buffer - 2;
#endif

    // Limit search to the predicted best few candidates
    for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
      int_mv this_mv;
      int offset = 0;
      int row_offset, col_offset;

      this_mv.as_int = mvlist[i].as_int;

      // If we see a 0,0 vector for a second time we have reached the end of
      // the list of valid candidate vectors.
      if (!this_mv.as_int && zero_seen)
        break;

      zero_seen = zero_seen || !this_mv.as_int;
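
      // Clamp the candidate vector so that the border regions sampled below
      // stay within the frame.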
#if !CONFIG_ABOVESPREFMV
      clamp_mv(&this_mv,
               xd->mb_to_left_edge - LEFT_TOP_MARGIN + 24,
               xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
               xd->mb_to_top_edge - LEFT_TOP_MARGIN + 24,
               xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
#else
      clamp_mv(&this_mv,
               xd->mb_to_left_edge - LEFT_TOP_MARGIN + 32,
               xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
               xd->mb_to_top_edge - LEFT_TOP_MARGIN + 24,
               xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
#endif

      row_offset = this_mv.as_mv.row >> 3;
      col_offset = this_mv.as_mv.col >> 3;
      offset = ref_y_stride * row_offset + col_offset;
      score = 0;
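
      // Score the candidate: accumulate the sub-pel variance between the
      // border rows above the block in the current frame and the
      // corresponding rows of the reference frame displaced by this MV.
      // Larger superblocks accumulate over additional 16-wide segments.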
#if !CONFIG_ABOVESPREFMV
      if (xd->up_available) {
#else
      if (xd->up_available && xd->left_available) {
#endif
        vp9_sub_pixel_variance16x2(above_ref + offset, ref_y_stride,
                                   SP(this_mv.as_mv.col),
                                   SP(this_mv.as_mv.row),
                                   above_src, xd->dst.y_stride, &sse);
        score += sse;
        if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
          vp9_sub_pixel_variance16x2(above_ref + offset + 16,
                                     ref_y_stride,
                                     SP(this_mv.as_mv.col),
                                     SP(this_mv.as_mv.row),
                                     above_src + 16, xd->dst.y_stride, &sse);
          score += sse;
        }
        if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
          vp9_sub_pixel_variance16x2(above_ref + offset + 32,
                                     ref_y_stride,
                                     SP(this_mv.as_mv.col),
                                     SP(this_mv.as_mv.row),
                                     above_src + 32, xd->dst.y_stride, &sse);
          score += sse;
          vp9_sub_pixel_variance16x2(above_ref + offset + 48,
                                     ref_y_stride,
                                     SP(this_mv.as_mv.col),
                                     SP(this_mv.as_mv.row),
                                     above_src + 48, xd->dst.y_stride, &sse);
          score += sse;
        }
      }
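
      // Do the same for the two columns of pixels to the left of the block,
      // when a left neighbour is available.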
#if !CONFIG_ABOVESPREFMV
      if (xd->left_available) {
        vp9_sub_pixel_variance2x16_c(left_ref + offset, ref_y_stride,
                                     SP(this_mv.as_mv.col),
                                     SP(this_mv.as_mv.row),
                                     left_src, xd->dst.y_stride, &sse);
        score += sse;
        if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
          vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 16,
                                       ref_y_stride,
                                       SP(this_mv.as_mv.col),
                                       SP(this_mv.as_mv.row),
                                       left_src + xd->dst.y_stride * 16,
                                       xd->dst.y_stride, &sse);
          score += sse;
        }
        if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
          vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 32,
                                       ref_y_stride,
                                       SP(this_mv.as_mv.col),
                                       SP(this_mv.as_mv.row),
                                       left_src + xd->dst.y_stride * 32,
                                       xd->dst.y_stride, &sse);
          score += sse;
          vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 48,
                                       ref_y_stride,
                                       SP(this_mv.as_mv.col),
                                       SP(this_mv.as_mv.row),
                                       left_src + xd->dst.y_stride * 48,
                                       xd->dst.y_stride, &sse);
          score += sse;
        }
      }
#endif

      // Add the entry to our list and then resort the list on score.
      ref_scores[i] = score;
      sorted_mvs[i].as_int = this_mv.as_int;
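
      // Single pass of an insertion sort: move the new entry toward the
      // front of the list while its score is lower than its neighbour's.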
      j = i;
      while (j > 0) {
        if (ref_scores[j] < ref_scores[j-1]) {
          ref_scores[j] = ref_scores[j-1];
          sorted_mvs[j].as_int = sorted_mvs[j-1].as_int;
          ref_scores[j-1] = score;
          sorted_mvs[j-1].as_int = this_mv.as_int;
          j--;
        } else {
          break;
        }
      }
    }
  } else {
    vpx_memcpy(sorted_mvs, mvlist, sizeof(sorted_mvs));
  }

  // Make sure all the candidates are properly clamped and reduced to the
  // supported motion vector precision.
  for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
    lower_mv_precision(&sorted_mvs[i], xd->allow_high_precision_mv);
    clamp_mv2(&sorted_mvs[i], xd);
  }

  // Nearest may be a 0,0 or a non-zero vector and now matches the chosen
  // "best reference". This has advantages when it is used as part of a
  // compound predictor, as it means a non-zero vector can be paired with a
  // 0,0 vector in this mode. The near vector is still forced to be a
  // non-zero candidate if one is available.
  nearest->as_int = sorted_mvs[0].as_int;
  if (sorted_mvs[1].as_int) {
    near->as_int = sorted_mvs[1].as_int;
  } else {
    near->as_int = sorted_mvs[2].as_int;
  }

  // Copy back the re-ordered mv list
  vpx_memcpy(mvlist, sorted_mvs, sizeof(sorted_mvs));
}