Merging in the sixteenth-subpel UV experiment

Merges the CONFIG_SIXTEENTH_SUBPEL_UV experiment into the main code path, to make it easier to run tests on filter precision, vectorized implementations, etc. Also removes an experimental filter.

Change-Id: I1e8706bb6d4fc469815123939e9c6e0b5ae945cd
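For context: the experiment doubles the number of chroma interpolation positions from 8 (eighth-pel) to 16 (sixteenth-pel). Because chroma is subsampled 2:1, an eighth-pel luma motion vector lands on sixteenth-pel chroma positions. A minimal sketch of the index mapping the merged code hard-codes below (helper names are illustrative, not from the source):

/* Illustrative sketch: the interpolation tables now always have 16
 * rows, so an eighth-pel luma MV component is doubled to index them,
 * while the sixteenth-pel chroma offset MV indexes them directly. */
static int luma_subpel_index(int mv_component) {
  return (mv_component & 7) << 1;   /* eighth-pel -> rows 0, 2, ..., 14 */
}

static int chroma_subpel_index(int omv_component) {
  return omv_component & 15;        /* sixteenth-pel -> rows 0 .. 15 */
}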
parent b04e87c6ab
commit 7d0656537b

configure (vendored)
@@ -217,7 +217,6 @@ HAVE_LIST="
 EXPERIMENT_LIST="
     csm
     featureupdates
-    sixteenth_subpel_uv
     comp_intra_pred
     superblocks
     pred_filter
@@ -14,7 +14,6 @@
 #include "vpx_ports/mem.h"

 DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[SUBPEL_SHIFTS][2]) = {
-#if SUBPEL_SHIFTS==16
   { 128, 0 },
   { 120, 8 },
   { 112, 16 },
@@ -31,22 +30,11 @@ DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[SUBPEL_SHIFTS][2]) = {
   { 24, 104 },
   { 16, 112 },
   { 8, 120 }
-#else
-  { 128, 0 },
-  { 112, 16 },
-  { 96, 32 },
-  { 80, 48 },
-  { 64, 64 },
-  { 48, 80 },
-  { 32, 96 },
-  { 16, 112 }
-#endif /* SUBPEL_SHIFTS==16 */
 };

 #define FILTER_ALPHA 0
 #define FILTER_ALPHA_SHARP 1
 DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
-#if SUBPEL_SHIFTS==16
 #if FILTER_ALPHA == 0
   /* Lagrangian interpolation filter */
   { 0, 0, 0, 128, 0, 0, 0, 0},
@@ -90,32 +78,9 @@ DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
   { 0, 2, -6, 18, 122, -10, 2, 0},
   { 0, 1, -3, 8, 126, -5, 1, 0}
 #endif /* FILTER_ALPHA */
-#else /* SUBPEL_SHIFTS==16 */
-#if FILTER_ALPHA == 0
-  { 0, 0, 0, 128, 0, 0, 0, 0},
-  { -1, 3, -10, 122, 18, -6, 2, 0},
-  { -1, 4, -16, 112, 37, -11, 4, -1},
-  { -1, 5, -19, 97, 58, -16, 5, -1},
-  { -1, 6, -19, 78, 78, -19, 6, -1},
-  { -1, 5, -16, 58, 97, -19, 5, -1},
-  { -1, 4, -11, 37, 112, -16, 4, -1},
-  { 0, 2, -6, 18, 122, -10, 3, -1},
-#elif FILTER_ALPHA == 50
-  /* alpha = 0.50 */
-  { 0, 0, 0, 128, 0, 0, 0, 0},
-  { 0, 2, -10, 122, 18, -6, 2, 0},
-  { -1, 4, -16, 112, 37, -11, 3, 0},
-  { -1, 5, -18, 96, 58, -16, 5, -1},
-  { -1, 5, -18, 78, 78, -18, 5, -1},
-  { -1, 5, -16, 58, 96, -18, 5, -1},
-  { 0, 3, -11, 37, 112, -16, 4, -1},
-  { 0, 2, -6, 18, 122, -10, 2, 0}
-#endif /* FILTER_ALPHA */
-#endif /* SUBPEL_SHIFTS==16 */
 };

 DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
-#if SUBPEL_SHIFTS==16
 #if FILTER_ALPHA_SHARP == 1
   /* dct based filter */
   {0, 0, 0, 128, 0, 0, 0, 0},
@@ -152,62 +117,10 @@ DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
   {-2, 5, -10, 28, 119, -16, 6, -2},
   {-1, 3, -7, 18, 123, -11, 4, -1},
   {-1, 2, -3, 9, 126, -6, 2, -1}
-#elif FILTER_ALPHA_SHARP == 65
-  /* alpha = 0.65 */
-  { 0, 0, 0, 128, 0, 0, 0, 0},
-  { 0, 2, -6, 126, 8, -3, 1, 0},
-  { -1, 3, -10, 123, 18, -6, 2, -1},
-  { -1, 5, -14, 118, 27, -10, 4, -1},
-  { -1, 5, -17, 112, 38, -13, 5, -1},
-  { -2, 6, -19, 106, 48, -15, 5, -1},
-  { -2, 7, -21, 98, 59, -17, 6, -2},
-  { -2, 7, -21, 89, 69, -19, 7, -2},
-  { -2, 7, -20, 79, 79, -20, 7, -2},
-  { -2, 7, -19, 69, 89, -21, 7, -2},
-  { -2, 6, -17, 59, 98, -21, 7, -2},
-  { -1, 5, -15, 48, 106, -19, 6, -2},
-  { -1, 5, -13, 38, 112, -17, 5, -1},
-  { -1, 4, -10, 27, 118, -14, 5, -1},
-  { -1, 2, -6, 18, 123, -10, 3, -1},
-  { 0, 1, -3, 8, 126, -6, 2, 0}
 #endif /* FILTER_ALPHA_SHARP */
-#else /* SUBPEL_SHIFTS==16 */
-#if FILTER_ALPHA_SHARP == 1
-  /* dct based filter */
-  {0, 0, 0, 128, 0, 0, 0, 0},
-  {-2, 5, -13, 125, 17, -6, 3, -1},
-  {-4, 9, -20, 115, 37, -13, 6, -2},
-  {-4, 10, -24, 100, 59, -19, 9, -3},
-  {-4, 10, -23, 81, 81, -23, 10, -4},
-  {-3, 9, -19, 59, 100, -24, 10, -4},
-  {-2, 6, -13, 37, 115, -20, 9, -4},
-  {-1, 3, -6, 17, 125, -13, 5, -2}
-#elif FILTER_ALPHA_SHARP == 75
-  /* alpha = 0.75 */
-  {0, 0, 0, 128, 0, 0, 0, 0},
-  {-1, 4, -11, 123, 18, -7, 3, -1},
-  {-2, 7, -19, 113, 38, -13, 6, -2},
-  {-3, 9, -22, 99, 59, -19, 8, -3},
-  {-3, 9, -22, 80, 80, -22, 9, -3},
-  {-3, 8, -19, 59, 99, -22, 9, -3},
-  {-2, 6, -13, 38, 113, -19, 7, -2},
-  {-1, 3, -7, 18, 123, -11, 4, -1}
-#elif FILTER_ALPHA_SHARP == 65
-  /* alpha = 0.65 */
-  { 0, 0, 0, 128, 0, 0, 0, 0},
-  { -1, 3, -10, 123, 18, -6, 2, -1},
-  { -1, 5, -17, 112, 38, -13, 5, -1},
-  { -2, 7, -21, 98, 59, -17, 6, -2},
-  { -2, 7, -20, 79, 79, -20, 7, -2},
-  { -2, 6, -17, 59, 98, -21, 7, -2},
-  { -1, 5, -13, 38, 112, -17, 5, -1},
-  { -1, 2, -6, 18, 123, -10, 3, -1}
-#endif /* FILTER_ALPHA_SHARP */
-#endif /* SUBPEL_SHIFTS==16 */
 };

 DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6]) = {
-#if SUBPEL_SHIFTS==16
   {0, 0, 128, 0, 0, 0},
   {1, -5, 125, 8, -2, 1},
   {1, -8, 122, 17, -5, 1},
@@ -224,16 +137,6 @@ DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6]) = {
   {2, -8, 27, 116, -11, 2},
   {1, -5, 17, 122, -8, 1},
   {1, -2, 8, 125, -5, 1}
-#else
-  { 0, 0, 128, 0, 0, 0 }, /* note that 1/8 pel positions are just as per alpha -0.5 bicubic */
-  { 0, -6, 123, 12, -1, 0 },
-  { 2, -11, 108, 36, -8, 1 }, /* New 1/4 pel 6 tap filter */
-  { 0, -9, 93, 50, -6, 0 },
-  { 3, -16, 77, 77, -16, 3 }, /* New 1/2 pel 6 tap filter */
-  { 0, -6, 50, 93, -9, 0 },
-  { 1, -8, 36, 108, -11, 2 }, /* New 1/4 pel 6 tap filter */
-  { 0, -1, 12, 123, -6, 0 },
-#endif /* SUBPEL_SHIFTS==16 */
 };

 static void filter_block2d_first_pass_6
@@ -255,8 +158,8 @@ static void filter_block2d_first_pass_6
       ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
       ((int)src_ptr[0] * vp8_filter[2]) +
       ((int)src_ptr[pixel_step] * vp8_filter[3]) +
       ((int)src_ptr[2 * pixel_step] * vp8_filter[4]) +
       ((int)src_ptr[3 * pixel_step] * vp8_filter[5]) +
       (VP8_FILTER_WEIGHT >> 1); /* Rounding */

       /* Normalize back to 0-255 */
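The first-pass hunk above accumulates six taps and ends with the (VP8_FILTER_WEIGHT >> 1) rounding term. As a sketch of that rounding and normalization step (a hypothetical helper built from the constants visible here, not code from the commit):

/* Hypothetical helper: bias the tap sum by half of VP8_FILTER_WEIGHT
 * (128 >> 1) so the shift by VP8_FILTER_SHIFT (7) rounds to nearest,
 * then clamp to 0..255 because the sharp filters have negative taps. */
static unsigned char round_shift_clamp(int sum) {
  int temp = (sum + (128 >> 1)) >> 7;
  if (temp < 0)
    temp = 0;
  else if (temp > 255)
    temp = 255;
  return (unsigned char)temp;
}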
@@ -18,11 +18,7 @@
 #define VP8_FILTER_WEIGHT 128
 #define VP8_FILTER_SHIFT  7

-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SUBPEL_SHIFTS 16
-#else
-#define SUBPEL_SHIFTS 8
-#endif

 extern const short vp8_bilinear_filters[SUBPEL_SHIFTS][2];
 extern const short vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6];
@@ -241,11 +241,7 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) {

   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
     ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     sppf(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    sppf(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
     ptr = ptr_base;
@@ -283,11 +279,7 @@ void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf

   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
     ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     sppf(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    sppf(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
     ptr = ptr_base;
@@ -314,11 +306,7 @@ static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);

   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    x->subpixel_predict8x8(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
   }
@@ -341,11 +329,7 @@ static void build_2nd_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);

   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    x->subpixel_predict_avg8x8(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, avg8x8)(ptr, d->pre_stride, pred_ptr, pitch);
   }
@@ -362,11 +346,7 @@ static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);

   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    x->subpixel_predict8x4(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
   }
@@ -542,7 +522,6 @@ void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd) {

   // U & V
   for (i = 0; i < 2; i++) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     if ((omv_row | omv_col) & 15) {
       // Copy extended MB into Temp array, applying the spatial filter
       filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
@@ -551,19 +530,7 @@
       // Sub-pel interpolation
       xd->subpixel_predict8x8(pTemp, len, omv_col & 15,
                               omv_row & 15, pDst, 8);
-    }
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    if ((mv_row | mv_col) & 7) {
-      // Copy extended MB into Temp array, applying the spatial filter
-      filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
-                Temp, len, len, len);
-
-      // Sub-pel interpolation
-      xd->subpixel_predict8x8(pTemp, len, mv_col & 7,
-                              mv_row & 7, pDst, 8);
-    }
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    else {
+    } else {
       // Apply prediction filter as we copy from source to destination
       filter_mb(pSrc, pre_stride, pDst, 8, 8, 8);
     }
@@ -574,18 +541,10 @@ void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd) {
     }
   } else
 #endif
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   if ((omv_row | omv_col) & 15) {
     xd->subpixel_predict8x8(uptr, pre_stride, omv_col & 15, omv_row & 15, upred_ptr, 8);
     xd->subpixel_predict8x8(vptr, pre_stride, omv_col & 15, omv_row & 15, vpred_ptr, 8);
-  }
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-  if ((mv_row | mv_col) & 7) {
-    xd->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
-    xd->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
-  }
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
-  else {
+  } else {
     RECON_INVOKE(&xd->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
     RECON_INVOKE(&xd->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
   }
@@ -708,13 +667,8 @@ void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *xd) {
                 Temp, len, len, len);

       // Sub-pel interpolation
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       xd->subpixel_predict16x16(pTemp, len, (mv_col & 7) << 1,
                                 (mv_row & 7) << 1, pred_ptr, 16);
-#else
-      xd->subpixel_predict16x16(pTemp, len, mv_col & 7,
-                                mv_row & 7, pred_ptr, 16);
-#endif
     } else {
       // Apply spatial filter to create the prediction directly
       filter_mb(ptr, pre_stride, pred_ptr, 16, 16, 16);
@@ -722,13 +676,8 @@ void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *xd) {
   } else
 #endif
   if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     xd->subpixel_predict16x16(ptr, pre_stride, (mv_col & 7) << 1,
                               (mv_row & 7) << 1, pred_ptr, 16);
-#else
-    xd->subpixel_predict16x16(ptr, pre_stride, mv_col & 7,
-                              mv_row & 7, pred_ptr, 16);
-#endif
   } else {
     RECON_INVOKE(&xd->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
   }
@@ -808,17 +757,10 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
               Temp, len, len, len);

     // Sub-pel filter
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict16x16(pTemp, len,
                              (_16x16mv.as_mv.col & 7) << 1,
                              (_16x16mv.as_mv.row & 7) << 1,
                              dst_y, dst_ystride);
-#else
-    x->subpixel_predict16x16(pTemp, len,
-                             _16x16mv.as_mv.col & 7,
-                             _16x16mv.as_mv.row & 7,
-                             dst_y, dst_ystride);
-#endif
   } else {
     // Apply spatial filter to create the prediction directly
     filter_mb(ptr, pre_stride, dst_y, dst_ystride, 16, 16);
@@ -826,14 +768,9 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
   } else
 #endif
   if (_16x16mv.as_int & 0x00070007) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict16x16(ptr, pre_stride, (_16x16mv.as_mv.col & 7) << 1,
                              (_16x16mv.as_mv.row & 7) << 1,
                              dst_y, dst_ystride);
-#else
-    x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7,
-                             _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y,
                                              dst_ystride);
@@ -873,7 +810,6 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,

   // U & V
   for (i = 0; i < 2; i++) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     if (_o16x16mv.as_int & 0x000f000f) {
       // Copy extended MB into Temp array, applying the spatial filter
       filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
@@ -884,21 +820,7 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
                              _o16x16mv.as_mv.col & 15,
                              _o16x16mv.as_mv.row & 15,
                              pDst, dst_uvstride);
-    }
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    if (_16x16mv.as_int & 0x00070007) {
-      // Copy extended MB into Temp array, applying the spatial filter
-      filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
-                Temp, len, len, len);
-
-      // Sub-pel filter
-      x->subpixel_predict8x8(pTemp, len,
-                             _16x16mv.as_mv.col & 7,
-                             _16x16mv.as_mv.row & 7,
-                             pDst, dst_uvstride);
-    }
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    else {
+    } else {
       filter_mb(pSrc, pre_stride, pDst, dst_uvstride, 8, 8);
     }

@@ -908,18 +830,10 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
     }
   } else
 #endif
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   if (_o16x16mv.as_int & 0x000f000f) {
     x->subpixel_predict8x8(uptr, pre_stride, _o16x16mv.as_mv.col & 15, _o16x16mv.as_mv.row & 15, dst_u, dst_uvstride);
     x->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15, _o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
-  }
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-  if (_16x16mv.as_int & 0x00070007) {
-    x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
-    x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
-  }
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
-  else {
+  } else {
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride);
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride);
   }
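The 0x00070007 and 0x000f000f tests in the hunks above rely on int_mv packing both 16-bit MV components into one 32-bit word, so a single mask checks both fractional parts at once (eighth-pel for luma, sixteenth-pel for the chroma offset MV). A runnable illustration of the mask test, not taken from the source:

#include <assert.h>
#include <string.h>

/* Two 16-bit components share one 32-bit word; which half holds row
 * vs. col is endian-dependent and irrelevant to the mask test. */
int main(void) {
  unsigned int as_int;
  short subpel[2] = { 5, 8 };    /* 5 & 7 != 0: fractional eighth-pel MV */
  short fullpel[2] = { 8, -16 }; /* both multiples of 8: full-pel MV */

  memcpy(&as_int, subpel, sizeof(as_int));
  assert((as_int & 0x00070007u) != 0);

  memcpy(&as_int, fullpel, sizeof(as_int));
  assert((as_int & 0x00070007u) == 0);
  return 0;
}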
@@ -983,13 +897,8 @@ void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,
               Temp, len, len, len);

     // Sub-pel filter
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict_avg16x16(pTemp, len, (mv_col & 7) << 1,
                                  (mv_row & 7) << 1, dst_y, dst_ystride);
-#else
-    x->subpixel_predict_avg16x16(pTemp, len, mv_col & 7,
-                                 mv_row & 7, dst_y, dst_ystride);
-#endif
   } else {
     // TODO Needs to AVERAGE with the dst_y
     // For now, do not apply the prediction filter in these cases!
@@ -1000,13 +909,8 @@ void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,
 #endif // CONFIG_PRED_FILTER
   {
     if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       x->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
                                    (mv_row & 7) << 1, dst_y, dst_ystride);
-#else
-      x->subpixel_predict_avg16x16(ptr, pre_stride, mv_col & 7,
-                                   mv_row & 7, dst_y, dst_ystride);
-#endif
     } else {
       RECON_INVOKE(&x->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
                                               dst_ystride);
@@ -1038,7 +942,6 @@ void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,

   // U & V
   for (i = 0; i < 2; i++) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     if ((omv_row | omv_col) & 15) {
       // Copy extended MB into Temp array, applying the spatial filter
       filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
@@ -1047,19 +950,7 @@ void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,
       // Sub-pel filter
       x->subpixel_predict_avg8x8(pTemp, len, omv_col & 15,
                                  omv_row & 15, pDst, dst_uvstride);
-    }
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    if ((mv_row | mv_col) & 7) {
-      // Copy extended MB into Temp array, applying the spatial filter
-      filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
-                Temp, len, len, len);
-
-      // Sub-pel filter
-      x->subpixel_predict_avg8x8(pTemp, len, mv_col & 7, mv_row & 7,
-                                 pDst, dst_uvstride);
-    }
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    else {
+    } else {
       // TODO Needs to AVERAGE with the dst_[u|v]
       // For now, do not apply the prediction filter here!
       RECON_INVOKE(&x->rtcd->recon, avg8x8)(pSrc, pre_stride, pDst,
@@ -1072,18 +963,10 @@ void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,
     }
   } else
 #endif // CONFIG_PRED_FILTER
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   if ((omv_row | omv_col) & 15) {
     x->subpixel_predict_avg8x8(uptr, pre_stride, omv_col & 15, omv_row & 15, dst_u, dst_uvstride);
     x->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15, omv_row & 15, dst_v, dst_uvstride);
-  }
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-  if ((mv_row | mv_col) & 7) {
-    x->subpixel_predict_avg8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride);
-    x->subpixel_predict_avg8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride);
-  }
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
-  else {
+  } else {
     RECON_INVOKE(&x->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride);
     RECON_INVOKE(&x->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride);
   }
@@ -1495,7 +1495,6 @@ k2_k4:
     times 8 db 36, -11
     times 8 db 12, -6
 align 16
-%if CONFIG_SIXTEENTH_SUBPEL_UV
 vp8_bilinear_filters_ssse3:
     times 8 db 128, 0
     times 8 db 120, 8
@@ -1513,15 +1512,4 @@ vp8_bilinear_filters_ssse3:
     times 8 db 24, 104
     times 8 db 16, 112
     times 8 db 8, 120
-%else
-vp8_bilinear_filters_ssse3:
-    times 8 db 128, 0
-    times 8 db 112, 16
-    times 8 db 96, 32
-    times 8 db 80, 48
-    times 8 db 64, 64
-    times 8 db 48, 80
-    times 8 db 32, 96
-    times 8 db 16, 112
-%endif

@@ -13,15 +13,8 @@
 #include "vpx_ports/mem.h"
 #include "vp8/common/subpixel.h"

-#if CONFIG_SIXTEENTH_SUBPEL_UV
 extern const short vp8_six_tap_mmx[16][6 * 8];
 extern const short vp8_bilinear_filters_mmx[16][2 * 8];
-#else
-extern const short vp8_six_tap_mmx[8][6 * 8];
-extern const short vp8_bilinear_filters_mmx[8][2 * 8];
-#endif

-// #define ANNOUNCE_FUNCTION
-
 extern void vp8_filter_block1d_h6_mmx
 (
@@ -13,11 +13,7 @@
 #include "vp8/common/filter.h"
 #include "vp8/common/arm/bilinearfilter_arm.h"

-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define HALFNDX 8
-#else
-#define HALFNDX 4
-#endif

 #if HAVE_ARMV6

@@ -205,22 +205,14 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
  */

 #define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SP(x) (((x)&3)<<2) // convert motion vector component to offset for svf calc
-#else
-#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
 #define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c)-rc]) * error_per_bit + 128 )>>8 : 0) // estimated cost of a motion vector (r,c)
 #define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
 #define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;

 #define PREHP(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset))) // pointer to predictor base of a motionvector
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SPHP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-#define SPHP(x) ((x)&7) // convert motion vector component to offset for svf calc
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
 #define DISTHP(r,c) vfp->svf( PREHP(r,c), y_stride, SPHP(c),SPHP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
 #define ERRHP(r,c) (MVC(r,c)+DISTHP(r,c)) // returns distortion + motion vector cost
 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = ((xd->allow_high_precision_mv)?DISTHP(r,c):DIST(r,c)); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
@@ -444,11 +436,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
 #undef DISTHP
 #undef ERRHP

-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
-#else
-#define SP(x) ((x)&7) // convert motion vector component to offset for svf calc
-#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                  int_mv *bestmv, int_mv *ref_mv,
                                  int error_per_bit,
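The retained SP and SPHP macros map a motion-vector component onto the 16-entry sub-pel filter tables: quarter-pel components step by four rows, eighth-pel components by two. A small worked example using the macros verbatim (the main() harness is illustrative):

#include <stdio.h>

#define SP(x)   (((x) & 3) << 2)  /* quarter-pel -> rows 0, 4, 8, 12 */
#define SPHP(x) (((x) & 7) << 1)  /* eighth-pel  -> rows 0, 2, ..., 14 */

int main(void) {
  for (int x = 0; x < 4; x++)
    printf("quarter-pel %d -> subpel row %2d\n", x, SP(x));
  for (int x = 0; x < 8; x++)
    printf("eighth-pel  %d -> subpel row %2d\n", x, SPHP(x));
  return 0;
}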
@@ -518,17 +518,10 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd) {
   vptr = x->e_mbd.pre.v_buffer + offset;

   if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
                                         (mv_col & 7) << 1, (mv_row & 7) << 1, upred_ptr, uv_stride, &sse2);
     VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
                                         (mv_col & 7) << 1, (mv_row & 7) << 1, vpred_ptr, uv_stride, &sse1);
-#else
-    VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
-                                        mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
-    VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
-                                        mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
-#endif
     sse2 += sse1;
   } else {
     VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
@@ -56,13 +56,8 @@ static void vp8_temporal_filter_predictors_mb_c
   yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);

   if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict16x16(yptr, stride,
                              (mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
-#else
-    x->subpixel_predict16x16(yptr, stride,
-                             mv_col & 7, mv_row & 7, &pred[0], 16);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
   }
@@ -77,21 +72,12 @@ static void vp8_temporal_filter_predictors_mb_c
   uptr = u_mb_ptr + offset;
   vptr = v_mb_ptr + offset;

-#if CONFIG_SIXTEENTH_SUBPEL_UV
   if ((omv_row | omv_col) & 15) {
     x->subpixel_predict8x8(uptr, stride,
                            (omv_col & 15), (omv_row & 15), &pred[256], 8);
     x->subpixel_predict8x8(vptr, stride,
                            (omv_col & 15), (omv_row & 15), &pred[320], 8);
   }
-#else
-  if ((mv_row | mv_col) & 7) {
-    x->subpixel_predict8x8(uptr, stride,
-                           mv_col & 7, mv_row & 7, &pred[256], 8);
-    x->subpixel_predict8x8(vptr, stride,
-                           mv_col & 7, mv_row & 7, &pred[320], 8);
-  }
-#endif
   else {
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
@@ -341,13 +341,8 @@ unsigned int vp8_variance_halfpixvar16x16_h_c(
   const unsigned char *ref_ptr,
   int recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
                                        ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 0,
-                                       ref_ptr, recon_stride, sse);
-#endif
 }


@@ -357,13 +352,8 @@ unsigned int vp8_variance_halfpixvar16x16_v_c(
   const unsigned char *ref_ptr,
   int recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
                                        ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 4,
-                                       ref_ptr, recon_stride, sse);
-#endif
 }


@@ -373,13 +363,8 @@ unsigned int vp8_variance_halfpixvar16x16_hv_c(
   const unsigned char *ref_ptr,
   int recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
                                        ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 4,
-                                       ref_ptr, recon_stride, sse);
-#endif
 }


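The half-pixel variance wrappers above now express their offsets in sixteenths, so index 8 replaces the old index 4; both name the same half-pel position. A trivial check of that equivalence (illustrative only):

#include <stdio.h>

static double subpel_offset_in_pels(int index, int subpel_shifts) {
  return (double)index / subpel_shifts;
}

int main(void) {
  printf("old: %.2f pel\n", subpel_offset_in_pels(4, 8));   /* 0.50 */
  printf("new: %.2f pel\n", subpel_offset_in_pels(8, 16));  /* 0.50 */
  return 0;
}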
@@ -1348,7 +1348,6 @@ align 16
 xmm_bi_rd:
     times 8 dw 64
 align 16
-%if CONFIG_SIXTEENTH_SUBPEL_UV
 vp8_bilinear_filters_sse2:
     dw 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0
     dw 120, 120, 120, 120, 120, 120, 120, 120, 8, 8, 8, 8, 8, 8, 8, 8
@@ -1366,14 +1365,3 @@ vp8_bilinear_filters_sse2:
     dw 24, 24, 24, 24, 24, 24, 24, 24, 104, 104, 104, 104, 104, 104, 104, 104
     dw 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112
     dw 8, 8, 8, 8, 8, 8, 8, 8, 120, 120, 120, 120, 120, 120, 120, 120
-%else
-vp8_bilinear_filters_sse2:
-    dw 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0
-    dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16
-    dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32
-    dw 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48
-    dw 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
-    dw 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80
-    dw 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96
-    dw 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112
-%endif
@@ -353,7 +353,6 @@ align 16
 xmm_bi_rd:
     times 8 dw 64
 align 16
-%if CONFIG_SIXTEENTH_SUBPEL_UV
 vp8_bilinear_filters_ssse3:
     times 8 db 128, 0
     times 8 db 120, 8
@@ -371,14 +370,3 @@ vp8_bilinear_filters_ssse3:
     times 8 db 24, 104
     times 8 db 16, 112
     times 8 db 8, 120
-%else
-vp8_bilinear_filters_ssse3:
-    times 8 db 128, 0
-    times 8 db 112, 16
-    times 8 db 96, 32
-    times 8 db 80, 48
-    times 8 db 64, 64
-    times 8 db 48, 80
-    times 8 db 32, 96
-    times 8 db 16, 112
-%endif
@@ -198,7 +198,6 @@ unsigned int vp8_variance8x16_mmx(
 // the mmx function that does the bilinear filtering and var calculation //
 // int one pass                                                          //
 ///////////////////////////////////////////////////////////////////////////
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = {
   { 128, 128, 128, 128, 0, 0, 0, 0 },
   { 120, 120, 120, 120, 8, 8, 8, 8 },
@@ -217,18 +216,6 @@ DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = {
   { 16, 16, 16, 16, 112, 112, 112, 112 },
   { 8, 8, 8, 8, 120, 120, 120, 120 }
 };
-#else
-DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[8][8]) = {
-  { 128, 128, 128, 128, 0, 0, 0, 0 },
-  { 112, 112, 112, 112, 16, 16, 16, 16 },
-  { 96, 96, 96, 96, 32, 32, 32, 32 },
-  { 80, 80, 80, 80, 48, 48, 48, 48 },
-  { 64, 64, 64, 64, 64, 64, 64, 64 },
-  { 48, 48, 48, 48, 80, 80, 80, 80 },
-  { 32, 32, 32, 32, 96, 96, 96, 96 },
-  { 16, 16, 16, 16, 112, 112, 112, 112 }
-};
-#endif

 unsigned int vp8_sub_pixel_variance4x4_mmx
 (
@@ -392,13 +379,8 @@ unsigned int vp8_variance_halfpixvar16x16_h_mmx(
   const unsigned char *ref_ptr,
   int recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 0,
                                          ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 0,
-                                         ref_ptr, recon_stride, sse);
-#endif
 }


@@ -408,13 +390,8 @@ unsigned int vp8_variance_halfpixvar16x16_v_mmx(
   const unsigned char *ref_ptr,
   int recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 8,
                                          ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 4,
-                                         ref_ptr, recon_stride, sse);
-#endif
 }


@@ -424,11 +401,6 @@ unsigned int vp8_variance_halfpixvar16x16_hv_mmx(
   const unsigned char *ref_ptr,
   int recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 8,
                                          ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 4,
-                                         ref_ptr, recon_stride, sse);
-#endif
 }
@@ -13,11 +13,7 @@
 #include "vp8/common/pragmas.h"
 #include "vpx_ports/mem.h"

-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define HALFNDX 8
-#else
-#define HALFNDX 4
-#endif

 extern void filter_block1d_h6_mmx(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
 extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
@@ -141,11 +137,7 @@ void vp8_half_vert_variance16x_h_sse2
   unsigned int *sumsquared
 );

-#if CONFIG_SIXTEENTH_SUBPEL_UV
 DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[16][8]);
-#else
-DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[8][8]);
-#endif

 unsigned int vp8_variance4x4_wmt(
   const unsigned char *src_ptr,
@@ -13,11 +13,7 @@
 #include "vp8/common/pragmas.h"
 #include "vpx_ports/mem.h"

-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define HALFNDX 8
-#else
-#define HALFNDX 4
-#endif

 extern unsigned int vp8_get16x16var_sse2
 (