Merge "convert copy16x16 to rtcd" into experimental

Paul Wilkins authored 2012-10-10 05:45:19 -07:00, committed by Gerrit Code Review
commit a2b4a560b4
10 changed files with 19 additions and 27 deletions
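
The change retires the hand-maintained recon vtable entry for the 16x16 block copy in favour of the generated run-time CPU detection (rtcd) layer: call sites stop going through RECON_INVOKE(&xd->rtcd->recon, copy16x16) and call vp8_copy_mem16x16 directly, the per-architecture pointer assignments are deleted, and rtcd_defs.sh gains the prototype/specialize entries that drive the generated dispatch. Below is a minimal, self-contained C sketch of that before/after pattern; the scaffolding here (the cut-down vtable struct, the RECON_INVOKE macro, the copy_mem16x16 pointer and its default) is illustrative stand-in code, not the full libvpx definitions.

/* Illustrative sketch only: simplified stand-ins for the libvpx types and
 * macros, showing the dispatch pattern before and after this commit. */
#include <stdio.h>
#include <string.h>

typedef void (*copy_fn)(unsigned char *src, int src_pitch,
                        unsigned char *dst, int dst_pitch);

/* Generic C version; SIMD builds would register e.g. an SSE2 variant too. */
static void copy_mem16x16_c(unsigned char *src, int src_pitch,
                            unsigned char *dst, int dst_pitch) {
  for (int r = 0; r < 16; ++r)
    memcpy(dst + r * dst_pitch, src + r * src_pitch, 16);
}

/* Before: a hand-filled vtable, reached through an INVOKE-style macro at
 * every call site and populated per architecture in *_systemdependent.c. */
struct recon_rtcd_vtable { copy_fn copy16x16; };
#define RECON_INVOKE(ctx, fn) (ctx)->fn

/* After: one dispatch symbol that rtcd setup code binds to the best
 * implementation once, so call sites simply call copy_mem16x16(...). */
static copy_fn copy_mem16x16 = copy_mem16x16_c;

int main(void) {
  unsigned char src[16 * 16], dst[16 * 16];
  memset(src, 0x5a, sizeof(src));

  /* Old style: look the function up in the vtable at the call site. */
  struct recon_rtcd_vtable rtcd = { copy_mem16x16_c };
  RECON_INVOKE(&rtcd, copy16x16)(src, 16, dst, 16);

  /* New style: call the rtcd-managed symbol directly. */
  copy_mem16x16(src, 16, dst, 16);

  printf("copies match: %d\n", memcmp(src, dst, sizeof(dst)) == 0);
  return 0;
}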

View File

@@ -36,7 +36,6 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
rtcd->idct.idct16x16 = vp8_short_idct16x16_c;
#endif
rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;
rtcd->recon.avg8x8 = vp8_avg_mem8x8_c;

View File

@@ -14,6 +14,7 @@
#include "vpx_config.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "vpx_rtcd.h"
#include "loopfilter.h"
#include "entropymv.h"
#include "entropy.h"

View File

@@ -44,11 +44,6 @@ struct vp8_recon_rtcd_vtable;
#include "arm/recon_arm.h"
#endif
#ifndef vp8_recon_copy16x16
#define vp8_recon_copy16x16 vp8_copy_mem16x16_c
#endif
extern prototype_copy_block(vp8_recon_copy16x16);
#ifndef vp8_recon_copy8x8
#define vp8_recon_copy8x8 vp8_copy_mem8x8_c
#endif

View File

@@ -662,8 +662,7 @@ void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
(ymv.as_mv.row & 7) << 1,
dst_y, dst_ystride);
} else {
RECON_INVOKE(&xd->rtcd->recon, copy16x16)
(ptr, pre_stride, dst_y, dst_ystride);
vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
}

View File

@@ -5,6 +5,8 @@ EOF
}
forward_decls common_forward_decls
prototype void vp8_filter_block2d_4x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x8_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
@@ -19,3 +21,12 @@ if [ "$CONFIG_GCC" = "yes" ]; then
specialize vp8_filter_block2d_8x8_8 sse4_1 sse2
specialize vp8_filter_block2d_16x16_8 sse4_1 sse2
fi
#
# RECON
#
prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
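
The prototype line above hands the rtcd generator the function's signature, and the specialize line names the per-architecture variants; the _media/_dspr2 assignments map the generated names onto the actual symbols (e.g. the ARMv6 implementation vp8_copy_mem16x16_v6). Roughly, the generated vpx_rtcd.h then declares each variant plus a single dispatch symbol. The sketch below shows that shape for a runtime-detect build and is an approximation, not the verbatim generated header.

/* Approximate shape of the rtcd output for the lines above -- illustrative,
 * not the verbatim generated vpx_rtcd.h. */
void vp8_copy_mem16x16_c(unsigned char *src, int src_pitch,
                         unsigned char *dst, int dst_pitch);
void vp8_copy_mem16x16_mmx(unsigned char *src, int src_pitch,
                           unsigned char *dst, int dst_pitch);
void vp8_copy_mem16x16_sse2(unsigned char *src, int src_pitch,
                            unsigned char *dst, int dst_pitch);

/* In a CONFIG_RUNTIME_CPU_DETECT build, vp8_copy_mem16x16 is a function
 * pointer that setup code binds to the best variant for the running CPU;
 * in a static build it would instead be #defined to one variant. */
extern void (*vp8_copy_mem16x16)(unsigned char *src, int src_pitch,
                                 unsigned char *dst, int dst_pitch);

/* Sketch of the init step (the real one lives in generated setup code and
 * checks the x86 feature flags): pick the widest supported copy routine. */
static void init_copy_mem16x16_sketch(int have_mmx, int have_sse2) {
  vp8_copy_mem16x16 = vp8_copy_mem16x16_c;
  if (have_mmx)  vp8_copy_mem16x16 = vp8_copy_mem16x16_mmx;
  if (have_sse2) vp8_copy_mem16x16 = vp8_copy_mem16x16_sse2;
}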

View File

@@ -36,9 +36,6 @@ extern prototype_copy_block(vp8_copy_mem16x16_mmx);
#undef vp8_recon_copy8x4
#define vp8_recon_copy8x4 vp8_copy_mem8x4_mmx
#undef vp8_recon_copy16x16
#define vp8_recon_copy16x16 vp8_copy_mem16x16_mmx
#endif
#endif

View File

@@ -44,7 +44,6 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
rtcd->recon.recon = vp8_recon_b_mmx;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx;
rtcd->recon.copy16x16 = vp8_copy_mem16x16_mmx;
/* Disabled due to unsupported enhanced interpolation/high_prec mv
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_mmx;
@@ -80,7 +79,6 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
if (flags & HAS_SSE2) {
rtcd->recon.recon2 = vp8_recon2b_sse2;
rtcd->recon.recon4 = vp8_recon4b_sse2;
rtcd->recon.copy16x16 = vp8_copy_mem16x16_sse2;
/* these are disable because of unsupported diagonal pred modes
rtcd->recon.build_intra_predictors_mbuv =

View File

@@ -309,9 +309,7 @@ static void build_activity_map(VP8_COMP *cpi) {
recon_yoffset += 16;
#endif
// Copy current mb to a buffer
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
x->src.y_stride,
x->thismb, 16);
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
@@ -591,9 +589,7 @@ static void pick_mb_modes(VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
x->src.y_stride,
x->thismb, 16);
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
@@ -776,9 +772,7 @@ static void pick_sb_modes (VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if 0 // FIXME
/* Copy current MB to a work buffer */
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
x->src.y_stride,
x->thismb, 16);
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
@@ -976,9 +970,7 @@ static void encode_sb(VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
x->src.y_stride,
x->thismb, 16);
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);

View File

@@ -528,7 +528,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
xd->left_available = (mb_col != 0);
// Copy current mb to a buffer
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// do intra 16x16 prediction
this_error = vp8_encode_intra(cpi, x, use_dc_pred);

View File

@@ -59,7 +59,7 @@ static void vp8_temporal_filter_predictors_mb_c
xd->subpixel_predict16x16(yptr, stride,
(mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
} else {
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
}
// U & V