removed the recon rtcd invoke macro code (unrevert)

This reinstates reverted commit 2113a83157

Change-Id: I9a9af13497d1e58d4f467e3e083fddf06b1b786c
Jim Bankoski 2012-10-13 18:49:44 -07:00 committed by John Koleszar
parent d5955a4231
commit 7c15c18c5e
25 changed files with 188 additions and 611 deletions
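
For orientation, a condensed before/after sketch of the call pattern this commit changes, using names taken from the hunks below (an illustrative sketch, not a hunk from the diff):

/* Before: recon functions were reached through the vp8_recon_rtcd_vtable_t
 * table via the RECON_INVOKE macro defined in the (now deleted) recon.h: */
#if CONFIG_RUNTIME_CPU_DETECT
#define RECON_INVOKE(ctx, fn) (ctx)->fn       /* indirect call through the vtable */
#else
#define RECON_INVOKE(ctx, fn) vp8_recon_##fn  /* token-pasted static name */
#endif

/* typical old call site */
RECON_INVOKE(&xd->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);

/* After: the symbol declared by the generated vpx_rtcd.h is called directly;
 * per-CPU dispatch is handled behind that symbol by the rtcd generator,
 * driven by the prototype/specialize entries added below. */
vp8_copy_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);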

View File

@ -14,7 +14,6 @@
#include "vp8/common/g_common.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
#include "vp8/common/onyxc_int.h"
@ -34,57 +33,6 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) {
rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c;
rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c;
rtcd->idct.idct16x16 = vp8_short_idct16x16_c;
rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;
rtcd->recon.avg8x8 = vp8_avg_mem8x8_c;
rtcd->recon.copy8x4 = vp8_copy_mem8x4_c;
rtcd->recon.recon = vp8_recon_b_c;
rtcd->recon.recon_uv = vp8_recon_uv_b_c;
rtcd->recon.recon2 = vp8_recon2b_c;
rtcd->recon.recon4 = vp8_recon4b_c;
rtcd->recon.recon_mb = vp8_recon_mb_c;
rtcd->recon.recon_mby = vp8_recon_mby_c;
#if CONFIG_SUPERBLOCKS
rtcd->recon.build_intra_predictors_sby_s =
vp8_build_intra_predictors_sby_s;
rtcd->recon.build_intra_predictors_sbuv_s =
vp8_build_intra_predictors_sbuv_s;
#endif
rtcd->recon.build_intra_predictors_mby =
vp8_build_intra_predictors_mby;
#if CONFIG_COMP_INTRA_PRED
rtcd->recon.build_comp_intra_predictors_mby =
vp8_build_comp_intra_predictors_mby;
#endif
rtcd->recon.build_intra_predictors_mby_s =
vp8_build_intra_predictors_mby_s;
rtcd->recon.build_intra_predictors_mbuv =
vp8_build_intra_predictors_mbuv;
rtcd->recon.build_intra_predictors_mbuv_s =
vp8_build_intra_predictors_mbuv_s;
#if CONFIG_COMP_INTRA_PRED
rtcd->recon.build_comp_intra_predictors_mbuv =
vp8_build_comp_intra_predictors_mbuv;
#endif
rtcd->recon.intra4x4_predict =
vp8_intra4x4_predict;
#if CONFIG_COMP_INTRA_PRED
rtcd->recon.comp_intra4x4_predict =
vp8_comp_intra4x4_predict;
#endif
rtcd->recon.intra8x8_predict =
vp8_intra8x8_predict;
#if CONFIG_COMP_INTRA_PRED
rtcd->recon.comp_intra8x8_predict =
vp8_comp_intra8x8_predict;
#endif
rtcd->recon.intra_uv4x4_predict =
vp8_intra_uv4x4_predict;
#if CONFIG_COMP_INTRA_PRED
rtcd->recon.comp_intra_uv4x4_predict =
vp8_comp_intra_uv4x4_predict;
#endif
rtcd->subpix.eighttap16x16 = vp8_eighttap_predict16x16_c;
rtcd->subpix.eighttap8x8 = vp8_eighttap_predict8x8_c;

View File

@ -20,7 +20,6 @@
#include "entropy.h"
#include "entropymode.h"
#include "idct.h"
#include "recon.h"
#if CONFIG_POSTPROC
#include "postproc.h"
#endif
@ -171,7 +170,6 @@ typedef enum {
typedef struct VP8_COMMON_RTCD {
#if CONFIG_RUNTIME_CPU_DETECT
vp8_idct_rtcd_vtable_t idct;
vp8_recon_rtcd_vtable_t recon;
vp8_subpix_rtcd_vtable_t subpix;
vp8_loopfilter_rtcd_vtable_t loopfilter;
#if CONFIG_POSTPROC

View File

@ -10,7 +10,7 @@
#include "vpx_ports/config.h"
#include "recon.h"
#include "vpx_rtcd.h"
#include "blockd.h"
void vp8_recon_b_c
@ -125,7 +125,7 @@ void vp8_recon2b_c
}
#if CONFIG_SUPERBLOCKS
void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *dst) {
void vp8_recon_mby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
int x, y;
BLOCKD *b = &xd->block[0];
int stride = b->dst_stride;
@ -145,7 +145,7 @@ void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uin
}
}
void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
void vp8_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
int x, y, i;
uint8_t *dst = udst;
@ -170,71 +170,28 @@ void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, ui
}
#endif
void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
#if ARCH_ARM
BLOCKD *b = &xd->block[0];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
/*b = &xd->block[4];*/
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
/*b = &xd->block[8];*/
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
/*b = &xd->block[12];*/
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
#else
void vp8_recon_mby_c(MACROBLOCKD *xd) {
int i;
for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#endif
}
void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
#if ARCH_ARM
BLOCKD *b = &xd->block[0];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
/*b = &xd->block[16];*/
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
#else
void vp8_recon_mb_c(MACROBLOCKD *xd) {
int i;
for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp8_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#endif
}

View File

@ -1,268 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef __INC_RECON_H
#define __INC_RECON_H
#include "blockd.h"
#define prototype_copy_block(sym) \
void sym(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch)
#define prototype_recon_block(sym) \
void sym(unsigned char *pred, short *diff, unsigned char *dst, int pitch)
#define prototype_recon_macroblock(sym) \
void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *xd)
#define prototype_build_intra_predictors(sym) \
void sym(MACROBLOCKD *xd)
#define prototype_intra4x4_predict(sym) \
void sym(BLOCKD *x, int b_mode, unsigned char *predictor)
#if CONFIG_COMP_INTRA_PRED
#define prototype_comp_intra4x4_predict(sym) \
void sym(BLOCKD *x, int b_mode, int mode2, unsigned char *predictor)
#endif
struct vp8_recon_rtcd_vtable;
#if ARCH_X86 || ARCH_X86_64
#include "x86/recon_x86.h"
#endif
#if ARCH_ARM
#include "arm/recon_arm.h"
#endif
#ifndef vp8_recon_copy8x8
#define vp8_recon_copy8x8 vp8_copy_mem8x8_c
#endif
extern prototype_copy_block(vp8_recon_copy8x8);
#ifndef vp8_recon_avg16x16
#define vp8_recon_avg16x16 vp8_avg_mem16x16_c
#endif
extern prototype_copy_block(vp8_recon_avg16x16);
#ifndef vp8_recon_avg8x8
#define vp8_recon_avg8x8 vp8_avg_mem8x8_c
#endif
extern prototype_copy_block(vp8_recon_avg8x8);
#ifndef vp8_recon_copy8x4
#define vp8_recon_copy8x4 vp8_copy_mem8x4_c
#endif
extern prototype_copy_block(vp8_recon_copy8x4);
#ifndef vp8_recon_recon
#define vp8_recon_recon vp8_recon_b_c
#endif
extern prototype_recon_block(vp8_recon_recon);
#ifndef vp8_recon_recon_uv
#define vp8_recon_recon_uv vp8_recon_uv_b_c
#endif
extern prototype_recon_block(vp8_recon_recon_uv);
extern prototype_recon_block(vp8_recon_recon);
#ifndef vp8_recon_recon2
#define vp8_recon_recon2 vp8_recon2b_c
#endif
extern prototype_recon_block(vp8_recon_recon2);
#ifndef vp8_recon_recon4
#define vp8_recon_recon4 vp8_recon4b_c
#endif
extern prototype_recon_block(vp8_recon_recon4);
#ifndef vp8_recon_recon_mb
#define vp8_recon_recon_mb vp8_recon_mb_c
#endif
extern prototype_recon_macroblock(vp8_recon_recon_mb);
#ifndef vp8_recon_recon_mby
#define vp8_recon_recon_mby vp8_recon_mby_c
#endif
extern prototype_recon_macroblock(vp8_recon_recon_mby);
#ifndef vp8_recon_build_intra_predictors_sby_s
#define vp8_recon_build_intra_predictors_sby_s vp8_build_intra_predictors_sby_s
#endif
extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sby_s);
#ifndef vp8_recon_build_intra_predictors_mby
#define vp8_recon_build_intra_predictors_mby vp8_build_intra_predictors_mby
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_intra_predictors_mby);
#if CONFIG_COMP_INTRA_PRED
#ifndef vp8_recon_build_comp_intra_predictors_mby
#define vp8_recon_build_comp_intra_predictors_mby vp8_build_comp_intra_predictors_mby
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_comp_intra_predictors_mby);
#endif
#ifndef vp8_recon_build_intra8x8_predictors_mby
#define vp8_recon_build_intra8x8_predictors_mby vp8_build_intra8x8_predictors_mby
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_intra8x8_predictors_mby);
#ifndef vp8_recon_build_intra_predictors_mby_s
#define vp8_recon_build_intra_predictors_mby_s vp8_build_intra_predictors_mby_s
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_intra_predictors_mby_s);
#ifndef vp8_recon_build_intra_predictors_sbuv_s
#define vp8_recon_build_intra_predictors_sbuv_s vp8_build_intra_predictors_sbuv_s
#endif
extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sbuv_s);
#ifndef vp8_recon_build_intra_predictors_mbuv
#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_intra_predictors_mbuv);
#ifndef vp8_recon_build_intra8x8_predictors_mbuv
#define vp8_recon_build_intra8x8_predictors_mbuv vp8_build_intra8x8_predictors_mbuv
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_intra8x8_predictors_mbuv);
#ifndef vp8_recon_build_intra_predictors_mbuv_s
#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_intra_predictors_mbuv_s);
#if CONFIG_COMP_INTRA_PRED
#ifndef vp8_recon_build_comp_intra_predictors_mbuv
#define vp8_recon_build_comp_intra_predictors_mbuv vp8_build_comp_intra_predictors_mbuv
#endif
extern prototype_build_intra_predictors\
(vp8_recon_build_comp_intra_predictors_mbuv);
#endif
#ifndef vp8_recon_intra4x4_predict
#define vp8_recon_intra4x4_predict vp8_intra4x4_predict
#endif
extern prototype_intra4x4_predict\
(vp8_recon_intra4x4_predict);
#if CONFIG_COMP_INTRA_PRED
#ifndef vp8_recon_comp_intra4x4_predict
#define vp8_recon_comp_intra4x4_predict vp8_comp_intra4x4_predict
#endif
extern prototype_comp_intra4x4_predict\
(vp8_recon_comp_intra4x4_predict);
#endif
#ifndef vp8_recon_intra8x8_predict
#define vp8_recon_intra8x8_predict vp8_intra8x8_predict
#endif
extern prototype_intra4x4_predict\
(vp8_recon_intra8x8_predict);
#if CONFIG_COMP_INTRA_PRED
#ifndef vp8_recon_comp_intra8x8_predict
#define vp8_recon_comp_intra8x8_predict vp8_comp_intra8x8_predict
#endif
extern prototype_comp_intra4x4_predict\
(vp8_recon_comp_intra8x8_predict);
#endif
#ifndef vp8_recon_intra_uv4x4_predict
#define vp8_recon_intra_uv4x4_predict vp8_intra_uv4x4_predict
#endif
extern prototype_intra4x4_predict\
(vp8_recon_intra_uv4x4_predict);
#if CONFIG_COMP_INTRA_PRED
#ifndef vp8_recon_comp_intra_uv4x4_predict
#define vp8_recon_comp_intra_uv4x4_predict vp8_comp_intra_uv4x4_predict
#endif
extern prototype_comp_intra4x4_predict\
(vp8_recon_comp_intra_uv4x4_predict);
#endif
typedef prototype_copy_block((*vp8_copy_block_fn_t));
typedef prototype_recon_block((*vp8_recon_fn_t));
typedef prototype_recon_macroblock((*vp8_recon_mb_fn_t));
typedef prototype_build_intra_predictors((*vp8_build_intra_pred_fn_t));
typedef prototype_intra4x4_predict((*vp8_intra4x4_pred_fn_t));
#if CONFIG_COMP_INTRA_PRED
typedef prototype_comp_intra4x4_predict((*vp8_comp_intra4x4_pred_fn_t));
#endif
typedef struct vp8_recon_rtcd_vtable {
vp8_copy_block_fn_t copy16x16;
vp8_copy_block_fn_t copy8x8;
vp8_copy_block_fn_t avg16x16;
vp8_copy_block_fn_t avg8x8;
vp8_copy_block_fn_t copy8x4;
vp8_recon_fn_t recon;
vp8_recon_fn_t recon_uv;
vp8_recon_fn_t recon2;
vp8_recon_fn_t recon4;
vp8_recon_mb_fn_t recon_mb;
vp8_recon_mb_fn_t recon_mby;
#if CONFIG_SUPERBLOCKS
vp8_build_intra_pred_fn_t build_intra_predictors_sby_s;
#endif
vp8_build_intra_pred_fn_t build_intra_predictors_mby_s;
vp8_build_intra_pred_fn_t build_intra_predictors_mby;
#if CONFIG_COMP_INTRA_PRED
vp8_build_intra_pred_fn_t build_comp_intra_predictors_mby;
#endif
#if CONFIG_SUPERBLOCKS
vp8_build_intra_pred_fn_t build_intra_predictors_sbuv_s;
#endif
vp8_build_intra_pred_fn_t build_intra_predictors_mbuv_s;
vp8_build_intra_pred_fn_t build_intra_predictors_mbuv;
#if CONFIG_COMP_INTRA_PRED
vp8_build_intra_pred_fn_t build_comp_intra_predictors_mbuv;
#endif
vp8_intra4x4_pred_fn_t intra4x4_predict;
#if CONFIG_COMP_INTRA_PRED
vp8_comp_intra4x4_pred_fn_t comp_intra4x4_predict;
#endif
vp8_intra4x4_pred_fn_t intra8x8_predict;
#if CONFIG_COMP_INTRA_PRED
vp8_comp_intra4x4_pred_fn_t comp_intra8x8_predict;
#endif
vp8_intra4x4_pred_fn_t intra_uv4x4_predict;
#if CONFIG_COMP_INTRA_PRED
vp8_comp_intra4x4_pred_fn_t comp_intra_uv4x4_predict;
#endif
} vp8_recon_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
#define RECON_INVOKE(ctx,fn) (ctx)->fn
#else
#define RECON_INVOKE(ctx,fn) vp8_recon_##fn
#endif
void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd);
#if CONFIG_SUPERBLOCKS
extern void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd, uint8_t *dst);
extern void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst);
#endif
#endif
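
For reference, a worked expansion of the macro deleted above in its two configurations (names taken from this header; the expansion itself is ordinary C preprocessing):

/* With CONFIG_RUNTIME_CPU_DETECT: the call goes through the table that
 * vp8_machine_specific_config() fills in at start-up. */
RECON_INVOKE(&xd->rtcd->recon, copy8x8)(src, src_pitch, dst, dst_pitch);
/* expands to */
(&xd->rtcd->recon)->copy8x8(src, src_pitch, dst, dst_pitch);

/* Without CONFIG_RUNTIME_CPU_DETECT: the macro token-pastes a static name,
 * which the #ifndef blocks above default to the plain C version and which
 * platform headers such as recon_x86.h may #undef/#define to an optimized
 * variant. */
RECON_INVOKE(&xd->rtcd->recon, copy8x8)(src, src_pitch, dst, dst_pitch);
/* expands to */
vp8_recon_copy8x8(src, src_pitch, dst, dst_pitch);  /* == vp8_copy_mem8x8_c by default */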

View File

@ -11,7 +11,6 @@
#include "vpx_ports/config.h"
#include "vpx/vpx_integer.h"
#include "recon.h"
#include "subpixel.h"
#include "blockd.h"
#include "reconinter.h"
@ -310,8 +309,7 @@ static void build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
xd->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
RECON_INVOKE(&xd->rtcd->recon, copy8x8)
(ptr, d->pre_stride, pred_ptr, pitch);
vp8_copy_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
}
}
@ -337,8 +335,7 @@ static void build_2nd_inter_predictors4b(MACROBLOCKD *xd,
xd->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
RECON_INVOKE(&xd->rtcd->recon, avg8x8)
(ptr, d->pre_stride, pred_ptr, pitch);
vp8_avg_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
}
}
@ -357,7 +354,7 @@ static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
xd->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
RECON_INVOKE(&xd->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
vp8_copy_mem8x4(ptr, d->pre_stride, pred_ptr, pitch);
}
}
@ -741,10 +738,8 @@ void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
xd->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15,
_o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
} else {
RECON_INVOKE(&xd->rtcd->recon, copy8x8)
(uptr, pre_stride, dst_u, dst_uvstride);
RECON_INVOKE(&xd->rtcd->recon, copy8x8)
(vptr, pre_stride, dst_v, dst_uvstride);
vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
}
}
@ -863,8 +858,7 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
} else {
// TODO Needs to AVERAGE with the dst_y
// For now, do not apply the prediction filter in these cases!
RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
dst_ystride);
vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
} else
#endif // CONFIG_PRED_FILTER
@ -873,8 +867,7 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
(mv_row & 7) << 1, dst_y, dst_ystride);
} else {
RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
dst_ystride);
vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
}
}
@ -937,8 +930,7 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
} else {
// TODO Needs to AVERAGE with the dst_[u|v]
// For now, do not apply the prediction filter here!
RECON_INVOKE(&xd->rtcd->recon, avg8x8)(pSrc, pre_stride, pDst,
dst_uvstride);
vp8_avg_mem8x8(pSrc, pre_stride, pDst, dst_uvstride);
}
// V
@ -953,8 +945,8 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
xd->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15,
omv_row & 15, dst_v, dst_uvstride);
} else {
RECON_INVOKE(&xd->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride);
RECON_INVOKE(&xd->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride);
vp8_avg_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
vp8_avg_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
}
}

View File

@ -12,9 +12,7 @@
#ifndef __INC_RECONINTER_H
#define __INC_RECONINTER_H
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif
extern void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
unsigned char *dst_y,

View File

@ -10,7 +10,7 @@
#include <stdio.h>
#include "vpx_ports/config.h"
#include "recon.h"
#include "vpx_rtcd.h"
#include "reconintra.h"
#include "vpx_mem/vpx_mem.h"
@ -196,14 +196,12 @@ void d153_predictor(unsigned char *ypred_ptr, int y_stride, int n,
}
}
void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
void vp8_recon_intra_mbuv(MACROBLOCKD *xd) {
int i;
for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff,
*(b->base_dst) + b->dst, b->dst_stride);
vp8_recon2b(b->predictor, b->diff,*(b->base_dst) + b->dst, b->dst_stride);
}
}

View File

@ -10,13 +10,12 @@
#include "vpx_ports/config.h"
#include "recon.h"
#include "vpx_mem/vpx_mem.h"
#include "reconintra.h"
#include "vpx_rtcd.h"
void vp8_intra4x4_predict(BLOCKD *x,
int b_mode,
unsigned char *predictor) {
void vp8_intra4x4_predict_c(BLOCKD *x, int b_mode,
unsigned char *predictor) {
int i, r, c;
unsigned char *Above = *(x->base_dst) + x->dst - x->dst_stride;
@ -276,7 +275,7 @@ void vp8_intra4x4_predict(BLOCKD *x,
}
#if CONFIG_COMP_INTRA_PRED
void vp8_comp_intra4x4_predict(BLOCKD *x,
void vp8_comp_intra4x4_predict_c(BLOCKD *x,
int b_mode, int b_mode2,
unsigned char *out_predictor) {
unsigned char predictor[2][4 * 16];

View File

@ -1,12 +1,17 @@
common_forward_decls() {
cat <<EOF
struct blockd;
#include "vp8/common/blockd.h"
struct loop_filter_info;
/* Encoder forward decls */
struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
EOF
}
forward_decls common_forward_decls
prototype void vp8_filter_block2d_4x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x8_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
@ -30,3 +35,88 @@ prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned ch
specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x8 mmx media neon dspr2
vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6
vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2
prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x4 mmx
prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
specialize vp8_intra4x4_predict
prototype void vp8_avg_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_avg_mem16x16
prototype void vp8_avg_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_avg_mem8x8
prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x4 mmx media neon dspr2
vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6
vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2
prototype void vp8_recon_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon_b
prototype void vp8_recon_uv_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon_uv_b
prototype void vp8_recon2b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon2b sse2
prototype void vp8_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon4b sse2
prototype void vp8_recon_mb "MACROBLOCKD *x"
specialize vp8_recon_mb
prototype void vp8_recon_mby "MACROBLOCKD *x"
specialize vp8_recon_mby
prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
specialize vp8_build_intra_predictors_mby_s
prototype void vp8_build_intra_predictors_sby_s "MACROBLOCKD *x"
specialize vp8_build_intra_predictors_sby_s;
prototype void vp8_build_intra_predictors_sbuv_s "MACROBLOCKD *x"
specialize vp8_build_intra_predictors_sbuv_s;
prototype void vp8_build_intra_predictors_mby "MACROBLOCKD *x"
specialize vp8_build_intra_predictors_mby;
prototype void vp8_build_comp_intra_predictors_mby "MACROBLOCKD *x"
specialize vp8_build_comp_intra_predictors_mby;
prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
specialize vp8_build_intra_predictors_mby_s;
prototype void vp8_build_intra_predictors_mbuv "MACROBLOCKD *x"
specialize vp8_build_intra_predictors_mbuv;
prototype void vp8_build_intra_predictors_mbuv_s "MACROBLOCKD *x"
specialize vp8_build_intra_predictors_mbuv_s;
prototype void vp8_build_comp_intra_predictors_mbuv "MACROBLOCKD *x"
specialize vp8_build_comp_intra_predictors_mbuv;
prototype void vp8_intra4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
specialize vp8_intra4x4_predict;
prototype void vp8_comp_intra4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra4x4_predict;
prototype void vp8_intra8x8_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
specialize vp8_intra8x8_predict;
prototype void vp8_comp_intra8x8_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra8x8_predict;
prototype void vp8_intra_uv4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
specialize vp8_intra_uv4x4_predict;
prototype void vp8_comp_intra_uv4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra_uv4x4_predict;
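
These prototype/specialize lines feed the rtcd code generator. A hand-written approximation of what it produces for one specialized entry follows; the real vpx_rtcd.h is machine-generated, so the exact declarations and macro shape here are assumptions:

/* from:
 *   prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
 *   specialize vp8_copy_mem8x8 mmx media neon dspr2
 */
void vp8_copy_mem8x8_c(unsigned char *src, int src_pitch,
                       unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x8_mmx(unsigned char *src, int src_pitch,
                         unsigned char *dst, int dst_pitch);
/* ...one declaration per listed architecture... */

#if CONFIG_RUNTIME_CPU_DETECT
/* a pointer bound once at init, after probing CPU capabilities */
extern void (*vp8_copy_mem8x8)(unsigned char *src, int src_pitch,
                               unsigned char *dst, int dst_pitch);
#else
/* statically bound to the best variant the build enables */
#define vp8_copy_mem8x8 vp8_copy_mem8x8_mmx
#endif

Entries with an empty specialize list (vp8_recon_mb, vp8_build_intra_predictors_mby, the intra predictors, and so on) simply resolve to their _c implementations.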

View File

@ -9,9 +9,8 @@
*/
#include "vpx_ports/config.h"
#include "vp8/common/recon.h"
#include "recon_x86.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/blockd.h"
#define build_intra_predictors_mbuv_prototype(sym) \
void sym(unsigned char *dst, int dst_stride, \

View File

@ -1,81 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef RECON_X86_H
#define RECON_X86_H
/* Note:
*
* This platform is commonly built for runtime CPU detection. If you modify
* any of the function mappings present in this file, be sure to also update
* them in the function pointer initialization code
*/
#if HAVE_MMX
extern prototype_recon_block(vp8_recon_b_mmx);
extern prototype_copy_block(vp8_copy_mem8x8_mmx);
extern prototype_copy_block(vp8_copy_mem8x4_mmx);
extern prototype_copy_block(vp8_copy_mem16x16_mmx);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_recon_recon
#define vp8_recon_recon vp8_recon_b_mmx
#undef vp8_recon_copy8x8
#define vp8_recon_copy8x8 vp8_copy_mem8x8_mmx
#undef vp8_recon_copy8x4
#define vp8_recon_copy8x4 vp8_copy_mem8x4_mmx
#endif
#endif
#if HAVE_SSE2
extern prototype_recon_block(vp8_recon2b_sse2);
extern prototype_recon_block(vp8_recon4b_sse2);
extern prototype_copy_block(vp8_copy_mem16x16_sse2);
extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_sse2);
extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_s_sse2);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_recon_recon2
#define vp8_recon_recon2 vp8_recon2b_sse2
#undef vp8_recon_recon4
#define vp8_recon_recon4 vp8_recon4b_sse2
#undef vp8_recon_copy16x16
#define vp8_recon_copy16x16 vp8_copy_mem16x16_sse2
#undef vp8_recon_build_intra_predictors_mbuv
#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv_sse2
#undef vp8_recon_build_intra_predictors_mbuv_s
#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_sse2
#endif
#endif
#if HAVE_SSSE3
extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_ssse3);
extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_s_ssse3);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_recon_build_intra_predictors_mbuv
#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv_ssse3
#undef vp8_recon_build_intra_predictors_mbuv_s
#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_ssse3
#endif
#endif
#endif
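
The static-build overrides in the header deleted above are what the specialize lists now express; a condensed example of the old mapping and its new home (the nested #if conditions are merged here for brevity):

/* Old: recon_x86.h, static builds only */
#if HAVE_SSE2 && !CONFIG_RUNTIME_CPU_DETECT
#undef  vp8_recon_recon4
#define vp8_recon_recon4 vp8_recon4b_sse2
#endif

/* New: the same information in the rtcd defs shown earlier:
 *   prototype void vp8_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
 *   specialize vp8_recon4b sse2
 */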

View File

@ -14,7 +14,6 @@
#include "vp8/common/g_common.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
#include "vp8/common/pragmas.h"
#include "vp8/common/onyxc_int.h"
@ -41,10 +40,6 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
rtcd->recon.recon = vp8_recon_b_mmx;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx;
/* Disabled due to unsupported enhanced interpolation/high_prec mv
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_mmx;
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_mmx;
@ -77,15 +72,7 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
#if HAVE_SSE2
if (flags & HAS_SSE2) {
rtcd->recon.recon2 = vp8_recon2b_sse2;
rtcd->recon.recon4 = vp8_recon4b_sse2;
/* these are disable because of unsupported diagonal pred modes
rtcd->recon.build_intra_predictors_mbuv =
vp8_build_intra_predictors_mbuv_sse2;
rtcd->recon.build_intra_predictors_mbuv_s =
vp8_build_intra_predictors_mbuv_s_sse2;
*/
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_sse2;

View File

@ -13,7 +13,6 @@
#include "vp8/common/header.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconinter.h"
#include "dequantize.h"
#include "detokenize.h"
@ -35,6 +34,7 @@
#include "vp8/common/seg_common.h"
#include "vp8/common/entropy.h"
#include "vpx_rtcd.h"
#include <assert.h>
#include <stdio.h>
@ -173,14 +173,12 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd) {
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
RECON_INVOKE(&pbi->common.rtcd.recon,
build_intra_predictors_sby_s)(xd);
vp8_build_intra_predictors_sbuv_s(xd);
vp8_build_intra_predictors_sby_s(xd);
} else {
#endif
RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);
RECON_INVOKE(&pbi->common.rtcd.recon,
build_intra_predictors_mby_s)(xd);
vp8_build_intra_predictors_mbuv_s(xd);
vp8_build_intra_predictors_mby_s(xd);
#if CONFIG_SUPERBLOCKS
}
#endif
@ -331,15 +329,14 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sby_s)(xd);
RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
vp8_build_intra_predictors_sby_s(xd);
vp8_build_intra_predictors_sbuv_s(xd);
} else
#endif
if (mode != I8X8_PRED) {
RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv)(xd);
vp8_build_intra_predictors_mbuv(xd);
if (mode != B_PRED) {
RECON_INVOKE(&pbi->common.rtcd.recon,
build_intra_predictors_mby)(xd);
vp8_build_intra_predictors_mby(xd);
}
#if 0
// Intra-modes requiring recon data from top-right
@ -379,8 +376,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
b = &xd->block[ib];
i8x8mode = b->bmi.as_mode.first;
RECON_INVOKE(RTCD_VTABLE(recon), intra8x8_predict)(b, i8x8mode,
b->predictor);
vp8_intra8x8_predict(b, i8x8mode, b->predictor);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
#if CONFIG_HYBRIDTRANSFORM8X8
@ -400,15 +396,13 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
}
b = &xd->block[16 + i];
RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)(b, i8x8mode,
b->predictor);
vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)(b->qcoeff, b->dequant,
b->predictor,
*(b->base_dst) + b->dst, 8,
b->dst_stride);
b = &xd->block[20 + i];
RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)(b, i8x8mode,
b->predictor);
vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)(b->qcoeff, b->dequant,
b->predictor,
*(b->base_dst) + b->dst, 8,
@ -423,12 +417,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (b_mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
(b, b_mode, b->predictor);
vp8_intra4x4_predict(b, b_mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
RECON_INVOKE(RTCD_VTABLE(recon), comp_intra4x4_predict)
(b, b_mode, b_mode2, b->predictor);
vp8_comp_intra4x4_predict(b, b_mode, b_mode2, b->predictor);
}
#endif

View File

@ -10,7 +10,6 @@
#include "vpx_ports/config.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "vpx_mem/vpx_mem.h"
#include "onyxd_int.h"

View File

@ -42,6 +42,7 @@ extern prototype_fdct(vp8_fdct_short16x16);
extern prototype_fdct(vp8_fdct_short8x8);
#ifndef vp8_fhaar_short2x2
#define vp8_fdct_haar_short2x2 vp8_fhaar_short2x2
#define vp8_fhaar_short2x2 vp8_short_fhaar2x2_c
#endif
extern prototype_fdct(vp8_fhaar_short2x2);
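
The added define gives vp8_fhaar_short2x2 a default C binding inside the #ifndef guard; with it, the alias chain resolves as sketched below (preprocessor rescanning makes the define order irrelevant):

#ifndef vp8_fhaar_short2x2
#define vp8_fdct_haar_short2x2 vp8_fhaar_short2x2  /* public name -> override hook   */
#define vp8_fhaar_short2x2 vp8_short_fhaar2x2_c    /* hook -> C default (added here) */
#endif
extern prototype_fdct(vp8_fhaar_short2x2);

/* A use of vp8_fdct_haar_short2x2 therefore resolves to vp8_short_fhaar2x2_c
 * unless a platform header pre-defines vp8_fhaar_short2x2 first. */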

View File

@ -26,6 +26,7 @@
#include "vp8/common/findnearmv.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/seg_common.h"
#include "vpx_rtcd.h"
#include <stdio.h>
#include <math.h>
#include <limits.h>
@ -1875,8 +1876,8 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
vp8_update_zbin_extra(cpi, x);
}
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sby_s)(&x->e_mbd);
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sbuv_s)(&x->e_mbd);
vp8_build_intra_predictors_sby_s(&x->e_mbd);
vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
assert(x->e_mbd.mode_info_context->mbmi.txfm_size == TX_8X8);
for (n = 0; n < 4; n++)
@ -1905,9 +1906,8 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
vp8_optimize_mbuv_8x8(x, rtcd);
}
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
vp8_recon_mby_s_c(&x->e_mbd, dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp8_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
@ -2241,8 +2241,8 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
set_pred_flag(xd, PRED_REF, ref_pred_flag);
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sby_s)(&x->e_mbd);
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sbuv_s)(&x->e_mbd);
vp8_build_intra_predictors_sby_s(&x->e_mbd);
vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
} else {
int ref_fb_idx;
@ -2304,9 +2304,9 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
vp8_optimize_mbuv_8x8(x, rtcd);
}
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
vp8_recon_mby_s_c( &x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
vp8_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);

View File

@ -15,10 +15,10 @@
#include "vp8/common/reconintra4x4.h"
#include "encodemb.h"
#include "vp8/common/invtrans.h"
#include "vp8/common/recon.h"
#include "dct.h"
#include "vp8/common/g_common.h"
#include "encodeintra.h"
#include "vpx_rtcd.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
@ -61,12 +61,11 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
(b, b->bmi.as_mode.first, b->predictor);
vp8_intra4x4_predict(b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
RECON_INVOKE(&rtcd->common->recon, comp_intra4x4_predict)
(b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
vp8_comp_intra4x4_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
b->predictor);
}
#endif
@ -86,7 +85,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
}
RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp8_recon_b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
@ -114,10 +113,10 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
#if CONFIG_COMP_INTRA_PRED
if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
#endif
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(xd);
vp8_build_intra_predictors_mby(xd);
#if CONFIG_COMP_INTRA_PRED
else
RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(xd);
vp8_build_comp_intra_predictors_mby(xd);
#endif
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
@ -158,8 +157,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
}
RECON_INVOKE(&rtcd->common->recon, recon_mby)(IF_RTCD(&rtcd->common->recon),
xd);
vp8_recon_mby(xd);
}
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
@ -169,10 +167,10 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
#if CONFIG_COMP_INTRA_PRED
if (xd->mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(xd);
vp8_build_intra_predictors_mbuv(xd);
#if CONFIG_COMP_INTRA_PRED
} else {
RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(xd);
vp8_build_comp_intra_predictors_mbuv(xd);
}
#endif
@ -193,7 +191,7 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
}
vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), xd);
vp8_recon_intra_mbuv(xd);
}
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
@ -207,12 +205,11 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&rtcd->common->recon, intra8x8_predict)
(b, b->bmi.as_mode.first, b->predictor);
vp8_intra8x8_predict(b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
RECON_INVOKE(&rtcd->common->recon, comp_intra8x8_predict)
(b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
vp8_comp_intra8x8_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
b->predictor);
}
#endif
@ -271,12 +268,10 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (second == -1) {
#endif
RECON_INVOKE(&rtcd->common->recon, intra_uv4x4_predict)
(b, mode, b->predictor);
vp8_intra_uv4x4_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
RECON_INVOKE(&rtcd->common->recon, comp_intra_uv4x4_predict)
(b, mode, second, b->predictor);
vp8_comp_intra_uv4x4_predict(b, mode, second, b->predictor);
}
#endif
@ -286,8 +281,7 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
x->quantize_b_4x4(be, b);
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp8_recon_uv_b_c(b->predictor,b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {

View File

@ -14,7 +14,6 @@
#include "quantize.h"
#include "tokenize.h"
#include "vp8/common/invtrans.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "dct.h"
#include "vpx_mem/vpx_mem.h"
@ -914,8 +913,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), xd);
}
RECON_INVOKE(&rtcd->common->recon, recon_mb)(IF_RTCD(&rtcd->common->recon),
xd);
vp8_recon_mb(xd);
}
/* this function is used by first pass only */
@ -937,6 +935,5 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_quantize_mby_4x4(x);
vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
RECON_INVOKE(&rtcd->common->recon, recon_mby)(IF_RTCD(&rtcd->common->recon),
xd);
vp8_recon_mby(xd);
}

View File

@ -118,13 +118,18 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
#if CONFIG_INTERNAL_STATS
cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
#endif
#endif
cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
#if ARCH_X86 || ARCH_X86_64
@ -135,10 +140,5 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
vp8_arch_arm_encoder_init(cpi);
#endif
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
}

View File

@ -220,7 +220,7 @@ static int find_best_16x16_intra
unsigned int err;
xd->mode_info_context->mbmi.mode = mode;
RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mby)(xd);
vp8_build_intra_predictors_mby(xd);
// VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
(xd->predictor, 16,

View File

@ -4412,8 +4412,9 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
{
double frame_psnr2, frame_ssim2 = 0;
double weight = 0;
#if CONFIG_POSTPROC
vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
#endif
vp8_clear_system_state();
ye = calc_plane_error(orig->y_buffer, orig->y_stride,

View File

@ -1188,12 +1188,10 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->rtcd.common->recon, intra4x4_predict)
(b, mode, b->predictor);
vp8_intra4x4_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra4x4_predict)
(b, mode, mode2, b->predictor);
vp8_comp_intra4x4_predict(b, mode, mode2, b->predictor);
rate += bmode_costs[mode2];
}
#endif
@ -1263,7 +1261,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
#endif
RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp8_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
return best_rd;
}
@ -1369,8 +1367,7 @@ static int64_t rd_pick_intra_sby_mode(VP8_COMP *cpi,
/* Y Search for 32x32 intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.mode = mode;
RECON_INVOKE(&cpi->common.rtcd.recon,
build_intra_predictors_sby_s)(&x->e_mbd);
vp8_build_intra_predictors_sby_s(&x->e_mbd);
super_block_yrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
@ -1436,13 +1433,11 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
mbmi->second_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
(&x->e_mbd);
vp8_build_intra_predictors_mby(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
RECON_INVOKE(&cpi->common.rtcd.recon, build_comp_intra_predictors_mby)
(&x->e_mbd);
vp8_build_comp_intra_predictors_mby(&x->e_mbd);
}
#endif
@ -1548,13 +1543,11 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->rtcd.common->recon, intra8x8_predict)
(b, mode, b->predictor);
vp8_intra8x8_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra8x8_predict)
(b, mode, mode2, b->predictor);
vp8_comp_intra8x8_predict(b, mode, mode2, b->predictor);
}
#endif
@ -1891,13 +1884,11 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
mbmi->second_uv_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
(&x->e_mbd);
vp8_build_intra_predictors_mbuv(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue;
RECON_INVOKE(&cpi->rtcd.common->recon, build_comp_intra_predictors_mbuv)
(&x->e_mbd);
vp8_build_comp_intra_predictors_mbuv(&x->e_mbd);
}
#endif
@ -1959,8 +1950,7 @@ static void rd_pick_intra_mbuv_mode_8x8(VP8_COMP *cpi,
int64_t this_rd;
mbmi->uv_mode = mode;
RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
(&x->e_mbd);
vp8_build_intra_predictors_mbuv(&x->e_mbd);
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
x->src.uv_stride);
@ -2053,8 +2043,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP8_COMP *cpi,
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
RECON_INVOKE(&cpi->rtcd.common->recon,
build_intra_predictors_sbuv_s)(&x->e_mbd);
vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
super_block_uvrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
@ -3555,8 +3544,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
case D63_PRED:
mbmi->ref_frame = INTRA_FRAME;
// FIXME compound intra prediction
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
(&x->e_mbd);
vp8_build_intra_predictors_mby(&x->e_mbd);
macro_block_yrd(cpi, x, &rate_y, &distortion, &skippable, txfm_cache);
#if CONFIG_HYBRIDTRANSFORM16X16
rd_txtype = x->e_mbd.block[0].bmi.as_mode.tx_type;

View File

@ -79,8 +79,8 @@ static void vp8_temporal_filter_predictors_mb_c
(omv_col & 15), (omv_row & 15), &pred[320], 8);
}
else {
RECON_INVOKE(&xd->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
RECON_INVOKE(&xd->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
vp8_copy_mem8x8(uptr, stride, &pred[256], 8);
vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
}
}
void vp8_temporal_filter_apply_c

View File

@ -115,13 +115,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
@ -159,13 +152,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_sse2;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_sse2;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_sse2;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_sse2;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2;
cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;

View File

@ -79,8 +79,10 @@ VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/ssim.c
VP8_CX_SRCS-yes += encoder/tokenize.c
VP8_CX_SRCS-yes += encoder/treewriter.c
VP8_CX_SRCS-yes += encoder/variance_c.c
ifeq ($(CONFIG_POSTPROC),yes)
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
endif
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-yes += encoder/temporal_filter.h
VP8_CX_SRCS-yes += encoder/mbgraph.c