Change common vp8_ public symbol prefixes to vp9_.
Change-Id: Ic5a5f60e1ff9d9ccae4174160d36529466eeb509
This commit is contained in:
parent
5ddcbeeb7d
commit
9bc5f3e3af
@ -48,7 +48,7 @@ void vp9_update_mode_info_in_image(VP8_COMMON *cpi, MODE_INFO *mi) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_de_alloc_frame_buffers(VP8_COMMON *oci) {
|
||||
void vp9_de_alloc_frame_buffers(VP8_COMMON *oci) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NUM_YV12_BUFFERS; i++)
|
||||
@ -67,10 +67,10 @@ void vp8_de_alloc_frame_buffers(VP8_COMMON *oci) {
|
||||
|
||||
}
|
||||
|
||||
int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
int vp9_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
int i;
|
||||
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
|
||||
/* our internal buffers are always multiples of 16 */
|
||||
if ((width & 0xf) != 0)
|
||||
@ -84,7 +84,7 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
oci->fb_idx_ref_cnt[i] = 0;
|
||||
oci->yv12_fb[i].flags = 0;
|
||||
if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height, VP8BORDERINPIXELS) < 0) {
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
@ -100,12 +100,12 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
oci->fb_idx_ref_cnt[3] = 1;
|
||||
|
||||
if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16, VP8BORDERINPIXELS) < 0) {
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height, VP8BORDERINPIXELS) < 0) {
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -116,7 +116,7 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
oci->mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
|
||||
|
||||
if (!oci->mip) {
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -127,7 +127,7 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
oci->prev_mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
|
||||
|
||||
if (!oci->prev_mip) {
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -136,7 +136,7 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
oci->above_context = vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1);
|
||||
|
||||
if (!oci->above_context) {
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -145,7 +145,7 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
|
||||
|
||||
return 0;
|
||||
}
|
||||
void vp8_setup_version(VP8_COMMON *cm) {
|
||||
void vp9_setup_version(VP8_COMMON *cm) {
|
||||
if (cm->version & 0x4) {
|
||||
if (!CONFIG_EXPERIMENTAL)
|
||||
vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
|
||||
@ -183,12 +183,12 @@ void vp8_setup_version(VP8_COMMON *cm) {
|
||||
// break;
|
||||
}
|
||||
}
|
||||
void vp8_create_common(VP8_COMMON *oci) {
|
||||
vp8_machine_specific_config(oci);
|
||||
void vp9_create_common(VP8_COMMON *oci) {
|
||||
vp9_machine_specific_config(oci);
|
||||
|
||||
vp8_init_mbmode_probs(oci);
|
||||
vp9_init_mbmode_probs(oci);
|
||||
|
||||
vp8_default_bmode_probs(oci->fc.bmode_prob);
|
||||
vp9_default_bmode_probs(oci->fc.bmode_prob);
|
||||
|
||||
oci->txfm_mode = ONLY_4X4;
|
||||
oci->mb_no_coeff_skip = 1;
|
||||
@ -209,14 +209,14 @@ void vp8_create_common(VP8_COMMON *oci) {
|
||||
oci->kf_ymode_probs_update = 0;
|
||||
}
|
||||
|
||||
void vp8_remove_common(VP8_COMMON *oci) {
|
||||
vp8_de_alloc_frame_buffers(oci);
|
||||
void vp9_remove_common(VP8_COMMON *oci) {
|
||||
vp9_de_alloc_frame_buffers(oci);
|
||||
}
|
||||
|
||||
void vp8_initialize_common() {
|
||||
vp8_coef_tree_initialize();
|
||||
void vp9_initialize_common() {
|
||||
vp9_coef_tree_initialize();
|
||||
|
||||
vp8_entropy_mode_init();
|
||||
vp9_entropy_mode_init();
|
||||
|
||||
vp8_entropy_mv_init();
|
||||
vp9_entropy_mv_init();
|
||||
}
|
||||
|
@ -14,11 +14,11 @@
|
||||
|
||||
#include "onyxc_int.h"
|
||||
|
||||
void vp8_create_common(VP8_COMMON *oci);
|
||||
void vp8_remove_common(VP8_COMMON *oci);
|
||||
void vp8_de_alloc_frame_buffers(VP8_COMMON *oci);
|
||||
int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height);
|
||||
void vp8_setup_version(VP8_COMMON *oci);
|
||||
void vp9_create_common(VP8_COMMON *oci);
|
||||
void vp9_remove_common(VP8_COMMON *oci);
|
||||
void vp9_de_alloc_frame_buffers(VP8_COMMON *oci);
|
||||
int vp9_alloc_frame_buffers(VP8_COMMON *oci, int width, int height);
|
||||
void vp9_setup_version(VP8_COMMON *oci);
|
||||
|
||||
void vp9_update_mode_info_border(VP8_COMMON *cpi, MODE_INFO *mi_base);
|
||||
void vp9_update_mode_info_in_image(VP8_COMMON *cpi, MODE_INFO *mi);
|
||||
|
@ -34,58 +34,58 @@ void vp8_arch_arm_common_init(VP8_COMMON *ctx) {
|
||||
// The commented functions need to be re-written for vpx.
|
||||
#if HAVE_ARMV6
|
||||
if (flags & HAS_MEDIA) {
|
||||
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_armv6;
|
||||
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_armv6;
|
||||
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_armv6;
|
||||
rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_armv6;
|
||||
rtcd->subpix.sixtap16x16 = vp9_sixtap_predict16x16_armv6;
|
||||
rtcd->subpix.sixtap8x8 = vp9_sixtap_predict8x8_armv6;
|
||||
rtcd->subpix.sixtap8x4 = vp9_sixtap_predict8x4_armv6;
|
||||
rtcd->subpix.sixtap4x4 = vp9_sixtap_predict_armv6;
|
||||
|
||||
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_armv6;
|
||||
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_armv6;
|
||||
rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_armv6;
|
||||
rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_armv6;
|
||||
rtcd->subpix.bilinear16x16 = vp9_bilinear_predict16x16_armv6;
|
||||
rtcd->subpix.bilinear8x8 = vp9_bilinear_predict8x8_armv6;
|
||||
rtcd->subpix.bilinear8x4 = vp9_bilinear_predict8x4_armv6;
|
||||
rtcd->subpix.bilinear4x4 = vp9_bilinear_predict4x4_armv6;
|
||||
|
||||
// rtcd->idct.idct1 = vp8_short_idct4x4llm_1_v6;
|
||||
// rtcd->idct.idct16 = vp8_short_idct4x4llm_v6_dual;
|
||||
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_v6;
|
||||
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_v6;
|
||||
// rtcd->idct.idct1 = vp9_short_idct4x4llm_1_v6;
|
||||
// rtcd->idct.idct16 = vp9_short_idct4x4llm_v6_dual;
|
||||
// rtcd->idct.iwalsh1 = vp9_short_inv_walsh4x4_1_v6;
|
||||
// rtcd->idct.iwalsh16 = vp9_short_inv_walsh4x4_v6;
|
||||
|
||||
rtcd->recon.copy16x16 = vp8_copy_mem16x16_v6;
|
||||
rtcd->recon.copy8x8 = vp8_copy_mem8x8_v6;
|
||||
rtcd->recon.copy8x4 = vp8_copy_mem8x4_v6;
|
||||
rtcd->recon.recon = vp8_recon_b_armv6;
|
||||
rtcd->recon.recon2 = vp8_recon2b_armv6;
|
||||
rtcd->recon.recon4 = vp8_recon4b_armv6;
|
||||
rtcd->recon.copy16x16 = vp9_copy_mem16x16_v6;
|
||||
rtcd->recon.copy8x8 = vp9_copy_mem8x8_v6;
|
||||
rtcd->recon.copy8x4 = vp9_copy_mem8x4_v6;
|
||||
rtcd->recon.recon = vp9_recon_b_armv6;
|
||||
rtcd->recon.recon2 = vp9_recon2b_armv6;
|
||||
rtcd->recon.recon4 = vp9_recon4b_armv6;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if HAVE_ARMV7
|
||||
if (flags & HAS_NEON) {
|
||||
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_neon;
|
||||
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_neon;
|
||||
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_neon;
|
||||
rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_neon;
|
||||
rtcd->subpix.sixtap16x16 = vp9_sixtap_predict16x16_neon;
|
||||
rtcd->subpix.sixtap8x8 = vp9_sixtap_predict8x8_neon;
|
||||
rtcd->subpix.sixtap8x4 = vp9_sixtap_predict8x4_neon;
|
||||
rtcd->subpix.sixtap4x4 = vp9_sixtap_predict_neon;
|
||||
|
||||
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_neon;
|
||||
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_neon;
|
||||
rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_neon;
|
||||
rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_neon;
|
||||
rtcd->subpix.bilinear16x16 = vp9_bilinear_predict16x16_neon;
|
||||
rtcd->subpix.bilinear8x8 = vp9_bilinear_predict8x8_neon;
|
||||
rtcd->subpix.bilinear8x4 = vp9_bilinear_predict8x4_neon;
|
||||
rtcd->subpix.bilinear4x4 = vp9_bilinear_predict4x4_neon;
|
||||
|
||||
// rtcd->idct.idct1 = vp8_short_idct4x4llm_1_neon;
|
||||
// rtcd->idct.idct16 = vp8_short_idct4x4llm_neon;
|
||||
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_neon;
|
||||
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_neon;
|
||||
// rtcd->idct.idct1 = vp9_short_idct4x4llm_1_neon;
|
||||
// rtcd->idct.idct16 = vp9_short_idct4x4llm_neon;
|
||||
// rtcd->idct.iwalsh1 = vp9_short_inv_walsh4x4_1_neon;
|
||||
// rtcd->idct.iwalsh16 = vp9_short_inv_walsh4x4_neon;
|
||||
|
||||
rtcd->recon.copy16x16 = vp8_copy_mem16x16_neon;
|
||||
rtcd->recon.copy8x8 = vp8_copy_mem8x8_neon;
|
||||
rtcd->recon.copy8x4 = vp8_copy_mem8x4_neon;
|
||||
rtcd->recon.recon = vp8_recon_b_neon;
|
||||
rtcd->recon.recon2 = vp8_recon2b_neon;
|
||||
rtcd->recon.recon4 = vp8_recon4b_neon;
|
||||
rtcd->recon.recon_mb = vp8_recon_mb_neon;
|
||||
rtcd->recon.copy16x16 = vp9_copy_mem16x16_neon;
|
||||
rtcd->recon.copy8x8 = vp9_copy_mem8x8_neon;
|
||||
rtcd->recon.copy8x4 = vp9_copy_mem8x4_neon;
|
||||
rtcd->recon.recon = vp9_recon_b_neon;
|
||||
rtcd->recon.recon2 = vp9_recon2b_neon;
|
||||
rtcd->recon.recon4 = vp9_recon4b_neon;
|
||||
rtcd->recon.recon_mb = vp9_recon_mb_neon;
|
||||
rtcd->recon.build_intra_predictors_mby =
|
||||
vp8_build_intra_predictors_mby_neon;
|
||||
vp9_build_intra_predictors_mby_neon;
|
||||
rtcd->recon.build_intra_predictors_mby_s =
|
||||
vp8_build_intra_predictors_mby_s_neon;
|
||||
vp9_build_intra_predictors_mby_s_neon;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -35,7 +35,7 @@ void vp8_filter_block2d_bil_armv6
|
||||
}
|
||||
|
||||
|
||||
void vp8_bilinear_predict4x4_armv6
|
||||
void vp9_bilinear_predict4x4_armv6
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -53,7 +53,7 @@ void vp8_bilinear_predict4x4_armv6
|
||||
vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 4, 4);
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict8x8_armv6
|
||||
void vp9_bilinear_predict8x8_armv6
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -71,7 +71,7 @@ void vp8_bilinear_predict8x8_armv6
|
||||
vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8);
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict8x4_armv6
|
||||
void vp9_bilinear_predict8x4_armv6
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -89,7 +89,7 @@ void vp8_bilinear_predict8x4_armv6
|
||||
vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 4);
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict16x16_armv6
|
||||
void vp9_bilinear_predict16x16_armv6
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
|
@ -87,7 +87,7 @@ extern void vp8_filter_block2d_second_pass_only_armv6
|
||||
);
|
||||
|
||||
#if HAVE_ARMV6
|
||||
void vp8_sixtap_predict_armv6
|
||||
void vp9_sixtap_predict_armv6
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -128,7 +128,7 @@ void vp8_sixtap_predict_armv6
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict8x8_armv6
|
||||
void vp9_sixtap_predict8x8_armv6
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -162,7 +162,7 @@ void vp8_sixtap_predict8x8_armv6
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict16x16_armv6
|
||||
void vp9_sixtap_predict16x16_armv6
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
|
@ -13,52 +13,52 @@
|
||||
#define IDCT_ARM_H
|
||||
|
||||
#if HAVE_ARMV6
|
||||
extern prototype_idct(vp8_short_idct4x4llm_1_v6);
|
||||
extern prototype_idct(vp8_short_idct4x4llm_v6_dual);
|
||||
extern prototype_idct_scalar_add(vp8_dc_only_idct_add_v6);
|
||||
extern prototype_second_order(vp8_short_inv_walsh4x4_1_v6);
|
||||
extern prototype_second_order(vp8_short_inv_walsh4x4_v6);
|
||||
extern prototype_idct(vp9_short_idct4x4llm_1_v6);
|
||||
extern prototype_idct(vp9_short_idct4x4llm_v6_dual);
|
||||
extern prototype_idct_scalar_add(vp9_dc_only_idct_add_v6);
|
||||
extern prototype_second_order(vp9_short_inv_walsh4x4_1_v6);
|
||||
extern prototype_second_order(vp9_short_inv_walsh4x4_v6);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_idct_idct1
|
||||
#define vp8_idct_idct1 vp8_short_idct4x4llm_1_v6
|
||||
#define vp8_idct_idct1 vp9_short_idct4x4llm_1_v6
|
||||
|
||||
#undef vp8_idct_idct16
|
||||
#define vp8_idct_idct16 vp8_short_idct4x4llm_v6_dual
|
||||
#define vp8_idct_idct16 vp9_short_idct4x4llm_v6_dual
|
||||
|
||||
#undef vp8_idct_idct1_scalar_add
|
||||
#define vp8_idct_idct1_scalar_add vp8_dc_only_idct_add_v6
|
||||
#define vp8_idct_idct1_scalar_add vp9_dc_only_idct_add_v6
|
||||
|
||||
#undef vp8_idct_iwalsh1
|
||||
#define vp8_idct_iwalsh1 vp8_short_inv_walsh4x4_1_v6
|
||||
#define vp8_idct_iwalsh1 vp9_short_inv_walsh4x4_1_v6
|
||||
|
||||
#undef vp8_idct_iwalsh16
|
||||
#define vp8_idct_iwalsh16 vp8_short_inv_walsh4x4_v6
|
||||
#define vp8_idct_iwalsh16 vp9_short_inv_walsh4x4_v6
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if HAVE_ARMV7
|
||||
extern prototype_idct(vp8_short_idct4x4llm_1_neon);
|
||||
extern prototype_idct(vp8_short_idct4x4llm_neon);
|
||||
extern prototype_idct_scalar_add(vp8_dc_only_idct_add_neon);
|
||||
extern prototype_second_order(vp8_short_inv_walsh4x4_1_neon);
|
||||
extern prototype_second_order(vp8_short_inv_walsh4x4_neon);
|
||||
extern prototype_idct(vp9_short_idct4x4llm_1_neon);
|
||||
extern prototype_idct(vp9_short_idct4x4llm_neon);
|
||||
extern prototype_idct_scalar_add(vp9_dc_only_idct_add_neon);
|
||||
extern prototype_second_order(vp9_short_inv_walsh4x4_1_neon);
|
||||
extern prototype_second_order(vp9_short_inv_walsh4x4_neon);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_idct_idct1
|
||||
#define vp8_idct_idct1 vp8_short_idct4x4llm_1_neon
|
||||
#define vp8_idct_idct1 vp9_short_idct4x4llm_1_neon
|
||||
|
||||
#undef vp8_idct_idct16
|
||||
#define vp8_idct_idct16 vp8_short_idct4x4llm_neon
|
||||
#define vp8_idct_idct16 vp9_short_idct4x4llm_neon
|
||||
|
||||
#undef vp8_idct_idct1_scalar_add
|
||||
#define vp8_idct_idct1_scalar_add vp8_dc_only_idct_add_neon
|
||||
#define vp8_idct_idct1_scalar_add vp9_dc_only_idct_add_neon
|
||||
|
||||
#undef vp8_idct_iwalsh1
|
||||
#define vp8_idct_iwalsh1 vp8_short_inv_walsh4x4_1_neon
|
||||
#define vp8_idct_iwalsh1 vp9_short_inv_walsh4x4_1_neon
|
||||
|
||||
#undef vp8_idct_iwalsh16
|
||||
#define vp8_idct_iwalsh16 vp8_short_inv_walsh4x4_neon
|
||||
#define vp8_idct_iwalsh16 vp9_short_inv_walsh4x4_neon
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -14,10 +14,10 @@
|
||||
#include "vp8/common/onyxc_int.h"
|
||||
|
||||
#if HAVE_ARMV6
|
||||
extern prototype_loopfilter(vp8_loop_filter_horizontal_edge_armv6);
|
||||
extern prototype_loopfilter(vp8_loop_filter_vertical_edge_armv6);
|
||||
extern prototype_loopfilter(vp8_mbloop_filter_horizontal_edge_armv6);
|
||||
extern prototype_loopfilter(vp8_mbloop_filter_vertical_edge_armv6);
|
||||
extern prototype_loopfilter(vp9_loop_filter_horizontal_edge_armv6);
|
||||
extern prototype_loopfilter(vp9_loop_filter_vertical_edge_armv6);
|
||||
extern prototype_loopfilter(vp9_mbloop_filter_horizontal_edge_armv6);
|
||||
extern prototype_loopfilter(vp9_mbloop_filter_vertical_edge_armv6);
|
||||
#endif
|
||||
|
||||
#if HAVE_ARMV7
|
||||
@ -27,140 +27,140 @@ typedef void loopfilter_uv_neon(unsigned char *u, int pitch,
|
||||
unsigned char blimit, unsigned char limit, unsigned char thresh,
|
||||
unsigned char *v);
|
||||
|
||||
extern loopfilter_y_neon vp8_loop_filter_horizontal_edge_y_neon;
|
||||
extern loopfilter_y_neon vp8_loop_filter_vertical_edge_y_neon;
|
||||
extern loopfilter_y_neon vp8_mbloop_filter_horizontal_edge_y_neon;
|
||||
extern loopfilter_y_neon vp8_mbloop_filter_vertical_edge_y_neon;
|
||||
extern loopfilter_y_neon vp9_loop_filter_horizontal_edge_y_neon;
|
||||
extern loopfilter_y_neon vp9_loop_filter_vertical_edge_y_neon;
|
||||
extern loopfilter_y_neon vp9_mbloop_filter_horizontal_edge_y_neon;
|
||||
extern loopfilter_y_neon vp9_mbloop_filter_vertical_edge_y_neon;
|
||||
|
||||
extern loopfilter_uv_neon vp8_loop_filter_horizontal_edge_uv_neon;
|
||||
extern loopfilter_uv_neon vp8_loop_filter_vertical_edge_uv_neon;
|
||||
extern loopfilter_uv_neon vp8_mbloop_filter_horizontal_edge_uv_neon;
|
||||
extern loopfilter_uv_neon vp8_mbloop_filter_vertical_edge_uv_neon;
|
||||
extern loopfilter_uv_neon vp9_loop_filter_horizontal_edge_uv_neon;
|
||||
extern loopfilter_uv_neon vp9_loop_filter_vertical_edge_uv_neon;
|
||||
extern loopfilter_uv_neon vp9_mbloop_filter_horizontal_edge_uv_neon;
|
||||
extern loopfilter_uv_neon vp9_mbloop_filter_vertical_edge_uv_neon;
|
||||
#endif
|
||||
|
||||
#if HAVE_ARMV6
|
||||
/*ARMV6 loopfilter functions*/
|
||||
/* Horizontal MB filtering */
|
||||
void vp8_loop_filter_mbh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_mbh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
vp8_mbloop_filter_horizontal_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_mbloop_filter_horizontal_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_mbloop_filter_horizontal_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_mbloop_filter_horizontal_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
|
||||
if (v_ptr)
|
||||
vp8_mbloop_filter_horizontal_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_mbloop_filter_horizontal_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
}
|
||||
|
||||
/* Vertical MB Filtering */
|
||||
void vp8_loop_filter_mbv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_mbv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
vp8_mbloop_filter_vertical_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_mbloop_filter_vertical_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_mbloop_filter_vertical_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_mbloop_filter_vertical_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
|
||||
if (v_ptr)
|
||||
vp8_mbloop_filter_vertical_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_mbloop_filter_vertical_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
|
||||
}
|
||||
|
||||
/* Horizontal B Filtering */
|
||||
void vp8_loop_filter_bh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_bh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
vp8_loop_filter_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_loop_filter_horizontal_edge_armv6(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_loop_filter_horizontal_edge_armv6(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
|
||||
if (v_ptr)
|
||||
vp8_loop_filter_horizontal_edge_armv6(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_loop_filter_horizontal_edge_armv6(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
}
|
||||
|
||||
void vp8_loop_filter_bhs_armv6(unsigned char *y_ptr, int y_stride,
|
||||
void vp9_loop_filter_bhs_armv6(unsigned char *y_ptr, int y_stride,
|
||||
const unsigned char *blimit) {
|
||||
vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, blimit);
|
||||
vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, blimit);
|
||||
vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, blimit);
|
||||
vp9_loop_filter_simple_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, blimit);
|
||||
vp9_loop_filter_simple_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, blimit);
|
||||
vp9_loop_filter_simple_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, blimit);
|
||||
}
|
||||
|
||||
/* Vertical B Filtering */
|
||||
void vp8_loop_filter_bv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_bv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
vp8_loop_filter_vertical_edge_armv6(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_vertical_edge_armv6(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_vertical_edge_armv6(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_vertical_edge_armv6(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_vertical_edge_armv6(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_vertical_edge_armv6(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_loop_filter_vertical_edge_armv6(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_loop_filter_vertical_edge_armv6(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
|
||||
if (v_ptr)
|
||||
vp8_loop_filter_vertical_edge_armv6(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
vp9_loop_filter_vertical_edge_armv6(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
|
||||
}
|
||||
|
||||
void vp8_loop_filter_bvs_armv6(unsigned char *y_ptr, int y_stride,
|
||||
void vp9_loop_filter_bvs_armv6(unsigned char *y_ptr, int y_stride,
|
||||
const unsigned char *blimit) {
|
||||
vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 4, y_stride, blimit);
|
||||
vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 8, y_stride, blimit);
|
||||
vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 12, y_stride, blimit);
|
||||
vp9_loop_filter_simple_vertical_edge_armv6(y_ptr + 4, y_stride, blimit);
|
||||
vp9_loop_filter_simple_vertical_edge_armv6(y_ptr + 8, y_stride, blimit);
|
||||
vp9_loop_filter_simple_vertical_edge_armv6(y_ptr + 12, y_stride, blimit);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if HAVE_ARMV7
|
||||
/* NEON loopfilter functions */
|
||||
/* Horizontal MB filtering */
|
||||
void vp8_loop_filter_mbh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_mbh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
unsigned char mblim = *lfi->mblim;
|
||||
unsigned char lim = *lfi->lim;
|
||||
unsigned char hev_thr = *lfi->hev_thr;
|
||||
vp8_mbloop_filter_horizontal_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr);
|
||||
vp9_mbloop_filter_horizontal_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_mbloop_filter_horizontal_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr);
|
||||
vp9_mbloop_filter_horizontal_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr);
|
||||
}
|
||||
|
||||
/* Vertical MB Filtering */
|
||||
void vp8_loop_filter_mbv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_mbv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
unsigned char mblim = *lfi->mblim;
|
||||
unsigned char lim = *lfi->lim;
|
||||
unsigned char hev_thr = *lfi->hev_thr;
|
||||
|
||||
vp8_mbloop_filter_vertical_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr);
|
||||
vp9_mbloop_filter_vertical_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_mbloop_filter_vertical_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr);
|
||||
vp9_mbloop_filter_vertical_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr);
|
||||
}
|
||||
|
||||
/* Horizontal B Filtering */
|
||||
void vp8_loop_filter_bh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_bh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
unsigned char blim = *lfi->blim;
|
||||
unsigned char lim = *lfi->lim;
|
||||
unsigned char hev_thr = *lfi->hev_thr;
|
||||
|
||||
vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 4 * y_stride, y_stride, blim, lim, hev_thr);
|
||||
vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 8 * y_stride, y_stride, blim, lim, hev_thr);
|
||||
vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 12 * y_stride, y_stride, blim, lim, hev_thr);
|
||||
vp9_loop_filter_horizontal_edge_y_neon(y_ptr + 4 * y_stride, y_stride, blim, lim, hev_thr);
|
||||
vp9_loop_filter_horizontal_edge_y_neon(y_ptr + 8 * y_stride, y_stride, blim, lim, hev_thr);
|
||||
vp9_loop_filter_horizontal_edge_y_neon(y_ptr + 12 * y_stride, y_stride, blim, lim, hev_thr);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_loop_filter_horizontal_edge_uv_neon(u_ptr + 4 * uv_stride, uv_stride, blim, lim, hev_thr, v_ptr + 4 * uv_stride);
|
||||
vp9_loop_filter_horizontal_edge_uv_neon(u_ptr + 4 * uv_stride, uv_stride, blim, lim, hev_thr, v_ptr + 4 * uv_stride);
|
||||
}
|
||||
|
||||
/* Vertical B Filtering */
|
||||
void vp8_loop_filter_bv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_bv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, loop_filter_info *lfi) {
|
||||
unsigned char blim = *lfi->blim;
|
||||
unsigned char lim = *lfi->lim;
|
||||
unsigned char hev_thr = *lfi->hev_thr;
|
||||
|
||||
vp8_loop_filter_vertical_edge_y_neon(y_ptr + 4, y_stride, blim, lim, hev_thr);
|
||||
vp8_loop_filter_vertical_edge_y_neon(y_ptr + 8, y_stride, blim, lim, hev_thr);
|
||||
vp8_loop_filter_vertical_edge_y_neon(y_ptr + 12, y_stride, blim, lim, hev_thr);
|
||||
vp9_loop_filter_vertical_edge_y_neon(y_ptr + 4, y_stride, blim, lim, hev_thr);
|
||||
vp9_loop_filter_vertical_edge_y_neon(y_ptr + 8, y_stride, blim, lim, hev_thr);
|
||||
vp9_loop_filter_vertical_edge_y_neon(y_ptr + 12, y_stride, blim, lim, hev_thr);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_loop_filter_vertical_edge_uv_neon(u_ptr + 4, uv_stride, blim, lim, hev_thr, v_ptr + 4);
|
||||
vp9_loop_filter_vertical_edge_uv_neon(u_ptr + 4, uv_stride, blim, lim, hev_thr, v_ptr + 4);
|
||||
}
|
||||
#endif
|
||||
|
@ -15,26 +15,26 @@
|
||||
#include "vpx_config.h"
|
||||
|
||||
#if HAVE_ARMV6
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbv_armv6);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bv_armv6);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbh_armv6);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bh_armv6);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bvs_armv6);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bhs_armv6);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_simple_horizontal_edge_armv6);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_simple_vertical_edge_armv6);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbv_armv6);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bv_armv6);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbh_armv6);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bh_armv6);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bvs_armv6);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bhs_armv6);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_simple_horizontal_edge_armv6);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_simple_vertical_edge_armv6);
|
||||
|
||||
#endif /* HAVE_ARMV6 */
|
||||
|
||||
#if HAVE_ARMV7
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbv_neon);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bv_neon);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbh_neon);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bh_neon);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_mbvs_neon);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bvs_neon);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_mbhs_neon);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bhs_neon);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbv_neon);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bv_neon);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbh_neon);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bh_neon);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_mbvs_neon);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bvs_neon);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_mbhs_neon);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bhs_neon);
|
||||
|
||||
#endif /* HAVE_ARMV7 */
|
||||
|
||||
|
@ -13,76 +13,76 @@
|
||||
#define RECON_ARM_H
|
||||
|
||||
#if HAVE_ARMV6
|
||||
extern prototype_recon_block(vp8_recon_b_armv6);
|
||||
extern prototype_recon_block(vp8_recon2b_armv6);
|
||||
extern prototype_recon_block(vp8_recon4b_armv6);
|
||||
extern prototype_recon_block(vp9_recon_b_armv6);
|
||||
extern prototype_recon_block(vp9_recon2b_armv6);
|
||||
extern prototype_recon_block(vp9_recon4b_armv6);
|
||||
|
||||
extern prototype_copy_block(vp8_copy_mem8x8_v6);
|
||||
extern prototype_copy_block(vp8_copy_mem8x4_v6);
|
||||
extern prototype_copy_block(vp8_copy_mem16x16_v6);
|
||||
extern prototype_copy_block(vp9_copy_mem8x8_v6);
|
||||
extern prototype_copy_block(vp9_copy_mem8x4_v6);
|
||||
extern prototype_copy_block(vp9_copy_mem16x16_v6);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_recon_recon
|
||||
#define vp8_recon_recon vp8_recon_b_armv6
|
||||
#define vp8_recon_recon vp9_recon_b_armv6
|
||||
|
||||
#undef vp8_recon_recon2
|
||||
#define vp8_recon_recon2 vp8_recon2b_armv6
|
||||
#define vp8_recon_recon2 vp9_recon2b_armv6
|
||||
|
||||
#undef vp8_recon_recon4
|
||||
#define vp8_recon_recon4 vp8_recon4b_armv6
|
||||
#define vp8_recon_recon4 vp9_recon4b_armv6
|
||||
|
||||
#undef vp8_recon_copy8x8
|
||||
#define vp8_recon_copy8x8 vp8_copy_mem8x8_v6
|
||||
#define vp8_recon_copy8x8 vp9_copy_mem8x8_v6
|
||||
|
||||
#undef vp8_recon_copy8x4
|
||||
#define vp8_recon_copy8x4 vp8_copy_mem8x4_v6
|
||||
#define vp8_recon_copy8x4 vp9_copy_mem8x4_v6
|
||||
|
||||
#undef vp8_recon_copy16x16
|
||||
#define vp8_recon_copy16x16 vp8_copy_mem16x16_v6
|
||||
#define vp8_recon_copy16x16 vp9_copy_mem16x16_v6
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if HAVE_ARMV7
|
||||
extern prototype_recon_block(vp8_recon_b_neon);
|
||||
extern prototype_recon_block(vp8_recon2b_neon);
|
||||
extern prototype_recon_block(vp8_recon4b_neon);
|
||||
extern prototype_recon_block(vp9_recon_b_neon);
|
||||
extern prototype_recon_block(vp9_recon2b_neon);
|
||||
extern prototype_recon_block(vp9_recon4b_neon);
|
||||
|
||||
extern prototype_copy_block(vp8_copy_mem8x8_neon);
|
||||
extern prototype_copy_block(vp8_copy_mem8x4_neon);
|
||||
extern prototype_copy_block(vp8_copy_mem16x16_neon);
|
||||
extern prototype_copy_block(vp9_copy_mem8x8_neon);
|
||||
extern prototype_copy_block(vp9_copy_mem8x4_neon);
|
||||
extern prototype_copy_block(vp9_copy_mem16x16_neon);
|
||||
|
||||
extern prototype_recon_macroblock(vp8_recon_mb_neon);
|
||||
extern prototype_recon_macroblock(vp9_recon_mb_neon);
|
||||
|
||||
extern prototype_build_intra_predictors(vp8_build_intra_predictors_mby_neon);
|
||||
extern prototype_build_intra_predictors(vp8_build_intra_predictors_mby_s_neon);
|
||||
extern prototype_build_intra_predictors(vp9_build_intra_predictors_mby_neon);
|
||||
extern prototype_build_intra_predictors(vp9_build_intra_predictors_mby_s_neon);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_recon_recon
|
||||
#define vp8_recon_recon vp8_recon_b_neon
|
||||
#define vp8_recon_recon vp9_recon_b_neon
|
||||
|
||||
#undef vp8_recon_recon2
|
||||
#define vp8_recon_recon2 vp8_recon2b_neon
|
||||
#define vp8_recon_recon2 vp9_recon2b_neon
|
||||
|
||||
#undef vp8_recon_recon4
|
||||
#define vp8_recon_recon4 vp8_recon4b_neon
|
||||
#define vp8_recon_recon4 vp9_recon4b_neon
|
||||
|
||||
#undef vp8_recon_copy8x8
|
||||
#define vp8_recon_copy8x8 vp8_copy_mem8x8_neon
|
||||
#define vp8_recon_copy8x8 vp9_copy_mem8x8_neon
|
||||
|
||||
#undef vp8_recon_copy8x4
|
||||
#define vp8_recon_copy8x4 vp8_copy_mem8x4_neon
|
||||
#define vp8_recon_copy8x4 vp9_copy_mem8x4_neon
|
||||
|
||||
#undef vp8_recon_copy16x16
|
||||
#define vp8_recon_copy16x16 vp8_copy_mem16x16_neon
|
||||
#define vp8_recon_copy16x16 vp9_copy_mem16x16_neon
|
||||
|
||||
#undef vp8_recon_recon_mb
|
||||
#define vp8_recon_recon_mb vp8_recon_mb_neon
|
||||
#define vp8_recon_recon_mb vp9_recon_mb_neon
|
||||
|
||||
#undef vp8_recon_build_intra_predictors_mby
|
||||
#define vp8_recon_build_intra_predictors_mby vp8_build_intra_predictors_mby_neon
|
||||
#undef vp9_recon_build_intra_predictors_mby
|
||||
#define vp9_recon_build_intra_predictors_mby vp9_build_intra_predictors_mby_neon
|
||||
|
||||
#undef vp8_recon_build_intra_predictors_mby_s
|
||||
#define vp8_recon_build_intra_predictors_mby_s vp8_build_intra_predictors_mby_s_neon
|
||||
#undef vp9_recon_build_intra_predictors_mby_s
|
||||
#define vp9_recon_build_intra_predictors_mby_s vp9_build_intra_predictors_mby_s_neon
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -16,7 +16,7 @@
|
||||
#include "vp8/common/recon.h"
|
||||
|
||||
#if HAVE_ARMV7
|
||||
extern void vp8_build_intra_predictors_mby_neon_func(
|
||||
extern void vp9_build_intra_predictors_mby_neon_func(
|
||||
unsigned char *y_buffer,
|
||||
unsigned char *ypred_ptr,
|
||||
int y_stride,
|
||||
@ -24,7 +24,7 @@ extern void vp8_build_intra_predictors_mby_neon_func(
|
||||
int Up,
|
||||
int Left);
|
||||
|
||||
void vp8_build_intra_predictors_mby_neon(MACROBLOCKD *xd) {
|
||||
void vp9_build_intra_predictors_mby_neon(MACROBLOCKD *xd) {
|
||||
unsigned char *y_buffer = xd->dst.y_buffer;
|
||||
unsigned char *ypred_ptr = xd->predictor;
|
||||
int y_stride = xd->dst.y_stride;
|
||||
@ -32,14 +32,14 @@ void vp8_build_intra_predictors_mby_neon(MACROBLOCKD *xd) {
|
||||
int Up = xd->up_available;
|
||||
int Left = xd->left_available;
|
||||
|
||||
vp8_build_intra_predictors_mby_neon_func(y_buffer, ypred_ptr,
|
||||
vp9_build_intra_predictors_mby_neon_func(y_buffer, ypred_ptr,
|
||||
y_stride, mode, Up, Left);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#if HAVE_ARMV7
|
||||
extern void vp8_build_intra_predictors_mby_s_neon_func(
|
||||
extern void vp9_build_intra_predictors_mby_s_neon_func(
|
||||
unsigned char *y_buffer,
|
||||
unsigned char *ypred_ptr,
|
||||
int y_stride,
|
||||
@ -47,7 +47,7 @@ extern void vp8_build_intra_predictors_mby_s_neon_func(
|
||||
int Up,
|
||||
int Left);
|
||||
|
||||
void vp8_build_intra_predictors_mby_s_neon(MACROBLOCKD *xd) {
|
||||
void vp9_build_intra_predictors_mby_s_neon(MACROBLOCKD *xd) {
|
||||
unsigned char *y_buffer = xd->dst.y_buffer;
|
||||
unsigned char *ypred_ptr = xd->predictor;
|
||||
int y_stride = xd->dst.y_stride;
|
||||
@ -55,7 +55,7 @@ void vp8_build_intra_predictors_mby_s_neon(MACROBLOCKD *xd) {
|
||||
int Up = xd->up_available;
|
||||
int Left = xd->left_available;
|
||||
|
||||
vp8_build_intra_predictors_mby_s_neon_func(y_buffer, ypred_ptr,
|
||||
vp9_build_intra_predictors_mby_s_neon_func(y_buffer, ypred_ptr,
|
||||
y_stride, mode, Up, Left);
|
||||
}
|
||||
|
||||
|
@ -13,76 +13,76 @@
|
||||
#define SUBPIXEL_ARM_H
|
||||
|
||||
#if HAVE_ARMV6
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict16x16_armv6);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x8_armv6);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x4_armv6);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict_armv6);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict16x16_armv6);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x8_armv6);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x4_armv6);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict4x4_armv6);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict16x16_armv6);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x8_armv6);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x4_armv6);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict_armv6);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict16x16_armv6);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x8_armv6);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x4_armv6);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict4x4_armv6);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_subpix_sixtap16x16
|
||||
#define vp8_subpix_sixtap16x16 vp8_sixtap_predict16x16_armv6
|
||||
#define vp8_subpix_sixtap16x16 vp9_sixtap_predict16x16_armv6
|
||||
|
||||
#undef vp8_subpix_sixtap8x8
|
||||
#define vp8_subpix_sixtap8x8 vp8_sixtap_predict8x8_armv6
|
||||
#define vp8_subpix_sixtap8x8 vp9_sixtap_predict8x8_armv6
|
||||
|
||||
#undef vp8_subpix_sixtap8x4
|
||||
#define vp8_subpix_sixtap8x4 vp8_sixtap_predict8x4_armv6
|
||||
#define vp8_subpix_sixtap8x4 vp9_sixtap_predict8x4_armv6
|
||||
|
||||
#undef vp8_subpix_sixtap4x4
|
||||
#define vp8_subpix_sixtap4x4 vp8_sixtap_predict_armv6
|
||||
#define vp8_subpix_sixtap4x4 vp9_sixtap_predict_armv6
|
||||
|
||||
#undef vp8_subpix_bilinear16x16
|
||||
#define vp8_subpix_bilinear16x16 vp8_bilinear_predict16x16_armv6
|
||||
#define vp8_subpix_bilinear16x16 vp9_bilinear_predict16x16_armv6
|
||||
|
||||
#undef vp8_subpix_bilinear8x8
|
||||
#define vp8_subpix_bilinear8x8 vp8_bilinear_predict8x8_armv6
|
||||
#define vp8_subpix_bilinear8x8 vp9_bilinear_predict8x8_armv6
|
||||
|
||||
#undef vp8_subpix_bilinear8x4
|
||||
#define vp8_subpix_bilinear8x4 vp8_bilinear_predict8x4_armv6
|
||||
#define vp8_subpix_bilinear8x4 vp9_bilinear_predict8x4_armv6
|
||||
|
||||
#undef vp8_subpix_bilinear4x4
|
||||
#define vp8_subpix_bilinear4x4 vp8_bilinear_predict4x4_armv6
|
||||
#define vp8_subpix_bilinear4x4 vp9_bilinear_predict4x4_armv6
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if HAVE_ARMV7
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict16x16_neon);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x8_neon);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x4_neon);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict_neon);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict16x16_neon);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x8_neon);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x4_neon);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict4x4_neon);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict16x16_neon);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x8_neon);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x4_neon);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict_neon);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict16x16_neon);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x8_neon);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x4_neon);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict4x4_neon);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_subpix_sixtap16x16
|
||||
#define vp8_subpix_sixtap16x16 vp8_sixtap_predict16x16_neon
|
||||
#define vp8_subpix_sixtap16x16 vp9_sixtap_predict16x16_neon
|
||||
|
||||
#undef vp8_subpix_sixtap8x8
|
||||
#define vp8_subpix_sixtap8x8 vp8_sixtap_predict8x8_neon
|
||||
#define vp8_subpix_sixtap8x8 vp9_sixtap_predict8x8_neon
|
||||
|
||||
#undef vp8_subpix_sixtap8x4
|
||||
#define vp8_subpix_sixtap8x4 vp8_sixtap_predict8x4_neon
|
||||
#define vp8_subpix_sixtap8x4 vp9_sixtap_predict8x4_neon
|
||||
|
||||
#undef vp8_subpix_sixtap4x4
|
||||
#define vp8_subpix_sixtap4x4 vp8_sixtap_predict_neon
|
||||
#define vp8_subpix_sixtap4x4 vp9_sixtap_predict_neon
|
||||
|
||||
#undef vp8_subpix_bilinear16x16
|
||||
#define vp8_subpix_bilinear16x16 vp8_bilinear_predict16x16_neon
|
||||
#define vp8_subpix_bilinear16x16 vp9_bilinear_predict16x16_neon
|
||||
|
||||
#undef vp8_subpix_bilinear8x8
|
||||
#define vp8_subpix_bilinear8x8 vp8_bilinear_predict8x8_neon
|
||||
#define vp8_subpix_bilinear8x8 vp9_bilinear_predict8x8_neon
|
||||
|
||||
#undef vp8_subpix_bilinear8x4
|
||||
#define vp8_subpix_bilinear8x4 vp8_bilinear_predict8x4_neon
|
||||
#define vp8_subpix_bilinear8x4 vp9_bilinear_predict8x4_neon
|
||||
|
||||
#undef vp8_subpix_bilinear4x4
|
||||
#define vp8_subpix_bilinear4x4 vp8_bilinear_predict4x4_neon
|
||||
#define vp8_subpix_bilinear4x4 vp9_bilinear_predict4x4_neon
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -499,8 +499,8 @@ static TX_TYPE get_tx_type(const MACROBLOCKD *xd, const BLOCKD *b) {
|
||||
return tx_type;
|
||||
}
|
||||
|
||||
extern void vp8_build_block_doffsets(MACROBLOCKD *xd);
|
||||
extern void vp8_setup_block_dptrs(MACROBLOCKD *xd);
|
||||
extern void vp9_build_block_doffsets(MACROBLOCKD *xd);
|
||||
extern void vp9_setup_block_dptrs(MACROBLOCKD *xd);
|
||||
|
||||
static void update_blockd_bmi(MACROBLOCKD *xd) {
|
||||
int i;
|
||||
|
@ -13,7 +13,7 @@
|
||||
#include "blockd.h"
|
||||
|
||||
|
||||
void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols, int frame) {
|
||||
void vp9_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols, int frame) {
|
||||
|
||||
int mb_row;
|
||||
int mb_col;
|
||||
|
@ -202,7 +202,7 @@ vp8_extra_bit_struct vp8_extra_bits[12] = {
|
||||
|
||||
#include "default_coef_probs.h"
|
||||
|
||||
void vp8_default_coef_probs(VP8_COMMON *pc) {
|
||||
void vp9_default_coef_probs(VP8_COMMON *pc) {
|
||||
vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
|
||||
sizeof(pc->fc.coef_probs));
|
||||
vpx_memcpy(pc->fc.hybrid_coef_probs, default_hybrid_coef_probs,
|
||||
@ -220,9 +220,9 @@ void vp8_default_coef_probs(VP8_COMMON *pc) {
|
||||
sizeof(pc->fc.hybrid_coef_probs_16x16));
|
||||
}
|
||||
|
||||
void vp8_coef_tree_initialize() {
|
||||
void vp9_coef_tree_initialize() {
|
||||
init_bit_trees();
|
||||
vp8_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree);
|
||||
vp9_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree);
|
||||
}
|
||||
|
||||
// #define COEF_COUNT_TESTING
|
||||
@ -234,7 +234,7 @@ void vp8_coef_tree_initialize() {
|
||||
#define COEF_COUNT_SAT_AFTER_KEY 24
|
||||
#define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128
|
||||
|
||||
void vp8_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
void vp9_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
int t, i, j, k, count;
|
||||
unsigned int branch_ct[ENTROPY_NODES][2];
|
||||
vp8_prob coef_probs[ENTROPY_NODES];
|
||||
@ -318,7 +318,7 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
|
||||
coef_probs, branch_ct, cm->fc.coef_counts [i][j][k],
|
||||
256, 1);
|
||||
@ -340,7 +340,7 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
|
||||
coef_probs, branch_ct, cm->fc.hybrid_coef_counts [i][j][k],
|
||||
256, 1);
|
||||
@ -362,7 +362,7 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
|
||||
coef_probs, branch_ct, cm->fc.coef_counts_8x8 [i][j][k],
|
||||
256, 1);
|
||||
@ -384,7 +384,7 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
|
||||
coef_probs, branch_ct, cm->fc.hybrid_coef_counts_8x8 [i][j][k],
|
||||
256, 1);
|
||||
@ -407,7 +407,7 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
|
||||
coef_probs, branch_ct, cm->fc.coef_counts_16x16[i][j][k], 256, 1);
|
||||
for (t = 0; t < ENTROPY_NODES; ++t) {
|
||||
@ -429,7 +429,7 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
|
||||
coef_probs, branch_ct, cm->fc.hybrid_coef_counts_16x16[i][j][k], 256, 1);
|
||||
for (t = 0; t < ENTROPY_NODES; ++t) {
|
||||
|
@ -101,7 +101,7 @@ extern DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]);
|
||||
extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
|
||||
|
||||
struct VP8Common;
|
||||
void vp8_default_coef_probs(struct VP8Common *);
|
||||
void vp9_default_coef_probs(struct VP8Common *);
|
||||
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
|
||||
|
||||
extern DECLARE_ALIGNED(16, const int, vp8_col_scan[16]);
|
||||
@ -109,9 +109,9 @@ extern DECLARE_ALIGNED(16, const int, vp8_row_scan[16]);
|
||||
|
||||
extern short vp8_default_zig_zag_mask[16];
|
||||
extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
|
||||
void vp8_coef_tree_initialize(void);
|
||||
void vp9_coef_tree_initialize(void);
|
||||
|
||||
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]);
|
||||
void vp8_adapt_coef_probs(struct VP8Common *);
|
||||
void vp9_adapt_coef_probs(struct VP8Common *);
|
||||
|
||||
#endif
|
||||
|
@ -81,7 +81,7 @@ typedef enum {
|
||||
SUBMVREF_LEFT_ABOVE_ZED
|
||||
} sumvfref_t;
|
||||
|
||||
int vp8_mv_cont(const int_mv *l, const int_mv *a) {
|
||||
int vp9_mv_cont(const int_mv *l, const int_mv *a) {
|
||||
int lez = (l->as_int == 0);
|
||||
int aez = (a->as_int == 0);
|
||||
int lea = (l->as_int == a->as_int);
|
||||
@ -260,20 +260,20 @@ struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
|
||||
|
||||
|
||||
|
||||
void vp8_init_mbmode_probs(VP8_COMMON *x) {
|
||||
void vp9_init_mbmode_probs(VP8_COMMON *x) {
|
||||
unsigned int bct [VP8_YMODES] [2]; /* num Ymodes > num UV modes */
|
||||
|
||||
vp8_tree_probs_from_distribution(VP8_YMODES, vp8_ymode_encodings,
|
||||
vp9_tree_probs_from_distribution(VP8_YMODES, vp8_ymode_encodings,
|
||||
vp8_ymode_tree, x->fc.ymode_prob, bct, y_mode_cts, 256, 1);
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < 8; i++) {
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_YMODES, vp8_kf_ymode_encodings, vp8_kf_ymode_tree,
|
||||
x->kf_ymode_prob[i], bct, kf_y_mode_cts[i],
|
||||
256, 1);
|
||||
#if CONFIG_SUPERBLOCKS
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_I32X32_MODES, vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree,
|
||||
x->sb_kf_ymode_prob[i], bct, kf_y_mode_cts[i],
|
||||
256, 1);
|
||||
@ -283,18 +283,18 @@ void vp8_init_mbmode_probs(VP8_COMMON *x) {
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < VP8_YMODES; i++) {
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree,
|
||||
x->kf_uv_mode_prob[i], bct, kf_uv_mode_cts[i],
|
||||
256, 1);
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree,
|
||||
x->fc.uv_mode_prob[i], bct, uv_mode_cts[i],
|
||||
256, 1);
|
||||
}
|
||||
}
|
||||
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_I8X8_MODES, vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree,
|
||||
x->fc.i8x8_mode_prob, bct, i8x8_mode_cts,
|
||||
256, 1);
|
||||
@ -310,16 +310,16 @@ static void intra_bmode_probs_from_distribution(
|
||||
vp8_prob p [VP8_BINTRAMODES - 1],
|
||||
unsigned int branch_ct [VP8_BINTRAMODES - 1] [2],
|
||||
const unsigned int events [VP8_BINTRAMODES]) {
|
||||
vp8_tree_probs_from_distribution(VP8_BINTRAMODES, vp8_bmode_encodings,
|
||||
vp9_tree_probs_from_distribution(VP8_BINTRAMODES, vp8_bmode_encodings,
|
||||
vp8_bmode_tree, p, branch_ct, events, 256, 1);
|
||||
}
|
||||
|
||||
void vp8_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES - 1]) {
|
||||
void vp9_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES - 1]) {
|
||||
unsigned int branch_ct [VP8_BINTRAMODES - 1] [2];
|
||||
intra_bmode_probs_from_distribution(p, branch_ct, bmode_cts);
|
||||
}
|
||||
|
||||
void vp8_kf_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES - 1]) {
|
||||
void vp9_kf_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES - 1]) {
|
||||
unsigned int branch_ct [VP8_BINTRAMODES - 1] [2];
|
||||
|
||||
int i = 0;
|
||||
@ -364,30 +364,30 @@ const INTERPOLATIONFILTERTYPE vp8_switchable_interp[VP8_SWITCHABLE_FILTERS] = {
|
||||
const int vp8_switchable_interp_map[SWITCHABLE+1] = {-1, -1, 0, 1, -1}; //8, 8s
|
||||
#endif
|
||||
|
||||
void vp8_entropy_mode_init() {
|
||||
vp8_tokens_from_tree(vp8_bmode_encodings, vp8_bmode_tree);
|
||||
vp8_tokens_from_tree(vp8_ymode_encodings, vp8_ymode_tree);
|
||||
vp8_tokens_from_tree(vp8_kf_ymode_encodings, vp8_kf_ymode_tree);
|
||||
void vp9_entropy_mode_init() {
|
||||
vp9_tokens_from_tree(vp8_bmode_encodings, vp8_bmode_tree);
|
||||
vp9_tokens_from_tree(vp8_ymode_encodings, vp8_ymode_tree);
|
||||
vp9_tokens_from_tree(vp8_kf_ymode_encodings, vp8_kf_ymode_tree);
|
||||
#if CONFIG_SUPERBLOCKS
|
||||
vp8_tokens_from_tree(vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree);
|
||||
vp9_tokens_from_tree(vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree);
|
||||
#endif
|
||||
vp8_tokens_from_tree(vp8_uv_mode_encodings, vp8_uv_mode_tree);
|
||||
vp8_tokens_from_tree(vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree);
|
||||
vp8_tokens_from_tree(vp8_mbsplit_encodings, vp8_mbsplit_tree);
|
||||
vp8_tokens_from_tree(vp8_switchable_interp_encodings,
|
||||
vp9_tokens_from_tree(vp8_uv_mode_encodings, vp8_uv_mode_tree);
|
||||
vp9_tokens_from_tree(vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree);
|
||||
vp9_tokens_from_tree(vp8_mbsplit_encodings, vp8_mbsplit_tree);
|
||||
vp9_tokens_from_tree(vp8_switchable_interp_encodings,
|
||||
vp8_switchable_interp_tree);
|
||||
|
||||
vp8_tokens_from_tree_offset(vp8_mv_ref_encoding_array,
|
||||
vp9_tokens_from_tree_offset(vp8_mv_ref_encoding_array,
|
||||
vp8_mv_ref_tree, NEARESTMV);
|
||||
#if CONFIG_SUPERBLOCKS
|
||||
vp8_tokens_from_tree_offset(vp8_sb_mv_ref_encoding_array,
|
||||
vp9_tokens_from_tree_offset(vp8_sb_mv_ref_encoding_array,
|
||||
vp8_sb_mv_ref_tree, NEARESTMV);
|
||||
#endif
|
||||
vp8_tokens_from_tree_offset(vp8_sub_mv_ref_encoding_array,
|
||||
vp9_tokens_from_tree_offset(vp8_sub_mv_ref_encoding_array,
|
||||
vp8_sub_mv_ref_tree, LEFT4X4);
|
||||
}
|
||||
|
||||
void vp8_init_mode_contexts(VP8_COMMON *pc) {
|
||||
void vp9_init_mode_contexts(VP8_COMMON *pc) {
|
||||
vpx_memset(pc->fc.mv_ref_ct, 0, sizeof(pc->fc.mv_ref_ct));
|
||||
vpx_memset(pc->fc.mv_ref_ct_a, 0, sizeof(pc->fc.mv_ref_ct_a));
|
||||
|
||||
@ -400,7 +400,7 @@ void vp8_init_mode_contexts(VP8_COMMON *pc) {
|
||||
|
||||
}
|
||||
|
||||
void vp8_accum_mv_refs(VP8_COMMON *pc,
|
||||
void vp9_accum_mv_refs(VP8_COMMON *pc,
|
||||
MB_PREDICTION_MODE m,
|
||||
const int ct[4]) {
|
||||
int (*mv_ref_ct)[4][2];
|
||||
@ -434,7 +434,7 @@ void vp8_accum_mv_refs(VP8_COMMON *pc,
|
||||
|
||||
#define MVREF_COUNT_SAT 20
|
||||
#define MVREF_MAX_UPDATE_FACTOR 144
|
||||
void vp8_update_mode_context(VP8_COMMON *pc) {
|
||||
void vp9_update_mode_context(VP8_COMMON *pc) {
|
||||
int i, j;
|
||||
int (*mv_ref_ct)[4][2];
|
||||
int (*mode_context)[4];
|
||||
@ -489,7 +489,7 @@ void print_mode_contexts(VP8_COMMON *pc) {
|
||||
// #define MODE_COUNT_TESTING
|
||||
#define MODE_COUNT_SAT 20
|
||||
#define MODE_MAX_UPDATE_FACTOR 144
|
||||
void vp8_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
void vp9_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
int i, t, count, factor;
|
||||
unsigned int branch_ct[32][2];
|
||||
vp8_prob ymode_probs[VP8_YMODES - 1];
|
||||
@ -532,7 +532,7 @@ void vp8_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
for (t = 0; t < VP8_NUMMBSPLITS; ++t) printf("%d, ", cm->fc.mbsplit_counts[t]);
|
||||
printf("};\n");
|
||||
#endif
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
|
||||
ymode_probs, branch_ct, cm->fc.ymode_counts,
|
||||
256, 1);
|
||||
@ -548,7 +548,7 @@ void vp8_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
else cm->fc.ymode_prob[t] = prob;
|
||||
}
|
||||
for (i = 0; i < VP8_YMODES; ++i) {
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree,
|
||||
uvmode_probs, branch_ct, cm->fc.uv_mode_counts[i],
|
||||
256, 1);
|
||||
@ -564,7 +564,7 @@ void vp8_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
else cm->fc.uv_mode_prob[i][t] = prob;
|
||||
}
|
||||
}
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_BINTRAMODES, vp8_bmode_encodings, vp8_bmode_tree,
|
||||
bmode_probs, branch_ct, cm->fc.bmode_counts,
|
||||
256, 1);
|
||||
@ -579,7 +579,7 @@ void vp8_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
else if (prob > 255) cm->fc.bmode_prob[t] = 255;
|
||||
else cm->fc.bmode_prob[t] = prob;
|
||||
}
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_I8X8_MODES, vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree,
|
||||
i8x8_mode_probs, branch_ct, cm->fc.i8x8_mode_counts,
|
||||
256, 1);
|
||||
@ -595,7 +595,7 @@ void vp8_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
else cm->fc.i8x8_mode_prob[t] = prob;
|
||||
}
|
||||
for (i = 0; i < SUBMVREF_COUNT; ++i) {
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_SUBMVREFS, vp8_sub_mv_ref_encoding_array, vp8_sub_mv_ref_tree,
|
||||
sub_mv_ref_probs, branch_ct, cm->fc.sub_mv_ref_counts[i],
|
||||
256, 1);
|
||||
@ -611,7 +611,7 @@ void vp8_adapt_mode_probs(VP8_COMMON *cm) {
|
||||
else cm->fc.sub_mv_ref_prob[i][t] = prob;
|
||||
}
|
||||
}
|
||||
vp8_tree_probs_from_distribution(
|
||||
vp9_tree_probs_from_distribution(
|
||||
VP8_NUMMBSPLITS, vp8_mbsplit_encodings, vp8_mbsplit_tree,
|
||||
mbsplit_probs, branch_ct, cm->fc.mbsplit_counts,
|
||||
256, 1);
|
||||
|
@ -26,7 +26,7 @@ extern const int vp8_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */
|
||||
|
||||
extern const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS - 1];
|
||||
|
||||
extern int vp8_mv_cont(const int_mv *l, const int_mv *a);
|
||||
extern int vp9_mv_cont(const int_mv *l, const int_mv *a);
|
||||
|
||||
extern const vp8_prob vp8_sub_mv_ref_prob [VP8_SUBMVREFS - 1];
|
||||
extern const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
|
||||
@ -61,20 +61,20 @@ extern struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS];
|
||||
extern struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS];
|
||||
extern struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
|
||||
|
||||
void vp8_entropy_mode_init(void);
|
||||
void vp9_entropy_mode_init(void);
|
||||
|
||||
struct VP8Common;
|
||||
void vp8_init_mbmode_probs(struct VP8Common *x);
|
||||
extern void vp8_init_mode_contexts(struct VP8Common *pc);
|
||||
extern void vp8_update_mode_context(struct VP8Common *pc);;
|
||||
extern void vp8_accum_mv_refs(struct VP8Common *pc,
|
||||
void vp9_init_mbmode_probs(struct VP8Common *x);
|
||||
extern void vp9_init_mode_contexts(struct VP8Common *pc);
|
||||
extern void vp9_update_mode_context(struct VP8Common *pc);;
|
||||
extern void vp9_accum_mv_refs(struct VP8Common *pc,
|
||||
MB_PREDICTION_MODE m,
|
||||
const int ct[4]);
|
||||
|
||||
void vp8_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES - 1]);
|
||||
void vp8_kf_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES - 1]);
|
||||
void vp9_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES - 1]);
|
||||
void vp9_kf_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES - 1]);
|
||||
|
||||
void vp8_adapt_mode_probs(struct VP8Common *);
|
||||
void vp9_adapt_mode_probs(struct VP8Common *);
|
||||
|
||||
#define VP8_SWITCHABLE_FILTERS 2 /* number of switchable filters */
|
||||
extern const INTERPOLATIONFILTERTYPE vp8_switchable_interp
|
||||
|
@ -84,7 +84,7 @@ const nmv_context vp8_default_nmv_context = {
|
||||
},
|
||||
};
|
||||
|
||||
MV_JOINT_TYPE vp8_get_mv_joint(MV mv) {
|
||||
MV_JOINT_TYPE vp9_get_mv_joint(MV mv) {
|
||||
if (mv.row == 0 && mv.col == 0) return MV_JOINT_ZERO;
|
||||
else if (mv.row == 0 && mv.col != 0) return MV_JOINT_HNZVZ;
|
||||
else if (mv.row != 0 && mv.col == 0) return MV_JOINT_HZVNZ;
|
||||
@ -93,7 +93,7 @@ MV_JOINT_TYPE vp8_get_mv_joint(MV mv) {
|
||||
|
||||
#define mv_class_base(c) ((c) ? (CLASS0_SIZE << (c + 2)) : 0)
|
||||
|
||||
MV_CLASS_TYPE vp8_get_mv_class(int z, int *offset) {
|
||||
MV_CLASS_TYPE vp9_get_mv_class(int z, int *offset) {
|
||||
MV_CLASS_TYPE c;
|
||||
if (z < CLASS0_SIZE * 8) c = MV_CLASS_0;
|
||||
else if (z < CLASS0_SIZE * 16) c = MV_CLASS_1;
|
||||
@ -109,7 +109,7 @@ MV_CLASS_TYPE vp8_get_mv_class(int z, int *offset) {
|
||||
return c;
|
||||
}
|
||||
|
||||
int vp8_use_nmv_hp(const MV *ref) {
|
||||
int vp9_use_nmv_hp(const MV *ref) {
|
||||
if ((abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
|
||||
(abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH)
|
||||
return 1;
|
||||
@ -117,7 +117,7 @@ int vp8_use_nmv_hp(const MV *ref) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vp8_get_mv_mag(MV_CLASS_TYPE c, int offset) {
|
||||
int vp9_get_mv_mag(MV_CLASS_TYPE c, int offset) {
|
||||
return mv_class_base(c) + offset;
|
||||
}
|
||||
|
||||
@ -139,7 +139,7 @@ static void increment_nmv_component(int v,
|
||||
mvcomp->sign[s] += incr;
|
||||
z = (s ? -v : v) - 1; /* magnitude - 1 */
|
||||
|
||||
c = vp8_get_mv_class(z, &o);
|
||||
c = vp9_get_mv_class(z, &o);
|
||||
mvcomp->classes[c] += incr;
|
||||
|
||||
d = (o >> 3); /* int mv data */
|
||||
@ -198,11 +198,11 @@ static void counts_to_context(nmv_component_counts *mvcomp, int usehp) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
|
||||
void vp9_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
|
||||
int usehp) {
|
||||
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
|
||||
MV_JOINT_TYPE j = vp9_get_mv_joint(*mv);
|
||||
mvctx->joints[j]++;
|
||||
usehp = usehp && vp8_use_nmv_hp(ref);
|
||||
usehp = usehp && vp9_use_nmv_hp(ref);
|
||||
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
|
||||
increment_nmv_component_count(mv->row, &mvctx->comps[0], 1, usehp);
|
||||
}
|
||||
@ -226,7 +226,7 @@ static void adapt_prob(vp8_prob *dest, vp8_prob prep, vp8_prob newp,
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_counts_to_nmv_context(
|
||||
void vp9_counts_to_nmv_context(
|
||||
nmv_context_counts *NMVcount,
|
||||
nmv_context *prob,
|
||||
int usehp,
|
||||
@ -242,7 +242,7 @@ void vp8_counts_to_nmv_context(
|
||||
int i, j, k;
|
||||
counts_to_context(&NMVcount->comps[0], usehp);
|
||||
counts_to_context(&NMVcount->comps[1], usehp);
|
||||
vp8_tree_probs_from_distribution(MV_JOINTS,
|
||||
vp9_tree_probs_from_distribution(MV_JOINTS,
|
||||
vp8_mv_joint_encodings,
|
||||
vp8_mv_joint_tree,
|
||||
prob->joints,
|
||||
@ -251,17 +251,17 @@ void vp8_counts_to_nmv_context(
|
||||
256, 1);
|
||||
for (i = 0; i < 2; ++i) {
|
||||
prob->comps[i].sign =
|
||||
vp8_bin_prob_from_distribution(NMVcount->comps[i].sign);
|
||||
vp9_bin_prob_from_distribution(NMVcount->comps[i].sign);
|
||||
branch_ct_sign[i][0] = NMVcount->comps[i].sign[0];
|
||||
branch_ct_sign[i][1] = NMVcount->comps[i].sign[1];
|
||||
vp8_tree_probs_from_distribution(MV_CLASSES,
|
||||
vp9_tree_probs_from_distribution(MV_CLASSES,
|
||||
vp8_mv_class_encodings,
|
||||
vp8_mv_class_tree,
|
||||
prob->comps[i].classes,
|
||||
branch_ct_classes[i],
|
||||
NMVcount->comps[i].classes,
|
||||
256, 1);
|
||||
vp8_tree_probs_from_distribution(CLASS0_SIZE,
|
||||
vp9_tree_probs_from_distribution(CLASS0_SIZE,
|
||||
vp8_mv_class0_encodings,
|
||||
vp8_mv_class0_tree,
|
||||
prob->comps[i].class0,
|
||||
@ -269,7 +269,7 @@ void vp8_counts_to_nmv_context(
|
||||
NMVcount->comps[i].class0,
|
||||
256, 1);
|
||||
for (j = 0; j < MV_OFFSET_BITS; ++j) {
|
||||
prob->comps[i].bits[j] = vp8_bin_prob_from_distribution(
|
||||
prob->comps[i].bits[j] = vp9_bin_prob_from_distribution(
|
||||
NMVcount->comps[i].bits[j]);
|
||||
branch_ct_bits[i][j][0] = NMVcount->comps[i].bits[j][0];
|
||||
branch_ct_bits[i][j][1] = NMVcount->comps[i].bits[j][1];
|
||||
@ -277,7 +277,7 @@ void vp8_counts_to_nmv_context(
|
||||
}
|
||||
for (i = 0; i < 2; ++i) {
|
||||
for (k = 0; k < CLASS0_SIZE; ++k) {
|
||||
vp8_tree_probs_from_distribution(4,
|
||||
vp9_tree_probs_from_distribution(4,
|
||||
vp8_mv_fp_encodings,
|
||||
vp8_mv_fp_tree,
|
||||
prob->comps[i].class0_fp[k],
|
||||
@ -285,7 +285,7 @@ void vp8_counts_to_nmv_context(
|
||||
NMVcount->comps[i].class0_fp[k],
|
||||
256, 1);
|
||||
}
|
||||
vp8_tree_probs_from_distribution(4,
|
||||
vp9_tree_probs_from_distribution(4,
|
||||
vp8_mv_fp_encodings,
|
||||
vp8_mv_fp_tree,
|
||||
prob->comps[i].fp,
|
||||
@ -295,20 +295,20 @@ void vp8_counts_to_nmv_context(
|
||||
}
|
||||
if (usehp) {
|
||||
for (i = 0; i < 2; ++i) {
|
||||
prob->comps[i].class0_hp = vp8_bin_prob_from_distribution(
|
||||
prob->comps[i].class0_hp = vp9_bin_prob_from_distribution(
|
||||
NMVcount->comps[i].class0_hp);
|
||||
branch_ct_class0_hp[i][0] = NMVcount->comps[i].class0_hp[0];
|
||||
branch_ct_class0_hp[i][1] = NMVcount->comps[i].class0_hp[1];
|
||||
|
||||
prob->comps[i].hp =
|
||||
vp8_bin_prob_from_distribution(NMVcount->comps[i].hp);
|
||||
vp9_bin_prob_from_distribution(NMVcount->comps[i].hp);
|
||||
branch_ct_hp[i][0] = NMVcount->comps[i].hp[0];
|
||||
branch_ct_hp[i][1] = NMVcount->comps[i].hp[1];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_adapt_nmv_probs(VP8_COMMON *cm, int usehp) {
|
||||
void vp9_adapt_nmv_probs(VP8_COMMON *cm, int usehp) {
|
||||
int i, j, k;
|
||||
nmv_context prob;
|
||||
unsigned int branch_ct_joint[MV_JOINTS - 1][2];
|
||||
@ -380,7 +380,7 @@ void vp8_adapt_nmv_probs(VP8_COMMON *cm, int usehp) {
|
||||
smooth_counts(&cm->fc.NMVcount.comps[0]);
|
||||
smooth_counts(&cm->fc.NMVcount.comps[1]);
|
||||
#endif
|
||||
vp8_counts_to_nmv_context(&cm->fc.NMVcount,
|
||||
vp9_counts_to_nmv_context(&cm->fc.NMVcount,
|
||||
&prob,
|
||||
usehp,
|
||||
branch_ct_joint,
|
||||
@ -453,13 +453,13 @@ void vp8_adapt_nmv_probs(VP8_COMMON *cm, int usehp) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_entropy_mv_init() {
|
||||
vp8_tokens_from_tree(vp8_mv_joint_encodings, vp8_mv_joint_tree);
|
||||
vp8_tokens_from_tree(vp8_mv_class_encodings, vp8_mv_class_tree);
|
||||
vp8_tokens_from_tree(vp8_mv_class0_encodings, vp8_mv_class0_tree);
|
||||
vp8_tokens_from_tree(vp8_mv_fp_encodings, vp8_mv_fp_tree);
|
||||
void vp9_entropy_mv_init() {
|
||||
vp9_tokens_from_tree(vp8_mv_joint_encodings, vp8_mv_joint_tree);
|
||||
vp9_tokens_from_tree(vp8_mv_class_encodings, vp8_mv_class_tree);
|
||||
vp9_tokens_from_tree(vp8_mv_class0_encodings, vp8_mv_class0_tree);
|
||||
vp9_tokens_from_tree(vp8_mv_fp_encodings, vp8_mv_fp_tree);
|
||||
}
|
||||
|
||||
void vp8_init_mv_probs(VP8_COMMON *cm) {
|
||||
void vp9_init_mv_probs(VP8_COMMON *cm) {
|
||||
vpx_memcpy(&cm->fc.nmvc, &vp8_default_nmv_context, sizeof(nmv_context));
|
||||
}
|
||||
|
@ -18,13 +18,13 @@
|
||||
|
||||
struct VP8Common;
|
||||
|
||||
void vp8_entropy_mv_init();
|
||||
void vp8_init_mv_probs(struct VP8Common *cm);
|
||||
void vp9_entropy_mv_init();
|
||||
void vp9_init_mv_probs(struct VP8Common *cm);
|
||||
void vp8_adapt_mv_probs(struct VP8Common *cm);
|
||||
|
||||
void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
|
||||
void vp9_adapt_nmv_probs(struct VP8Common *cm, int usehp);
|
||||
void vp8_lower_mv_precision(MV *mv);
|
||||
int vp8_use_nmv_hp(const MV *ref);
|
||||
int vp9_use_nmv_hp(const MV *ref);
|
||||
|
||||
#define VP8_NMV_UPDATE_PROB 255
|
||||
//#define MV_GROUP_UPDATE
|
||||
@ -89,9 +89,9 @@ typedef struct {
|
||||
nmv_component comps[2];
|
||||
} nmv_context;
|
||||
|
||||
MV_JOINT_TYPE vp8_get_mv_joint(MV mv);
|
||||
MV_CLASS_TYPE vp8_get_mv_class(int z, int *offset);
|
||||
int vp8_get_mv_mag(MV_CLASS_TYPE c, int offset);
|
||||
MV_JOINT_TYPE vp9_get_mv_joint(MV mv);
|
||||
MV_CLASS_TYPE vp9_get_mv_class(int z, int *offset);
|
||||
int vp9_get_mv_mag(MV_CLASS_TYPE c, int offset);
|
||||
|
||||
|
||||
typedef struct {
|
||||
@ -111,10 +111,10 @@ typedef struct {
|
||||
nmv_component_counts comps[2];
|
||||
} nmv_context_counts;
|
||||
|
||||
void vp8_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
|
||||
void vp9_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
|
||||
int usehp);
|
||||
extern const nmv_context vp8_default_nmv_context;
|
||||
void vp8_counts_to_nmv_context(
|
||||
void vp9_counts_to_nmv_context(
|
||||
nmv_context_counts *NMVcount,
|
||||
nmv_context *prob,
|
||||
int usehp,
|
||||
|
@ -68,7 +68,7 @@ static void copy_and_extend_plane
|
||||
}
|
||||
|
||||
|
||||
void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
|
||||
void vp9_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
|
||||
YV12_BUFFER_CONFIG *dst) {
|
||||
int et = dst->border;
|
||||
int el = dst->border;
|
||||
@ -97,7 +97,7 @@ void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
|
||||
}
|
||||
|
||||
|
||||
void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
|
||||
void vp9_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
|
||||
YV12_BUFFER_CONFIG *dst,
|
||||
int srcy, int srcx,
|
||||
int srch, int srcw) {
|
||||
@ -151,7 +151,7 @@ void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
|
||||
|
||||
|
||||
/* note the extension is only for the last row, for intra prediction purpose */
|
||||
void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, unsigned char *UPtr, unsigned char *VPtr) {
|
||||
void vp9_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, unsigned char *UPtr, unsigned char *VPtr) {
|
||||
int i;
|
||||
|
||||
YPtr += ybf->y_stride * 14;
|
||||
|
@ -14,10 +14,10 @@
|
||||
|
||||
#include "vpx_scale/yv12config.h"
|
||||
|
||||
void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, unsigned char *UPtr, unsigned char *VPtr);
|
||||
void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
|
||||
void vp9_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, unsigned char *UPtr, unsigned char *VPtr);
|
||||
void vp9_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
|
||||
YV12_BUFFER_CONFIG *dst);
|
||||
void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
|
||||
void vp9_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
|
||||
YV12_BUFFER_CONFIG *dst,
|
||||
int srcy, int srcx,
|
||||
int srch, int srcw);
|
||||
|
@ -296,7 +296,7 @@ static void filter_block2d_6
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict_c
|
||||
void vp9_sixtap_predict_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -342,7 +342,7 @@ static void filter_block2d_avg_6
|
||||
output_pitch, 4, 4, 4, 4, VFilter);
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict_avg_c
|
||||
void vp9_sixtap_predict_avg_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -361,7 +361,7 @@ void vp8_sixtap_predict_avg_c
|
||||
dst_pitch, HFilter, VFilter);
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict8x8_c
|
||||
void vp9_sixtap_predict8x8_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -388,7 +388,7 @@ void vp8_sixtap_predict8x8_c
|
||||
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict_avg8x8_c
|
||||
void vp9_sixtap_predict_avg8x8_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -413,7 +413,7 @@ void vp8_sixtap_predict_avg8x8_c
|
||||
filter_block2d_second_pass_avg_6(FData + 8 * (Interp_Extend - 1), dst_ptr, dst_pitch, 8, 8, 8, 8, VFilter);
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict8x4_c
|
||||
void vp9_sixtap_predict8x4_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -440,7 +440,7 @@ void vp8_sixtap_predict8x4_c
|
||||
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict16x16_c
|
||||
void vp9_sixtap_predict16x16_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -467,7 +467,7 @@ void vp8_sixtap_predict16x16_c
|
||||
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict_avg16x16_c
|
||||
void vp9_sixtap_predict_avg16x16_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -607,7 +607,7 @@ static void vp8_filter_block2d_8_c
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_filter_block2d_4x4_8_c
|
||||
void vp9_filter_block2d_4x4_8_c
|
||||
(
|
||||
const unsigned char *src_ptr, const unsigned int src_stride,
|
||||
const short *HFilter_aligned16, const short *VFilter_aligned16,
|
||||
@ -618,7 +618,7 @@ void vp8_filter_block2d_4x4_8_c
|
||||
VPX_FILTER_4x4, dst_ptr, dst_stride);
|
||||
}
|
||||
|
||||
void vp8_filter_block2d_8x4_8_c
|
||||
void vp9_filter_block2d_8x4_8_c
|
||||
(
|
||||
const unsigned char *src_ptr, const unsigned int src_stride,
|
||||
const short *HFilter_aligned16, const short *VFilter_aligned16,
|
||||
@ -629,7 +629,7 @@ void vp8_filter_block2d_8x4_8_c
|
||||
VPX_FILTER_8x4, dst_ptr, dst_stride);
|
||||
}
|
||||
|
||||
void vp8_filter_block2d_8x8_8_c
|
||||
void vp9_filter_block2d_8x8_8_c
|
||||
(
|
||||
const unsigned char *src_ptr, const unsigned int src_stride,
|
||||
const short *HFilter_aligned16, const short *VFilter_aligned16,
|
||||
@ -640,7 +640,7 @@ void vp8_filter_block2d_8x8_8_c
|
||||
VPX_FILTER_8x8, dst_ptr, dst_stride);
|
||||
}
|
||||
|
||||
void vp8_filter_block2d_16x16_8_c
|
||||
void vp9_filter_block2d_16x16_8_c
|
||||
(
|
||||
const unsigned char *src_ptr, const unsigned int src_stride,
|
||||
const short *HFilter_aligned16, const short *VFilter_aligned16,
|
||||
@ -673,7 +673,7 @@ static void vp8_block2d_average_c
|
||||
|
||||
#define vp8_block2d_average vp8_block2d_average_c
|
||||
|
||||
void vp8_eighttap_predict_c
|
||||
void vp9_eighttap_predict_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -688,12 +688,12 @@ void vp8_eighttap_predict_c
|
||||
HFilter = vp8_sub_pel_filters_8[xoffset];
|
||||
VFilter = vp8_sub_pel_filters_8[yoffset];
|
||||
|
||||
vp8_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict_avg4x4_c
|
||||
void vp9_eighttap_predict_avg4x4_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -706,13 +706,13 @@ void vp8_eighttap_predict_avg4x4_c
|
||||
const short *VFilter = vp8_sub_pel_filters_8[yoffset];
|
||||
unsigned char tmp[4 * 4];
|
||||
|
||||
vp8_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
tmp, 4);
|
||||
vp8_block2d_average(tmp, 4, dst_ptr, dst_pitch, VPX_FILTER_4x4);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict_sharp_c
|
||||
void vp9_eighttap_predict_sharp_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -727,12 +727,12 @@ void vp8_eighttap_predict_sharp_c
|
||||
HFilter = vp8_sub_pel_filters_8s[xoffset];
|
||||
VFilter = vp8_sub_pel_filters_8s[yoffset];
|
||||
|
||||
vp8_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict_avg4x4_sharp_c
|
||||
void vp9_eighttap_predict_avg4x4_sharp_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -745,13 +745,13 @@ void vp8_eighttap_predict_avg4x4_sharp_c
|
||||
const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
|
||||
unsigned char tmp[4 * 4];
|
||||
|
||||
vp8_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
tmp, 4);
|
||||
vp8_block2d_average(tmp, 4, dst_ptr, dst_pitch, VPX_FILTER_4x4);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict8x8_c
|
||||
void vp9_eighttap_predict8x8_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -763,12 +763,12 @@ void vp8_eighttap_predict8x8_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8[yoffset];
|
||||
|
||||
vp8_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict8x8_sharp_c
|
||||
void vp9_eighttap_predict8x8_sharp_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -780,12 +780,12 @@ void vp8_eighttap_predict8x8_sharp_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
|
||||
|
||||
vp8_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict_avg8x8_c
|
||||
void vp9_eighttap_predict_avg8x8_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -798,13 +798,13 @@ void vp8_eighttap_predict_avg8x8_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8[yoffset];
|
||||
|
||||
vp8_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
tmp, 8);
|
||||
vp8_block2d_average(tmp, 8, dst_ptr, dst_pitch, VPX_FILTER_8x8);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict_avg8x8_sharp_c
|
||||
void vp9_eighttap_predict_avg8x8_sharp_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -817,13 +817,13 @@ void vp8_eighttap_predict_avg8x8_sharp_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
|
||||
|
||||
vp8_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
tmp, 8);
|
||||
vp8_block2d_average(tmp, 8, dst_ptr, dst_pitch, VPX_FILTER_8x8);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict8x4_c
|
||||
void vp9_eighttap_predict8x4_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -836,12 +836,12 @@ void vp8_eighttap_predict8x4_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8[yoffset];
|
||||
|
||||
vp8_filter_block2d_8x4_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_8x4_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict8x4_sharp_c
|
||||
void vp9_eighttap_predict8x4_sharp_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -853,12 +853,12 @@ void vp8_eighttap_predict8x4_sharp_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
|
||||
|
||||
vp8_filter_block2d_8x4_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_8x4_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict16x16_c
|
||||
void vp9_eighttap_predict16x16_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -870,12 +870,12 @@ void vp8_eighttap_predict16x16_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8[yoffset];
|
||||
|
||||
vp8_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict16x16_sharp_c
|
||||
void vp9_eighttap_predict16x16_sharp_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -887,12 +887,12 @@ void vp8_eighttap_predict16x16_sharp_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
|
||||
|
||||
vp8_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
dst_ptr, dst_pitch);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict_avg16x16_c
|
||||
void vp9_eighttap_predict_avg16x16_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -905,13 +905,13 @@ void vp8_eighttap_predict_avg16x16_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8[yoffset];
|
||||
|
||||
vp8_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
tmp, 16);
|
||||
vp8_block2d_average(tmp, 16, dst_ptr, dst_pitch, VPX_FILTER_16x16);
|
||||
}
|
||||
|
||||
void vp8_eighttap_predict_avg16x16_sharp_c
|
||||
void vp9_eighttap_predict_avg16x16_sharp_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -924,7 +924,7 @@ void vp8_eighttap_predict_avg16x16_sharp_c
|
||||
const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
|
||||
const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
|
||||
|
||||
vp8_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
|
||||
HFilter, VFilter,
|
||||
tmp, 16);
|
||||
vp8_block2d_average(tmp, 16, dst_ptr, dst_pitch, VPX_FILTER_16x16);
|
||||
@ -1127,7 +1127,7 @@ static void filter_block2d_bil_avg
|
||||
filter_block2d_bil_second_pass_avg(FData, dst_ptr, dst_pitch, Height, Width, VFilter);
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict4x4_c
|
||||
void vp9_bilinear_predict4x4_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -1162,7 +1162,7 @@ void vp8_bilinear_predict4x4_c
|
||||
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict_avg4x4_c
|
||||
void vp9_bilinear_predict_avg4x4_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -1181,7 +1181,7 @@ void vp8_bilinear_predict_avg4x4_c
|
||||
dst_pitch, HFilter, VFilter, 4, 4);
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict8x8_c
|
||||
void vp9_bilinear_predict8x8_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -1200,7 +1200,7 @@ void vp8_bilinear_predict8x8_c
|
||||
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict_avg8x8_c
|
||||
void vp9_bilinear_predict_avg8x8_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -1219,7 +1219,7 @@ void vp8_bilinear_predict_avg8x8_c
|
||||
dst_pitch, HFilter, VFilter, 8, 8);
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict8x4_c
|
||||
void vp9_bilinear_predict8x4_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -1238,7 +1238,7 @@ void vp8_bilinear_predict8x4_c
|
||||
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict16x16_c
|
||||
void vp9_bilinear_predict16x16_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -1256,7 +1256,7 @@ void vp8_bilinear_predict16x16_c
|
||||
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16);
|
||||
}
|
||||
|
||||
void vp8_bilinear_predict_avg16x16_c
|
||||
void vp9_bilinear_predict_avg16x16_c
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
|
@ -22,7 +22,7 @@ const unsigned char vp8_mbsplit_offset[4][16] = {
|
||||
|
||||
static void lower_mv_precision(int_mv *mv, int usehp)
|
||||
{
|
||||
if (!usehp || !vp8_use_nmv_hp(&mv->as_mv)) {
|
||||
if (!usehp || !vp9_use_nmv_hp(&mv->as_mv)) {
|
||||
if (mv->as_mv.row & 1)
|
||||
mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
|
||||
if (mv->as_mv.col & 1)
|
||||
@ -34,7 +34,7 @@ static void lower_mv_precision(int_mv *mv, int usehp)
|
||||
Note that we only consider one 4x4 subblock from each candidate 16x16
|
||||
macroblock. */
|
||||
|
||||
void vp8_find_near_mvs
|
||||
void vp9_find_near_mvs
|
||||
(
|
||||
MACROBLOCKD *xd,
|
||||
const MODE_INFO *here,
|
||||
@ -157,7 +157,7 @@ void vp8_find_near_mvs
|
||||
vp8_clamp_mv2(best_mv, xd);
|
||||
}
|
||||
|
||||
vp8_prob *vp8_mv_ref_probs(VP8_COMMON *pc,
|
||||
vp8_prob *vp9_mv_ref_probs(VP8_COMMON *pc,
|
||||
vp8_prob p[VP8_MVREFS - 1], const int near_mv_ref_ct[4]
|
||||
) {
|
||||
p[0] = pc->fc.vp8_mode_contexts [near_mv_ref_ct[0]] [0];
|
||||
|
@ -85,7 +85,7 @@ static unsigned int vp8_check_mv_bounds(int_mv *mv,
|
||||
(mv->as_mv.row > mb_to_bottom_edge);
|
||||
}
|
||||
|
||||
void vp8_find_near_mvs
|
||||
void vp9_find_near_mvs
|
||||
(
|
||||
MACROBLOCKD *xd,
|
||||
const MODE_INFO *here,
|
||||
@ -96,7 +96,7 @@ void vp8_find_near_mvs
|
||||
int *ref_frame_sign_bias
|
||||
);
|
||||
|
||||
vp8_prob *vp8_mv_ref_probs(VP8_COMMON *pc,
|
||||
vp8_prob *vp9_mv_ref_probs(VP8_COMMON *pc,
|
||||
vp8_prob p[VP8_MVREFS - 1], const int near_mv_ref_ct[4]
|
||||
);
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
||||
|
||||
|
||||
extern void (*vp8_clear_system_state)(void);
|
||||
extern void (*vp8_plane_add_noise)(unsigned char *Start, unsigned int Width, unsigned int Height, int Pitch, int DPitch, int q);
|
||||
extern void (*vp9_plane_add_noise)(unsigned char *Start, unsigned int Width, unsigned int Height, int Pitch, int DPitch, int q);
|
||||
extern void (*de_interlace)
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
|
@ -17,67 +17,67 @@
|
||||
#include "vp8/common/idct.h"
|
||||
#include "vp8/common/onyxc_int.h"
|
||||
|
||||
extern void vp8_arch_x86_common_init(VP8_COMMON *ctx);
|
||||
extern void vp9_arch_x86_common_init(VP8_COMMON *ctx);
|
||||
extern void vp8_arch_arm_common_init(VP8_COMMON *ctx);
|
||||
|
||||
void vp8_machine_specific_config(VP8_COMMON *ctx) {
|
||||
void vp9_machine_specific_config(VP8_COMMON *ctx) {
|
||||
#if CONFIG_RUNTIME_CPU_DETECT
|
||||
VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
|
||||
|
||||
rtcd->idct.idct1 = vp8_short_idct4x4llm_1_c;
|
||||
rtcd->idct.idct16 = vp8_short_idct4x4llm_c;
|
||||
rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
|
||||
rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
|
||||
rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
|
||||
rtcd->idct.idct8 = vp8_short_idct8x8_c;
|
||||
rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c;
|
||||
rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c;
|
||||
rtcd->idct.idct16x16 = vp8_short_idct16x16_c;
|
||||
rtcd->idct.idct1 = vp9_short_idct4x4llm_1_c;
|
||||
rtcd->idct.idct16 = vp9_short_idct4x4llm_c;
|
||||
rtcd->idct.idct1_scalar_add = vp9_dc_only_idct_add_c;
|
||||
rtcd->idct.iwalsh1 = vp9_short_inv_walsh4x4_1_c;
|
||||
rtcd->idct.iwalsh16 = vp9_short_inv_walsh4x4_c;
|
||||
rtcd->idct.idct8 = vp9_short_idct8x8_c;
|
||||
rtcd->idct.idct1_scalar_add_8x8 = vp9_dc_only_idct_add_8x8_c;
|
||||
rtcd->idct.ihaar2 = vp9_short_ihaar2x2_c;
|
||||
rtcd->idct.idct16x16 = vp9_short_idct16x16_c;
|
||||
|
||||
rtcd->subpix.eighttap16x16 = vp8_eighttap_predict16x16_c;
|
||||
rtcd->subpix.eighttap8x8 = vp8_eighttap_predict8x8_c;
|
||||
rtcd->subpix.eighttap_avg16x16 = vp8_eighttap_predict_avg16x16_c;
|
||||
rtcd->subpix.eighttap_avg8x8 = vp8_eighttap_predict_avg8x8_c;
|
||||
rtcd->subpix.eighttap_avg4x4 = vp8_eighttap_predict_avg4x4_c;
|
||||
rtcd->subpix.eighttap8x4 = vp8_eighttap_predict8x4_c;
|
||||
rtcd->subpix.eighttap4x4 = vp8_eighttap_predict_c;
|
||||
rtcd->subpix.eighttap16x16_sharp = vp8_eighttap_predict16x16_sharp_c;
|
||||
rtcd->subpix.eighttap8x8_sharp = vp8_eighttap_predict8x8_sharp_c;
|
||||
rtcd->subpix.eighttap_avg16x16_sharp = vp8_eighttap_predict_avg16x16_sharp_c;
|
||||
rtcd->subpix.eighttap_avg8x8_sharp = vp8_eighttap_predict_avg8x8_sharp_c;
|
||||
rtcd->subpix.eighttap_avg4x4_sharp = vp8_eighttap_predict_avg4x4_sharp_c;
|
||||
rtcd->subpix.eighttap8x4_sharp = vp8_eighttap_predict8x4_sharp_c;
|
||||
rtcd->subpix.eighttap4x4_sharp = vp8_eighttap_predict_sharp_c;
|
||||
rtcd->subpix.eighttap16x16 = vp9_eighttap_predict16x16_c;
|
||||
rtcd->subpix.eighttap8x8 = vp9_eighttap_predict8x8_c;
|
||||
rtcd->subpix.eighttap_avg16x16 = vp9_eighttap_predict_avg16x16_c;
|
||||
rtcd->subpix.eighttap_avg8x8 = vp9_eighttap_predict_avg8x8_c;
|
||||
rtcd->subpix.eighttap_avg4x4 = vp9_eighttap_predict_avg4x4_c;
|
||||
rtcd->subpix.eighttap8x4 = vp9_eighttap_predict8x4_c;
|
||||
rtcd->subpix.eighttap4x4 = vp9_eighttap_predict_c;
|
||||
rtcd->subpix.eighttap16x16_sharp = vp9_eighttap_predict16x16_sharp_c;
|
||||
rtcd->subpix.eighttap8x8_sharp = vp9_eighttap_predict8x8_sharp_c;
|
||||
rtcd->subpix.eighttap_avg16x16_sharp = vp9_eighttap_predict_avg16x16_sharp_c;
|
||||
rtcd->subpix.eighttap_avg8x8_sharp = vp9_eighttap_predict_avg8x8_sharp_c;
|
||||
rtcd->subpix.eighttap_avg4x4_sharp = vp9_eighttap_predict_avg4x4_sharp_c;
|
||||
rtcd->subpix.eighttap8x4_sharp = vp9_eighttap_predict8x4_sharp_c;
|
||||
rtcd->subpix.eighttap4x4_sharp = vp9_eighttap_predict_sharp_c;
|
||||
|
||||
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_c;
|
||||
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_c;
|
||||
rtcd->subpix.sixtap_avg16x16 = vp8_sixtap_predict_avg16x16_c;
|
||||
rtcd->subpix.sixtap_avg8x8 = vp8_sixtap_predict_avg8x8_c;
|
||||
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_c;
|
||||
rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_c;
|
||||
rtcd->subpix.sixtap_avg4x4 = vp8_sixtap_predict_avg_c;
|
||||
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_c;
|
||||
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_c;
|
||||
rtcd->subpix.bilinear_avg16x16 = vp8_bilinear_predict_avg16x16_c;
|
||||
rtcd->subpix.bilinear_avg8x8 = vp8_bilinear_predict_avg8x8_c;
|
||||
rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_c;
|
||||
rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_c;
|
||||
rtcd->subpix.bilinear_avg4x4 = vp8_bilinear_predict_avg4x4_c;
|
||||
rtcd->subpix.sixtap16x16 = vp9_sixtap_predict16x16_c;
|
||||
rtcd->subpix.sixtap8x8 = vp9_sixtap_predict8x8_c;
|
||||
rtcd->subpix.sixtap_avg16x16 = vp9_sixtap_predict_avg16x16_c;
|
||||
rtcd->subpix.sixtap_avg8x8 = vp9_sixtap_predict_avg8x8_c;
|
||||
rtcd->subpix.sixtap8x4 = vp9_sixtap_predict8x4_c;
|
||||
rtcd->subpix.sixtap4x4 = vp9_sixtap_predict_c;
|
||||
rtcd->subpix.sixtap_avg4x4 = vp9_sixtap_predict_avg_c;
|
||||
rtcd->subpix.bilinear16x16 = vp9_bilinear_predict16x16_c;
|
||||
rtcd->subpix.bilinear8x8 = vp9_bilinear_predict8x8_c;
|
||||
rtcd->subpix.bilinear_avg16x16 = vp9_bilinear_predict_avg16x16_c;
|
||||
rtcd->subpix.bilinear_avg8x8 = vp9_bilinear_predict_avg8x8_c;
|
||||
rtcd->subpix.bilinear8x4 = vp9_bilinear_predict8x4_c;
|
||||
rtcd->subpix.bilinear4x4 = vp9_bilinear_predict4x4_c;
|
||||
rtcd->subpix.bilinear_avg4x4 = vp9_bilinear_predict_avg4x4_c;
|
||||
|
||||
#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_INTERNAL_STATS)
|
||||
rtcd->postproc.down = vp8_mbpost_proc_down_c;
|
||||
rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;
|
||||
rtcd->postproc.downacross = vp8_post_proc_down_and_across_c;
|
||||
rtcd->postproc.addnoise = vp8_plane_add_noise_c;
|
||||
rtcd->postproc.blend_mb_inner = vp8_blend_mb_inner_c;
|
||||
rtcd->postproc.blend_mb_outer = vp8_blend_mb_outer_c;
|
||||
rtcd->postproc.blend_b = vp8_blend_b_c;
|
||||
rtcd->postproc.down = vp9_mbpost_proc_down_c;
|
||||
rtcd->postproc.across = vp9_mbpost_proc_across_ip_c;
|
||||
rtcd->postproc.downacross = vp9_post_proc_down_and_across_c;
|
||||
rtcd->postproc.addnoise = vp9_plane_add_noise_c;
|
||||
rtcd->postproc.blend_mb_inner = vp9_blend_mb_inner_c;
|
||||
rtcd->postproc.blend_mb_outer = vp9_blend_mb_outer_c;
|
||||
rtcd->postproc.blend_b = vp9_blend_b_c;
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#if ARCH_X86 || ARCH_X86_64
|
||||
vp8_arch_x86_common_init(ctx);
|
||||
vp9_arch_x86_common_init(ctx);
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -12,6 +12,8 @@
|
||||
#ifndef __INC_IDCT_H
|
||||
#define __INC_IDCT_H
|
||||
|
||||
#include "vp8/common/blockd.h"
|
||||
|
||||
#define prototype_second_order(sym) \
|
||||
void sym(short *input, short *output)
|
||||
|
||||
@ -44,73 +46,72 @@
|
||||
#endif
|
||||
|
||||
#ifndef vp8_idct_idct16x16
|
||||
#define vp8_idct_idct16x16 vp8_short_idct16x16_c
|
||||
#define vp8_idct_idct16x16 vp9_short_idct16x16_c
|
||||
#endif
|
||||
extern prototype_idct(vp8_idct_idct16x16);
|
||||
|
||||
#ifndef vp8_idct_idct8
|
||||
#define vp8_idct_idct8 vp8_short_idct8x8_c
|
||||
#define vp8_idct_idct8 vp9_short_idct8x8_c
|
||||
#endif
|
||||
extern prototype_idct(vp8_idct_idct8);
|
||||
|
||||
#ifndef vp8_idct_idct8_1
|
||||
#define vp8_idct_idct8_1 vp8_short_idct8x8_1_c
|
||||
#define vp8_idct_idct8_1 vp9_short_idct8x8_1_c
|
||||
#endif
|
||||
extern prototype_idct(vp8_idct_idct8_1);
|
||||
|
||||
#ifndef vp8_idct_ihaar2
|
||||
#define vp8_idct_ihaar2 vp8_short_ihaar2x2_c
|
||||
#define vp8_idct_ihaar2 vp9_short_ihaar2x2_c
|
||||
#endif
|
||||
extern prototype_idct(vp8_idct_ihaar2);
|
||||
|
||||
#ifndef vp8_idct_ihaar2_1
|
||||
#define vp8_idct_ihaar2_1 vp8_short_ihaar2x2_1_c
|
||||
#define vp8_idct_ihaar2_1 vp9_short_ihaar2x2_1_c
|
||||
#endif
|
||||
extern prototype_idct(vp8_idct_ihaar2_1);
|
||||
|
||||
#ifndef vp8_idct_idct1_scalar_add_8x8
|
||||
#define vp8_idct_idct1_scalar_add_8x8 vp8_dc_only_idct_add_8x8_c
|
||||
#define vp8_idct_idct1_scalar_add_8x8 vp9_dc_only_idct_add_8x8_c
|
||||
#endif
|
||||
extern prototype_idct_scalar_add(vp8_idct_idct1_scalar_add_8x8);
|
||||
|
||||
|
||||
|
||||
#ifndef vp8_idct_idct1
|
||||
#define vp8_idct_idct1 vp8_short_idct4x4llm_1_c
|
||||
#define vp8_idct_idct1 vp9_short_idct4x4llm_1_c
|
||||
#endif
|
||||
extern prototype_idct(vp8_idct_idct1);
|
||||
|
||||
#ifndef vp8_idct_idct16
|
||||
#define vp8_idct_idct16 vp8_short_idct4x4llm_c
|
||||
#define vp8_idct_idct16 vp9_short_idct4x4llm_c
|
||||
#endif
|
||||
extern prototype_idct(vp8_idct_idct16);
|
||||
|
||||
#ifndef vp8_idct_idct1_scalar_add
|
||||
#define vp8_idct_idct1_scalar_add vp8_dc_only_idct_add_c
|
||||
#define vp8_idct_idct1_scalar_add vp9_dc_only_idct_add_c
|
||||
#endif
|
||||
extern prototype_idct_scalar_add(vp8_idct_idct1_scalar_add);
|
||||
|
||||
|
||||
#ifndef vp8_idct_iwalsh1
|
||||
#define vp8_idct_iwalsh1 vp8_short_inv_walsh4x4_1_c
|
||||
#define vp8_idct_iwalsh1 vp9_short_inv_walsh4x4_1_c
|
||||
#endif
|
||||
extern prototype_second_order(vp8_idct_iwalsh1);
|
||||
|
||||
#ifndef vp8_idct_iwalsh16
|
||||
#define vp8_idct_iwalsh16 vp8_short_inv_walsh4x4_c
|
||||
#define vp8_idct_iwalsh16 vp9_short_inv_walsh4x4_c
|
||||
#endif
|
||||
extern prototype_second_order(vp8_idct_iwalsh16);
|
||||
|
||||
#if CONFIG_LOSSLESS
|
||||
extern prototype_idct(vp8_short_inv_walsh4x4_x8_c);
|
||||
extern prototype_idct(vp8_short_inv_walsh4x4_1_x8_c);
|
||||
extern prototype_idct_scalar_add(vp8_dc_only_inv_walsh_add_c);
|
||||
extern prototype_second_order(vp8_short_inv_walsh4x4_lossless_c);
|
||||
extern prototype_second_order(vp8_short_inv_walsh4x4_1_lossless_c);
|
||||
extern prototype_idct(vp9_short_inv_walsh4x4_x8_c);
|
||||
extern prototype_idct(vp9_short_inv_walsh4x4_1_x8_c);
|
||||
extern prototype_idct_scalar_add(vp9_dc_only_inv_walsh_add_c);
|
||||
extern prototype_second_order(vp9_short_inv_walsh4x4_lossless_c);
|
||||
extern prototype_second_order(vp9_short_inv_walsh4x4_1_lossless_c);
|
||||
#endif
|
||||
|
||||
#include "vp8/common/blockd.h"
|
||||
void vp8_ihtllm_c(const int16_t *input, int16_t *output, int pitch,
|
||||
void vp9_ihtllm_c(const int16_t *input, int16_t *output, int pitch,
|
||||
TX_TYPE tx_type, int tx_dim);
|
||||
|
||||
typedef prototype_idct((*vp8_idct_fn_t));
|
||||
|
@ -283,12 +283,12 @@ const int16_t iadst_i16[256] = {
|
||||
/* For test */
|
||||
#define TEST_INT 1
|
||||
#if TEST_INT
|
||||
#define vp8_ihtllm_int_c vp8_ihtllm_c
|
||||
#define vp9_ihtllm_int_c vp9_ihtllm_c
|
||||
#else
|
||||
#define vp8_ihtllm_float_c vp8_ihtllm_c
|
||||
#define vp9_ihtllm_float_c vp9_ihtllm_c
|
||||
#endif
|
||||
|
||||
void vp8_ihtllm_float_c(const int16_t *input, int16_t *output, int pitch,
|
||||
void vp9_ihtllm_float_c(const int16_t *input, int16_t *output, int pitch,
|
||||
TX_TYPE tx_type, int tx_dim) {
|
||||
vp8_clear_system_state(); // Make it simd safe : __asm emms;
|
||||
{
|
||||
@ -412,7 +412,7 @@ void vp8_ihtllm_float_c(const int16_t *input, int16_t *output, int pitch,
|
||||
#define VERTICAL_ROUNDING ((1 << (VERTICAL_SHIFT - 1)) - 1)
|
||||
#define HORIZONTAL_SHIFT 17 // 15
|
||||
#define HORIZONTAL_ROUNDING ((1 << (HORIZONTAL_SHIFT - 1)) - 1)
|
||||
void vp8_ihtllm_int_c(const int16_t *input, int16_t *output, int pitch,
|
||||
void vp9_ihtllm_int_c(const int16_t *input, int16_t *output, int pitch,
|
||||
TX_TYPE tx_type, int tx_dim) {
|
||||
int i, j, k;
|
||||
int16_t imbuf[256];
|
||||
@ -492,7 +492,7 @@ void vp8_ihtllm_int_c(const int16_t *input, int16_t *output, int pitch,
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) {
|
||||
void vp9_short_idct4x4llm_c(short *input, short *output, int pitch) {
|
||||
int i;
|
||||
int a1, b1, c1, d1;
|
||||
|
||||
@ -549,7 +549,7 @@ void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_short_idct4x4llm_1_c(short *input, short *output, int pitch) {
|
||||
void vp9_short_idct4x4llm_1_c(short *input, short *output, int pitch) {
|
||||
int i;
|
||||
int a1;
|
||||
short *op = output;
|
||||
@ -564,7 +564,7 @@ void vp8_short_idct4x4llm_1_c(short *input, short *output, int pitch) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr, unsigned char *dst_ptr, int pitch, int stride) {
|
||||
void vp9_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr, unsigned char *dst_ptr, int pitch, int stride) {
|
||||
int a1 = ((input_dc + 16) >> 5);
|
||||
int r, c;
|
||||
|
||||
@ -587,7 +587,7 @@ void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr, unsigned ch
|
||||
|
||||
}
|
||||
|
||||
void vp8_short_inv_walsh4x4_c(short *input, short *output) {
|
||||
void vp9_short_inv_walsh4x4_c(short *input, short *output) {
|
||||
int i;
|
||||
int a1, b1, c1, d1;
|
||||
short *ip = input;
|
||||
@ -624,7 +624,7 @@ void vp8_short_inv_walsh4x4_c(short *input, short *output) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_short_inv_walsh4x4_1_c(short *in, short *out) {
|
||||
void vp9_short_inv_walsh4x4_1_c(short *in, short *out) {
|
||||
int i;
|
||||
short tmp[4];
|
||||
short *ip = in;
|
||||
@ -644,7 +644,7 @@ void vp8_short_inv_walsh4x4_1_c(short *in, short *out) {
|
||||
}
|
||||
|
||||
#if CONFIG_LOSSLESS
|
||||
void vp8_short_inv_walsh4x4_lossless_c(short *input, short *output) {
|
||||
void vp9_short_inv_walsh4x4_lossless_c(short *input, short *output) {
|
||||
int i;
|
||||
int a1, b1, c1, d1;
|
||||
short *ip = input;
|
||||
@ -684,7 +684,7 @@ void vp8_short_inv_walsh4x4_lossless_c(short *input, short *output) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_short_inv_walsh4x4_1_lossless_c(short *in, short *out) {
|
||||
void vp9_short_inv_walsh4x4_1_lossless_c(short *in, short *out) {
|
||||
int i;
|
||||
short tmp[4];
|
||||
short *ip = in;
|
||||
@ -703,7 +703,7 @@ void vp8_short_inv_walsh4x4_1_lossless_c(short *in, short *out) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_short_inv_walsh4x4_x8_c(short *input, short *output, int pitch) {
|
||||
void vp9_short_inv_walsh4x4_x8_c(short *input, short *output, int pitch) {
|
||||
int i;
|
||||
int a1, b1, c1, d1;
|
||||
short *ip = input;
|
||||
@ -744,7 +744,7 @@ void vp8_short_inv_walsh4x4_x8_c(short *input, short *output, int pitch) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_short_inv_walsh4x4_1_x8_c(short *in, short *out, int pitch) {
|
||||
void vp9_short_inv_walsh4x4_1_x8_c(short *in, short *out, int pitch) {
|
||||
int i;
|
||||
short tmp[4];
|
||||
short *ip = in;
|
||||
@ -765,10 +765,10 @@ void vp8_short_inv_walsh4x4_1_x8_c(short *in, short *out, int pitch) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_dc_only_inv_walsh_add_c(short input_dc, unsigned char *pred_ptr, unsigned char *dst_ptr, int pitch, int stride) {
|
||||
void vp9_dc_only_inv_walsh_add_c(short input_dc, unsigned char *pred_ptr, unsigned char *dst_ptr, int pitch, int stride) {
|
||||
int r, c;
|
||||
short tmp[16];
|
||||
vp8_short_inv_walsh4x4_1_x8_c(&input_dc, tmp, 4 << 1);
|
||||
vp9_short_inv_walsh4x4_1_x8_c(&input_dc, tmp, 4 << 1);
|
||||
|
||||
for (r = 0; r < 4; r++) {
|
||||
for (c = 0; c < 4; c++) {
|
||||
@ -788,7 +788,7 @@ void vp8_dc_only_inv_walsh_add_c(short input_dc, unsigned char *pred_ptr, unsign
|
||||
}
|
||||
#endif
|
||||
|
||||
void vp8_dc_only_idct_add_8x8_c(short input_dc,
|
||||
void vp9_dc_only_idct_add_8x8_c(short input_dc,
|
||||
unsigned char *pred_ptr,
|
||||
unsigned char *dst_ptr,
|
||||
int pitch, int stride) {
|
||||
@ -941,7 +941,7 @@ static void idctcol(int *blk) {
|
||||
}
|
||||
|
||||
#define TX_DIM 8
|
||||
void vp8_short_idct8x8_c(short *coefs, short *block, int pitch) {
|
||||
void vp9_short_idct8x8_c(short *coefs, short *block, int pitch) {
|
||||
int X[TX_DIM * TX_DIM];
|
||||
int i, j;
|
||||
int shortpitch = pitch >> 1;
|
||||
@ -966,7 +966,7 @@ void vp8_short_idct8x8_c(short *coefs, short *block, int pitch) {
|
||||
}
|
||||
|
||||
|
||||
void vp8_short_ihaar2x2_c(short *input, short *output, int pitch) {
|
||||
void vp9_short_ihaar2x2_c(short *input, short *output, int pitch) {
|
||||
int i;
|
||||
short *ip = input; // 0,1, 4, 8
|
||||
short *op = output;
|
||||
@ -983,7 +983,7 @@ void vp8_short_ihaar2x2_c(short *input, short *output, int pitch) {
|
||||
|
||||
#if 0
|
||||
// Keep a really bad float version as reference for now.
|
||||
void vp8_short_idct16x16_c(short *input, short *output, int pitch) {
|
||||
void vp9_short_idct16x16_c(short *input, short *output, int pitch) {
|
||||
|
||||
vp8_clear_system_state(); // Make it simd safe : __asm emms;
|
||||
{
|
||||
@ -1241,7 +1241,7 @@ void reference_16x16_idct_1d(double input[16], double output[16]) {
|
||||
}
|
||||
#endif
|
||||
|
||||
void vp8_short_idct16x16_c(short *input, short *output, int pitch) {
|
||||
void vp9_short_idct16x16_c(short *input, short *output, int pitch) {
|
||||
|
||||
vp8_clear_system_state(); // Make it simd safe : __asm emms;
|
||||
{
|
||||
|
@ -28,7 +28,7 @@ static void recon_dcblock_8x8(MACROBLOCKD *xd) {
|
||||
xd->block[12].dqcoeff[0] = b->diff[8];
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
BLOCKD *b, int pitch) {
|
||||
if (b->eob <= 1)
|
||||
IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
|
||||
@ -36,7 +36,7 @@ void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
int i;
|
||||
BLOCKD *blockd = xd->block;
|
||||
@ -48,27 +48,27 @@ void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
}
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
|
||||
vp9_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
int i;
|
||||
BLOCKD *blockd = xd->block;
|
||||
|
||||
for (i = 16; i < 24; i++) {
|
||||
vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
|
||||
vp9_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
vp8_inverse_transform_mby_4x4(rtcd, xd);
|
||||
vp8_inverse_transform_mbuv_4x4(rtcd, xd);
|
||||
vp9_inverse_transform_mby_4x4(rtcd, xd);
|
||||
vp9_inverse_transform_mbuv_4x4(rtcd, xd);
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
short *input_dqcoeff, short *output_coeff,
|
||||
int pitch) {
|
||||
// int b,i;
|
||||
@ -78,7 +78,7 @@ void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
// IDCT_INVOKE(rtcd, idct8_1)(b->dqcoeff, b->diff, pitch);//pitch
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
int i;
|
||||
BLOCKD *blockd = xd->block;
|
||||
@ -90,46 +90,46 @@ void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
}
|
||||
|
||||
for (i = 0; i < 9; i += 8) {
|
||||
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
|
||||
vp9_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
|
||||
&blockd[i].diff[0], 32);
|
||||
}
|
||||
for (i = 2; i < 11; i += 8) {
|
||||
vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
|
||||
vp9_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
|
||||
&blockd[i].diff[0], 32);
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
int i;
|
||||
BLOCKD *blockd = xd->block;
|
||||
|
||||
for (i = 16; i < 24; i += 4) {
|
||||
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
|
||||
vp9_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
|
||||
&blockd[i].diff[0], 16);
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
vp8_inverse_transform_mby_8x8(rtcd, xd);
|
||||
vp8_inverse_transform_mbuv_8x8(rtcd, xd);
|
||||
vp9_inverse_transform_mby_8x8(rtcd, xd);
|
||||
vp9_inverse_transform_mbuv_8x8(rtcd, xd);
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
short *input_dqcoeff,
|
||||
short *output_coeff, int pitch) {
|
||||
IDCT_INVOKE(rtcd, idct16x16)(input_dqcoeff, output_coeff, pitch);
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
vp8_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0],
|
||||
vp9_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0],
|
||||
&xd->block[0].diff[0], 32);
|
||||
}
|
||||
|
||||
void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
void vp9_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
MACROBLOCKD *xd) {
|
||||
vp8_inverse_transform_mby_16x16(rtcd, xd);
|
||||
vp8_inverse_transform_mbuv_8x8(rtcd, xd);
|
||||
vp9_inverse_transform_mby_16x16(rtcd, xd);
|
||||
vp9_inverse_transform_mbuv_8x8(rtcd, xd);
|
||||
}
|
||||
|
@ -16,19 +16,19 @@
|
||||
#include "idct.h"
|
||||
#include "blockd.h"
|
||||
|
||||
extern void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
|
||||
extern void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
|
||||
extern void vp9_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
|
||||
extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
|
||||
extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
|
||||
extern void vp9_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
|
||||
extern void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
extern void vp9_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
|
||||
short *input_dqcoeff, short *output_coeff,
|
||||
int pitch);
|
||||
extern void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
extern void vp9_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
|
||||
#endif
|
||||
|
@ -55,7 +55,7 @@ static void lf_init_lut(loop_filter_info_n *lfi) {
|
||||
|
||||
}
|
||||
|
||||
void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
|
||||
void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi,
|
||||
int sharpness_lvl) {
|
||||
int i;
|
||||
|
||||
@ -84,12 +84,12 @@ void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_loop_filter_init(VP8_COMMON *cm) {
|
||||
void vp9_loop_filter_init(VP8_COMMON *cm) {
|
||||
loop_filter_info_n *lfi = &cm->lf_info;
|
||||
int i;
|
||||
|
||||
/* init limits for given sharpness*/
|
||||
vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level);
|
||||
vp9_loop_filter_update_sharpness(lfi, cm->sharpness_level);
|
||||
cm->last_sharpness_level = cm->sharpness_level;
|
||||
|
||||
/* init LUT for lvl and hev thr picking */
|
||||
@ -101,7 +101,7 @@ void vp8_loop_filter_init(VP8_COMMON *cm) {
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_loop_filter_frame_init(VP8_COMMON *cm,
|
||||
void vp9_loop_filter_frame_init(VP8_COMMON *cm,
|
||||
MACROBLOCKD *xd,
|
||||
int default_filt_lvl) {
|
||||
int seg, /* segment number */
|
||||
@ -112,7 +112,7 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm,
|
||||
|
||||
/* update limits if sharpness has changed */
|
||||
if (cm->last_sharpness_level != cm->sharpness_level) {
|
||||
vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level);
|
||||
vp9_loop_filter_update_sharpness(lfi, cm->sharpness_level);
cm->last_sharpness_level = cm->sharpness_level;
}

@ -178,7 +178,7 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm,
}
}

void vp8_loop_filter_frame
void vp9_loop_filter_frame
(
VP8_COMMON *cm,
MACROBLOCKD *xd
@ -200,7 +200,7 @@ void vp8_loop_filter_frame
const MODE_INFO *mode_info_context = cm->mi;

/* Initialize the loop filter for this frame. */
vp8_loop_filter_frame_init(cm, xd, cm->filter_level);
vp9_loop_filter_frame_init(cm, xd, cm->filter_level);

/* Set up the buffer pointers */
y_ptr = post->y_buffer;
@ -236,15 +236,15 @@ void vp8_loop_filter_frame
mode_info_context[-1].mbmi.mb_skip_coeff)
#endif
)
vp8_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post->y_stride,
vp9_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);

if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bv8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
vp9_loop_filter_bv8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);
else
vp8_loop_filter_bv(y_ptr, u_ptr, v_ptr, post->y_stride,
vp9_loop_filter_bv(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);

}
@ -257,15 +257,15 @@ void vp8_loop_filter_frame
mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
#endif
)
vp8_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post->y_stride,
vp9_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);

if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bh8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
vp9_loop_filter_bh8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);
else
vp8_loop_filter_bh(y_ptr, u_ptr, v_ptr, post->y_stride,
vp9_loop_filter_bh(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);
}
} else {
@ -316,7 +316,7 @@ void vp8_loop_filter_frame
}
}

void vp8_loop_filter_frame_yonly
void vp9_loop_filter_frame_yonly
(
VP8_COMMON *cm,
MACROBLOCKD *xd,
@ -343,7 +343,7 @@ void vp8_loop_filter_frame_yonly
#endif

/* Initialize the loop filter for this frame. */
vp8_loop_filter_frame_init(cm, xd, default_filt_lvl);
vp9_loop_filter_frame_init(cm, xd, default_filt_lvl);

/* Set up the buffer pointers */
y_ptr = post->y_buffer;
@ -371,24 +371,24 @@ void vp8_loop_filter_frame_yonly
lfi.hev_thr = lfi_n->hev_thr[hev_index];

if (mb_col > 0)
vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);

if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bv8x8(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_bv8x8(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}

/* don't apply across umv border */
if (mb_row > 0)
vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);

if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bh8x8(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_bh8x8(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}
} else {
// FIXME: Not 8x8 aware
@ -422,7 +422,7 @@ void vp8_loop_filter_frame_yonly

}

void vp8_loop_filter_partial_frame
void vp9_loop_filter_partial_frame
(
VP8_COMMON *cm,
MACROBLOCKD *xd,
@ -459,7 +459,7 @@ void vp8_loop_filter_partial_frame
linestocopy <<= 4;

/* Note the baseline filter values for each segment */
/* See vp8_loop_filter_frame_init. Rather than call that for each change
/* See vp9_loop_filter_frame_init. Rather than call that for each change
* to default_filt_lvl, copy the relevant calculation here.
*/
if (alt_flt_enabled) {
@ -503,15 +503,15 @@ void vp8_loop_filter_partial_frame
lfi.hev_thr = lfi_n->hev_thr[hev_index];

if (mb_col > 0)
vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);

if (!skip_lf)
vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);

vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);

if (!skip_lf)
vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
vp9_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
} else {
if (mb_col > 0)
vp8_loop_filter_simple_mbv (y_ptr, post->y_stride,

@ -83,23 +83,23 @@ typedef void loop_filter_uvfunction
struct VP8Common;
struct macroblockd;

void vp8_loop_filter_init(struct VP8Common *cm);
void vp9_loop_filter_init(struct VP8Common *cm);

void vp8_loop_filter_frame_init(struct VP8Common *cm,
void vp9_loop_filter_frame_init(struct VP8Common *cm,
struct macroblockd *mbd,
int default_filt_lvl);

void vp8_loop_filter_frame(struct VP8Common *cm, struct macroblockd *mbd);
void vp9_loop_filter_frame(struct VP8Common *cm, struct macroblockd *mbd);

void vp8_loop_filter_partial_frame(struct VP8Common *cm,
void vp9_loop_filter_partial_frame(struct VP8Common *cm,
struct macroblockd *mbd,
int default_filt_lvl);

void vp8_loop_filter_frame_yonly(struct VP8Common *cm,
void vp9_loop_filter_frame_yonly(struct VP8Common *cm,
struct macroblockd *mbd,
int default_filt_lvl);

void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi,
int sharpness_lvl);

#endif

@ -93,7 +93,7 @@ static __inline void vp8_filter(signed char mask, uc hev, uc *op1,

}

void vp8_loop_filter_horizontal_edge_c
void vp9_loop_filter_horizontal_edge_c
(
unsigned char *s,
int p, /* pitch */
@ -122,7 +122,7 @@ void vp8_loop_filter_horizontal_edge_c
} while (++i < count * 8);
}

void vp8_loop_filter_vertical_edge_c
void vp9_loop_filter_vertical_edge_c
(
unsigned char *s,
int p,
@ -235,7 +235,7 @@ static __inline void vp8_mbfilter(signed char mask, uc hev, uc flat,
*op1 = u ^ 0x80;
}
}
void vp8_mbloop_filter_horizontal_edge_c
void vp9_mbloop_filter_horizontal_edge_c
(
unsigned char *s,
int p,
@ -271,7 +271,7 @@ void vp8_mbloop_filter_horizontal_edge_c
} while (++i < count * 8);

}
void vp8_mbloop_filter_vertical_edge_c
void vp9_mbloop_filter_vertical_edge_c
(
unsigned char *s,
int p,
@ -341,7 +341,7 @@ static __inline void vp8_simple_filter(signed char mask,
*op0 = u ^ 0x80;
}

void vp8_loop_filter_simple_horizontal_edge_c
void vp9_loop_filter_simple_horizontal_edge_c
(
unsigned char *s,
int p,
@ -361,7 +361,7 @@ void vp8_loop_filter_simple_horizontal_edge_c
} while (++i < 16);
}

void vp8_loop_filter_simple_vertical_edge_c
void vp9_loop_filter_simple_vertical_edge_c
(
unsigned char *s,
int p,
@ -379,85 +379,85 @@ void vp8_loop_filter_simple_vertical_edge_c
}

/* Vertical MB Filtering */
void vp8_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
vp9_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);

if (u_ptr)
vp8_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
vp9_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);

if (v_ptr)
vp8_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
vp9_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}

/* Vertical B Filtering */
void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);

if (u_ptr)
vp8_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
vp9_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);

if (v_ptr)
vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
vp9_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}

/* Horizontal MB filtering */
void vp8_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
vp9_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);

if (u_ptr)
vp8_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
vp9_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);

if (v_ptr)
vp8_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
vp9_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}

/* Horizontal B Filtering */
void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);

if (u_ptr)
vp8_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
vp9_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);

if (v_ptr)
vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
vp9_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}

void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_mbloop_filter_horizontal_edge_c(
vp9_mbloop_filter_horizontal_edge_c(
y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
}

void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
void vp9_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit) {
vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, blimit);
}

void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_mbloop_filter_vertical_edge_c(
vp9_mbloop_filter_vertical_edge_c(
y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
}

void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
void vp9_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit) {
vp8_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit);
}

@ -79,7 +79,7 @@ static void setup_macroblock(MACROBLOCKD *xd, BLOCKSET bs) {
}
}

void vp8_setup_block_dptrs(MACROBLOCKD *xd) {
void vp9_setup_block_dptrs(MACROBLOCKD *xd) {
int r, c;
BLOCKD *blockd = xd->block;

@ -116,7 +116,7 @@ void vp8_setup_block_dptrs(MACROBLOCKD *xd) {
}
}

void vp8_build_block_doffsets(MACROBLOCKD *xd) {
void vp9_build_block_doffsets(MACROBLOCKD *xd) {

/* handle the destination pitch features */
setup_macroblock(xd, DEST);

@ -30,7 +30,7 @@

/* Create/destroy static data structures. */

void vp8_initialize_common(void);
void vp9_initialize_common(void);

#define MINQ 0

@ -128,7 +128,7 @@ extern void vp8_blit_text(const char *msg, unsigned char *address, const int pit
extern void vp8_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, const int pitch);
/***********************************************************************************************************
*/
void vp8_post_proc_down_and_across_c
void vp9_post_proc_down_and_across_c
(
unsigned char *src_ptr,
unsigned char *dst_ptr,
@ -213,7 +213,7 @@ static int q2mbl(int x) {
x = 50 + (x - 50) * 10 / 8;
return x * x / 3;
}
void vp8_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int cols, int flimit) {
void vp9_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int cols, int flimit) {
int r, c, i;

unsigned char *s = src;
@ -254,7 +254,7 @@ void vp8_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int co

void vp8_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit) {
void vp9_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit) {
int r, c, i;
const short *rv3 = &vp8_rv[63 & rand()];

@ -286,7 +286,7 @@ void vp8_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, i
}

static void vp8_deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source,
static void vp9_deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post,
int q,
int low_var_thresh,
@ -306,7 +306,7 @@ static void vp8_deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source,

}

void vp8_deblock(YV12_BUFFER_CONFIG *source,
void vp9_deblock(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post,
int q,
int low_var_thresh,
@ -322,7 +322,7 @@ void vp8_deblock(YV12_BUFFER_CONFIG *source,
POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
}

void vp8_de_noise(YV12_BUFFER_CONFIG *source,
void vp9_de_noise(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post,
int q,
int low_var_thresh,
@ -359,7 +359,7 @@ void vp8_de_noise(YV12_BUFFER_CONFIG *source,

}

double vp8_gaussian(double sigma, double mu, double x) {
double vp9_gaussian(double sigma, double mu, double x) {
return 1 / (sigma * sqrt(2.0 * 3.14159265)) *
(exp(-(x - mu) * (x - mu) / (2 * sigma * sigma)));
}
@ -388,7 +388,7 @@ static void fillrd(struct postproc_state *state, int q, int a) {
next = 0;

for (i = -32; i < 32; i++) {
int a = (int)(.5 + 256 * vp8_gaussian(sigma, 0, i));
int a = (int)(.5 + 256 * vp9_gaussian(sigma, 0, i));

if (a) {
for (j = 0; j < a; j++) {
@ -440,7 +440,7 @@ static void fillrd(struct postproc_state *state, int q, int a) {
* SPECIAL NOTES : None.
*
****************************************************************************/
void vp8_plane_add_noise_c(unsigned char *Start, char *noise,
void vp9_plane_add_noise_c(unsigned char *Start, char *noise,
char blackclamp[16],
char whiteclamp[16],
char bothclamp[16],
@ -467,7 +467,7 @@ void vp8_plane_add_noise_c(unsigned char *Start, char *noise,
* edges unblended to give distinction to macro blocks in areas
* filled with the same color block.
*/
void vp8_blend_mb_inner_c(unsigned char *y, unsigned char *u, unsigned char *v,
void vp9_blend_mb_inner_c(unsigned char *y, unsigned char *u, unsigned char *v,
int y1, int u1, int v1, int alpha, int stride) {
int i, j;
int y1_const = y1 * ((1 << 16) - alpha);
@ -500,7 +500,7 @@ void vp8_blend_mb_inner_c(unsigned char *y, unsigned char *u, unsigned char *v,
/* Blend only the edge of the macro block. Leave center
* unblended to allow for other visualizations to be layered.
*/
void vp8_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v,
void vp9_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v,
int y1, int u1, int v1, int alpha, int stride) {
int i, j;
int y1_const = y1 * ((1 << 16) - alpha);
@ -555,7 +555,7 @@ void vp8_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v,
}
}

void vp8_blend_b_c(unsigned char *y, unsigned char *u, unsigned char *v,
void vp9_blend_b_c(unsigned char *y, unsigned char *u, unsigned char *v,
int y1, int u1, int v1, int alpha, int stride) {
int i, j;
int y1_const = y1 * ((1 << 16) - alpha);
@ -626,7 +626,7 @@ static void constrain_line(int x0, int *x1, int y0, int *y1, int width, int heig
#define RTCD_VTABLE(oci) NULL
#endif

int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *ppflags) {
int vp9_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *ppflags) {
int q = oci->filter_level * 10 / 6;
int flags = ppflags->post_proc_flag;
int deblock_level = ppflags->deblocking_level;
@ -654,10 +654,10 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t
#endif

if (flags & VP8D_DEMACROBLOCK) {
vp8_deblock_and_de_macro_block(oci->frame_to_show, &oci->post_proc_buffer,
vp9_deblock_and_de_macro_block(oci->frame_to_show, &oci->post_proc_buffer,
q + (deblock_level - 5) * 10, 1, 0, RTCD_VTABLE(oci));
} else if (flags & VP8D_DEBLOCK) {
vp8_deblock(oci->frame_to_show, &oci->post_proc_buffer,
vp9_deblock(oci->frame_to_show, &oci->post_proc_buffer,
q, 1, 0, RTCD_VTABLE(oci));
} else {
vp8_yv12_copy_frame_ptr(oci->frame_to_show, &oci->post_proc_buffer);

@ -41,37 +41,37 @@
#endif

#ifndef vp8_postproc_down
#define vp8_postproc_down vp8_mbpost_proc_down_c
#define vp8_postproc_down vp9_mbpost_proc_down_c
#endif
extern prototype_postproc_inplace(vp8_postproc_down);

#ifndef vp8_postproc_across
#define vp8_postproc_across vp8_mbpost_proc_across_ip_c
#define vp8_postproc_across vp9_mbpost_proc_across_ip_c
#endif
extern prototype_postproc_inplace(vp8_postproc_across);

#ifndef vp8_postproc_downacross
#define vp8_postproc_downacross vp8_post_proc_down_and_across_c
#define vp8_postproc_downacross vp9_post_proc_down_and_across_c
#endif
extern prototype_postproc(vp8_postproc_downacross);

#ifndef vp8_postproc_addnoise
#define vp8_postproc_addnoise vp8_plane_add_noise_c
#define vp8_postproc_addnoise vp9_plane_add_noise_c
#endif
extern prototype_postproc_addnoise(vp8_postproc_addnoise);

#ifndef vp8_postproc_blend_mb_inner
#define vp8_postproc_blend_mb_inner vp8_blend_mb_inner_c
#define vp8_postproc_blend_mb_inner vp9_blend_mb_inner_c
#endif
extern prototype_postproc_blend_mb_inner(vp8_postproc_blend_mb_inner);

#ifndef vp8_postproc_blend_mb_outer
#define vp8_postproc_blend_mb_outer vp8_blend_mb_outer_c
#define vp8_postproc_blend_mb_outer vp9_blend_mb_outer_c
#endif
extern prototype_postproc_blend_mb_outer(vp8_postproc_blend_mb_outer);

#ifndef vp8_postproc_blend_b
#define vp8_postproc_blend_b vp8_blend_b_c
#define vp8_postproc_blend_b vp9_blend_b_c
#endif
extern prototype_postproc_blend_b(vp8_postproc_blend_b);

@ -108,18 +108,18 @@ struct postproc_state {
};
#include "onyxc_int.h"
#include "ppflags.h"
int vp8_post_proc_frame(struct VP8Common *oci, YV12_BUFFER_CONFIG *dest,
int vp9_post_proc_frame(struct VP8Common *oci, YV12_BUFFER_CONFIG *dest,
vp8_ppflags_t *flags);

void vp8_de_noise(YV12_BUFFER_CONFIG *source,
void vp9_de_noise(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post,
int q,
int low_var_thresh,
int flag,
vp8_postproc_rtcd_vtable_t *rtcd);

void vp8_deblock(YV12_BUFFER_CONFIG *source,
void vp9_deblock(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post,
int q,
int low_var_thresh,

@ -20,7 +20,7 @@ void (*vp8_short_idct4x4)(short *input, short *output, int pitch);
void (*vp8_short_idct4x4_1)(short *input, short *output, int pitch);
void (*vp8_dc_only_idct)(short input_dc, short *output, int pitch);

extern void (*vp8_post_proc_down_and_across)(
extern void (*vp9_post_proc_down_and_across)(
unsigned char *src_ptr,
unsigned char *dst_ptr,
int src_pixels_per_line,
@ -30,12 +30,12 @@ extern void (*vp8_post_proc_down_and_across)(
int flimit
);

extern void (*vp8_mbpost_proc_down)(unsigned char *dst, int pitch, int rows, int cols, int flimit);
extern void vp8_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit);
extern void (*vp8_mbpost_proc_across_ip)(unsigned char *src, int pitch, int rows, int cols, int flimit);
extern void vp8_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int cols, int flimit);
extern void (*vp9_mbpost_proc_down)(unsigned char *dst, int pitch, int rows, int cols, int flimit);
extern void vp9_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit);
extern void (*vp9_mbpost_proc_across_ip)(unsigned char *src, int pitch, int rows, int cols, int flimit);
extern void vp9_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int cols, int flimit);

extern void vp8_post_proc_down_and_across_c
extern void vp9_post_proc_down_and_across_c
(
unsigned char *src_ptr,
unsigned char *dst_ptr,
@ -45,11 +45,11 @@ extern void vp8_post_proc_down_and_across_c
int cols,
int flimit
);
void vp8_plane_add_noise_c(unsigned char *Start, unsigned int Width, unsigned int Height, int Pitch, int q, int a);
void vp9_plane_add_noise_c(unsigned char *Start, unsigned int Width, unsigned int Height, int Pitch, int q, int a);

extern copy_mem_block_function *vp8_copy_mem16x16;
extern copy_mem_block_function *vp8_copy_mem8x8;
extern copy_mem_block_function *vp8_copy_mem8x4;
extern copy_mem_block_function *vp9_copy_mem16x16;
extern copy_mem_block_function *vp9_copy_mem8x8;
extern copy_mem_block_function *vp9_copy_mem8x4;

// PPC
extern subpixel_predict_function sixtap_predict_ppc;
@ -70,25 +70,25 @@ void recon4b_ppc(short *diff_ptr, unsigned char *pred_ptr, unsigned char *dst_pt
extern void short_idct4x4llm_ppc(short *input, short *output, int pitch);

// Generic C
extern subpixel_predict_function vp8_sixtap_predict_c;
extern subpixel_predict_function vp8_sixtap_predict8x4_c;
extern subpixel_predict_function vp8_sixtap_predict8x8_c;
extern subpixel_predict_function vp8_sixtap_predict16x16_c;
extern subpixel_predict_function vp8_bilinear_predict4x4_c;
extern subpixel_predict_function vp8_bilinear_predict8x4_c;
extern subpixel_predict_function vp8_bilinear_predict8x8_c;
extern subpixel_predict_function vp8_bilinear_predict16x16_c;
extern subpixel_predict_function vp9_sixtap_predict_c;
extern subpixel_predict_function vp9_sixtap_predict8x4_c;
extern subpixel_predict_function vp9_sixtap_predict8x8_c;
extern subpixel_predict_function vp9_sixtap_predict16x16_c;
extern subpixel_predict_function vp9_bilinear_predict4x4_c;
extern subpixel_predict_function vp9_bilinear_predict8x4_c;
extern subpixel_predict_function vp9_bilinear_predict8x8_c;
extern subpixel_predict_function vp9_bilinear_predict16x16_c;

extern copy_mem_block_function vp8_copy_mem16x16_c;
extern copy_mem_block_function vp8_copy_mem8x8_c;
extern copy_mem_block_function vp8_copy_mem8x4_c;
extern copy_mem_block_function vp9_copy_mem16x16_c;
extern copy_mem_block_function vp9_copy_mem8x8_c;
extern copy_mem_block_function vp9_copy_mem8x4_c;

void vp8_recon_b_c(short *diff_ptr, unsigned char *pred_ptr, unsigned char *dst_ptr, int stride);
void vp8_recon2b_c(short *diff_ptr, unsigned char *pred_ptr, unsigned char *dst_ptr, int stride);
void vp8_recon4b_c(short *diff_ptr, unsigned char *pred_ptr, unsigned char *dst_ptr, int stride);
void vp9_recon_b_c(short *diff_ptr, unsigned char *pred_ptr, unsigned char *dst_ptr, int stride);
void vp9_recon2b_c(short *diff_ptr, unsigned char *pred_ptr, unsigned char *dst_ptr, int stride);
void vp9_recon4b_c(short *diff_ptr, unsigned char *pred_ptr, unsigned char *dst_ptr, int stride);

extern void vp8_short_idct4x4llm_1_c(short *input, short *output, int pitch);
extern void vp8_short_idct4x4llm_c(short *input, short *output, int pitch);
extern void vp9_short_idct4x4llm_1_c(short *input, short *output, int pitch);
extern void vp9_short_idct4x4llm_c(short *input, short *output, int pitch);
extern void vp8_dc_only_idct_c(short input_dc, short *output, int pitch);

// PPC
@ -103,15 +103,15 @@ extern loop_filter_block_function loop_filter_mbhs_ppc;
extern loop_filter_block_function loop_filter_bhs_ppc;

// Generic C
extern loop_filter_block_function vp8_loop_filter_mbv_c;
extern loop_filter_block_function vp8_loop_filter_bv_c;
extern loop_filter_block_function vp8_loop_filter_mbh_c;
extern loop_filter_block_function vp8_loop_filter_bh_c;
extern loop_filter_block_function vp9_loop_filter_mbv_c;
extern loop_filter_block_function vp9_loop_filter_bv_c;
extern loop_filter_block_function vp9_loop_filter_mbh_c;
extern loop_filter_block_function vp9_loop_filter_bh_c;

extern loop_filter_block_function vp8_loop_filter_mbvs_c;
extern loop_filter_block_function vp8_loop_filter_bvs_c;
extern loop_filter_block_function vp8_loop_filter_mbhs_c;
extern loop_filter_block_function vp8_loop_filter_bhs_c;
extern loop_filter_block_function vp9_loop_filter_mbvs_c;
extern loop_filter_block_function vp9_loop_filter_bvs_c;
extern loop_filter_block_function vp9_loop_filter_mbhs_c;
extern loop_filter_block_function vp9_loop_filter_bhs_c;

extern loop_filter_block_function *vp8_lf_mbvfull;
extern loop_filter_block_function *vp8_lf_mbhfull;
@ -126,24 +126,24 @@ extern loop_filter_block_function *vp8_lf_bhsimple;
void vp8_clear_c(void) {
}

void vp8_machine_specific_config(void) {
void vp9_machine_specific_config(void) {
// Pure C:
vp8_clear_system_state = vp8_clear_c;
vp8_recon_b = vp8_recon_b_c;
vp8_recon4b = vp8_recon4b_c;
vp8_recon2b = vp8_recon2b_c;
vp9_recon_b = vp9_recon_b_c;
vp9_recon4b = vp9_recon4b_c;
vp9_recon2b = vp9_recon2b_c;

vp8_bilinear_predict16x16 = bilinear_predict16x16_ppc;
vp8_bilinear_predict8x8 = bilinear_predict8x8_ppc;
vp8_bilinear_predict8x4 = bilinear_predict8x4_ppc;
vp9_bilinear_predict16x16 = bilinear_predict16x16_ppc;
vp9_bilinear_predict8x8 = bilinear_predict8x8_ppc;
vp9_bilinear_predict8x4 = bilinear_predict8x4_ppc;
vp8_bilinear_predict = bilinear_predict4x4_ppc;

vp8_sixtap_predict16x16 = sixtap_predict16x16_ppc;
vp8_sixtap_predict8x8 = sixtap_predict8x8_ppc;
vp8_sixtap_predict8x4 = sixtap_predict8x4_ppc;
vp8_sixtap_predict = sixtap_predict_ppc;
vp9_sixtap_predict16x16 = sixtap_predict16x16_ppc;
vp9_sixtap_predict8x8 = sixtap_predict8x8_ppc;
vp9_sixtap_predict8x4 = sixtap_predict8x4_ppc;
vp9_sixtap_predict = sixtap_predict_ppc;

vp8_short_idct4x4_1 = vp8_short_idct4x4llm_1_c;
vp8_short_idct4x4_1 = vp9_short_idct4x4llm_1_c;
vp8_short_idct4x4 = short_idct4x4llm_ppc;
vp8_dc_only_idct = vp8_dc_only_idct_c;

@ -157,13 +157,13 @@ void vp8_machine_specific_config(void) {
vp8_lf_mbhsimple = loop_filter_mbhs_ppc;
vp8_lf_bhsimple = loop_filter_bhs_ppc;

vp8_post_proc_down_and_across = vp8_post_proc_down_and_across_c;
vp8_mbpost_proc_down = vp8_mbpost_proc_down_c;
vp8_mbpost_proc_across_ip = vp8_mbpost_proc_across_ip_c;
vp8_plane_add_noise = vp8_plane_add_noise_c;
vp9_post_proc_down_and_across = vp9_post_proc_down_and_across_c;
vp9_mbpost_proc_down = vp9_mbpost_proc_down_c;
vp9_mbpost_proc_across_ip = vp9_mbpost_proc_across_ip_c;
vp9_plane_add_noise = vp9_plane_add_noise_c;

vp8_copy_mem16x16 = copy_mem16x16_ppc;
vp8_copy_mem8x8 = vp8_copy_mem8x8_c;
vp8_copy_mem8x4 = vp8_copy_mem8x4_c;
vp9_copy_mem16x16 = copy_mem16x16_ppc;
vp9_copy_mem8x8 = vp9_copy_mem8x8_c;
vp9_copy_mem8x4 = vp9_copy_mem8x4_c;

}

@ -16,7 +16,7 @@ static int ac_qlookup[QINDEX_RANGE];

#define ACDC_MIN 4

void vp8_init_quant_tables() {
void vp9_init_quant_tables() {
int i;
int current_val = 4;
int last_val = 4;
@ -38,7 +38,7 @@ void vp8_init_quant_tables() {
}
}

int vp8_dc_quant(int QIndex, int Delta) {
int vp9_dc_quant(int QIndex, int Delta) {
int retval;

QIndex = QIndex + Delta;
@ -52,7 +52,7 @@ int vp8_dc_quant(int QIndex, int Delta) {
return retval;
}

int vp8_dc2quant(int QIndex, int Delta) {
int vp9_dc2quant(int QIndex, int Delta) {
int retval;

QIndex = QIndex + Delta;
@ -67,7 +67,7 @@ int vp8_dc2quant(int QIndex, int Delta) {
return retval;

}
int vp8_dc_uv_quant(int QIndex, int Delta) {
int vp9_dc_uv_quant(int QIndex, int Delta) {
int retval;

QIndex = QIndex + Delta;
@ -82,7 +82,7 @@ int vp8_dc_uv_quant(int QIndex, int Delta) {
return retval;
}

int vp8_ac_yquant(int QIndex) {
int vp9_ac_yquant(int QIndex) {
int retval;

if (QIndex > MAXQ)
@ -94,7 +94,7 @@ int vp8_ac_yquant(int QIndex) {
return retval;
}

int vp8_ac2quant(int QIndex, int Delta) {
int vp9_ac2quant(int QIndex, int Delta) {
int retval;

QIndex = QIndex + Delta;
@ -110,7 +110,7 @@ int vp8_ac2quant(int QIndex, int Delta) {

return retval;
}
int vp8_ac_uv_quant(int QIndex, int Delta) {
int vp9_ac_uv_quant(int QIndex, int Delta) {
int retval;

QIndex = QIndex + Delta;

@ -13,10 +13,10 @@
#include "blockd.h"
#include "onyxc_int.h"

extern void vp8_init_quant_tables();
extern int vp8_ac_yquant(int QIndex);
extern int vp8_dc_quant(int QIndex, int Delta);
extern int vp8_dc2quant(int QIndex, int Delta);
extern int vp8_ac2quant(int QIndex, int Delta);
extern int vp8_dc_uv_quant(int QIndex, int Delta);
extern int vp8_ac_uv_quant(int QIndex, int Delta);
extern void vp9_init_quant_tables();
extern int vp9_ac_yquant(int QIndex);
extern int vp9_dc_quant(int QIndex, int Delta);
extern int vp9_dc2quant(int QIndex, int Delta);
extern int vp9_ac2quant(int QIndex, int Delta);
extern int vp9_dc_uv_quant(int QIndex, int Delta);
extern int vp9_ac_uv_quant(int QIndex, int Delta);

@ -13,7 +13,7 @@
#include "vpx_rtcd.h"
#include "blockd.h"

void vp8_recon_b_c
void vp9_recon_b_c
(
unsigned char *pred_ptr,
short *diff_ptr,
@ -41,7 +41,7 @@ void vp8_recon_b_c
}
}

void vp8_recon_uv_b_c
void vp9_recon_uv_b_c
(
unsigned char *pred_ptr,
short *diff_ptr,
@ -68,7 +68,7 @@ void vp8_recon_uv_b_c
pred_ptr += 8;
}
}
void vp8_recon4b_c
void vp9_recon4b_c
(
unsigned char *pred_ptr,
short *diff_ptr,
@ -96,7 +96,7 @@ void vp8_recon4b_c
}
}

void vp8_recon2b_c
void vp9_recon2b_c
(
unsigned char *pred_ptr,
short *diff_ptr,
@ -125,7 +125,7 @@ void vp8_recon2b_c
}

#if CONFIG_SUPERBLOCKS
void vp8_recon_mby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
void vp9_recon_mby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
int x, y;
BLOCKD *b = &xd->block[0];
int stride = b->dst_stride;
@ -145,7 +145,7 @@ void vp8_recon_mby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
}
}

void vp8_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
void vp9_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
int x, y, i;
uint8_t *dst = udst;

@ -170,28 +170,28 @@ void vp8_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
}
#endif

void vp8_recon_mby_c(MACROBLOCKD *xd) {
void vp9_recon_mby_c(MACROBLOCKD *xd) {
int i;

for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];

vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
}

void vp8_recon_mb_c(MACROBLOCKD *xd) {
void vp9_recon_mb_c(MACROBLOCKD *xd) {
int i;

for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];

vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}

for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];

vp8_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
}

@ -18,7 +18,7 @@
#include "onyxc_int.h"
#endif

void vp8_setup_interp_filters(MACROBLOCKD *xd,
void vp9_setup_interp_filters(MACROBLOCKD *xd,
INTERPOLATIONFILTERTYPE mcomp_filter_type,
VP8_COMMON *cm) {
if (mcomp_filter_type == SIXTAP) {
@ -85,7 +85,7 @@ void vp8_setup_interp_filters(MACROBLOCKD *xd,
}
}

void vp8_copy_mem16x16_c(
void vp9_copy_mem16x16_c(
unsigned char *src,
int src_stride,
unsigned char *dst,
@ -126,7 +126,7 @@ void vp8_copy_mem16x16_c(

}

void vp8_avg_mem16x16_c(
void vp9_avg_mem16x16_c(
unsigned char *src,
int src_stride,
unsigned char *dst,
@ -145,7 +145,7 @@ void vp8_avg_mem16x16_c(
}
}

void vp8_copy_mem8x8_c(
void vp9_copy_mem8x8_c(
unsigned char *src,
int src_stride,
unsigned char *dst,
@ -173,7 +173,7 @@ void vp8_copy_mem8x8_c(

}

void vp8_avg_mem8x8_c(
void vp9_avg_mem8x8_c(
unsigned char *src,
int src_stride,
unsigned char *dst,
@ -192,7 +192,7 @@ void vp8_avg_mem8x8_c(
}
}

void vp8_copy_mem8x4_c(
void vp9_copy_mem8x4_c(
unsigned char *src,
int src_stride,
unsigned char *dst,
@ -222,7 +222,7 @@ void vp8_copy_mem8x4_c(

void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) {
void vp9_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) {
int r;
unsigned char *ptr_base;
unsigned char *ptr;
@ -255,12 +255,12 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) {
}

/*
* Similar to vp8_build_inter_predictors_b(), but instead of storing the
* Similar to vp9_build_inter_predictors_b(), but instead of storing the
* results in d->predictor, we average the contents of d->predictor (which
* come from an earlier call to vp8_build_inter_predictors_b()) with the
* come from an earlier call to vp9_build_inter_predictors_b()) with the
* predictor of the second reference frame / motion vector.
*/
void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) {
void vp9_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) {
int r;
unsigned char *ptr_base;
unsigned char *ptr;
@ -288,7 +288,7 @@ void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf
}
}

void vp8_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
void vp9_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = d->predictor;
@ -303,7 +303,7 @@ void vp8_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
xd->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
vp8_copy_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
vp9_copy_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
}
}

@ -313,7 +313,7 @@ void vp8_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
* come from an earlier call to build_inter_predictors_4b()) with the
* predictor of the second reference frame / motion vector.
*/
void vp8_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
void vp9_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
BLOCKD *d, int pitch) {
unsigned char *ptr_base;
unsigned char *ptr;
@ -329,7 +329,7 @@ void vp8_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
xd->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
vp8_avg_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
vp9_avg_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
}
}

@ -348,7 +348,7 @@ static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
xd->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
vp8_copy_mem8x4(ptr, d->pre_stride, pred_ptr, pitch);
vp9_copy_mem8x4(ptr, d->pre_stride, pred_ptr, pitch);
}
}

@ -403,7 +403,7 @@ void filter_mb(unsigned char *src, int src_stride,
}
}
#else
// Based on vp8_post_proc_down_and_across_c (postproc.c)
// Based on vp9_post_proc_down_and_across_c (postproc.c)
void filter_mb(unsigned char *src, int src_stride,
unsigned char *dst, int dst_stride,
int width, int height) {
@ -479,7 +479,7 @@ void filter_mb(unsigned char *src, int src_stride,
#endif // CONFIG_PRED_FILTER

/*encoder only*/
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
int i, j;
BLOCKD *blockd = xd->block;

@ -562,13 +562,13 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
build_inter_predictors2b(xd, d0, 8);
else {
vp8_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
vp8_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
}

if (xd->mode_info_context->mbmi.second_ref_frame) {
vp8_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
vp8_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
}
}
}
@ -608,7 +608,7 @@ static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
}

/*encoder only*/
void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
void vp9_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
unsigned char *dst_y,
int dst_ystride,
int clamp_mvs) {
@ -653,11 +653,11 @@ void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
(ymv.as_mv.row & 7) << 1,
dst_y, dst_ystride);
} else {
vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
vp9_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
}

void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
void vp9_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_uvstride) {
@ -732,24 +732,24 @@ void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
xd->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15,
_o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
} else {
vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
vp9_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
vp9_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
}
}

void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
void vp9_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_ystride, int dst_uvstride) {
vp8_build_1st_inter16x16_predictors_mby(xd, dst_y, dst_ystride,
vp9_build_1st_inter16x16_predictors_mby(xd, dst_y, dst_ystride,
xd->mode_info_context->mbmi.need_to_clamp_mvs);
vp8_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
vp9_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
}

#if CONFIG_SUPERBLOCKS
void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
@ -768,7 +768,7 @@ void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
x->pre.u_buffer = u1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
x->pre.v_buffer = v1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;

vp8_build_1st_inter16x16_predictors_mb(x,
vp9_build_1st_inter16x16_predictors_mb(x,
dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
@ -778,7 +778,7 @@ void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
x->second_pre.u_buffer = u2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
x->second_pre.v_buffer = v2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;

vp8_build_2nd_inter16x16_predictors_mb(x,
vp9_build_2nd_inter16x16_predictors_mb(x,
dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
@ -812,7 +812,7 @@ void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
* which sometimes leads to better prediction than from a
* single reference framer.
*/
void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
void vp9_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
unsigned char *dst_y,
int dst_ystride) {
unsigned char *ptr;
@ -852,7 +852,7 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
} else {
// TODO Needs to AVERAGE with the dst_y
// For now, do not apply the prediction filter in these cases!
vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
vp9_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
} else
#endif // CONFIG_PRED_FILTER
@ -861,12 +861,12 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
(mv_row & 7) << 1, dst_y, dst_ystride);
} else {
vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
vp9_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
}
}

void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
void vp9_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_uvstride) {
@ -924,7 +924,7 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
} else {
// TODO Needs to AVERAGE with the dst_[u|v]
// For now, do not apply the prediction filter here!
vp8_avg_mem8x8(pSrc, pre_stride, pDst, dst_uvstride);
vp9_avg_mem8x8(pSrc, pre_stride, pDst, dst_uvstride);
}

// V
@ -939,19 +939,19 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
xd->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15,
omv_row & 15, dst_v, dst_uvstride);
} else {
vp8_avg_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
vp8_avg_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
vp9_avg_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
vp9_avg_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
}
}

void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
void vp9_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_ystride,
int dst_uvstride) {
vp8_build_2nd_inter16x16_predictors_mby(xd, dst_y, dst_ystride);
vp8_build_2nd_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
vp9_build_2nd_inter16x16_predictors_mby(xd, dst_y, dst_ystride);
vp9_build_2nd_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
}

static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
@ -979,16 +979,16 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
}

vp8_build_inter_predictors4b(xd, &blockd[ 0], 16);
vp8_build_inter_predictors4b(xd, &blockd[ 2], 16);
vp8_build_inter_predictors4b(xd, &blockd[ 8], 16);
vp8_build_inter_predictors4b(xd, &blockd[10], 16);
vp9_build_inter_predictors4b(xd, &blockd[ 0], 16);
vp9_build_inter_predictors4b(xd, &blockd[ 2], 16);
vp9_build_inter_predictors4b(xd, &blockd[ 8], 16);
vp9_build_inter_predictors4b(xd, &blockd[10], 16);

if (mbmi->second_ref_frame) {
vp8_build_2nd_inter_predictors4b(xd, &blockd[ 0], 16);
vp8_build_2nd_inter_predictors4b(xd, &blockd[ 2], 16);
vp8_build_2nd_inter_predictors4b(xd, &blockd[ 8], 16);
vp8_build_2nd_inter_predictors4b(xd, &blockd[10], 16);
vp9_build_2nd_inter_predictors4b(xd, &blockd[ 0], 16);
vp9_build_2nd_inter_predictors4b(xd, &blockd[ 2], 16);
vp9_build_2nd_inter_predictors4b(xd, &blockd[ 8], 16);
vp9_build_2nd_inter_predictors4b(xd, &blockd[10], 16);
}
} else {
for (i = 0; i < 16; i += 2) {
@ -1010,13 +1010,13 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
build_inter_predictors2b(xd, d0, 16);
else {
vp8_build_inter_predictors_b(d0, 16, xd->subpixel_predict);
vp8_build_inter_predictors_b(d1, 16, xd->subpixel_predict);
vp9_build_inter_predictors_b(d0, 16, xd->subpixel_predict);
vp9_build_inter_predictors_b(d1, 16, xd->subpixel_predict);
}

if (mbmi->second_ref_frame) {
vp8_build_2nd_inter_predictors_b(d0, 16, xd->subpixel_predict_avg);
vp8_build_2nd_inter_predictors_b(d1, 16, xd->subpixel_predict_avg);
vp9_build_2nd_inter_predictors_b(d0, 16, xd->subpixel_predict_avg);
vp9_build_2nd_inter_predictors_b(d1, 16, xd->subpixel_predict_avg);
}
}
}
@ -1028,13 +1028,13 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
build_inter_predictors2b(xd, d0, 8);
else {
vp8_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
vp8_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
}

if (mbmi->second_ref_frame) {
vp8_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
vp8_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
}
}
}
@ -1131,9 +1131,9 @@ void build_4x4uvmvs(MACROBLOCKD *xd) {
}
}

void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) {
void vp9_build_inter_predictors_mb(MACROBLOCKD *xd) {
if (xd->mode_info_context->mbmi.mode != SPLITMV) {
vp8_build_1st_inter16x16_predictors_mb(xd, xd->predictor,
vp9_build_1st_inter16x16_predictors_mb(xd, xd->predictor,
&xd->predictor[256],
&xd->predictor[320], 16, 8);

@ -1141,7 +1141,7 @@ void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) {
/* 256 = offset of U plane in Y+U+V buffer;
* 320 = offset of V plane in Y+U+V buffer.
* (256=16x16, 320=16x16+8x8). */
vp8_build_2nd_inter16x16_predictors_mb(xd, xd->predictor,
vp9_build_2nd_inter16x16_predictors_mb(xd, xd->predictor,
&xd->predictor[256],
&xd->predictor[320], 16, 8);
}

@ -14,29 +14,29 @@

#include "onyxc_int.h"

extern void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
extern void vp9_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
unsigned char *dst_y,
int dst_ystride,
int clamp_mvs);
extern void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
extern void vp9_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_uvstride);
extern void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
extern void vp9_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_ystride,
int dst_uvstride);

extern void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
extern void vp9_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
unsigned char *dst_y,
int dst_ystride);
extern void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
extern void vp9_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_uvstride);
extern void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
extern void vp9_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
@ -44,7 +44,7 @@ extern void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
int dst_uvstride);

#if CONFIG_SUPERBLOCKS
extern void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
extern void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
@ -52,20 +52,20 @@ extern void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
int dst_uvstride);
#endif

extern void vp8_build_inter_predictors_mb(MACROBLOCKD *xd);
extern void vp9_build_inter_predictors_mb(MACROBLOCKD *xd);

extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch,
extern void vp9_build_inter_predictors_b(BLOCKD *d, int pitch,
vp8_subpix_fn_t sppf);
extern void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
extern void vp9_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
vp8_subpix_fn_t sppf);

extern void vp8_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d,
extern void vp9_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d,
int pitch);
extern void vp8_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
extern void vp9_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
BLOCKD *d, int pitch);

extern void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd);
extern void vp8_setup_interp_filters(MACROBLOCKD *xd,
extern void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd);
extern void vp9_setup_interp_filters(MACROBLOCKD *xd,
INTERPOLATIONFILTERTYPE filter,
VP8_COMMON *cm);

@ -14,8 +14,8 @@
#include "reconintra.h"
#include "vpx_mem/vpx_mem.h"

/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd)
 * and vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd).
/* For skip_recon_mb(), add vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd)
 * and vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd).
 */

static void d27_predictor(uint8_t *ypred_ptr, int y_stride, int n,
@ -196,16 +196,16 @@ static void d153_predictor(uint8_t *ypred_ptr, int y_stride, int n,
}
}

void vp8_recon_intra_mbuv(MACROBLOCKD *xd) {
void vp9_recon_intra_mbuv(MACROBLOCKD *xd) {
int i;

for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];
vp8_recon2b(b->predictor, b->diff,*(b->base_dst) + b->dst, b->dst_stride);
vp9_recon2b(b->predictor, b->diff,*(b->base_dst) + b->dst, b->dst_stride);
}
}

void vp8_build_intra_predictors_internal(unsigned char *src, int src_stride,
void vp9_build_intra_predictors_internal(unsigned char *src, int src_stride,
unsigned char *ypred_ptr,
int y_stride, int mode, int bsize,
int up_available, int left_available) {
@ -331,23 +331,23 @@ void vp8_build_intra_predictors_internal(unsigned char *src, int src_stride,
}
}

void vp8_build_intra_predictors_mby(MACROBLOCKD *xd) {
vp8_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
void vp9_build_intra_predictors_mby(MACROBLOCKD *xd) {
vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
xd->predictor, 16,
xd->mode_info_context->mbmi.mode, 16,
xd->up_available, xd->left_available);
}

void vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd) {
vp8_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
void vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd) {
vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
xd->dst.y_buffer, xd->dst.y_stride,
xd->mode_info_context->mbmi.mode, 16,
xd->up_available, xd->left_available);
}

#if CONFIG_SUPERBLOCKS
void vp8_build_intra_predictors_sby_s(MACROBLOCKD *xd) {
vp8_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd) {
vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
xd->dst.y_buffer, xd->dst.y_stride,
xd->mode_info_context->mbmi.mode, 32,
xd->up_available, xd->left_available);
@ -359,12 +359,12 @@ void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *xd) {
unsigned char predictor[2][256];
int i;

vp8_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
predictor[0], 16,
xd->mode_info_context->mbmi.mode,
16, xd->up_available,
xd->left_available);
vp8_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
predictor[1], 16,
xd->mode_info_context->mbmi.second_mode,
16, xd->up_available,
@ -376,28 +376,28 @@ void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *xd) {
}
#endif

void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd,
void vp9_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd,
unsigned char *upred_ptr,
unsigned char *vpred_ptr,
int uv_stride,
int mode, int bsize) {
vp8_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride,
vp9_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride,
upred_ptr, uv_stride, mode, bsize,
xd->up_available, xd->left_available);
vp8_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride,
vp9_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride,
vpred_ptr, uv_stride, mode, bsize,
xd->up_available, xd->left_available);
}

void vp8_build_intra_predictors_mbuv(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mbuv_internal(xd, &xd->predictor[256],
void vp9_build_intra_predictors_mbuv(MACROBLOCKD *xd) {
vp9_build_intra_predictors_mbuv_internal(xd, &xd->predictor[256],
&xd->predictor[320], 8,
xd->mode_info_context->mbmi.uv_mode,
8);
}

void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
void vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) {
vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.uv_stride,
xd->mode_info_context->mbmi.uv_mode,
@ -405,8 +405,8 @@ void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) {
}

#if CONFIG_SUPERBLOCKS
void vp8_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) {
vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
xd->dst.v_buffer, xd->dst.uv_stride,
xd->mode_info_context->mbmi.uv_mode,
16);
@ -418,10 +418,10 @@ void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) {
unsigned char predictor[2][2][64];
int i;

vp8_build_intra_predictors_mbuv_internal(
vp9_build_intra_predictors_mbuv_internal(
xd, predictor[0][0], predictor[1][0], 8,
xd->mode_info_context->mbmi.uv_mode, 8);
vp8_build_intra_predictors_mbuv_internal(
vp9_build_intra_predictors_mbuv_internal(
xd, predictor[0][1], predictor[1][1], 8,
xd->mode_info_context->mbmi.second_uv_mode, 8);
for (i = 0; i < 64; i++) {
@ -432,10 +432,10 @@ void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) {
}
#endif

void vp8_intra8x8_predict(BLOCKD *xd,
void vp9_intra8x8_predict(BLOCKD *xd,
int mode,
unsigned char *predictor) {
vp8_build_intra_predictors_internal(*(xd->base_dst) + xd->dst,
vp9_build_intra_predictors_internal(*(xd->base_dst) + xd->dst,
xd->dst_stride, predictor, 16,
mode, 8, 1, 1);
}
@ -447,8 +447,8 @@ void vp8_comp_intra8x8_predict(BLOCKD *xd,
unsigned char predictor[2][8 * 16];
int i, j;

vp8_intra8x8_predict(xd, mode, predictor[0]);
vp8_intra8x8_predict(xd, second_mode, predictor[1]);
vp9_intra8x8_predict(xd, mode, predictor[0]);
vp9_intra8x8_predict(xd, second_mode, predictor[1]);

for (i = 0; i < 8 * 16; i += 16) {
for (j = i; j < i + 8; j++) {
@ -458,10 +458,10 @@ void vp8_comp_intra8x8_predict(BLOCKD *xd,
}
#endif

void vp8_intra_uv4x4_predict(BLOCKD *xd,
void vp9_intra_uv4x4_predict(BLOCKD *xd,
int mode,
unsigned char *predictor) {
vp8_build_intra_predictors_internal(*(xd->base_dst) + xd->dst,
vp9_build_intra_predictors_internal(*(xd->base_dst) + xd->dst,
xd->dst_stride, predictor, 8,
mode, 4, 1, 1);
}
@ -473,8 +473,8 @@ void vp8_comp_intra_uv4x4_predict(BLOCKD *xd,
unsigned char predictor[2][8 * 4];
int i, j;

vp8_intra_uv4x4_predict(xd, mode, predictor[0]);
vp8_intra_uv4x4_predict(xd, mode2, predictor[1]);
vp9_intra_uv4x4_predict(xd, mode, predictor[0]);
vp9_intra_uv4x4_predict(xd, mode2, predictor[1]);

for (i = 0; i < 4 * 8; i += 8) {
for (j = i; j < i + 4; j++) {
@ -14,7 +14,7 @@
#include "reconintra.h"
#include "vpx_rtcd.h"

void vp8_intra4x4_predict_c(BLOCKD *x, int b_mode,
void vp9_intra4x4_predict_c(BLOCKD *x, int b_mode,
unsigned char *predictor) {
int i, r, c;

@ -281,8 +281,8 @@ void vp8_comp_intra4x4_predict_c(BLOCKD *x,
unsigned char predictor[2][4 * 16];
int i, j;

vp8_intra4x4_predict(x, b_mode, predictor[0]);
vp8_intra4x4_predict(x, b_mode2, predictor[1]);
vp9_intra4x4_predict(x, b_mode, predictor[0]);
vp9_intra4x4_predict(x, b_mode2, predictor[1]);

for (i = 0; i < 16 * 4; i += 16) {
for (j = i; j < i + 4; j++) {
@ -295,7 +295,7 @@ void vp8_comp_intra4x4_predict_c(BLOCKD *x,
/* copy 4 bytes from the above right down so that the 4x4 prediction modes using pixels above and
 * to the right prediction have filled in pixels to use.
 */
void vp8_intra_prediction_down_copy(MACROBLOCKD *xd) {
void vp9_intra_prediction_down_copy(MACROBLOCKD *xd) {
int extend_edge = (xd->mb_to_right_edge == 0 && xd->mb_index < 2);
unsigned char *above_right = *(xd->block[0].base_dst) + xd->block[0].dst -
xd->block[0].dst_stride + 16;
@ -12,6 +12,6 @@
#ifndef __INC_RECONINTRA4x4_H
#define __INC_RECONINTRA4x4_H

extern void vp8_intra_prediction_down_copy(MACROBLOCKD *xd);
extern void vp9_intra_prediction_down_copy(MACROBLOCKD *xd);

#endif
@ -19,20 +19,20 @@ EOF
}
forward_decls common_forward_decls

prototype void vp8_filter_block2d_4x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x8_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_16x16_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp9_filter_block2d_4x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp9_filter_block2d_8x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp9_filter_block2d_8x8_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp9_filter_block2d_16x16_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"

# At the very least, MSVC 2008 has compiler bug exhibited by this code; code
# compiles warning free but a dissassembly of generated code show bugs. To be
# on the safe side, only enabled when compiled with 'gcc'.
if [ "$CONFIG_GCC" = "yes" ]; then
specialize vp8_filter_block2d_4x4_8 sse4_1 sse2
specialize vp9_filter_block2d_4x4_8 sse4_1 sse2
fi
specialize vp8_filter_block2d_8x4_8 ssse3 #sse4_1 sse2
specialize vp8_filter_block2d_8x8_8 ssse3 #sse4_1 sse2
specialize vp8_filter_block2d_16x16_8 ssse3 #sse4_1 sse2
specialize vp9_filter_block2d_8x4_8 ssse3 #sse4_1 sse2
specialize vp9_filter_block2d_8x8_8 ssse3 #sse4_1 sse2
specialize vp9_filter_block2d_16x16_8 ssse3 #sse4_1 sse2

#
# Dequant
@ -73,92 +73,92 @@ specialize vp9_dequant_idct_add_uv_block mmx
#
# RECON
#
prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
prototype void vp9_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp9_copy_mem16x16 mmx sse2 media neon dspr2
vp9_copy_mem16x16_media=vp9_copy_mem16x16_v6
vp9_copy_mem16x16_dspr2=vp9_copy_mem16x16_dspr2

prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x8 mmx media neon dspr2
vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6
vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2
prototype void vp9_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp9_copy_mem8x8 mmx media neon dspr2
vp9_copy_mem8x8_media=vp9_copy_mem8x8_v6
vp9_copy_mem8x8_dspr2=vp9_copy_mem8x8_dspr2

prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x4 mmx
prototype void vp9_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp9_copy_mem8x4 mmx

prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
specialize vp8_intra4x4_predict
prototype void vp9_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
specialize vp9_intra4x4_predict

prototype void vp8_avg_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_avg_mem16x16
prototype void vp9_avg_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp9_avg_mem16x16

prototype void vp8_avg_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_avg_mem8x8
prototype void vp9_avg_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp9_avg_mem8x8

prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x4 mmx media neon dspr2
vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6
vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2
prototype void vp9_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp9_copy_mem8x4 mmx media neon dspr2
vp9_copy_mem8x4_media=vp9_copy_mem8x4_v6
vp9_copy_mem8x4_dspr2=vp9_copy_mem8x4_dspr2

prototype void vp8_recon_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon_b
prototype void vp9_recon_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp9_recon_b

prototype void vp8_recon_uv_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon_uv_b
prototype void vp9_recon_uv_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp9_recon_uv_b

prototype void vp8_recon2b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon2b sse2
prototype void vp9_recon2b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp9_recon2b sse2

prototype void vp8_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon4b sse2
prototype void vp9_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp9_recon4b sse2

prototype void vp8_recon_mb "struct macroblockd *x"
specialize vp8_recon_mb
prototype void vp9_recon_mb "struct macroblockd *x"
specialize vp9_recon_mb

prototype void vp8_recon_mby "struct macroblockd *x"
specialize vp8_recon_mby
prototype void vp9_recon_mby "struct macroblockd *x"
specialize vp9_recon_mby

prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby_s
prototype void vp9_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_mby_s

prototype void vp8_build_intra_predictors_sby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_sby_s;
prototype void vp9_build_intra_predictors_sby_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_sby_s;

prototype void vp8_build_intra_predictors_sbuv_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_sbuv_s;
prototype void vp9_build_intra_predictors_sbuv_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_sbuv_s;

prototype void vp8_build_intra_predictors_mby "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby;
prototype void vp9_build_intra_predictors_mby "struct macroblockd *x"
specialize vp9_build_intra_predictors_mby;

prototype void vp8_build_comp_intra_predictors_mby "struct macroblockd *x"
specialize vp8_build_comp_intra_predictors_mby;

prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby_s;
prototype void vp9_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_mby_s;

prototype void vp8_build_intra_predictors_mbuv "struct macroblockd *x"
specialize vp8_build_intra_predictors_mbuv;
prototype void vp9_build_intra_predictors_mbuv "struct macroblockd *x"
specialize vp9_build_intra_predictors_mbuv;

prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mbuv_s;
prototype void vp9_build_intra_predictors_mbuv_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_mbuv_s;

prototype void vp8_build_comp_intra_predictors_mbuv "struct macroblockd *x"
specialize vp8_build_comp_intra_predictors_mbuv;

prototype void vp8_intra4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra4x4_predict;
prototype void vp9_intra4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp9_intra4x4_predict;

prototype void vp8_comp_intra4x4_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra4x4_predict;

prototype void vp8_intra8x8_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra8x8_predict;
prototype void vp9_intra8x8_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp9_intra8x8_predict;

prototype void vp8_comp_intra8x8_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra8x8_predict;

prototype void vp8_intra_uv4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra_uv4x4_predict;
prototype void vp9_intra_uv4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp9_intra_uv4x4_predict;

prototype void vp8_comp_intra_uv4x4_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra_uv4x4_predict;
@ -166,55 +166,55 @@ specialize vp8_comp_intra_uv4x4_predict;
#
# Loopfilter
#
prototype void vp8_loop_filter_mbv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_mbv sse2
prototype void vp9_loop_filter_mbv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp9_loop_filter_mbv sse2

prototype void vp8_loop_filter_bv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_bv sse2
prototype void vp9_loop_filter_bv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp9_loop_filter_bv sse2

prototype void vp8_loop_filter_bv8x8 "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_bv8x8 sse2
prototype void vp9_loop_filter_bv8x8 "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp9_loop_filter_bv8x8 sse2

prototype void vp8_loop_filter_mbh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_mbh sse2
prototype void vp9_loop_filter_mbh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp9_loop_filter_mbh sse2

prototype void vp8_loop_filter_bh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_bh sse2
prototype void vp9_loop_filter_bh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp9_loop_filter_bh sse2

prototype void vp8_loop_filter_bh8x8 "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_bh8x8 sse2
prototype void vp9_loop_filter_bh8x8 "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp9_loop_filter_bh8x8 sse2

prototype void vp8_loop_filter_simple_mbv "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_mbv mmx sse2 media neon
vp8_loop_filter_simple_mbv_c=vp8_loop_filter_simple_vertical_edge_c
vp8_loop_filter_simple_mbv_mmx=vp8_loop_filter_simple_vertical_edge_mmx
vp8_loop_filter_simple_mbv_sse2=vp8_loop_filter_simple_vertical_edge_sse2
vp8_loop_filter_simple_mbv_media=vp8_loop_filter_simple_vertical_edge_armv6
vp8_loop_filter_simple_mbv_neon=vp8_loop_filter_mbvs_neon
vp8_loop_filter_simple_mbv_c=vp9_loop_filter_simple_vertical_edge_c
vp8_loop_filter_simple_mbv_mmx=vp9_loop_filter_simple_vertical_edge_mmx
vp8_loop_filter_simple_mbv_sse2=vp9_loop_filter_simple_vertical_edge_sse2
vp8_loop_filter_simple_mbv_media=vp9_loop_filter_simple_vertical_edge_armv6
vp8_loop_filter_simple_mbv_neon=vp9_loop_filter_mbvs_neon

prototype void vp8_loop_filter_simple_mbh "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_mbh mmx sse2 media neon
vp8_loop_filter_simple_mbh_c=vp8_loop_filter_simple_horizontal_edge_c
vp8_loop_filter_simple_mbh_mmx=vp8_loop_filter_simple_horizontal_edge_mmx
vp8_loop_filter_simple_mbh_sse2=vp8_loop_filter_simple_horizontal_edge_sse2
vp8_loop_filter_simple_mbh_media=vp8_loop_filter_simple_horizontal_edge_armv6
vp8_loop_filter_simple_mbh_neon=vp8_loop_filter_mbhs_neon
vp8_loop_filter_simple_mbh_c=vp9_loop_filter_simple_horizontal_edge_c
vp8_loop_filter_simple_mbh_mmx=vp9_loop_filter_simple_horizontal_edge_mmx
vp8_loop_filter_simple_mbh_sse2=vp9_loop_filter_simple_horizontal_edge_sse2
vp8_loop_filter_simple_mbh_media=vp9_loop_filter_simple_horizontal_edge_armv6
vp8_loop_filter_simple_mbh_neon=vp9_loop_filter_mbhs_neon

prototype void vp8_loop_filter_simple_bv "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_bv mmx sse2 media neon
vp8_loop_filter_simple_bv_c=vp8_loop_filter_bvs_c
vp8_loop_filter_simple_bv_mmx=vp8_loop_filter_bvs_mmx
vp8_loop_filter_simple_bv_sse2=vp8_loop_filter_bvs_sse2
vp8_loop_filter_simple_bv_media=vp8_loop_filter_bvs_armv6
vp8_loop_filter_simple_bv_neon=vp8_loop_filter_bvs_neon
vp8_loop_filter_simple_bv_c=vp9_loop_filter_bvs_c
vp8_loop_filter_simple_bv_mmx=vp9_loop_filter_bvs_mmx
vp8_loop_filter_simple_bv_sse2=vp9_loop_filter_bvs_sse2
vp8_loop_filter_simple_bv_media=vp9_loop_filter_bvs_armv6
vp8_loop_filter_simple_bv_neon=vp9_loop_filter_bvs_neon

prototype void vp8_loop_filter_simple_bh "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_bh mmx sse2 media neon
vp8_loop_filter_simple_bh_c=vp8_loop_filter_bhs_c
vp8_loop_filter_simple_bh_mmx=vp8_loop_filter_bhs_mmx
vp8_loop_filter_simple_bh_sse2=vp8_loop_filter_bhs_sse2
vp8_loop_filter_simple_bh_media=vp8_loop_filter_bhs_armv6
vp8_loop_filter_simple_bh_neon=vp8_loop_filter_bhs_neon
vp8_loop_filter_simple_bh_c=vp9_loop_filter_bhs_c
vp8_loop_filter_simple_bh_mmx=vp9_loop_filter_bhs_mmx
vp8_loop_filter_simple_bh_sse2=vp9_loop_filter_bhs_sse2
vp8_loop_filter_simple_bh_media=vp9_loop_filter_bhs_armv6
vp8_loop_filter_simple_bh_neon=vp9_loop_filter_bhs_neon

#
# sad 16x3, 3x16
@ -12,7 +12,7 @@
#include "setupintrarecon.h"
#include "vpx_mem/vpx_mem.h"

void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf) {
void vp9_setup_intra_recon(YV12_BUFFER_CONFIG *ybf) {
int i;

/* set up frame new frame for intra coded blocks */
@ -10,4 +10,4 @@


#include "vpx_scale/yv12config.h"
extern void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf);
extern void vp9_setup_intra_recon(YV12_BUFFER_CONFIG *ybf);
@ -25,141 +25,141 @@
#endif

#ifndef vp8_subpix_sixtap16x16
#define vp8_subpix_sixtap16x16 vp8_sixtap_predict16x16_c
#define vp8_subpix_sixtap16x16 vp9_sixtap_predict16x16_c
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap16x16);

#ifndef vp8_subpix_sixtap8x8
#define vp8_subpix_sixtap8x8 vp8_sixtap_predict8x8_c
#define vp8_subpix_sixtap8x8 vp9_sixtap_predict8x8_c
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap8x8);

#ifndef vp8_subpix_sixtap_avg16x16
#define vp8_subpix_sixtap_avg16x16 vp8_sixtap_predict_avg16x16_c
#define vp8_subpix_sixtap_avg16x16 vp9_sixtap_predict_avg16x16_c
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap_avg16x16);

#ifndef vp8_subpix_sixtap_avg8x8
#define vp8_subpix_sixtap_avg8x8 vp8_sixtap_predict_avg8x8_c
#define vp8_subpix_sixtap_avg8x8 vp9_sixtap_predict_avg8x8_c
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap_avg8x8);
#ifndef vp8_subpix_sixtap8x4
#define vp8_subpix_sixtap8x4 vp8_sixtap_predict8x4_c
#define vp8_subpix_sixtap8x4 vp9_sixtap_predict8x4_c
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap8x4);

#ifndef vp8_subpix_sixtap4x4
#define vp8_subpix_sixtap4x4 vp8_sixtap_predict_c
#define vp8_subpix_sixtap4x4 vp9_sixtap_predict_c
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap4x4);

#ifndef vp8_subpix_sixtap_avg4x4
#define vp8_subpix_sixtap_avg4x4 vp8_sixtap_predict_avg_c
#define vp8_subpix_sixtap_avg4x4 vp9_sixtap_predict_avg_c
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap_avg4x4);

#ifndef vp8_subpix_eighttap16x16
#define vp8_subpix_eighttap16x16 vp8_eighttap_predict16x16_c
#define vp8_subpix_eighttap16x16 vp9_eighttap_predict16x16_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap16x16);

#ifndef vp8_subpix_eighttap8x8
#define vp8_subpix_eighttap8x8 vp8_eighttap_predict8x8_c
#define vp8_subpix_eighttap8x8 vp9_eighttap_predict8x8_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap8x8);

#ifndef vp8_subpix_eighttap_avg16x16
#define vp8_subpix_eighttap_avg16x16 vp8_eighttap_predict_avg16x16_c
#define vp8_subpix_eighttap_avg16x16 vp9_eighttap_predict_avg16x16_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap_avg16x16);

#ifndef vp8_subpix_eighttap_avg8x8
#define vp8_subpix_eighttap_avg8x8 vp8_eighttap_predict_avg8x8_c
#define vp8_subpix_eighttap_avg8x8 vp9_eighttap_predict_avg8x8_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap_avg8x8);

#ifndef vp8_subpix_eighttap8x4
#define vp8_subpix_eighttap8x4 vp8_eighttap_predict8x4_c
#define vp8_subpix_eighttap8x4 vp9_eighttap_predict8x4_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap8x4);

#ifndef vp8_subpix_eighttap4x4
#define vp8_subpix_eighttap4x4 vp8_eighttap_predict_c
#define vp8_subpix_eighttap4x4 vp9_eighttap_predict_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap4x4);

#ifndef vp8_subpix_eighttap_avg4x4
#define vp8_subpix_eighttap_avg4x4 vp8_eighttap_predict_avg4x4_c
#define vp8_subpix_eighttap_avg4x4 vp9_eighttap_predict_avg4x4_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap_avg4x4);

#ifndef vp8_subpix_eighttap16x16_sharp
#define vp8_subpix_eighttap16x16_sharp vp8_eighttap_predict16x16_sharp_c
#define vp8_subpix_eighttap16x16_sharp vp9_eighttap_predict16x16_sharp_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap16x16_sharp);

#ifndef vp8_subpix_eighttap8x8_sharp
#define vp8_subpix_eighttap8x8_sharp vp8_eighttap_predict8x8_sharp_c
#define vp8_subpix_eighttap8x8_sharp vp9_eighttap_predict8x8_sharp_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap8x8_sharp);

#ifndef vp8_subpix_eighttap_avg16x16_sharp
#define vp8_subpix_eighttap_avg16x16_sharp vp8_eighttap_predict_avg16x16_sharp_c
#define vp8_subpix_eighttap_avg16x16_sharp vp9_eighttap_predict_avg16x16_sharp_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap_avg16x16_sharp);

#ifndef vp8_subpix_eighttap_avg8x8_sharp
#define vp8_subpix_eighttap_avg8x8_sharp vp8_eighttap_predict_avg8x8_sharp_c
#define vp8_subpix_eighttap_avg8x8_sharp vp9_eighttap_predict_avg8x8_sharp_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap_avg8x8_sharp);

#ifndef vp8_subpix_eighttap8x4_sharp
#define vp8_subpix_eighttap8x4_sharp vp8_eighttap_predict8x4_sharp_c
#define vp8_subpix_eighttap8x4_sharp vp9_eighttap_predict8x4_sharp_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap8x4_sharp);

#ifndef vp8_subpix_eighttap4x4_sharp
#define vp8_subpix_eighttap4x4_sharp vp8_eighttap_predict_sharp_c
#define vp8_subpix_eighttap4x4_sharp vp9_eighttap_predict_sharp_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap4x4_sharp);

#ifndef vp8_subpix_eighttap_avg4x4_sharp
#define vp8_subpix_eighttap_avg4x4_sharp vp8_eighttap_predict_avg4x4_sharp_c
#define vp8_subpix_eighttap_avg4x4_sharp vp9_eighttap_predict_avg4x4_sharp_c
#endif
extern prototype_subpixel_predict(vp8_subpix_eighttap_avg4x4_sharp);

#ifndef vp8_subpix_bilinear16x16
#define vp8_subpix_bilinear16x16 vp8_bilinear_predict16x16_c
#define vp8_subpix_bilinear16x16 vp9_bilinear_predict16x16_c
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear16x16);

#ifndef vp8_subpix_bilinear8x8
#define vp8_subpix_bilinear8x8 vp8_bilinear_predict8x8_c
#define vp8_subpix_bilinear8x8 vp9_bilinear_predict8x8_c
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear8x8);

#ifndef vp8_subpix_bilinear_avg16x16
#define vp8_subpix_bilinear_avg16x16 vp8_bilinear_predict_avg16x16_c
#define vp8_subpix_bilinear_avg16x16 vp9_bilinear_predict_avg16x16_c
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear_avg16x16);

#ifndef vp8_subpix_bilinear_avg8x8
#define vp8_subpix_bilinear_avg8x8 vp8_bilinear_predict_avg8x8_c
#define vp8_subpix_bilinear_avg8x8 vp9_bilinear_predict_avg8x8_c
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear_avg8x8);

#ifndef vp8_subpix_bilinear8x4
#define vp8_subpix_bilinear8x4 vp8_bilinear_predict8x4_c
#define vp8_subpix_bilinear8x4 vp9_bilinear_predict8x4_c
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear8x4);

#ifndef vp8_subpix_bilinear4x4
#define vp8_subpix_bilinear4x4 vp8_bilinear_predict4x4_c
#define vp8_subpix_bilinear4x4 vp9_bilinear_predict4x4_c
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear4x4);

#ifndef vp8_subpix_bilinear_avg4x4
#define vp8_subpix_bilinear_avg4x4 vp8_bilinear_predict_avg4x4_c
#define vp8_subpix_bilinear_avg4x4 vp9_bilinear_predict_avg4x4_c
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear_avg4x4);
@ -11,7 +11,7 @@

#include "swapyv12buffer.h"

void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, YV12_BUFFER_CONFIG *last_frame) {
void vp9_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, YV12_BUFFER_CONFIG *last_frame) {
unsigned char *temp;

temp = last_frame->buffer_alloc;
@ -14,6 +14,6 @@

#include "vpx_scale/yv12config.h"

void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, YV12_BUFFER_CONFIG *last_frame);
void vp9_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, YV12_BUFFER_CONFIG *last_frame);

#endif
@ -18,4 +18,4 @@ void vpx_reset_mmx_state(void);
#endif

struct VP8Common;
void vp8_machine_specific_config(struct VP8Common *);
void vp9_machine_specific_config(struct VP8Common *);
@ -39,11 +39,11 @@ static void tree2tok(
} while (++v & 1);
}

void vp8_tokens_from_tree(struct vp8_token_struct *p, vp8_tree t) {
void vp9_tokens_from_tree(struct vp8_token_struct *p, vp8_tree t) {
tree2tok(p, t, 0, 0, 0);
}

void vp8_tokens_from_tree_offset(struct vp8_token_struct *p, vp8_tree t,
void vp9_tokens_from_tree_offset(struct vp8_token_struct *p, vp8_tree t,
int offset) {
tree2tok(p - offset, t, 0, 0, 0);
}
@ -94,7 +94,7 @@ static void branch_counts(
}


void vp8_tree_probs_from_distribution(
void vp9_tree_probs_from_distribution(
int n, /* n = size of alphabet */
vp8_token tok [ /* n */ ],
vp8_tree tree,
@ -125,7 +125,7 @@ void vp8_tree_probs_from_distribution(
} while (++t < tree_len);
}

vp8_prob vp8_bin_prob_from_distribution(const unsigned int counts[2]) {
vp8_prob vp9_bin_prob_from_distribution(const unsigned int counts[2]) {
int tot_count = counts[0] + counts[1];
vp8_prob prob;
if (tot_count) {
@ -52,8 +52,8 @@ typedef const struct vp8_token_struct {

/* Construct encoding array from tree. */

void vp8_tokens_from_tree(struct vp8_token_struct *, vp8_tree);
void vp8_tokens_from_tree_offset(struct vp8_token_struct *, vp8_tree,
void vp9_tokens_from_tree(struct vp8_token_struct *, vp8_tree);
void vp9_tokens_from_tree_offset(struct vp8_token_struct *, vp8_tree,
int offset);


@ -62,7 +62,7 @@ void vp8_tokens_from_tree_offset(struct vp8_token_struct *, vp8_tree,
taken for each node on the tree; this facilitiates decisions as to
probability updates. */

void vp8_tree_probs_from_distribution(
void vp9_tree_probs_from_distribution(
int n, /* n = size of alphabet */
vp8_token tok [ /* n */ ],
vp8_tree tree,
@ -85,6 +85,6 @@ void vp8bc_tree_probs_from_distribution(
c_bool_coder_spec *s
);

vp8_prob vp8_bin_prob_from_distribution(const unsigned int counts[2]);
vp8_prob vp9_bin_prob_from_distribution(const unsigned int counts[2]);

#endif
@ -65,7 +65,7 @@ DECLARE_ALIGNED(16, static const unsigned int, rounding_c[4]) = {
result = _mm_srai_epi32(mad_all, VP8_FILTER_SHIFT); \
}

void vp8_filter_block2d_4x4_8_sse2
void vp9_filter_block2d_4x4_8_sse2
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -242,7 +242,7 @@ void vp8_filter_block2d_4x4_8_sse2
}
}

void vp8_filter_block2d_8x4_8_sse2
void vp9_filter_block2d_8x4_8_sse2
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -250,13 +250,13 @@ void vp8_filter_block2d_8x4_8_sse2
) {
int j;
for (j=0; j<8; j+=4) {
vp8_filter_block2d_4x4_8_sse2(src_ptr + j, src_stride,
vp9_filter_block2d_4x4_8_sse2(src_ptr + j, src_stride,
HFilter_aligned16, VFilter_aligned16,
dst_ptr + j, dst_stride);
}
}

void vp8_filter_block2d_8x8_8_sse2
void vp9_filter_block2d_8x8_8_sse2
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -265,14 +265,14 @@ void vp8_filter_block2d_8x8_8_sse2
int i, j;
for (i=0; i<8; i+=4) {
for (j=0; j<8; j+=4) {
vp8_filter_block2d_4x4_8_sse2(src_ptr + j + i*src_stride, src_stride,
vp9_filter_block2d_4x4_8_sse2(src_ptr + j + i*src_stride, src_stride,
HFilter_aligned16, VFilter_aligned16,
dst_ptr + j + i*dst_stride, dst_stride);
}
}
}

void vp8_filter_block2d_16x16_8_sse2
void vp9_filter_block2d_16x16_8_sse2
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -281,7 +281,7 @@ void vp8_filter_block2d_16x16_8_sse2
int i, j;
for (i=0; i<16; i+=4) {
for (j=0; j<16; j+=4) {
vp8_filter_block2d_4x4_8_sse2(src_ptr + j + i*src_stride, src_stride,
vp9_filter_block2d_4x4_8_sse2(src_ptr + j + i*src_stride, src_stride,
HFilter_aligned16, VFilter_aligned16,
dst_ptr + j + i*dst_stride, dst_stride);
}
@ -84,7 +84,7 @@ DECLARE_ALIGNED(16, static const unsigned char, transpose_c[16]) = {
result = _mm_srai_epi32(mad_all, VP8_FILTER_SHIFT); \
}

void vp8_filter_block2d_4x4_8_sse4_1
void vp9_filter_block2d_4x4_8_sse4_1
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -315,7 +315,7 @@ void vp8_filter_block2d_4x4_8_sse4_1
}
}

void vp8_filter_block2d_8x4_8_sse4_1
void vp9_filter_block2d_8x4_8_sse4_1
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -323,13 +323,13 @@ void vp8_filter_block2d_8x4_8_sse4_1
) {
int j;
for (j=0; j<8; j+=4) {
vp8_filter_block2d_4x4_8_sse4_1(src_ptr + j, src_stride,
vp9_filter_block2d_4x4_8_sse4_1(src_ptr + j, src_stride,
HFilter_aligned16, VFilter_aligned16,
dst_ptr + j, dst_stride);
}
}

void vp8_filter_block2d_8x8_8_sse4_1
void vp9_filter_block2d_8x8_8_sse4_1
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -338,14 +338,14 @@ void vp8_filter_block2d_8x8_8_sse4_1
int i, j;
for (i=0; i<8; i+=4) {
for (j=0; j<8; j+=4) {
vp8_filter_block2d_4x4_8_sse4_1(src_ptr + j + i*src_stride, src_stride,
vp9_filter_block2d_4x4_8_sse4_1(src_ptr + j + i*src_stride, src_stride,
HFilter_aligned16, VFilter_aligned16,
dst_ptr + j + i*dst_stride, dst_stride);
}
}
}

void vp8_filter_block2d_16x16_8_sse4_1
void vp9_filter_block2d_16x16_8_sse4_1
(
const unsigned char *src_ptr, const unsigned int src_stride,
const short *HFilter_aligned16, const short *VFilter_aligned16,
@ -354,7 +354,7 @@ void vp8_filter_block2d_16x16_8_sse4_1
int i, j;
for (i=0; i<16; i+=4) {
for (j=0; j<16; j+=4) {
vp8_filter_block2d_4x4_8_sse4_1(src_ptr + j + i*src_stride, src_stride,
vp9_filter_block2d_4x4_8_sse4_1(src_ptr + j + i*src_stride, src_stride,
HFilter_aligned16, VFilter_aligned16,
dst_ptr + j + i*dst_stride, dst_stride);
}
@ -24,8 +24,8 @@ extern prototype_idct(vp9_short_idct4x4llm_1_mmx);
extern prototype_idct(vp9_short_idct4x4llm_mmx);
extern prototype_idct_scalar_add(vp9_dc_only_idct_add_mmx);

extern prototype_second_order(vp8_short_inv_walsh4x4_mmx);
extern prototype_second_order(vp8_short_inv_walsh4x4_1_mmx);
extern prototype_second_order(vp9_short_inv_walsh4x4_mmx);
extern prototype_second_order(vp9_short_inv_walsh4x4_1_mmx);

#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_idct_idct1
@ -38,22 +38,22 @@ extern prototype_second_order(vp8_short_inv_walsh4x4_1_mmx);
#define vp8_idct_idct1_scalar_add vp9_dc_only_idct_add_mmx

#undef vp8_idct_iwalsh16
#define vp8_idct_iwalsh16 vp8_short_inv_walsh4x4_mmx
#define vp8_idct_iwalsh16 vp9_short_inv_walsh4x4_mmx

#undef vp8_idct_iwalsh1
#define vp8_idct_iwalsh1 vp8_short_inv_walsh4x4_1_mmx
#define vp8_idct_iwalsh1 vp9_short_inv_walsh4x4_1_mmx

#endif
#endif

#if HAVE_SSE2

extern prototype_second_order(vp8_short_inv_walsh4x4_sse2);
extern prototype_second_order(vp9_short_inv_walsh4x4_sse2);

#if !CONFIG_RUNTIME_CPU_DETECT

#undef vp8_idct_iwalsh16
#define vp8_idct_iwalsh16 vp8_short_inv_walsh4x4_sse2
#define vp8_idct_iwalsh16 vp9_short_inv_walsh4x4_sse2

#endif

@ -11,7 +11,7 @@

%include "vpx_ports/x86_abi_support.asm"

;void vp8_idct_dequant_0_2x_sse2
;void vp9_idct_dequant_0_2x_sse2
; (
; short *qcoeff - 0
; short *dequant - 1
@ -21,8 +21,8 @@
; int blk_stride - 5
; )

global sym(vp8_idct_dequant_0_2x_sse2)
sym(vp8_idct_dequant_0_2x_sse2):
global sym(vp9_idct_dequant_0_2x_sse2)
sym(vp9_idct_dequant_0_2x_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -97,8 +97,8 @@ sym(vp8_idct_dequant_0_2x_sse2):
pop rbp
ret

global sym(vp8_idct_dequant_full_2x_sse2)
sym(vp8_idct_dequant_full_2x_sse2):
global sym(vp9_idct_dequant_full_2x_sse2)
sym(vp9_idct_dequant_full_2x_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -353,7 +353,7 @@ sym(vp8_idct_dequant_full_2x_sse2):
pop rbp
ret

;void vp8_idct_dequant_dc_0_2x_sse2
;void vp9_idct_dequant_dc_0_2x_sse2
; (
; short *qcoeff - 0
; short *dequant - 1
@ -362,8 +362,8 @@ sym(vp8_idct_dequant_full_2x_sse2):
; int dst_stride - 4
; short *dc - 5
; )
global sym(vp8_idct_dequant_dc_0_2x_sse2)
sym(vp8_idct_dequant_dc_0_2x_sse2):
global sym(vp9_idct_dequant_dc_0_2x_sse2)
sym(vp9_idct_dequant_dc_0_2x_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -438,8 +438,8 @@ sym(vp8_idct_dequant_dc_0_2x_sse2):
pop rbp
ret

global sym(vp8_idct_dequant_dc_full_2x_sse2)
sym(vp8_idct_dequant_dc_full_2x_sse2):
global sym(vp9_idct_dequant_dc_full_2x_sse2)
sym(vp9_idct_dequant_dc_full_2x_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -11,9 +11,9 @@

%include "vpx_ports/x86_abi_support.asm"

;void vp8_short_inv_walsh4x4_1_mmx(short *input, short *output)
global sym(vp8_short_inv_walsh4x4_1_mmx)
sym(vp8_short_inv_walsh4x4_1_mmx):
;void vp9_short_inv_walsh4x4_1_mmx(short *input, short *output)
global sym(vp9_short_inv_walsh4x4_1_mmx)
sym(vp9_short_inv_walsh4x4_1_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
@ -47,9 +47,9 @@ sym(vp8_short_inv_walsh4x4_1_mmx):
pop rbp
ret

;void vp8_short_inv_walsh4x4_mmx(short *input, short *output)
global sym(vp8_short_inv_walsh4x4_mmx)
sym(vp8_short_inv_walsh4x4_mmx):
;void vp9_short_inv_walsh4x4_mmx(short *input, short *output)
global sym(vp9_short_inv_walsh4x4_mmx)
sym(vp9_short_inv_walsh4x4_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
@ -11,9 +11,9 @@

%include "vpx_ports/x86_abi_support.asm"

;void vp8_short_inv_walsh4x4_sse2(short *input, short *output)
global sym(vp8_short_inv_walsh4x4_sse2)
sym(vp8_short_inv_walsh4x4_sse2):
;void vp9_short_inv_walsh4x4_sse2(short *input, short *output)
global sym(vp9_short_inv_walsh4x4_sse2)
sym(vp9_short_inv_walsh4x4_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
@ -12,7 +12,7 @@
%include "vpx_ports/x86_abi_support.asm"


;void vp8_loop_filter_horizontal_edge_mmx
;void vp9_loop_filter_horizontal_edge_mmx
;(
; unsigned char *src_ptr,
; int src_pixel_step,
@ -21,8 +21,8 @@
; const char *thresh,
; int count
;)
global sym(vp8_loop_filter_horizontal_edge_mmx)
sym(vp8_loop_filter_horizontal_edge_mmx):
global sym(vp9_loop_filter_horizontal_edge_mmx)
sym(vp9_loop_filter_horizontal_edge_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -224,7 +224,7 @@ sym(vp8_loop_filter_horizontal_edge_mmx):
ret


;void vp8_loop_filter_vertical_edge_mmx
;void vp9_loop_filter_vertical_edge_mmx
;(
; unsigned char *src_ptr,
; int src_pixel_step,
@ -233,8 +233,8 @@ sym(vp8_loop_filter_horizontal_edge_mmx):
; const char *thresh,
; int count
;)
global sym(vp8_loop_filter_vertical_edge_mmx)
sym(vp8_loop_filter_vertical_edge_mmx):
global sym(vp9_loop_filter_vertical_edge_mmx)
sym(vp9_loop_filter_vertical_edge_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -594,14 +594,14 @@ sym(vp8_loop_filter_vertical_edge_mmx):
ret


;void vp8_loop_filter_simple_horizontal_edge_mmx
;void vp9_loop_filter_simple_horizontal_edge_mmx
;(
; unsigned char *src_ptr,
; int src_pixel_step,
; const char *blimit
;)
global sym(vp8_loop_filter_simple_horizontal_edge_mmx)
sym(vp8_loop_filter_simple_horizontal_edge_mmx):
global sym(vp9_loop_filter_simple_horizontal_edge_mmx)
sym(vp9_loop_filter_simple_horizontal_edge_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 3
@ -710,14 +710,14 @@ sym(vp8_loop_filter_simple_horizontal_edge_mmx):
ret


;void vp8_loop_filter_simple_vertical_edge_mmx
;void vp9_loop_filter_simple_vertical_edge_mmx
;(
; unsigned char *src_ptr,
; int src_pixel_step,
; const char *blimit
;)
global sym(vp8_loop_filter_simple_vertical_edge_mmx)
sym(vp8_loop_filter_simple_vertical_edge_mmx):
global sym(vp9_loop_filter_simple_vertical_edge_mmx)
sym(vp9_loop_filter_simple_vertical_edge_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 3
@ -931,9 +931,9 @@ sym(vp8_loop_filter_simple_vertical_edge_mmx):
;{
;
;
; vp8_loop_filter_simple_vertical_edge_mmx(y_ptr+4, y_stride, lfi->flim,lfi->lim,lfi->thr,2);
; vp8_loop_filter_simple_vertical_edge_mmx(y_ptr+8, y_stride, lfi->flim,lfi->lim,lfi->thr,2);
; vp8_loop_filter_simple_vertical_edge_mmx(y_ptr+12, y_stride, lfi->flim,lfi->lim,lfi->thr,2);
; vp9_loop_filter_simple_vertical_edge_mmx(y_ptr+4, y_stride, lfi->flim,lfi->lim,lfi->thr,2);
; vp9_loop_filter_simple_vertical_edge_mmx(y_ptr+8, y_stride, lfi->flim,lfi->lim,lfi->thr,2);
; vp9_loop_filter_simple_vertical_edge_mmx(y_ptr+12, y_stride, lfi->flim,lfi->lim,lfi->thr,2);
;}

SECTION_RODATA
@ -272,7 +272,7 @@
%endmacro


;void vp8_loop_filter_horizontal_edge_sse2
;void vp9_loop_filter_horizontal_edge_sse2
;(
; unsigned char *src_ptr,
; int src_pixel_step,
@ -281,8 +281,8 @@
; const char *thresh,
; int count
;)
global sym(vp8_loop_filter_horizontal_edge_sse2)
sym(vp8_loop_filter_horizontal_edge_sse2):
global sym(vp9_loop_filter_horizontal_edge_sse2)
sym(vp9_loop_filter_horizontal_edge_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -322,7 +322,7 @@ sym(vp8_loop_filter_horizontal_edge_sse2):
ret


;void vp8_loop_filter_horizontal_edge_uv_sse2
;void vp9_loop_filter_horizontal_edge_uv_sse2
;(
; unsigned char *src_ptr,
; int src_pixel_step,
@ -331,8 +331,8 @@ sym(vp8_loop_filter_horizontal_edge_sse2):
; const char *thresh,
; int count
;)
global sym(vp8_loop_filter_horizontal_edge_uv_sse2)
sym(vp8_loop_filter_horizontal_edge_uv_sse2):
global sym(vp9_loop_filter_horizontal_edge_uv_sse2)
sym(vp9_loop_filter_horizontal_edge_uv_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -710,7 +710,7 @@ sym(vp8_loop_filter_horizontal_edge_uv_sse2):
%endmacro


;void vp8_loop_filter_vertical_edge_sse2
;void vp9_loop_filter_vertical_edge_sse2
;(
; unsigned char *src_ptr,
; int src_pixel_step,
@ -719,8 +719,8 @@ sym(vp8_loop_filter_horizontal_edge_uv_sse2):
; const char *thresh,
; int count
;)
global sym(vp8_loop_filter_vertical_edge_sse2)
sym(vp8_loop_filter_vertical_edge_sse2):
global sym(vp9_loop_filter_vertical_edge_sse2)
sym(vp9_loop_filter_vertical_edge_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -777,7 +777,7 @@ sym(vp8_loop_filter_vertical_edge_sse2):
ret


;void vp8_loop_filter_vertical_edge_uv_sse2
;void vp9_loop_filter_vertical_edge_uv_sse2
;(
; unsigned char *u,
; int src_pixel_step,
@ -786,8 +786,8 @@ sym(vp8_loop_filter_vertical_edge_sse2):
; const char *thresh,
; unsigned char *v
;)
global sym(vp8_loop_filter_vertical_edge_uv_sse2)
sym(vp8_loop_filter_vertical_edge_uv_sse2):
global sym(vp9_loop_filter_vertical_edge_uv_sse2)
sym(vp9_loop_filter_vertical_edge_uv_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -845,14 +845,14 @@ sym(vp8_loop_filter_vertical_edge_uv_sse2):
pop rbp
ret

;void vp8_loop_filter_simple_horizontal_edge_sse2
;void vp9_loop_filter_simple_horizontal_edge_sse2
;(
; unsigned char *src_ptr,
; int src_pixel_step,
; const char *blimit,
;)
global sym(vp8_loop_filter_simple_horizontal_edge_sse2)
sym(vp8_loop_filter_simple_horizontal_edge_sse2):
global sym(vp9_loop_filter_simple_horizontal_edge_sse2)
sym(vp9_loop_filter_simple_horizontal_edge_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 3
@ -954,14 +954,14 @@ sym(vp8_loop_filter_simple_horizontal_edge_sse2):
ret


;void vp8_loop_filter_simple_vertical_edge_sse2
;void vp9_loop_filter_simple_vertical_edge_sse2
;(
; unsigned char *src_ptr,
; int src_pixel_step,
; const char *blimit,
;)
global sym(vp8_loop_filter_simple_vertical_edge_sse2)
sym(vp8_loop_filter_simple_vertical_edge_sse2):
global sym(vp9_loop_filter_simple_vertical_edge_sse2)
sym(vp9_loop_filter_simple_vertical_edge_sse2):
push rbp ; save old base pointer value.
mov rbp, rsp ; set new base pointer value.
SHADOW_ARGS_TO_STACK 3
@ -13,67 +13,67 @@
#include "vpx_config.h"
#include "vp8/common/loopfilter.h"

prototype_loopfilter(vp8_loop_filter_vertical_edge_mmx);
prototype_loopfilter(vp8_loop_filter_horizontal_edge_mmx);
prototype_loopfilter(vp9_loop_filter_vertical_edge_mmx);
prototype_loopfilter(vp9_loop_filter_horizontal_edge_mmx);

prototype_loopfilter(vp8_loop_filter_vertical_edge_sse2);
prototype_loopfilter(vp8_loop_filter_horizontal_edge_sse2);
prototype_loopfilter(vp9_loop_filter_vertical_edge_sse2);
prototype_loopfilter(vp9_loop_filter_horizontal_edge_sse2);

extern loop_filter_uvfunction vp8_loop_filter_horizontal_edge_uv_sse2;
extern loop_filter_uvfunction vp8_loop_filter_vertical_edge_uv_sse2;
extern loop_filter_uvfunction vp9_loop_filter_horizontal_edge_uv_sse2;
extern loop_filter_uvfunction vp9_loop_filter_vertical_edge_uv_sse2;

#if HAVE_MMX
/* Horizontal MB filtering */
void vp8_loop_filter_mbh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
void vp9_loop_filter_mbh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, struct loop_filter_info *lfi) {
}


/* Vertical MB Filtering */
void vp8_loop_filter_mbv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
void vp9_loop_filter_mbv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, struct loop_filter_info *lfi) {
}


/* Horizontal B Filtering */
void vp8_loop_filter_bh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
void vp9_loop_filter_bh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, struct loop_filter_info *lfi) {

}


void vp8_loop_filter_bhs_mmx(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 4 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 8 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride, blimit);
void vp9_loop_filter_bhs_mmx(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
vp9_loop_filter_simple_horizontal_edge_mmx(y_ptr + 4 * y_stride, y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_mmx(y_ptr + 8 * y_stride, y_stride, blimit);
vp9_loop_filter_simple_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride, blimit);
}


/* Vertical B Filtering */
void vp8_loop_filter_bv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
void vp9_loop_filter_bv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, struct loop_filter_info *lfi) {
vp8_loop_filter_vertical_edge_mmx(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_mmx(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_mmx(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_vertical_edge_mmx(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_vertical_edge_mmx(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp9_loop_filter_vertical_edge_mmx(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);

if (u_ptr)
vp8_loop_filter_vertical_edge_mmx(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
vp9_loop_filter_vertical_edge_mmx(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);

if (v_ptr)
vp8_loop_filter_vertical_edge_mmx(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
vp9_loop_filter_vertical_edge_mmx(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}


void vp8_loop_filter_bvs_mmx(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 4, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 8, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 12, y_stride, blimit);
void vp9_loop_filter_bvs_mmx(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
vp9_loop_filter_simple_vertical_edge_mmx(y_ptr + 4, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_mmx(y_ptr + 8, y_stride, blimit);
vp9_loop_filter_simple_vertical_edge_mmx(y_ptr + 12, y_stride, blimit);
}
#endif


#if HAVE_SSE2
void vp8_mbloop_filter_horizontal_edge_c_sse2
void vp9_mbloop_filter_horizontal_edge_c_sse2
(
unsigned char *s,
int p,
@ -392,7 +392,7 @@ static __inline void transpose(unsigned char *src[], int in_p,
_mm_castsi128_pd(x7)); // 07 17 27 37 47 57 67 77
} while (++idx8x8 < num_8x8_to_transpose);
}
void vp8_mbloop_filter_vertical_edge_c_sse2
void vp9_mbloop_filter_vertical_edge_c_sse2
(
unsigned char *s,
int p,
@ -418,7 +418,7 @@ void vp8_mbloop_filter_vertical_edge_c_sse2
// 16x16->16x16 or 16x8->8x16
transpose(src, p, dst, 16, (1 << count));

vp8_mbloop_filter_horizontal_edge_c_sse2(t_dst + 5*16, 16, blimit, limit,
vp9_mbloop_filter_horizontal_edge_c_sse2(t_dst + 5*16, 16, blimit, limit,
thresh, count);

dst[0] = s - 5;
@ -432,88 +432,88 @@ void vp8_mbloop_filter_vertical_edge_c_sse2
}

/* Horizontal MB filtering */
void vp8_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
void vp9_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, struct loop_filter_info *lfi) {

vp8_mbloop_filter_horizontal_edge_c_sse2(y_ptr, y_stride, lfi->mblim,
vp9_mbloop_filter_horizontal_edge_c_sse2(y_ptr, y_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, 2);

/* TODO: write sse2 version with u,v interleaved */
if (u_ptr)
vp8_mbloop_filter_horizontal_edge_c_sse2(u_ptr, uv_stride, lfi->mblim,
vp9_mbloop_filter_horizontal_edge_c_sse2(u_ptr, uv_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, 1);

if (v_ptr)
vp8_mbloop_filter_horizontal_edge_c_sse2(v_ptr, uv_stride, lfi->mblim,
vp9_mbloop_filter_horizontal_edge_c_sse2(v_ptr, uv_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, 1);
}

void vp8_loop_filter_bh8x8_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_bh8x8_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_mbloop_filter_horizontal_edge_c_sse2(
vp9_mbloop_filter_horizontal_edge_c_sse2(
y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
}

/* Vertical MB Filtering */
void vp8_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
void vp9_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
struct loop_filter_info *lfi) {
vp8_mbloop_filter_vertical_edge_c_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim,
vp9_mbloop_filter_vertical_edge_c_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 2);

/* TODO: write sse2 version with u,v interleaved */
if (u_ptr)
vp8_mbloop_filter_vertical_edge_c_sse2(u_ptr, uv_stride, lfi->mblim,
vp9_mbloop_filter_vertical_edge_c_sse2(u_ptr, uv_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, 1);

if (v_ptr)
vp8_mbloop_filter_vertical_edge_c_sse2(v_ptr, uv_stride, lfi->mblim,
|
||||
vp9_mbloop_filter_vertical_edge_c_sse2(v_ptr, uv_stride, lfi->mblim,
|
||||
lfi->lim, lfi->hev_thr, 1);
|
||||
}
|
||||
|
||||
void vp8_loop_filter_bv8x8_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
|
||||
void vp9_loop_filter_bv8x8_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
|
||||
unsigned char *v_ptr, int y_stride, int uv_stride,
|
||||
struct loop_filter_info *lfi) {
|
||||
vp8_mbloop_filter_vertical_edge_c_sse2(
|
||||
vp9_mbloop_filter_vertical_edge_c_sse2(
|
||||
y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
}
|
||||
|
||||
/* Horizontal B Filtering */
|
||||
void vp8_loop_filter_bh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_bh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, struct loop_filter_info *lfi) {
|
||||
vp8_loop_filter_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_loop_filter_horizontal_edge_uv_sse2(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, v_ptr + 4 * uv_stride);
|
||||
vp9_loop_filter_horizontal_edge_uv_sse2(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, v_ptr + 4 * uv_stride);
|
||||
}
|
||||
|
||||
|
||||
void vp8_loop_filter_bhs_sse2(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
|
||||
vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, blimit);
|
||||
vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, blimit);
|
||||
vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, blimit);
|
||||
void vp9_loop_filter_bhs_sse2(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
|
||||
vp9_loop_filter_simple_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, blimit);
|
||||
vp9_loop_filter_simple_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, blimit);
|
||||
vp9_loop_filter_simple_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, blimit);
|
||||
}
|
||||
|
||||
|
||||
/* Vertical B Filtering */
|
||||
void vp8_loop_filter_bv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
void vp9_loop_filter_bv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
|
||||
int y_stride, int uv_stride, struct loop_filter_info *lfi) {
|
||||
vp8_loop_filter_vertical_edge_sse2(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_vertical_edge_sse2(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp8_loop_filter_vertical_edge_sse2(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_vertical_edge_sse2(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_vertical_edge_sse2(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
vp9_loop_filter_vertical_edge_sse2(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
|
||||
|
||||
if (u_ptr)
|
||||
vp8_loop_filter_vertical_edge_uv_sse2(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, v_ptr + 4);
|
||||
vp9_loop_filter_vertical_edge_uv_sse2(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, v_ptr + 4);
|
||||
}
|
||||
|
||||
|
||||
void vp8_loop_filter_bvs_sse2(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
|
||||
vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 4, y_stride, blimit);
|
||||
vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 8, y_stride, blimit);
|
||||
vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 12, y_stride, blimit);
|
||||
void vp9_loop_filter_bvs_sse2(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) {
|
||||
vp9_loop_filter_simple_vertical_edge_sse2(y_ptr + 4, y_stride, blimit);
|
||||
vp9_loop_filter_simple_vertical_edge_sse2(y_ptr + 8, y_stride, blimit);
|
||||
vp9_loop_filter_simple_vertical_edge_sse2(y_ptr + 12, y_stride, blimit);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -20,26 +20,26 @@
|
||||
*/
|
||||
|
||||
#if HAVE_MMX
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbv_mmx);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bv_mmx);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbh_mmx);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bh_mmx);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_simple_vertical_edge_mmx);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bvs_mmx);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_simple_horizontal_edge_mmx);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bhs_mmx);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbv_mmx);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bv_mmx);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbh_mmx);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bh_mmx);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_simple_vertical_edge_mmx);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bvs_mmx);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_simple_horizontal_edge_mmx);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bhs_mmx);
|
||||
#endif
|
||||
|
||||
|
||||
#if HAVE_SSE2
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbv_sse2);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bv_sse2);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_mbh_sse2);
|
||||
extern prototype_loopfilter_block(vp8_loop_filter_bh_sse2);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_simple_vertical_edge_sse2);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bvs_sse2);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_simple_horizontal_edge_sse2);
|
||||
extern prototype_simple_loopfilter(vp8_loop_filter_bhs_sse2);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbv_sse2);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bv_sse2);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_mbh_sse2);
|
||||
extern prototype_loopfilter_block(vp9_loop_filter_bh_sse2);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_simple_vertical_edge_sse2);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bvs_sse2);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_simple_horizontal_edge_sse2);
|
||||
extern prototype_simple_loopfilter(vp9_loop_filter_bhs_sse2);
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -14,7 +14,7 @@
|
||||
%define VP8_FILTER_WEIGHT 128
|
||||
%define VP8_FILTER_SHIFT 7
|
||||
|
||||
;void vp8_post_proc_down_and_across_mmx
|
||||
;void vp9_post_proc_down_and_across_mmx
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned char *dst_ptr,
|
||||
@ -24,8 +24,8 @@
|
||||
; int cols,
|
||||
; int flimit
|
||||
;)
|
||||
global sym(vp8_post_proc_down_and_across_mmx)
|
||||
sym(vp8_post_proc_down_and_across_mmx):
|
||||
global sym(vp9_post_proc_down_and_across_mmx)
|
||||
sym(vp9_post_proc_down_and_across_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 7
|
||||
@ -262,11 +262,11 @@ sym(vp8_post_proc_down_and_across_mmx):
|
||||
%undef RD
|
||||
|
||||
|
||||
;void vp8_mbpost_proc_down_mmx(unsigned char *dst,
|
||||
;void vp9_mbpost_proc_down_mmx(unsigned char *dst,
|
||||
; int pitch, int rows, int cols,int flimit)
|
||||
extern sym(vp8_rv)
|
||||
global sym(vp8_mbpost_proc_down_mmx)
|
||||
sym(vp8_mbpost_proc_down_mmx):
|
||||
global sym(vp9_mbpost_proc_down_mmx)
|
||||
sym(vp9_mbpost_proc_down_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 5
|
||||
@ -459,14 +459,14 @@ sym(vp8_mbpost_proc_down_mmx):
|
||||
%undef flimit2
|
||||
|
||||
|
||||
;void vp8_plane_add_noise_mmx (unsigned char *Start, unsigned char *noise,
|
||||
;void vp9_plane_add_noise_mmx (unsigned char *Start, unsigned char *noise,
|
||||
; unsigned char blackclamp[16],
|
||||
; unsigned char whiteclamp[16],
|
||||
; unsigned char bothclamp[16],
|
||||
; unsigned int Width, unsigned int Height, int Pitch)
|
||||
extern sym(rand)
|
||||
global sym(vp8_plane_add_noise_mmx)
|
||||
sym(vp8_plane_add_noise_mmx):
|
||||
global sym(vp9_plane_add_noise_mmx)
|
||||
sym(vp9_plane_add_noise_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 8
|
||||
|
@ -11,7 +11,7 @@
|
||||
|
||||
%include "vpx_ports/x86_abi_support.asm"
|
||||
|
||||
;void vp8_post_proc_down_and_across_xmm
|
||||
;void vp9_post_proc_down_and_across_xmm
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned char *dst_ptr,
|
||||
@ -21,8 +21,8 @@
|
||||
; int cols,
|
||||
; int flimit
|
||||
;)
|
||||
global sym(vp8_post_proc_down_and_across_xmm)
|
||||
sym(vp8_post_proc_down_and_across_xmm):
|
||||
global sym(vp9_post_proc_down_and_across_xmm)
|
||||
sym(vp9_post_proc_down_and_across_xmm):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 7
|
||||
@ -248,11 +248,11 @@ sym(vp8_post_proc_down_and_across_xmm):
|
||||
%undef RD42
|
||||
|
||||
|
||||
;void vp8_mbpost_proc_down_xmm(unsigned char *dst,
|
||||
;void vp9_mbpost_proc_down_xmm(unsigned char *dst,
|
||||
; int pitch, int rows, int cols,int flimit)
|
||||
extern sym(vp8_rv)
|
||||
global sym(vp8_mbpost_proc_down_xmm)
|
||||
sym(vp8_mbpost_proc_down_xmm):
|
||||
global sym(vp9_mbpost_proc_down_xmm)
|
||||
sym(vp9_mbpost_proc_down_xmm):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 5
|
||||
@ -449,10 +449,10 @@ sym(vp8_mbpost_proc_down_xmm):
|
||||
%undef flimit4
|
||||
|
||||
|
||||
;void vp8_mbpost_proc_across_ip_xmm(unsigned char *src,
|
||||
;void vp9_mbpost_proc_across_ip_xmm(unsigned char *src,
|
||||
; int pitch, int rows, int cols,int flimit)
|
||||
global sym(vp8_mbpost_proc_across_ip_xmm)
|
||||
sym(vp8_mbpost_proc_across_ip_xmm):
|
||||
global sym(vp9_mbpost_proc_across_ip_xmm)
|
||||
sym(vp9_mbpost_proc_across_ip_xmm):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 5
|
||||
@ -624,14 +624,14 @@ sym(vp8_mbpost_proc_across_ip_xmm):
|
||||
%undef flimit4
|
||||
|
||||
|
||||
;void vp8_plane_add_noise_wmt (unsigned char *Start, unsigned char *noise,
|
||||
;void vp9_plane_add_noise_wmt (unsigned char *Start, unsigned char *noise,
|
||||
; unsigned char blackclamp[16],
|
||||
; unsigned char whiteclamp[16],
|
||||
; unsigned char bothclamp[16],
|
||||
; unsigned int Width, unsigned int Height, int Pitch)
|
||||
extern sym(rand)
|
||||
global sym(vp8_plane_add_noise_wmt)
|
||||
sym(vp8_plane_add_noise_wmt):
|
||||
global sym(vp9_plane_add_noise_wmt)
|
||||
sym(vp9_plane_add_noise_wmt):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 8
|
||||
|
@ -20,42 +20,42 @@
|
||||
*/
|
||||
|
||||
#if HAVE_MMX
|
||||
extern prototype_postproc_inplace(vp8_mbpost_proc_down_mmx);
|
||||
extern prototype_postproc(vp8_post_proc_down_and_across_mmx);
|
||||
extern prototype_postproc_addnoise(vp8_plane_add_noise_mmx);
|
||||
extern prototype_postproc_inplace(vp9_mbpost_proc_down_mmx);
|
||||
extern prototype_postproc(vp9_post_proc_down_and_across_mmx);
|
||||
extern prototype_postproc_addnoise(vp9_plane_add_noise_mmx);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_postproc_down
|
||||
#define vp8_postproc_down vp8_mbpost_proc_down_mmx
|
||||
#define vp8_postproc_down vp9_mbpost_proc_down_mmx
|
||||
|
||||
#undef vp8_postproc_downacross
|
||||
#define vp8_postproc_downacross vp8_post_proc_down_and_across_mmx
|
||||
#define vp8_postproc_downacross vp9_post_proc_down_and_across_mmx
|
||||
|
||||
#undef vp8_postproc_addnoise
|
||||
#define vp8_postproc_addnoise vp8_plane_add_noise_mmx
|
||||
#define vp8_postproc_addnoise vp9_plane_add_noise_mmx
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
#if HAVE_SSE2
|
||||
extern prototype_postproc_inplace(vp8_mbpost_proc_down_xmm);
|
||||
extern prototype_postproc_inplace(vp8_mbpost_proc_across_ip_xmm);
|
||||
extern prototype_postproc(vp8_post_proc_down_and_across_xmm);
|
||||
extern prototype_postproc_addnoise(vp8_plane_add_noise_wmt);
|
||||
extern prototype_postproc_inplace(vp9_mbpost_proc_down_xmm);
|
||||
extern prototype_postproc_inplace(vp9_mbpost_proc_across_ip_xmm);
|
||||
extern prototype_postproc(vp9_post_proc_down_and_across_xmm);
|
||||
extern prototype_postproc_addnoise(vp9_plane_add_noise_wmt);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_postproc_down
|
||||
#define vp8_postproc_down vp8_mbpost_proc_down_xmm
|
||||
#define vp8_postproc_down vp9_mbpost_proc_down_xmm
|
||||
|
||||
#undef vp8_postproc_across
|
||||
#define vp8_postproc_across vp8_mbpost_proc_across_ip_xmm
|
||||
#define vp8_postproc_across vp9_mbpost_proc_across_ip_xmm
|
||||
|
||||
#undef vp8_postproc_downacross
|
||||
#define vp8_postproc_downacross vp8_post_proc_down_and_across_xmm
|
||||
#define vp8_postproc_downacross vp9_post_proc_down_and_across_xmm
|
||||
|
||||
#undef vp8_postproc_addnoise
|
||||
#define vp8_postproc_addnoise vp8_plane_add_noise_wmt
|
||||
#define vp8_postproc_addnoise vp9_plane_add_noise_wmt
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -10,9 +10,9 @@
|
||||
|
||||
|
||||
%include "vpx_ports/x86_abi_support.asm"
|
||||
;void vp8_recon_b_mmx(unsigned char *s, short *q, unsigned char *d, int stride)
|
||||
global sym(vp8_recon_b_mmx)
|
||||
sym(vp8_recon_b_mmx):
|
||||
;void vp9_recon_b_mmx(unsigned char *s, short *q, unsigned char *d, int stride)
|
||||
global sym(vp9_recon_b_mmx)
|
||||
sym(vp9_recon_b_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -65,8 +65,8 @@ sym(vp8_recon_b_mmx):
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; )
|
||||
global sym(vp8_copy_mem8x8_mmx)
|
||||
sym(vp8_copy_mem8x8_mmx):
|
||||
global sym(vp9_copy_mem8x8_mmx)
|
||||
sym(vp9_copy_mem8x8_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -128,8 +128,8 @@ sym(vp8_copy_mem8x8_mmx):
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; )
|
||||
global sym(vp8_copy_mem8x4_mmx)
|
||||
sym(vp8_copy_mem8x4_mmx):
|
||||
global sym(vp9_copy_mem8x4_mmx)
|
||||
sym(vp9_copy_mem8x4_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -172,8 +172,8 @@ sym(vp8_copy_mem8x4_mmx):
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; )
|
||||
global sym(vp8_copy_mem16x16_mmx)
|
||||
sym(vp8_copy_mem16x16_mmx):
|
||||
global sym(vp9_copy_mem16x16_mmx)
|
||||
sym(vp9_copy_mem16x16_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
|
@ -10,9 +10,9 @@
|
||||
|
||||
|
||||
%include "vpx_ports/x86_abi_support.asm"
|
||||
;void vp8_recon2b_sse2(unsigned char *s, short *q, unsigned char *d, int stride)
|
||||
global sym(vp8_recon2b_sse2)
|
||||
sym(vp8_recon2b_sse2):
|
||||
;void vp9_recon2b_sse2(unsigned char *s, short *q, unsigned char *d, int stride)
|
||||
global sym(vp9_recon2b_sse2)
|
||||
sym(vp9_recon2b_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -61,9 +61,9 @@ sym(vp8_recon2b_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_recon4b_sse2(unsigned char *s, short *q, unsigned char *d, int stride)
|
||||
global sym(vp8_recon4b_sse2)
|
||||
sym(vp8_recon4b_sse2):
|
||||
;void vp9_recon4b_sse2(unsigned char *s, short *q, unsigned char *d, int stride)
|
||||
global sym(vp9_recon4b_sse2)
|
||||
sym(vp9_recon4b_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -132,8 +132,8 @@ sym(vp8_recon4b_sse2):
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; )
|
||||
global sym(vp8_copy_mem16x16_sse2)
|
||||
sym(vp8_copy_mem16x16_sse2):
|
||||
global sym(vp9_copy_mem16x16_sse2)
|
||||
sym(vp9_copy_mem16x16_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -231,14 +231,14 @@ sym(vp8_copy_mem16x16_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_intra_pred_uv_dc_mmx2(
|
||||
;void vp9_intra_pred_uv_dc_mmx2(
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; unsigned char *src,
|
||||
; int src_stride,
|
||||
; )
|
||||
global sym(vp8_intra_pred_uv_dc_mmx2)
|
||||
sym(vp8_intra_pred_uv_dc_mmx2):
|
||||
global sym(vp9_intra_pred_uv_dc_mmx2)
|
||||
sym(vp9_intra_pred_uv_dc_mmx2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -304,14 +304,14 @@ sym(vp8_intra_pred_uv_dc_mmx2):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_intra_pred_uv_dctop_mmx2(
|
||||
;void vp9_intra_pred_uv_dctop_mmx2(
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; unsigned char *src,
|
||||
; int src_stride,
|
||||
; )
|
||||
global sym(vp8_intra_pred_uv_dctop_mmx2)
|
||||
sym(vp8_intra_pred_uv_dctop_mmx2):
|
||||
global sym(vp9_intra_pred_uv_dctop_mmx2)
|
||||
sym(vp9_intra_pred_uv_dctop_mmx2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -357,14 +357,14 @@ sym(vp8_intra_pred_uv_dctop_mmx2):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_intra_pred_uv_dcleft_mmx2(
|
||||
;void vp9_intra_pred_uv_dcleft_mmx2(
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; unsigned char *src,
|
||||
; int src_stride,
|
||||
; )
|
||||
global sym(vp8_intra_pred_uv_dcleft_mmx2)
|
||||
sym(vp8_intra_pred_uv_dcleft_mmx2):
|
||||
global sym(vp9_intra_pred_uv_dcleft_mmx2)
|
||||
sym(vp9_intra_pred_uv_dcleft_mmx2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -422,14 +422,14 @@ sym(vp8_intra_pred_uv_dcleft_mmx2):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_intra_pred_uv_dc128_mmx(
|
||||
;void vp9_intra_pred_uv_dc128_mmx(
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; unsigned char *src,
|
||||
; int src_stride,
|
||||
; )
|
||||
global sym(vp8_intra_pred_uv_dc128_mmx)
|
||||
sym(vp8_intra_pred_uv_dc128_mmx):
|
||||
global sym(vp9_intra_pred_uv_dc128_mmx)
|
||||
sym(vp9_intra_pred_uv_dc128_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -458,15 +458,15 @@ sym(vp8_intra_pred_uv_dc128_mmx):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_intra_pred_uv_tm_sse2(
|
||||
;void vp9_intra_pred_uv_tm_sse2(
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; unsigned char *src,
|
||||
; int src_stride,
|
||||
; )
|
||||
%macro vp8_intra_pred_uv_tm 1
|
||||
global sym(vp8_intra_pred_uv_tm_%1)
|
||||
sym(vp8_intra_pred_uv_tm_%1):
|
||||
%macro vp9_intra_pred_uv_tm 1
|
||||
global sym(vp9_intra_pred_uv_tm_%1)
|
||||
sym(vp9_intra_pred_uv_tm_%1):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -503,7 +503,7 @@ sym(vp8_intra_pred_uv_tm_%1):
|
||||
mov rdi, arg(0) ;dst;
|
||||
movsxd rcx, dword ptr arg(1) ;dst_stride
|
||||
|
||||
.vp8_intra_pred_uv_tm_%1_loop:
|
||||
.vp9_intra_pred_uv_tm_%1_loop:
|
||||
movd xmm3, [rsi]
|
||||
movd xmm5, [rsi+rax]
|
||||
%ifidn %1, sse2
|
||||
@ -525,7 +525,7 @@ sym(vp8_intra_pred_uv_tm_%1):
|
||||
lea rsi, [rsi+rax*2]
|
||||
lea rdi, [rdi+rcx*2]
|
||||
dec edx
|
||||
jnz .vp8_intra_pred_uv_tm_%1_loop
|
||||
jnz .vp9_intra_pred_uv_tm_%1_loop
|
||||
|
||||
; begin epilog
|
||||
pop rdi
|
||||
@ -536,17 +536,17 @@ sym(vp8_intra_pred_uv_tm_%1):
|
||||
ret
|
||||
%endmacro
|
||||
|
||||
vp8_intra_pred_uv_tm sse2
|
||||
vp8_intra_pred_uv_tm ssse3
|
||||
vp9_intra_pred_uv_tm sse2
|
||||
vp9_intra_pred_uv_tm ssse3
|
||||
|
||||
;void vp8_intra_pred_uv_ve_mmx(
|
||||
;void vp9_intra_pred_uv_ve_mmx(
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; unsigned char *src,
|
||||
; int src_stride,
|
||||
; )
|
||||
global sym(vp8_intra_pred_uv_ve_mmx)
|
||||
sym(vp8_intra_pred_uv_ve_mmx):
|
||||
global sym(vp9_intra_pred_uv_ve_mmx)
|
||||
sym(vp9_intra_pred_uv_ve_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -578,15 +578,15 @@ sym(vp8_intra_pred_uv_ve_mmx):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_intra_pred_uv_ho_mmx2(
|
||||
;void vp9_intra_pred_uv_ho_mmx2(
|
||||
; unsigned char *dst,
|
||||
; int dst_stride
|
||||
; unsigned char *src,
|
||||
; int src_stride,
|
||||
; )
|
||||
%macro vp8_intra_pred_uv_ho 1
|
||||
global sym(vp8_intra_pred_uv_ho_%1)
|
||||
sym(vp8_intra_pred_uv_ho_%1):
|
||||
%macro vp9_intra_pred_uv_ho 1
|
||||
global sym(vp9_intra_pred_uv_ho_%1)
|
||||
sym(vp9_intra_pred_uv_ho_%1):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 4
|
||||
@ -615,7 +615,7 @@ sym(vp8_intra_pred_uv_ho_%1):
|
||||
%endif
|
||||
dec rsi
|
||||
%ifidn %1, mmx2
|
||||
.vp8_intra_pred_uv_ho_%1_loop:
|
||||
.vp9_intra_pred_uv_ho_%1_loop:
|
||||
movd mm0, [rsi]
|
||||
movd mm1, [rsi+rax]
|
||||
punpcklbw mm0, mm0
|
||||
@ -627,7 +627,7 @@ sym(vp8_intra_pred_uv_ho_%1):
|
||||
lea rsi, [rsi+rax*2]
|
||||
lea rdi, [rdi+rcx*2]
|
||||
dec edx
|
||||
jnz .vp8_intra_pred_uv_ho_%1_loop
|
||||
jnz .vp9_intra_pred_uv_ho_%1_loop
|
||||
%else
|
||||
movd xmm0, [rsi]
|
||||
movd xmm3, [rsi+rax]
|
||||
@ -671,8 +671,8 @@ sym(vp8_intra_pred_uv_ho_%1):
|
||||
ret
|
||||
%endmacro
|
||||
|
||||
vp8_intra_pred_uv_ho mmx2
|
||||
vp8_intra_pred_uv_ho ssse3
|
||||
vp9_intra_pred_uv_ho mmx2
|
||||
vp9_intra_pred_uv_ho ssse3
|
||||
|
||||
SECTION_RODATA
|
||||
dc_128:
|
||||
|
@ -17,17 +17,17 @@
|
||||
const unsigned char *src, int src_stride)
|
||||
typedef build_intra_predictors_mbuv_prototype((*build_intra_predictors_mbuv_fn_t));
|
||||
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dc_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dctop_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dcleft_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dc128_mmx);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_ssse3);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ve_mmx);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_sse2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_ssse3);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dc_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dctop_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dcleft_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dc128_mmx);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_ho_mmx2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_ho_ssse3);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_ve_mmx);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_tm_sse2);
|
||||
extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_tm_ssse3);
|
||||
|
||||
static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
|
||||
static void vp9_build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
|
||||
unsigned char *dst_u,
|
||||
unsigned char *dst_v,
|
||||
int dst_stride,
|
||||
@ -39,7 +39,7 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
|
||||
|
||||
switch (mode) {
|
||||
case V_PRED:
|
||||
fn = vp8_intra_pred_uv_ve_mmx;
|
||||
fn = vp9_intra_pred_uv_ve_mmx;
|
||||
break;
|
||||
case H_PRED:
|
||||
fn = ho_func;
|
||||
@ -50,17 +50,17 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
|
||||
case DC_PRED:
|
||||
if (xd->up_available) {
|
||||
if (xd->left_available) {
|
||||
fn = vp8_intra_pred_uv_dc_mmx2;
|
||||
fn = vp9_intra_pred_uv_dc_mmx2;
|
||||
break;
|
||||
} else {
|
||||
fn = vp8_intra_pred_uv_dctop_mmx2;
|
||||
fn = vp9_intra_pred_uv_dctop_mmx2;
|
||||
break;
|
||||
}
|
||||
} else if (xd->left_available) {
|
||||
fn = vp8_intra_pred_uv_dcleft_mmx2;
|
||||
fn = vp9_intra_pred_uv_dcleft_mmx2;
|
||||
break;
|
||||
} else {
|
||||
fn = vp8_intra_pred_uv_dc128_mmx;
|
||||
fn = vp9_intra_pred_uv_dc128_mmx;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
@ -72,30 +72,30 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
|
||||
fn(dst_v, dst_stride, xd->dst.v_buffer, src_stride);
|
||||
}
|
||||
|
||||
void vp8_build_intra_predictors_mbuv_sse2(MACROBLOCKD *xd) {
|
||||
vp8_build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
|
||||
void vp9_build_intra_predictors_mbuv_sse2(MACROBLOCKD *xd) {
|
||||
vp9_build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
|
||||
&xd->predictor[320], 8,
|
||||
vp8_intra_pred_uv_tm_sse2,
|
||||
vp8_intra_pred_uv_ho_mmx2);
|
||||
vp9_intra_pred_uv_tm_sse2,
|
||||
vp9_intra_pred_uv_ho_mmx2);
|
||||
}
|
||||
|
||||
void vp8_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *xd) {
|
||||
vp8_build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
|
||||
void vp9_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *xd) {
|
||||
vp9_build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
|
||||
&xd->predictor[320], 8,
|
||||
vp8_intra_pred_uv_tm_ssse3,
|
||||
vp8_intra_pred_uv_ho_ssse3);
|
||||
vp9_intra_pred_uv_tm_ssse3,
|
||||
vp9_intra_pred_uv_ho_ssse3);
|
||||
}
|
||||
|
||||
void vp8_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *xd) {
|
||||
vp8_build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
|
||||
void vp9_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *xd) {
|
||||
vp9_build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
|
||||
xd->dst.v_buffer, xd->dst.uv_stride,
|
||||
vp8_intra_pred_uv_tm_sse2,
|
||||
vp8_intra_pred_uv_ho_mmx2);
|
||||
vp9_intra_pred_uv_tm_sse2,
|
||||
vp9_intra_pred_uv_ho_mmx2);
|
||||
}
|
||||
|
||||
void vp8_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *xd) {
|
||||
vp8_build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
|
||||
void vp9_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *xd) {
|
||||
vp9_build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
|
||||
xd->dst.v_buffer, xd->dst.uv_stride,
|
||||
vp8_intra_pred_uv_tm_ssse3,
|
||||
vp8_intra_pred_uv_ho_ssse3);
|
||||
vp9_intra_pred_uv_tm_ssse3,
|
||||
vp9_intra_pred_uv_ho_ssse3);
|
||||
}
|
||||
|
@ -21,7 +21,7 @@
|
||||
;
|
||||
;*************************************************************************************/
|
||||
|
||||
;void vp8_filter_block1d8_v8_ssse3
|
||||
;void vp9_filter_block1d8_v8_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pitch,
|
||||
@ -30,8 +30,8 @@
|
||||
; unsigned int output_height,
|
||||
; short *filter
|
||||
;)
|
||||
global sym(vp8_filter_block1d8_v8_ssse3)
|
||||
sym(vp8_filter_block1d8_v8_ssse3):
|
||||
global sym(vp9_filter_block1d8_v8_ssse3)
|
||||
sym(vp9_filter_block1d8_v8_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -86,7 +86,7 @@ sym(vp8_filter_block1d8_v8_ssse3):
|
||||
lea rbx, [rdx + rdx*4]
|
||||
add rbx, rdx ;pitch * 6
|
||||
|
||||
.vp8_filter_block1d8_v8_ssse3_loop:
|
||||
.vp9_filter_block1d8_v8_ssse3_loop:
|
||||
movq xmm0, [rsi] ;A
|
||||
movq xmm1, [rsi + rdx] ;B
|
||||
movq xmm2, [rsi + rdx * 2] ;C
|
||||
@ -126,7 +126,7 @@ sym(vp8_filter_block1d8_v8_ssse3):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx
|
||||
jnz .vp8_filter_block1d8_v8_ssse3_loop
|
||||
jnz .vp9_filter_block1d8_v8_ssse3_loop
|
||||
|
||||
add rsp, 16*5
|
||||
pop rsp
|
||||
@ -139,7 +139,7 @@ sym(vp8_filter_block1d8_v8_ssse3):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_filter_block1d16_v8_ssse3
|
||||
;void vp9_filter_block1d16_v8_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pitch,
|
||||
@ -148,8 +148,8 @@ sym(vp8_filter_block1d8_v8_ssse3):
|
||||
; unsigned int output_height,
|
||||
; short *filter
|
||||
;)
|
||||
global sym(vp8_filter_block1d16_v8_ssse3)
|
||||
sym(vp8_filter_block1d16_v8_ssse3):
|
||||
global sym(vp9_filter_block1d16_v8_ssse3)
|
||||
sym(vp9_filter_block1d16_v8_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -204,7 +204,7 @@ sym(vp8_filter_block1d16_v8_ssse3):
|
||||
lea rbx, [rdx + rdx*4]
|
||||
add rbx, rdx ;pitch * 6
|
||||
|
||||
.vp8_filter_block1d16_v8_ssse3_loop:
|
||||
.vp9_filter_block1d16_v8_ssse3_loop:
|
||||
movq xmm0, [rsi] ;A
|
||||
movq xmm1, [rsi + rdx] ;B
|
||||
movq xmm2, [rsi + rdx * 2] ;C
|
||||
@ -276,7 +276,7 @@ sym(vp8_filter_block1d16_v8_ssse3):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx
|
||||
jnz .vp8_filter_block1d16_v8_ssse3_loop
|
||||
jnz .vp9_filter_block1d16_v8_ssse3_loop
|
||||
|
||||
add rsp, 16*5
|
||||
pop rsp
|
||||
@ -289,7 +289,7 @@ sym(vp8_filter_block1d16_v8_ssse3):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_filter_block1d8_h8_ssse3
|
||||
;void vp9_filter_block1d8_h8_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -298,8 +298,8 @@ sym(vp8_filter_block1d16_v8_ssse3):
|
||||
; unsigned int output_height,
|
||||
; short *filter
|
||||
;)
|
||||
global sym(vp8_filter_block1d8_h8_ssse3)
|
||||
sym(vp8_filter_block1d8_h8_ssse3):
|
||||
global sym(vp9_filter_block1d8_h8_ssse3)
|
||||
sym(vp9_filter_block1d8_h8_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -396,7 +396,7 @@ sym(vp8_filter_block1d8_h8_ssse3):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_filter_block1d16_h8_ssse3
|
||||
;void vp9_filter_block1d16_h8_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -405,8 +405,8 @@ sym(vp8_filter_block1d8_h8_ssse3):
|
||||
; unsigned int output_height,
|
||||
; short *filter
|
||||
;)
|
||||
global sym(vp8_filter_block1d16_h8_ssse3)
|
||||
sym(vp8_filter_block1d16_h8_ssse3):
|
||||
global sym(vp9_filter_block1d16_h8_ssse3)
|
||||
sym(vp9_filter_block1d16_h8_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
|
@ -17,7 +17,7 @@
|
||||
%define VP8_FILTER_SHIFT 7
|
||||
|
||||
|
||||
;void vp8_filter_block1d_h6_mmx
|
||||
;void vp9_filter_block1d_h6_mmx
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned short *output_ptr,
|
||||
@ -27,8 +27,8 @@
|
||||
; unsigned int output_width,
|
||||
; short * vp8_filter
|
||||
;)
|
||||
global sym(vp8_filter_block1d_h6_mmx)
|
||||
sym(vp8_filter_block1d_h6_mmx):
|
||||
global sym(vp9_filter_block1d_h6_mmx)
|
||||
sym(vp9_filter_block1d_h6_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 7
|
||||
@ -113,7 +113,7 @@ sym(vp8_filter_block1d_h6_mmx):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_filter_block1dc_v6_mmx
|
||||
;void vp9_filter_block1dc_v6_mmx
|
||||
;(
|
||||
; short *src_ptr,
|
||||
; unsigned char *output_ptr,
|
||||
@ -124,8 +124,8 @@ sym(vp8_filter_block1d_h6_mmx):
|
||||
; unsigned int output_width,
|
||||
; short * vp8_filter
|
||||
;)
|
||||
global sym(vp8_filter_block1dc_v6_mmx)
|
||||
sym(vp8_filter_block1dc_v6_mmx):
|
||||
global sym(vp9_filter_block1dc_v6_mmx)
|
||||
sym(vp9_filter_block1dc_v6_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 8
|
||||
@ -212,8 +212,8 @@ sym(vp8_filter_block1dc_v6_mmx):
|
||||
; unsigned char *dst_ptr,
|
||||
; int dst_pitch
|
||||
;)
|
||||
global sym(vp8_bilinear_predict8x8_mmx)
|
||||
sym(vp8_bilinear_predict8x8_mmx):
|
||||
global sym(vp9_bilinear_predict8x8_mmx)
|
||||
sym(vp9_bilinear_predict8x8_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -229,7 +229,7 @@ sym(vp8_bilinear_predict8x8_mmx):
|
||||
mov rdi, arg(4) ;dst_ptr ;
|
||||
|
||||
shl rax, 5 ; offset * 32
|
||||
lea rcx, [GLOBAL(sym(vp8_bilinear_filters_mmx))]
|
||||
lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
|
||||
|
||||
add rax, rcx ; HFilter
|
||||
mov rsi, arg(0) ;src_ptr ;
|
||||
@ -369,8 +369,8 @@ sym(vp8_bilinear_predict8x8_mmx):
|
||||
; unsigned char *dst_ptr,
|
||||
; int dst_pitch
|
||||
;)
|
||||
global sym(vp8_bilinear_predict8x4_mmx)
|
||||
sym(vp8_bilinear_predict8x4_mmx):
|
||||
global sym(vp9_bilinear_predict8x4_mmx)
|
||||
sym(vp9_bilinear_predict8x4_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -385,7 +385,7 @@ sym(vp8_bilinear_predict8x4_mmx):
|
||||
movsxd rax, dword ptr arg(2) ;xoffset
|
||||
mov rdi, arg(4) ;dst_ptr ;
|
||||
|
||||
lea rcx, [GLOBAL(sym(vp8_bilinear_filters_mmx))]
|
||||
lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
|
||||
shl rax, 5
|
||||
|
||||
mov rsi, arg(0) ;src_ptr ;
|
||||
@ -524,8 +524,8 @@ sym(vp8_bilinear_predict8x4_mmx):
|
||||
; unsigned char *dst_ptr,
|
||||
; int dst_pitch
|
||||
;)
|
||||
global sym(vp8_bilinear_predict4x4_mmx)
|
||||
sym(vp8_bilinear_predict4x4_mmx):
|
||||
global sym(vp9_bilinear_predict4x4_mmx)
|
||||
sym(vp9_bilinear_predict4x4_mmx):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -540,7 +540,7 @@ sym(vp8_bilinear_predict4x4_mmx):
|
||||
movsxd rax, dword ptr arg(2) ;xoffset
|
||||
mov rdi, arg(4) ;dst_ptr ;
|
||||
|
||||
lea rcx, [GLOBAL(sym(vp8_bilinear_filters_mmx))]
|
||||
lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
|
||||
shl rax, 5
|
||||
|
||||
add rax, rcx ; HFilter
|
||||
@ -640,8 +640,8 @@ rd:
|
||||
times 4 dw 0x40
|
||||
|
||||
align 16
|
||||
global HIDDEN_DATA(sym(vp8_six_tap_mmx))
|
||||
sym(vp8_six_tap_mmx):
|
||||
global HIDDEN_DATA(sym(vp9_six_tap_mmx))
|
||||
sym(vp9_six_tap_mmx):
|
||||
times 8 dw 0
|
||||
times 8 dw 0
|
||||
times 8 dw 128
|
||||
@ -700,8 +700,8 @@ sym(vp8_six_tap_mmx):
|
||||
|
||||
|
||||
align 16
|
||||
global HIDDEN_DATA(sym(vp8_bilinear_filters_mmx))
|
||||
sym(vp8_bilinear_filters_mmx):
|
||||
global HIDDEN_DATA(sym(vp9_bilinear_filters_mmx))
|
||||
sym(vp9_bilinear_filters_mmx):
|
||||
times 8 dw 128
|
||||
times 8 dw 0
|
||||
|
||||
|
@ -22,7 +22,7 @@
|
||||
; even number. This function handles 8 pixels in horizontal direction, calculating ONE
|
||||
; rows each iteration to take advantage of the 128 bits operations.
|
||||
;*************************************************************************************/
|
||||
;void vp8_filter_block1d8_h6_sse2
|
||||
;void vp9_filter_block1d8_h6_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned short *output_ptr,
|
||||
@ -32,8 +32,8 @@
|
||||
; unsigned int output_width,
|
||||
; short *vp8_filter
|
||||
;)
|
||||
global sym(vp8_filter_block1d8_h6_sse2)
|
||||
sym(vp8_filter_block1d8_h6_sse2):
|
||||
global sym(vp9_filter_block1d8_h6_sse2)
|
||||
sym(vp9_filter_block1d8_h6_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 7
|
||||
@ -136,7 +136,7 @@ sym(vp8_filter_block1d8_h6_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_filter_block1d16_h6_sse2
|
||||
;void vp9_filter_block1d16_h6_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned short *output_ptr,
|
||||
@ -152,8 +152,8 @@ sym(vp8_filter_block1d8_h6_sse2):
|
||||
; even number. This function handles 8 pixels in horizontal direction, calculating ONE
|
||||
; rows each iteration to take advantage of the 128 bits operations.
|
||||
;*************************************************************************************/
|
||||
global sym(vp8_filter_block1d16_h6_sse2)
|
||||
sym(vp8_filter_block1d16_h6_sse2):
|
||||
global sym(vp9_filter_block1d16_h6_sse2)
|
||||
sym(vp9_filter_block1d16_h6_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 7
|
||||
@ -313,7 +313,7 @@ sym(vp8_filter_block1d16_h6_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_filter_block1d8_v6_sse2
|
||||
;void vp9_filter_block1d8_v6_sse2
|
||||
;(
|
||||
; short *src_ptr,
|
||||
; unsigned char *output_ptr,
|
||||
@ -328,8 +328,8 @@ sym(vp8_filter_block1d16_h6_sse2):
|
||||
; Notes: filter_block1d8_v6 applies a 6 tap filter vertically to the input pixels. The
|
||||
; input pixel array has output_height rows.
|
||||
;*************************************************************************************/
|
||||
global sym(vp8_filter_block1d8_v6_sse2)
|
||||
sym(vp8_filter_block1d8_v6_sse2):
|
||||
global sym(vp9_filter_block1d8_v6_sse2)
|
||||
sym(vp9_filter_block1d8_v6_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 8
|
||||
@ -356,7 +356,7 @@ sym(vp8_filter_block1d8_v6_sse2):
|
||||
movsxd r8, dword ptr arg(2) ; dst_ptich
|
||||
%endif
|
||||
|
||||
.vp8_filter_block1d8_v6_sse2_loop:
|
||||
.vp9_filter_block1d8_v6_sse2_loop:
|
||||
movdqa xmm1, XMMWORD PTR [rsi]
|
||||
pmullw xmm1, [rax]
|
||||
|
||||
@ -396,7 +396,7 @@ sym(vp8_filter_block1d8_v6_sse2):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx ; decrement count
|
||||
jnz .vp8_filter_block1d8_v6_sse2_loop ; next row
|
||||
jnz .vp9_filter_block1d8_v6_sse2_loop ; next row
|
||||
|
||||
; begin epilog
|
||||
pop rdi
|
||||
@ -408,7 +408,7 @@ sym(vp8_filter_block1d8_v6_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_filter_block1d16_v6_sse2
|
||||
;void vp9_filter_block1d16_v6_sse2
|
||||
;(
|
||||
; unsigned short *src_ptr,
|
||||
; unsigned char *output_ptr,
|
||||
@ -423,8 +423,8 @@ sym(vp8_filter_block1d8_v6_sse2):
|
||||
; Notes: filter_block1d16_v6 applies a 6 tap filter vertically to the input pixels. The
|
||||
; input pixel array has output_height rows.
|
||||
;*************************************************************************************/
|
||||
global sym(vp8_filter_block1d16_v6_sse2)
|
||||
sym(vp8_filter_block1d16_v6_sse2):
|
||||
global sym(vp9_filter_block1d16_v6_sse2)
|
||||
sym(vp9_filter_block1d16_v6_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 8
|
||||
@ -448,7 +448,7 @@ sym(vp8_filter_block1d16_v6_sse2):
|
||||
movsxd r8, dword ptr arg(2) ; dst_ptich
|
||||
%endif
|
||||
|
||||
.vp8_filter_block1d16_v6_sse2_loop:
|
||||
.vp9_filter_block1d16_v6_sse2_loop:
|
||||
; The order for adding 6-tap is 2 5 3 1 4 6. Read in data in that order.
|
||||
movdqa xmm1, XMMWORD PTR [rsi + rdx] ; line 2
|
||||
movdqa xmm2, XMMWORD PTR [rsi + rdx + 16]
|
||||
@ -511,7 +511,7 @@ sym(vp8_filter_block1d16_v6_sse2):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx ; decrement count
|
||||
jnz .vp8_filter_block1d16_v6_sse2_loop ; next row
|
||||
jnz .vp9_filter_block1d16_v6_sse2_loop ; next row
|
||||
|
||||
; begin epilog
|
||||
pop rdi
|
||||
@ -523,7 +523,7 @@ sym(vp8_filter_block1d16_v6_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_filter_block1d8_h6_only_sse2
|
||||
;void vp9_filter_block1d8_h6_only_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -533,8 +533,8 @@ sym(vp8_filter_block1d16_v6_sse2):
|
||||
; const short *vp8_filter
|
||||
;)
|
||||
; First-pass filter only when yoffset==0
|
||||
global sym(vp8_filter_block1d8_h6_only_sse2)
|
||||
sym(vp8_filter_block1d8_h6_only_sse2):
|
||||
global sym(vp9_filter_block1d8_h6_only_sse2)
|
||||
sym(vp9_filter_block1d8_h6_only_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -636,7 +636,7 @@ sym(vp8_filter_block1d8_h6_only_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_filter_block1d16_h6_only_sse2
|
||||
;void vp9_filter_block1d16_h6_only_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -646,8 +646,8 @@ sym(vp8_filter_block1d8_h6_only_sse2):
|
||||
; const short *vp8_filter
|
||||
;)
|
||||
; First-pass filter only when yoffset==0
|
||||
global sym(vp8_filter_block1d16_h6_only_sse2)
|
||||
sym(vp8_filter_block1d16_h6_only_sse2):
|
||||
global sym(vp9_filter_block1d16_h6_only_sse2)
|
||||
sym(vp9_filter_block1d16_h6_only_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -801,7 +801,7 @@ sym(vp8_filter_block1d16_h6_only_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_filter_block1d8_v6_only_sse2
|
||||
;void vp9_filter_block1d8_v6_only_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -811,8 +811,8 @@ sym(vp8_filter_block1d16_h6_only_sse2):
|
||||
; const short *vp8_filter
|
||||
;)
|
||||
; Second-pass filter only when xoffset==0
|
||||
global sym(vp8_filter_block1d8_v6_only_sse2)
|
||||
sym(vp8_filter_block1d8_v6_only_sse2):
|
||||
global sym(vp9_filter_block1d8_v6_only_sse2)
|
||||
sym(vp9_filter_block1d8_v6_only_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -837,7 +837,7 @@ sym(vp8_filter_block1d8_v6_only_sse2):
|
||||
movsxd r8, dword ptr arg(3) ; dst_ptich
|
||||
%endif
|
||||
|
||||
.vp8_filter_block1d8_v6_only_sse2_loop:
|
||||
.vp9_filter_block1d8_v6_only_sse2_loop:
|
||||
movq xmm1, MMWORD PTR [rsi]
|
||||
movq xmm2, MMWORD PTR [rsi + rdx]
|
||||
movq xmm3, MMWORD PTR [rsi + rdx * 2]
|
||||
@ -883,7 +883,7 @@ sym(vp8_filter_block1d8_v6_only_sse2):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx ; decrement count
|
||||
jnz .vp8_filter_block1d8_v6_only_sse2_loop ; next row
|
||||
jnz .vp9_filter_block1d8_v6_only_sse2_loop ; next row
|
||||
|
||||
; begin epilog
|
||||
pop rdi
|
||||
@ -895,7 +895,7 @@ sym(vp8_filter_block1d8_v6_only_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_unpack_block1d16_h6_sse2
|
||||
;void vp9_unpack_block1d16_h6_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned short *output_ptr,
|
||||
@ -903,8 +903,8 @@ sym(vp8_filter_block1d8_v6_only_sse2):
|
||||
; unsigned int output_height,
|
||||
; unsigned int output_width
|
||||
;)
|
||||
global sym(vp8_unpack_block1d16_h6_sse2)
|
||||
sym(vp8_unpack_block1d16_h6_sse2):
|
||||
global sym(vp9_unpack_block1d16_h6_sse2)
|
||||
sym(vp9_unpack_block1d16_h6_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 5
|
||||
@ -952,7 +952,7 @@ sym(vp8_unpack_block1d16_h6_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_bilinear_predict16x16_sse2
|
||||
;void vp9_bilinear_predict16x16_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; int src_pixels_per_line,
|
||||
@ -961,9 +961,9 @@ sym(vp8_unpack_block1d16_h6_sse2):
|
||||
; unsigned char *dst_ptr,
|
||||
; int dst_pitch
|
||||
;)
|
||||
extern sym(vp8_bilinear_filters_mmx)
|
||||
global sym(vp8_bilinear_predict16x16_sse2)
|
||||
sym(vp8_bilinear_predict16x16_sse2):
|
||||
extern sym(vp9_bilinear_filters_mmx)
|
||||
global sym(vp9_bilinear_predict16x16_sse2)
|
||||
sym(vp9_bilinear_predict16x16_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -976,7 +976,7 @@ sym(vp8_bilinear_predict16x16_sse2):
|
||||
;const short *HFilter = bilinear_filters_mmx[xoffset]
|
||||
;const short *VFilter = bilinear_filters_mmx[yoffset]
|
||||
|
||||
lea rcx, [GLOBAL(sym(vp8_bilinear_filters_mmx))]
|
||||
lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
|
||||
movsxd rax, dword ptr arg(2) ;xoffset
|
||||
|
||||
cmp rax, 0 ;skip first_pass filter if xoffset=0
|
||||
@ -1221,7 +1221,7 @@ sym(vp8_bilinear_predict16x16_sse2):
|
||||
ret
|
||||
|
||||
|
||||
;void vp8_bilinear_predict8x8_sse2
|
||||
;void vp9_bilinear_predict8x8_sse2
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; int src_pixels_per_line,
|
||||
@ -1230,9 +1230,9 @@ sym(vp8_bilinear_predict16x16_sse2):
|
||||
; unsigned char *dst_ptr,
|
||||
; int dst_pitch
|
||||
;)
|
||||
extern sym(vp8_bilinear_filters_mmx)
|
||||
global sym(vp8_bilinear_predict8x8_sse2)
|
||||
sym(vp8_bilinear_predict8x8_sse2):
|
||||
extern sym(vp9_bilinear_filters_mmx)
|
||||
global sym(vp9_bilinear_predict8x8_sse2)
|
||||
sym(vp9_bilinear_predict8x8_sse2):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -1247,7 +1247,7 @@ sym(vp8_bilinear_predict8x8_sse2):
|
||||
|
||||
;const short *HFilter = bilinear_filters_mmx[xoffset]
|
||||
;const short *VFilter = bilinear_filters_mmx[yoffset]
|
||||
lea rcx, [GLOBAL(sym(vp8_bilinear_filters_mmx))]
|
||||
lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
|
||||
|
||||
mov rsi, arg(0) ;src_ptr
|
||||
movsxd rdx, dword ptr arg(1) ;src_pixels_per_line
|
||||
|
@ -25,7 +25,7 @@
|
||||
; This is an implementation of some of the SSE optimizations first seen in ffvp8
|
||||
;
|
||||
;*************************************************************************************/
|
||||
;void vp8_filter_block1d8_h6_ssse3
|
||||
;void vp9_filter_block1d8_h6_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -34,8 +34,8 @@
|
||||
; unsigned int output_height,
|
||||
; unsigned int vp8_filter_index
|
||||
;)
|
||||
global sym(vp8_filter_block1d8_h6_ssse3)
|
||||
sym(vp8_filter_block1d8_h6_ssse3):
|
||||
global sym(vp9_filter_block1d8_h6_ssse3)
|
||||
sym(vp9_filter_block1d8_h6_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -168,7 +168,7 @@ vp8_filter_block1d8_h4_ssse3:
|
||||
UNSHADOW_ARGS
|
||||
pop rbp
|
||||
ret
|
||||
;void vp8_filter_block1d16_h6_ssse3
|
||||
;void vp9_filter_block1d16_h6_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -177,8 +177,8 @@ vp8_filter_block1d8_h4_ssse3:
|
||||
; unsigned int output_height,
|
||||
; unsigned int vp8_filter_index
|
||||
;)
|
||||
global sym(vp8_filter_block1d16_h6_ssse3)
|
||||
sym(vp8_filter_block1d16_h6_ssse3):
|
||||
global sym(vp9_filter_block1d16_h6_ssse3)
|
||||
sym(vp9_filter_block1d16_h6_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -275,7 +275,7 @@ sym(vp8_filter_block1d16_h6_ssse3):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_filter_block1d4_h6_ssse3
|
||||
;void vp9_filter_block1d4_h6_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pixels_per_line,
|
||||
@ -284,8 +284,8 @@ sym(vp8_filter_block1d16_h6_ssse3):
|
||||
; unsigned int output_height,
|
||||
; unsigned int vp8_filter_index
|
||||
;)
|
||||
global sym(vp8_filter_block1d4_h6_ssse3)
|
||||
sym(vp8_filter_block1d4_h6_ssse3):
|
||||
global sym(vp9_filter_block1d4_h6_ssse3)
|
||||
sym(vp9_filter_block1d4_h6_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -404,7 +404,7 @@ sym(vp8_filter_block1d4_h6_ssse3):
|
||||
|
||||
|
||||
|
||||
;void vp8_filter_block1d16_v6_ssse3
|
||||
;void vp9_filter_block1d16_v6_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pitch,
|
||||
@ -413,8 +413,8 @@ sym(vp8_filter_block1d4_h6_ssse3):
|
||||
; unsigned int output_height,
|
||||
; unsigned int vp8_filter_index
|
||||
;)
|
||||
global sym(vp8_filter_block1d16_v6_ssse3)
|
||||
sym(vp8_filter_block1d16_v6_ssse3):
|
||||
global sym(vp9_filter_block1d16_v6_ssse3)
|
||||
sym(vp9_filter_block1d16_v6_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -450,7 +450,7 @@ sym(vp8_filter_block1d16_v6_ssse3):
|
||||
add rax, rdx
|
||||
|
||||
|
||||
.vp8_filter_block1d16_v6_ssse3_loop:
|
||||
.vp9_filter_block1d16_v6_ssse3_loop:
|
||||
movq xmm1, MMWORD PTR [rsi] ;A
|
||||
movq xmm2, MMWORD PTR [rsi + rdx] ;B
|
||||
movq xmm3, MMWORD PTR [rsi + rdx * 2] ;C
|
||||
@ -508,7 +508,7 @@ sym(vp8_filter_block1d16_v6_ssse3):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx
|
||||
jnz .vp8_filter_block1d16_v6_ssse3_loop
|
||||
jnz .vp9_filter_block1d16_v6_ssse3_loop
|
||||
|
||||
; begin epilog
|
||||
pop rdi
|
||||
@ -592,7 +592,7 @@ sym(vp8_filter_block1d16_v6_ssse3):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_filter_block1d8_v6_ssse3
|
||||
;void vp9_filter_block1d8_v6_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pitch,
|
||||
@ -601,8 +601,8 @@ sym(vp8_filter_block1d16_v6_ssse3):
|
||||
; unsigned int output_height,
|
||||
; unsigned int vp8_filter_index
|
||||
;)
|
||||
global sym(vp8_filter_block1d8_v6_ssse3)
|
||||
sym(vp8_filter_block1d8_v6_ssse3):
|
||||
global sym(vp9_filter_block1d8_v6_ssse3)
|
||||
sym(vp9_filter_block1d8_v6_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -638,7 +638,7 @@ sym(vp8_filter_block1d8_v6_ssse3):
|
||||
mov rax, rsi
|
||||
add rax, rdx
|
||||
|
||||
.vp8_filter_block1d8_v6_ssse3_loop:
|
||||
.vp9_filter_block1d8_v6_ssse3_loop:
|
||||
movq xmm1, MMWORD PTR [rsi] ;A
|
||||
movq xmm2, MMWORD PTR [rsi + rdx] ;B
|
||||
movq xmm3, MMWORD PTR [rsi + rdx * 2] ;C
|
||||
@ -673,7 +673,7 @@ sym(vp8_filter_block1d8_v6_ssse3):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx
|
||||
jnz .vp8_filter_block1d8_v6_ssse3_loop
|
||||
jnz .vp9_filter_block1d8_v6_ssse3_loop
|
||||
|
||||
; begin epilog
|
||||
pop rdi
|
||||
@ -732,7 +732,7 @@ sym(vp8_filter_block1d8_v6_ssse3):
|
||||
UNSHADOW_ARGS
|
||||
pop rbp
|
||||
ret
|
||||
;void vp8_filter_block1d4_v6_ssse3
|
||||
;void vp9_filter_block1d4_v6_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; unsigned int src_pitch,
|
||||
@ -741,8 +741,8 @@ sym(vp8_filter_block1d8_v6_ssse3):
|
||||
; unsigned int output_height,
|
||||
; unsigned int vp8_filter_index
|
||||
;)
|
||||
global sym(vp8_filter_block1d4_v6_ssse3)
|
||||
sym(vp8_filter_block1d4_v6_ssse3):
|
||||
global sym(vp9_filter_block1d4_v6_ssse3)
|
||||
sym(vp9_filter_block1d4_v6_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -777,7 +777,7 @@ sym(vp8_filter_block1d4_v6_ssse3):
|
||||
mov rax, rsi
|
||||
add rax, rdx
|
||||
|
||||
.vp8_filter_block1d4_v6_ssse3_loop:
|
||||
.vp9_filter_block1d4_v6_ssse3_loop:
|
||||
movd mm1, DWORD PTR [rsi] ;A
|
||||
movd mm2, DWORD PTR [rsi + rdx] ;B
|
||||
movd mm3, DWORD PTR [rsi + rdx * 2] ;C
|
||||
@ -813,7 +813,7 @@ sym(vp8_filter_block1d4_v6_ssse3):
|
||||
add rdi, r8
|
||||
%endif
|
||||
dec rcx
|
||||
jnz .vp8_filter_block1d4_v6_ssse3_loop
|
||||
jnz .vp9_filter_block1d4_v6_ssse3_loop
|
||||
|
||||
; begin epilog
|
||||
pop rdi
|
||||
@ -871,7 +871,7 @@ sym(vp8_filter_block1d4_v6_ssse3):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_bilinear_predict16x16_ssse3
|
||||
;void vp9_bilinear_predict16x16_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; int src_pixels_per_line,
|
||||
@ -880,8 +880,8 @@ sym(vp8_filter_block1d4_v6_ssse3):
|
||||
; unsigned char *dst_ptr,
|
||||
; int dst_pitch
|
||||
;)
|
||||
global sym(vp8_bilinear_predict16x16_ssse3)
|
||||
sym(vp8_bilinear_predict16x16_ssse3):
|
||||
global sym(vp9_bilinear_predict16x16_ssse3)
|
||||
sym(vp9_bilinear_predict16x16_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
@ -1134,7 +1134,7 @@ sym(vp8_bilinear_predict16x16_ssse3):
|
||||
pop rbp
|
||||
ret
|
||||
|
||||
;void vp8_bilinear_predict8x8_ssse3
|
||||
;void vp9_bilinear_predict8x8_ssse3
|
||||
;(
|
||||
; unsigned char *src_ptr,
|
||||
; int src_pixels_per_line,
|
||||
@ -1143,8 +1143,8 @@ sym(vp8_bilinear_predict16x16_ssse3):
|
||||
; unsigned char *dst_ptr,
|
||||
; int dst_pitch
|
||||
;)
|
||||
global sym(vp8_bilinear_predict8x8_ssse3)
|
||||
sym(vp8_bilinear_predict8x8_ssse3):
|
||||
global sym(vp9_bilinear_predict8x8_ssse3)
|
||||
sym(vp9_bilinear_predict8x8_ssse3):
|
||||
push rbp
|
||||
mov rbp, rsp
|
||||
SHADOW_ARGS_TO_STACK 6
|
||||
|
@ -20,99 +20,99 @@
|
||||
*/
|
||||
|
||||
#if HAVE_MMX
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict16x16_mmx);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x8_mmx);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x4_mmx);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict4x4_mmx);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict16x16_mmx);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x8_mmx);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x4_mmx);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict4x4_mmx);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict16x16_mmx);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x8_mmx);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x4_mmx);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict4x4_mmx);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict16x16_mmx);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x8_mmx);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x4_mmx);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict4x4_mmx);
|
||||
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_subpix_sixtap16x16
|
||||
#define vp8_subpix_sixtap16x16 vp8_sixtap_predict16x16_mmx
|
||||
#define vp8_subpix_sixtap16x16 vp9_sixtap_predict16x16_mmx
|
||||
|
||||
#undef vp8_subpix_sixtap8x8
|
||||
#define vp8_subpix_sixtap8x8 vp8_sixtap_predict8x8_mmx
|
||||
#define vp8_subpix_sixtap8x8 vp9_sixtap_predict8x8_mmx
|
||||
|
||||
#undef vp8_subpix_sixtap8x4
|
||||
#define vp8_subpix_sixtap8x4 vp8_sixtap_predict8x4_mmx
|
||||
#define vp8_subpix_sixtap8x4 vp9_sixtap_predict8x4_mmx
|
||||
|
||||
#undef vp8_subpix_sixtap4x4
|
||||
#define vp8_subpix_sixtap4x4 vp8_sixtap_predict4x4_mmx
|
||||
#define vp8_subpix_sixtap4x4 vp9_sixtap_predict4x4_mmx
|
||||
|
||||
#undef vp8_subpix_bilinear16x16
|
||||
#define vp8_subpix_bilinear16x16 vp8_bilinear_predict16x16_mmx
|
||||
#define vp8_subpix_bilinear16x16 vp9_bilinear_predict16x16_mmx
|
||||
|
||||
#undef vp8_subpix_bilinear8x8
|
||||
#define vp8_subpix_bilinear8x8 vp8_bilinear_predict8x8_mmx
|
||||
#define vp8_subpix_bilinear8x8 vp9_bilinear_predict8x8_mmx
|
||||
|
||||
#undef vp8_subpix_bilinear8x4
|
||||
#define vp8_subpix_bilinear8x4 vp8_bilinear_predict8x4_mmx
|
||||
#define vp8_subpix_bilinear8x4 vp9_bilinear_predict8x4_mmx
|
||||
|
||||
#undef vp8_subpix_bilinear4x4
|
||||
#define vp8_subpix_bilinear4x4 vp8_bilinear_predict4x4_mmx
|
||||
#define vp8_subpix_bilinear4x4 vp9_bilinear_predict4x4_mmx
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
#if HAVE_SSE2
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict16x16_sse2);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x8_sse2);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x4_sse2);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict16x16_sse2);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x8_sse2);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict16x16_sse2);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x8_sse2);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x4_sse2);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict16x16_sse2);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x8_sse2);
|
||||
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_subpix_sixtap16x16
|
||||
#define vp8_subpix_sixtap16x16 vp8_sixtap_predict16x16_sse2
|
||||
#define vp8_subpix_sixtap16x16 vp9_sixtap_predict16x16_sse2
|
||||
|
||||
#undef vp8_subpix_sixtap8x8
|
||||
#define vp8_subpix_sixtap8x8 vp8_sixtap_predict8x8_sse2
|
||||
#define vp8_subpix_sixtap8x8 vp9_sixtap_predict8x8_sse2
|
||||
|
||||
#undef vp8_subpix_sixtap8x4
|
||||
#define vp8_subpix_sixtap8x4 vp8_sixtap_predict8x4_sse2
|
||||
#define vp8_subpix_sixtap8x4 vp9_sixtap_predict8x4_sse2
|
||||
|
||||
#undef vp8_subpix_bilinear16x16
|
||||
#define vp8_subpix_bilinear16x16 vp8_bilinear_predict16x16_sse2
|
||||
#define vp8_subpix_bilinear16x16 vp9_bilinear_predict16x16_sse2
|
||||
|
||||
#undef vp8_subpix_bilinear8x8
|
||||
#define vp8_subpix_bilinear8x8 vp8_bilinear_predict8x8_sse2
|
||||
#define vp8_subpix_bilinear8x8 vp9_bilinear_predict8x8_sse2
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if HAVE_SSSE3
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict16x16_ssse3);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x8_ssse3);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict8x4_ssse3);
|
||||
extern prototype_subpixel_predict(vp8_sixtap_predict4x4_ssse3);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict16x16_ssse3);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x8_ssse3);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict16x16_ssse3);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x8_ssse3);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict8x4_ssse3);
|
||||
extern prototype_subpixel_predict(vp9_sixtap_predict4x4_ssse3);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict16x16_ssse3);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x8_ssse3);
|
||||
|
||||
#if !CONFIG_RUNTIME_CPU_DETECT
|
||||
#undef vp8_subpix_sixtap16x16
|
||||
#define vp8_subpix_sixtap16x16 vp8_sixtap_predict16x16_ssse3
|
||||
#define vp8_subpix_sixtap16x16 vp9_sixtap_predict16x16_ssse3
|
||||
|
||||
#undef vp8_subpix_sixtap8x8
|
||||
#define vp8_subpix_sixtap8x8 vp8_sixtap_predict8x8_ssse3
|
||||
#define vp8_subpix_sixtap8x8 vp9_sixtap_predict8x8_ssse3
|
||||
|
||||
#undef vp8_subpix_sixtap8x4
|
||||
#define vp8_subpix_sixtap8x4 vp8_sixtap_predict8x4_ssse3
|
||||
#define vp8_subpix_sixtap8x4 vp9_sixtap_predict8x4_ssse3
|
||||
|
||||
#undef vp8_subpix_sixtap4x4
|
||||
#define vp8_subpix_sixtap4x4 vp8_sixtap_predict4x4_ssse3
|
||||
#define vp8_subpix_sixtap4x4 vp9_sixtap_predict4x4_ssse3
|
||||
|
||||
|
||||
#undef vp8_subpix_bilinear16x16
|
||||
#define vp8_subpix_bilinear16x16 vp8_bilinear_predict16x16_ssse3
|
||||
#define vp8_subpix_bilinear16x16 vp9_bilinear_predict16x16_ssse3
|
||||
|
||||
#undef vp8_subpix_bilinear8x8
|
||||
#define vp8_subpix_bilinear8x8 vp8_bilinear_predict8x8_ssse3
|
||||
#define vp8_subpix_bilinear8x8 vp9_bilinear_predict8x8_ssse3
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -13,10 +13,10 @@
|
||||
#include "vpx_ports/mem.h"
|
||||
#include "vp8/common/subpixel.h"
|
||||
|
||||
extern const short vp8_six_tap_mmx[16][6 * 8];
|
||||
extern const short vp8_bilinear_filters_mmx[16][2 * 8];
|
||||
extern const short vp9_six_tap_mmx[16][6 * 8];
|
||||
extern const short vp9_bilinear_filters_mmx[16][2 * 8];
|
||||
|
||||
extern void vp8_filter_block1d_h6_mmx
|
||||
extern void vp9_filter_block1d_h6_mmx
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned short *output_ptr,
|
||||
@ -26,7 +26,7 @@ extern void vp8_filter_block1d_h6_mmx
|
||||
unsigned int output_width,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_filter_block1dc_v6_mmx
|
||||
extern void vp9_filter_block1dc_v6_mmx
|
||||
(
|
||||
unsigned short *src_ptr,
|
||||
unsigned char *output_ptr,
|
||||
@ -37,7 +37,7 @@ extern void vp8_filter_block1dc_v6_mmx
|
||||
unsigned int output_width,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_filter_block1d8_h6_sse2
|
||||
extern void vp9_filter_block1d8_h6_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned short *output_ptr,
|
||||
@ -47,7 +47,7 @@ extern void vp8_filter_block1d8_h6_sse2
|
||||
unsigned int output_width,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_filter_block1d16_h6_sse2
|
||||
extern void vp9_filter_block1d16_h6_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned short *output_ptr,
|
||||
@ -57,7 +57,7 @@ extern void vp8_filter_block1d16_h6_sse2
|
||||
unsigned int output_width,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_filter_block1d8_v6_sse2
|
||||
extern void vp9_filter_block1d8_v6_sse2
|
||||
(
|
||||
unsigned short *src_ptr,
|
||||
unsigned char *output_ptr,
|
||||
@ -68,7 +68,7 @@ extern void vp8_filter_block1d8_v6_sse2
|
||||
unsigned int output_width,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_filter_block1d16_v6_sse2
|
||||
extern void vp9_filter_block1d16_v6_sse2
|
||||
(
|
||||
unsigned short *src_ptr,
|
||||
unsigned char *output_ptr,
|
||||
@ -79,7 +79,7 @@ extern void vp8_filter_block1d16_v6_sse2
|
||||
unsigned int output_width,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_unpack_block1d16_h6_sse2
|
||||
extern void vp9_unpack_block1d16_h6_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned short *output_ptr,
|
||||
@ -87,7 +87,7 @@ extern void vp8_unpack_block1d16_h6_sse2
|
||||
unsigned int output_height,
|
||||
unsigned int output_width
|
||||
);
|
||||
extern void vp8_filter_block1d8_h6_only_sse2
|
||||
extern void vp9_filter_block1d8_h6_only_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pixels_per_line,
|
||||
@ -96,7 +96,7 @@ extern void vp8_filter_block1d8_h6_only_sse2
|
||||
unsigned int output_height,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_filter_block1d16_h6_only_sse2
|
||||
extern void vp9_filter_block1d16_h6_only_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pixels_per_line,
|
||||
@ -105,7 +105,7 @@ extern void vp8_filter_block1d16_h6_only_sse2
|
||||
unsigned int output_height,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern void vp8_filter_block1d8_v6_only_sse2
|
||||
extern void vp9_filter_block1d8_v6_only_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pixels_per_line,
|
||||
@ -114,11 +114,11 @@ extern void vp8_filter_block1d8_v6_only_sse2
|
||||
unsigned int output_height,
|
||||
const short *vp8_filter
|
||||
);
|
||||
extern prototype_subpixel_predict(vp8_bilinear_predict8x8_mmx);
|
||||
extern prototype_subpixel_predict(vp9_bilinear_predict8x8_mmx);
|
||||
|
||||
|
||||
#if HAVE_MMX
|
||||
void vp8_sixtap_predict4x4_mmx
|
||||
void vp9_sixtap_predict4x4_mmx
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -128,19 +128,19 @@ void vp8_sixtap_predict4x4_mmx
|
||||
int dst_pitch
|
||||
) {
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict4x4_mmx\n");
|
||||
printf("vp9_sixtap_predict4x4_mmx\n");
|
||||
#endif
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 16 * 16); /* Temp data bufffer used in filtering */
|
||||
const short *HFilter, *VFilter;
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 9, 8, HFilter);
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 8, dst_ptr, dst_pitch, 8, 4, 4, 4, VFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 9, 8, HFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 8, dst_ptr, dst_pitch, 8, 4, 4, 4, VFilter);
|
||||
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict16x16_mmx
|
||||
void vp9_sixtap_predict16x16_mmx
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -151,30 +151,30 @@ void vp8_sixtap_predict16x16_mmx
|
||||
) {
|
||||
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict16x16_mmx\n");
|
||||
printf("vp9_sixtap_predict16x16_mmx\n");
|
||||
#endif
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 24 * 24); /* Temp data bufffer used in filtering */
|
||||
|
||||
const short *HFilter, *VFilter;
|
||||
|
||||
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4, FData2 + 4, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 8, FData2 + 8, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 12, FData2 + 12, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4, FData2 + 4, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 8, FData2 + 8, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 12, FData2 + 12, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 36, dst_ptr + 4, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 40, dst_ptr + 8, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 44, dst_ptr + 12, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 36, dst_ptr + 4, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 40, dst_ptr + 8, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 44, dst_ptr + 12, dst_pitch, 32, 16, 16, 16, VFilter);
|
||||
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict8x8_mmx
|
||||
void vp9_sixtap_predict8x8_mmx
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -185,24 +185,24 @@ void vp8_sixtap_predict8x8_mmx
|
||||
) {
|
||||
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict8x8_mmx\n");
|
||||
printf("vp9_sixtap_predict8x8_mmx\n");
|
||||
#endif
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256); /* Temp data bufffer used in filtering */
|
||||
|
||||
const short *HFilter, *VFilter;
|
||||
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 13, 16, HFilter);
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4, FData2 + 4, src_pixels_per_line, 1, 13, 16, HFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 13, 16, HFilter);
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4, FData2 + 4, src_pixels_per_line, 1, 13, 16, HFilter);
|
||||
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 8, 8, VFilter);
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 20, dst_ptr + 4, dst_pitch, 16, 8, 8, 8, VFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 8, 8, VFilter);
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 20, dst_ptr + 4, dst_pitch, 16, 8, 8, 8, VFilter);
|
||||
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict8x4_mmx
|
||||
void vp9_sixtap_predict8x4_mmx
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -212,25 +212,25 @@ void vp8_sixtap_predict8x4_mmx
|
||||
int dst_pitch
|
||||
) {
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict8x4_mmx\n");
|
||||
printf("vp9_sixtap_predict8x4_mmx\n");
|
||||
#endif
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256); /* Temp data bufffer used in filtering */
|
||||
|
||||
const short *HFilter, *VFilter;
|
||||
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 9, 16, HFilter);
|
||||
vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4, FData2 + 4, src_pixels_per_line, 1, 9, 16, HFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 9, 16, HFilter);
|
||||
vp9_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4, FData2 + 4, src_pixels_per_line, 1, 9, 16, HFilter);
|
||||
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 4, 8, VFilter);
|
||||
vp8_filter_block1dc_v6_mmx(FData2 + 20, dst_ptr + 4, dst_pitch, 16, 8, 4, 8, VFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 4, 8, VFilter);
|
||||
vp9_filter_block1dc_v6_mmx(FData2 + 20, dst_ptr + 4, dst_pitch, 16, 8, 4, 8, VFilter);
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
void vp8_bilinear_predict16x16_mmx
|
||||
void vp9_bilinear_predict16x16_mmx
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -239,16 +239,16 @@ void vp8_bilinear_predict16x16_mmx
|
||||
unsigned char *dst_ptr,
|
||||
int dst_pitch
|
||||
) {
|
||||
vp8_bilinear_predict8x8_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pitch);
|
||||
vp8_bilinear_predict8x8_mmx(src_ptr + 8, src_pixels_per_line, xoffset, yoffset, dst_ptr + 8, dst_pitch);
|
||||
vp8_bilinear_predict8x8_mmx(src_ptr + 8 * src_pixels_per_line, src_pixels_per_line, xoffset, yoffset, dst_ptr + dst_pitch * 8, dst_pitch);
|
||||
vp8_bilinear_predict8x8_mmx(src_ptr + 8 * src_pixels_per_line + 8, src_pixels_per_line, xoffset, yoffset, dst_ptr + dst_pitch * 8 + 8, dst_pitch);
|
||||
vp9_bilinear_predict8x8_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pitch);
|
||||
vp9_bilinear_predict8x8_mmx(src_ptr + 8, src_pixels_per_line, xoffset, yoffset, dst_ptr + 8, dst_pitch);
|
||||
vp9_bilinear_predict8x8_mmx(src_ptr + 8 * src_pixels_per_line, src_pixels_per_line, xoffset, yoffset, dst_ptr + dst_pitch * 8, dst_pitch);
|
||||
vp9_bilinear_predict8x8_mmx(src_ptr + 8 * src_pixels_per_line + 8, src_pixels_per_line, xoffset, yoffset, dst_ptr + dst_pitch * 8 + 8, dst_pitch);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#if HAVE_SSE2
|
||||
void vp8_sixtap_predict16x16_sse2
|
||||
void vp9_sixtap_predict16x16_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -262,30 +262,30 @@ void vp8_sixtap_predict16x16_sse2
|
||||
|
||||
const short *HFilter, *VFilter;
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict16x16_sse2\n");
|
||||
printf("vp9_sixtap_predict16x16_sse2\n");
|
||||
#endif
|
||||
|
||||
if (xoffset) {
|
||||
if (yoffset) {
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d16_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16, dst_pitch, VFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d16_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 21, 32, HFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16, dst_pitch, VFilter);
|
||||
} else {
|
||||
/* First-pass only */
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d16_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 16, HFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d16_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 16, HFilter);
|
||||
}
|
||||
} else {
|
||||
/* Second-pass only */
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_unpack_block1d16_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 21, 32);
|
||||
vp8_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16, dst_pitch, VFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_unpack_block1d16_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 21, 32);
|
||||
vp9_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16, dst_pitch, VFilter);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict8x8_sse2
|
||||
void vp9_sixtap_predict8x8_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -297,29 +297,29 @@ void vp8_sixtap_predict8x8_sse2
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256); /* Temp data bufffer used in filtering */
|
||||
const short *HFilter, *VFilter;
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict8x8_sse2\n");
|
||||
printf("vp9_sixtap_predict8x8_sse2\n");
|
||||
#endif
|
||||
|
||||
if (xoffset) {
|
||||
if (yoffset) {
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 13, 16, HFilter);
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1d8_v6_sse2(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 8, dst_pitch, VFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 13, 16, HFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1d8_v6_sse2(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 8, dst_pitch, VFilter);
|
||||
} else {
|
||||
/* First-pass only */
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d8_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 8, HFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d8_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 8, HFilter);
|
||||
}
|
||||
} else {
|
||||
/* Second-pass only */
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1d8_v6_only_sse2(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 8, VFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1d8_v6_only_sse2(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 8, VFilter);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict8x4_sse2
|
||||
void vp9_sixtap_predict8x4_sse2
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -331,24 +331,24 @@ void vp8_sixtap_predict8x4_sse2
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256); /* Temp data bufffer used in filtering */
|
||||
const short *HFilter, *VFilter;
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict8x4_sse2\n");
|
||||
printf("vp9_sixtap_predict8x4_sse2\n");
|
||||
#endif
|
||||
|
||||
if (xoffset) {
|
||||
if (yoffset) {
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 9, 16, HFilter);
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1d8_v6_sse2(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 4, dst_pitch, VFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 9, 16, HFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1d8_v6_sse2(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 4, dst_pitch, VFilter);
|
||||
} else {
|
||||
/* First-pass only */
|
||||
HFilter = vp8_six_tap_mmx[xoffset];
|
||||
vp8_filter_block1d8_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 4, HFilter);
|
||||
HFilter = vp9_six_tap_mmx[xoffset];
|
||||
vp9_filter_block1d8_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 4, HFilter);
|
||||
}
|
||||
} else {
|
||||
/* Second-pass only */
|
||||
VFilter = vp8_six_tap_mmx[yoffset];
|
||||
vp8_filter_block1d8_v6_only_sse2(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 4, VFilter);
|
||||
VFilter = vp9_six_tap_mmx[yoffset];
|
||||
vp9_filter_block1d8_v6_only_sse2(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 4, VFilter);
|
||||
}
|
||||
}
|
||||
|
||||
@ -356,7 +356,7 @@ void vp8_sixtap_predict8x4_sse2
|
||||
|
||||
#if HAVE_SSSE3
|
||||
|
||||
extern void vp8_filter_block1d8_h6_ssse3
|
||||
extern void vp9_filter_block1d8_h6_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pixels_per_line,
|
||||
@ -366,7 +366,7 @@ extern void vp8_filter_block1d8_h6_ssse3
|
||||
unsigned int vp8_filter_index
|
||||
);
|
||||
|
||||
extern void vp8_filter_block1d16_h6_ssse3
|
||||
extern void vp9_filter_block1d16_h6_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pixels_per_line,
|
||||
@ -376,7 +376,7 @@ extern void vp8_filter_block1d16_h6_ssse3
|
||||
unsigned int vp8_filter_index
|
||||
);
|
||||
|
||||
extern void vp8_filter_block1d16_v6_ssse3
|
||||
extern void vp9_filter_block1d16_v6_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pitch,
|
||||
@ -386,7 +386,7 @@ extern void vp8_filter_block1d16_v6_ssse3
|
||||
unsigned int vp8_filter_index
|
||||
);
|
||||
|
||||
extern void vp8_filter_block1d8_v6_ssse3
|
||||
extern void vp9_filter_block1d8_v6_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pitch,
|
||||
@ -396,7 +396,7 @@ extern void vp8_filter_block1d8_v6_ssse3
|
||||
unsigned int vp8_filter_index
|
||||
);
|
||||
|
||||
extern void vp8_filter_block1d4_h6_ssse3
|
||||
extern void vp9_filter_block1d4_h6_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pixels_per_line,
|
||||
@ -406,7 +406,7 @@ extern void vp8_filter_block1d4_h6_ssse3
|
||||
unsigned int vp8_filter_index
|
||||
);
|
||||
|
||||
extern void vp8_filter_block1d4_v6_ssse3
|
||||
extern void vp9_filter_block1d4_v6_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
unsigned int src_pitch,
|
||||
@ -416,7 +416,7 @@ extern void vp8_filter_block1d4_v6_ssse3
|
||||
unsigned int vp8_filter_index
|
||||
);
|
||||
|
||||
void vp8_sixtap_predict16x16_ssse3
|
||||
void vp9_sixtap_predict16x16_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -428,24 +428,24 @@ void vp8_sixtap_predict16x16_ssse3
|
||||
) {
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 24 * 24);
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict16x16_ssse3\n");
|
||||
printf("vp9_sixtap_predict16x16_ssse3\n");
|
||||
#endif
|
||||
|
||||
if (xoffset) {
|
||||
if (yoffset) {
|
||||
vp8_filter_block1d16_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 16, 21, xoffset);
|
||||
vp8_filter_block1d16_v6_ssse3(FData2, 16, dst_ptr, dst_pitch, 16, yoffset);
|
||||
vp9_filter_block1d16_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 16, 21, xoffset);
|
||||
vp9_filter_block1d16_v6_ssse3(FData2, 16, dst_ptr, dst_pitch, 16, yoffset);
|
||||
} else {
|
||||
/* First-pass only */
|
||||
vp8_filter_block1d16_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 16, xoffset);
|
||||
vp9_filter_block1d16_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 16, xoffset);
|
||||
}
|
||||
} else {
|
||||
/* Second-pass only */
|
||||
vp8_filter_block1d16_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 16, yoffset);
|
||||
vp9_filter_block1d16_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 16, yoffset);
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict8x8_ssse3
|
||||
void vp9_sixtap_predict8x8_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -456,24 +456,24 @@ void vp8_sixtap_predict8x8_ssse3
|
||||
) {
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 256);
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict8x8_ssse3\n");
|
||||
printf("vp9_sixtap_predict8x8_ssse3\n");
|
||||
#endif
|
||||
|
||||
if (xoffset) {
|
||||
if (yoffset) {
|
||||
vp8_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 8, 13, xoffset);
|
||||
vp8_filter_block1d8_v6_ssse3(FData2, 8, dst_ptr, dst_pitch, 8, yoffset);
|
||||
vp9_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 8, 13, xoffset);
|
||||
vp9_filter_block1d8_v6_ssse3(FData2, 8, dst_ptr, dst_pitch, 8, yoffset);
|
||||
} else {
|
||||
vp8_filter_block1d8_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 8, xoffset);
|
||||
vp9_filter_block1d8_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 8, xoffset);
|
||||
}
|
||||
} else {
|
||||
/* Second-pass only */
|
||||
vp8_filter_block1d8_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 8, yoffset);
|
||||
vp9_filter_block1d8_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 8, yoffset);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void vp8_sixtap_predict8x4_ssse3
|
||||
void vp9_sixtap_predict8x4_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -484,24 +484,24 @@ void vp8_sixtap_predict8x4_ssse3
|
||||
) {
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 256);
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict8x4_ssse3\n");
|
||||
printf("vp9_sixtap_predict8x4_ssse3\n");
|
||||
#endif
|
||||
|
||||
if (xoffset) {
|
||||
if (yoffset) {
|
||||
vp8_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 8, 9, xoffset);
|
||||
vp8_filter_block1d8_v6_ssse3(FData2, 8, dst_ptr, dst_pitch, 4, yoffset);
|
||||
vp9_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 8, 9, xoffset);
|
||||
vp9_filter_block1d8_v6_ssse3(FData2, 8, dst_ptr, dst_pitch, 4, yoffset);
|
||||
} else {
|
||||
/* First-pass only */
|
||||
vp8_filter_block1d8_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 4, xoffset);
|
||||
vp9_filter_block1d8_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 4, xoffset);
|
||||
}
|
||||
} else {
|
||||
/* Second-pass only */
|
||||
vp8_filter_block1d8_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 4, yoffset);
|
||||
vp9_filter_block1d8_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 4, yoffset);
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_sixtap_predict4x4_ssse3
|
||||
void vp9_sixtap_predict4x4_ssse3
|
||||
(
|
||||
unsigned char *src_ptr,
|
||||
int src_pixels_per_line,
|
||||
@ -512,35 +512,35 @@ void vp8_sixtap_predict4x4_ssse3
|
||||
) {
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 4 * 9);
|
||||
#ifdef ANNOUNCE_FUNCTION
|
||||
printf("vp8_sixtap_predict4x4_ssse3\n");
|
||||
printf("vp9_sixtap_predict4x4_ssse3\n");
|
||||
#endif
|
||||
|
||||
if (xoffset) {
|
||||
if (yoffset) {
|
||||
vp8_filter_block1d4_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 4, 9, xoffset);
|
||||
vp8_filter_block1d4_v6_ssse3(FData2, 4, dst_ptr, dst_pitch, 4, yoffset);
|
||||
vp9_filter_block1d4_h6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, FData2, 4, 9, xoffset);
|
||||
vp9_filter_block1d4_v6_ssse3(FData2, 4, dst_ptr, dst_pitch, 4, yoffset);
|
||||
} else {
|
||||
vp8_filter_block1d4_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 4, xoffset);
|
||||
vp9_filter_block1d4_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch, 4, xoffset);
|
||||
}
|
||||
} else {
|
||||
vp8_filter_block1d4_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 4, yoffset);
|
||||
vp9_filter_block1d4_v6_ssse3(src_ptr - (2 * src_pixels_per_line), src_pixels_per_line, dst_ptr, dst_pitch, 4, yoffset);
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_filter_block1d16_v8_ssse3(const unsigned char *src_ptr,
|
||||
void vp9_filter_block1d16_v8_ssse3(const unsigned char *src_ptr,
|
||||
const unsigned int src_pitch,
|
||||
unsigned char *output_ptr,
|
||||
unsigned int out_pitch,
|
||||
unsigned int output_height,
|
||||
const short *filter);
|
||||
void vp8_filter_block1d16_h8_ssse3(const unsigned char *src_ptr,
|
||||
void vp9_filter_block1d16_h8_ssse3(const unsigned char *src_ptr,
|
||||
const unsigned int src_pitch,
|
||||
unsigned char *output_ptr,
|
||||
unsigned int out_pitch,
|
||||
unsigned int output_height,
|
||||
const short *filter);
|
||||
|
||||
void vp8_filter_block2d_16x16_8_ssse3
|
||||
void vp9_filter_block2d_16x16_8_ssse3
|
||||
(
|
||||
const unsigned char *src_ptr, const unsigned int src_stride,
|
||||
const short *HFilter_aligned16, const short *VFilter_aligned16,
|
||||
@ -548,34 +548,34 @@ void vp8_filter_block2d_16x16_8_ssse3
|
||||
) {
|
||||
if (HFilter_aligned16[3] !=128 && VFilter_aligned16[3] != 128) {
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 23 * 16);
|
||||
vp8_filter_block1d16_h8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
vp9_filter_block1d16_h8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
FData2, 16, 23, HFilter_aligned16);
|
||||
vp8_filter_block1d16_v8_ssse3(FData2, 16, dst_ptr, dst_stride, 16,
|
||||
vp9_filter_block1d16_v8_ssse3(FData2, 16, dst_ptr, dst_stride, 16,
|
||||
VFilter_aligned16);
|
||||
} else {
|
||||
if (HFilter_aligned16[3] !=128) {
|
||||
vp8_filter_block1d16_h8_ssse3(src_ptr, src_stride, dst_ptr, dst_stride,
|
||||
vp9_filter_block1d16_h8_ssse3(src_ptr, src_stride, dst_ptr, dst_stride,
|
||||
16, HFilter_aligned16);
|
||||
} else {
|
||||
vp8_filter_block1d16_v8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
vp9_filter_block1d16_v8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
dst_ptr, dst_stride, 16, VFilter_aligned16);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_filter_block1d8_v8_ssse3(const unsigned char *src_ptr,
|
||||
void vp9_filter_block1d8_v8_ssse3(const unsigned char *src_ptr,
|
||||
const unsigned int src_pitch,
|
||||
unsigned char *output_ptr,
|
||||
unsigned int out_pitch,
|
||||
unsigned int output_height,
|
||||
const short *filter);
|
||||
void vp8_filter_block1d8_h8_ssse3(const unsigned char *src_ptr,
|
||||
void vp9_filter_block1d8_h8_ssse3(const unsigned char *src_ptr,
|
||||
const unsigned int src_pitch,
|
||||
unsigned char *output_ptr,
|
||||
unsigned int out_pitch,
|
||||
unsigned int output_height,
|
||||
const short *filter);
|
||||
void vp8_filter_block2d_8x8_8_ssse3
|
||||
void vp9_filter_block2d_8x8_8_ssse3
|
||||
(
|
||||
const unsigned char *src_ptr, const unsigned int src_stride,
|
||||
const short *HFilter_aligned16, const short *VFilter_aligned16,
|
||||
@ -583,22 +583,22 @@ void vp8_filter_block2d_8x8_8_ssse3
|
||||
) {
|
||||
if (HFilter_aligned16[3] !=128 && VFilter_aligned16[3] != 128) {
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 23 * 16);
|
||||
vp8_filter_block1d8_h8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
vp9_filter_block1d8_h8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
FData2, 16, 15, HFilter_aligned16);
|
||||
vp8_filter_block1d8_v8_ssse3(FData2, 16, dst_ptr, dst_stride, 8,
|
||||
vp9_filter_block1d8_v8_ssse3(FData2, 16, dst_ptr, dst_stride, 8,
|
||||
VFilter_aligned16);
|
||||
} else {
|
||||
if (HFilter_aligned16[3] !=128) {
|
||||
vp8_filter_block1d8_h8_ssse3(src_ptr, src_stride, dst_ptr, dst_stride, 8,
|
||||
vp9_filter_block1d8_h8_ssse3(src_ptr, src_stride, dst_ptr, dst_stride, 8,
|
||||
HFilter_aligned16);
|
||||
} else {
|
||||
vp8_filter_block1d8_v8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
vp9_filter_block1d8_v8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
dst_ptr, dst_stride, 8, VFilter_aligned16);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void vp8_filter_block2d_8x4_8_ssse3
|
||||
void vp9_filter_block2d_8x4_8_ssse3
|
||||
(
|
||||
const unsigned char *src_ptr, const unsigned int src_stride,
|
||||
const short *HFilter_aligned16, const short *VFilter_aligned16,
|
||||
@ -606,16 +606,16 @@ void vp8_filter_block2d_8x4_8_ssse3
|
||||
) {
|
||||
if (HFilter_aligned16[3] !=128 && VFilter_aligned16[3] != 128) {
|
||||
DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 23 * 16);
|
||||
vp8_filter_block1d8_h8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
vp9_filter_block1d8_h8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
FData2, 16, 11, HFilter_aligned16);
|
||||
vp8_filter_block1d8_v8_ssse3(FData2, 16, dst_ptr, dst_stride, 4,
|
||||
vp9_filter_block1d8_v8_ssse3(FData2, 16, dst_ptr, dst_stride, 4,
|
||||
VFilter_aligned16);
|
||||
} else {
|
||||
if (HFilter_aligned16[3] !=128) {
|
||||
vp8_filter_block1d8_h8_ssse3(src_ptr, src_stride, dst_ptr, dst_stride, 4,
|
||||
vp9_filter_block1d8_h8_ssse3(src_ptr, src_stride, dst_ptr, dst_stride, 4,
|
||||
HFilter_aligned16);
|
||||
} else {
|
||||
vp8_filter_block1d8_v8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
vp9_filter_block1d8_v8_ssse3(src_ptr - (3 * src_stride), src_stride,
|
||||
dst_ptr, dst_stride, 4, VFilter_aligned16);
|
||||
}
|
||||
}
|
||||
|
@ -18,7 +18,7 @@
|
||||
#include "vp8/common/pragmas.h"
|
||||
#include "vp8/common/onyxc_int.h"
|
||||
|
||||
void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
|
||||
void vp9_arch_x86_common_init(VP8_COMMON *ctx) {
|
||||
#if CONFIG_RUNTIME_CPU_DETECT
|
||||
VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
|
||||
int flags = x86_simd_caps();
|
||||
@ -37,25 +37,25 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
|
||||
rtcd->idct.idct1 = vp9_short_idct4x4llm_1_mmx;
|
||||
rtcd->idct.idct16 = vp9_short_idct4x4llm_mmx;
|
||||
rtcd->idct.idct1_scalar_add = vp9_dc_only_idct_add_mmx;
|
||||
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
|
||||
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
|
||||
// rtcd->idct.iwalsh16 = vp9_short_inv_walsh4x4_mmx;
|
||||
// rtcd->idct.iwalsh1 = vp9_short_inv_walsh4x4_1_mmx;
|
||||
|
||||
/* Disabled due to unsupported enhanced interpolation/high_prec mv
|
||||
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_mmx;
|
||||
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_mmx;
|
||||
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_mmx;
|
||||
rtcd->subpix.sixtap4x4 = vp8_sixtap_predict4x4_mmx;
|
||||
rtcd->subpix.sixtap16x16 = vp9_sixtap_predict16x16_mmx;
|
||||
rtcd->subpix.sixtap8x8 = vp9_sixtap_predict8x8_mmx;
|
||||
rtcd->subpix.sixtap8x4 = vp9_sixtap_predict8x4_mmx;
|
||||
rtcd->subpix.sixtap4x4 = vp9_sixtap_predict4x4_mmx;
|
||||
*/
|
||||
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_mmx;
|
||||
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_mmx;
|
||||
rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_mmx;
|
||||
rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_mmx;
|
||||
rtcd->subpix.bilinear16x16 = vp9_bilinear_predict16x16_mmx;
|
||||
rtcd->subpix.bilinear8x8 = vp9_bilinear_predict8x8_mmx;
|
||||
rtcd->subpix.bilinear8x4 = vp9_bilinear_predict8x4_mmx;
|
||||
rtcd->subpix.bilinear4x4 = vp9_bilinear_predict4x4_mmx;
|
||||
|
||||
#if CONFIG_POSTPROC
|
||||
rtcd->postproc.down = vp8_mbpost_proc_down_mmx;
|
||||
/*rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;*/
|
||||
rtcd->postproc.downacross = vp8_post_proc_down_and_across_mmx;
|
||||
rtcd->postproc.addnoise = vp8_plane_add_noise_mmx;
|
||||
rtcd->postproc.down = vp9_mbpost_proc_down_mmx;
|
||||
/*rtcd->postproc.across = vp9_mbpost_proc_across_ip_c;*/
|
||||
rtcd->postproc.downacross = vp9_post_proc_down_and_across_mmx;
|
||||
rtcd->postproc.addnoise = vp9_plane_add_noise_mmx;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -65,21 +65,21 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
|
||||
if (flags & HAS_SSE2) {
|
||||
|
||||
|
||||
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_sse2;
|
||||
// rtcd->idct.iwalsh16 = vp9_short_inv_walsh4x4_sse2;
|
||||
|
||||
/* Disabled due to unsupported enhanced interpolation/high_prec mv
|
||||
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_sse2;
|
||||
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_sse2;
|
||||
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_sse2;
|
||||
rtcd->subpix.sixtap16x16 = vp9_sixtap_predict16x16_sse2;
|
||||
rtcd->subpix.sixtap8x8 = vp9_sixtap_predict8x8_sse2;
|
||||
rtcd->subpix.sixtap8x4 = vp9_sixtap_predict8x4_sse2;
|
||||
*/
|
||||
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_sse2;
|
||||
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_sse2;
|
||||
rtcd->subpix.bilinear16x16 = vp9_bilinear_predict16x16_sse2;
|
||||
rtcd->subpix.bilinear8x8 = vp9_bilinear_predict8x8_sse2;
|
||||
|
||||
#if CONFIG_POSTPROC
|
||||
rtcd->postproc.down = vp8_mbpost_proc_down_xmm;
|
||||
rtcd->postproc.across = vp8_mbpost_proc_across_ip_xmm;
|
||||
rtcd->postproc.downacross = vp8_post_proc_down_and_across_xmm;
|
||||
rtcd->postproc.addnoise = vp8_plane_add_noise_wmt;
|
||||
rtcd->postproc.down = vp9_mbpost_proc_down_xmm;
|
||||
rtcd->postproc.across = vp9_mbpost_proc_across_ip_xmm;
|
||||
rtcd->postproc.downacross = vp9_post_proc_down_and_across_xmm;
|
||||
rtcd->postproc.addnoise = vp9_plane_add_noise_wmt;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -89,19 +89,19 @@ void vp8_arch_x86_common_init(VP8_COMMON *ctx) {
|
||||
|
||||
if (flags & HAS_SSSE3) {
|
||||
/* Disabled due to unsupported enhanced interpolation/high_prec mv
|
||||
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_ssse3;
|
||||
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_ssse3;
|
||||
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_ssse3;
|
||||
rtcd->subpix.sixtap4x4 = vp8_sixtap_predict4x4_ssse3;
|
||||
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_ssse3;
|
||||
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_ssse3;
|
||||
rtcd->subpix.sixtap16x16 = vp9_sixtap_predict16x16_ssse3;
|
||||
rtcd->subpix.sixtap8x8 = vp9_sixtap_predict8x8_ssse3;
|
||||
rtcd->subpix.sixtap8x4 = vp9_sixtap_predict8x4_ssse3;
|
||||
rtcd->subpix.sixtap4x4 = vp9_sixtap_predict4x4_ssse3;
|
||||
rtcd->subpix.bilinear16x16 = vp9_bilinear_predict16x16_ssse3;
|
||||
rtcd->subpix.bilinear8x8 = vp9_bilinear_predict8x8_ssse3;
|
||||
*/
|
||||
|
||||
/* these are disable because of unsupported diagonal pred modes
|
||||
rtcd->recon.build_intra_predictors_mbuv =
|
||||
vp8_build_intra_predictors_mbuv_ssse3;
|
||||
vp9_build_intra_predictors_mbuv_ssse3;
|
||||
rtcd->recon.build_intra_predictors_mbuv_s =
|
||||
vp8_build_intra_predictors_mbuv_s_ssse3;
|
||||
vp9_build_intra_predictors_mbuv_s_ssse3;
|
||||
*/
|
||||
}
|
||||
#endif
|
||||
|
@ -225,7 +225,7 @@ static int read_nmv_component(vp8_reader *r,
|
||||
}
|
||||
o = d << 3;
|
||||
|
||||
z = vp8_get_mv_mag(c, o);
|
||||
z = vp9_get_mv_mag(c, o);
|
||||
v = (s ? -(z + 8) : (z + 8));
|
||||
return v;
|
||||
}
|
||||
@ -240,7 +240,7 @@ static int read_nmv_component_fp(vp8_reader *r,
|
||||
z = (s ? -v : v) - 1; /* magnitude - 1 */
|
||||
z &= ~7;
|
||||
|
||||
c = vp8_get_mv_class(z, &o);
|
||||
c = vp9_get_mv_class(z, &o);
|
||||
d = o >> 3;
|
||||
|
||||
if (c == MV_CLASS_0) {
|
||||
@ -260,7 +260,7 @@ static int read_nmv_component_fp(vp8_reader *r,
|
||||
} else {
|
||||
++o; /* Note if hp is not used, the default value of the hp bit is 1 */
|
||||
}
|
||||
z = vp8_get_mv_mag(c, o);
|
||||
z = vp9_get_mv_mag(c, o);
|
||||
v = (s ? -(z + 1) : (z + 1));
|
||||
return v;
|
||||
}
|
||||
@ -279,8 +279,8 @@ static void read_nmv(vp8_reader *r, MV *mv, const MV *ref,
|
||||
|
||||
static void read_nmv_fp(vp8_reader *r, MV *mv, const MV *ref,
|
||||
const nmv_context *mvctx, int usehp) {
|
||||
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
|
||||
usehp = usehp && vp8_use_nmv_hp(ref);
|
||||
MV_JOINT_TYPE j = vp9_get_mv_joint(*mv);
|
||||
usehp = usehp && vp9_use_nmv_hp(ref);
|
||||
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
|
||||
mv->row = read_nmv_component_fp(r, mv->row, ref->row, &mvctx->comps[0],
|
||||
usehp);
|
||||
@ -481,7 +481,7 @@ static B_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p) {
|
||||
}
|
||||
|
||||
#ifdef VPX_MODE_COUNT
|
||||
unsigned int vp8_mv_cont_count[5][4] = {
|
||||
unsigned int vp9_mv_cont_count[5][4] = {
|
||||
{ 0, 0, 0, 0 },
|
||||
{ 0, 0, 0, 0 },
|
||||
{ 0, 0, 0, 0 },
|
||||
@ -719,7 +719,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
int recon_uv_stride, recon_uvoffset;
|
||||
#endif
|
||||
|
||||
vp8_find_near_mvs(xd, mi,
|
||||
vp9_find_near_mvs(xd, mi,
|
||||
prev_mi,
|
||||
&nearest, &nearby, &best_mv, rct,
|
||||
mbmi->ref_frame, cm->ref_frame_sign_bias);
|
||||
@ -759,7 +759,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
}
|
||||
#endif
|
||||
|
||||
vp8_mv_ref_probs(&pbi->common, mv_ref_p, rct);
|
||||
vp9_mv_ref_probs(&pbi->common, mv_ref_p, rct);
|
||||
|
||||
// Is the segment level mode feature enabled for this segment
|
||||
if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_MODE)) {
|
||||
@ -773,7 +773,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
#endif
|
||||
mbmi->mode = read_mv_ref(bc, mv_ref_p);
|
||||
|
||||
vp8_accum_mv_refs(&pbi->common, mbmi->mode, rct);
|
||||
vp9_accum_mv_refs(&pbi->common, mbmi->mode, rct);
|
||||
}
|
||||
|
||||
#if CONFIG_PRED_FILTER
|
||||
@ -828,7 +828,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
cm->yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
|
||||
xd->second_pre.v_buffer =
|
||||
cm->yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
|
||||
vp8_find_near_mvs(xd, mi, prev_mi,
|
||||
vp9_find_near_mvs(xd, mi, prev_mi,
|
||||
&nearest_second, &nearby_second, &best_mv_second,
|
||||
rct,
|
||||
mbmi->second_ref_frame,
|
||||
@ -848,7 +848,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
&nearby_second);
|
||||
}
|
||||
#else
|
||||
vp8_find_near_mvs(xd, mi, prev_mi,
|
||||
vp9_find_near_mvs(xd, mi, prev_mi,
|
||||
&nearest_second, &nearby_second, &best_mv_second,
|
||||
rct,
|
||||
mbmi->second_ref_frame,
|
||||
@ -883,7 +883,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
second_leftmv.as_int = left_block_second_mv(mi, k);
|
||||
second_abovemv.as_int = above_block_second_mv(mi, k, mis);
|
||||
}
|
||||
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
|
||||
mv_contz = vp9_mv_cont(&leftmv, &abovemv);
|
||||
blockmode = sub_mv_ref(bc, cm->fc.sub_mv_ref_prob [mv_contz]);
|
||||
cm->fc.sub_mv_ref_counts[mv_contz][blockmode - LEFT4X4]++;
|
||||
|
||||
@ -892,7 +892,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
read_nmv(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc);
|
||||
read_nmv_fp(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc,
|
||||
xd->allow_high_precision_mv);
|
||||
vp8_increment_nmv(&blockmv.as_mv, &best_mv.as_mv,
|
||||
vp9_increment_nmv(&blockmv.as_mv, &best_mv.as_mv,
|
||||
&cm->fc.NMVcount, xd->allow_high_precision_mv);
|
||||
blockmv.as_mv.row += best_mv.as_mv.row;
|
||||
blockmv.as_mv.col += best_mv.as_mv.col;
|
||||
@ -901,13 +901,13 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
read_nmv(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc);
|
||||
read_nmv_fp(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
|
||||
xd->allow_high_precision_mv);
|
||||
vp8_increment_nmv(&secondmv.as_mv, &best_mv_second.as_mv,
|
||||
vp9_increment_nmv(&secondmv.as_mv, &best_mv_second.as_mv,
|
||||
&cm->fc.NMVcount, xd->allow_high_precision_mv);
|
||||
secondmv.as_mv.row += best_mv_second.as_mv.row;
|
||||
secondmv.as_mv.col += best_mv_second.as_mv.col;
|
||||
}
|
||||
#ifdef VPX_MODE_COUNT
|
||||
vp8_mv_cont_count[mv_contz][3]++;
|
||||
vp9_mv_cont_count[mv_contz][3]++;
|
||||
#endif
|
||||
break;
|
||||
case LEFT4X4:
|
||||
@ -915,7 +915,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
if (mbmi->second_ref_frame)
|
||||
secondmv.as_int = second_leftmv.as_int;
|
||||
#ifdef VPX_MODE_COUNT
|
||||
vp8_mv_cont_count[mv_contz][0]++;
|
||||
vp9_mv_cont_count[mv_contz][0]++;
|
||||
#endif
|
||||
break;
|
||||
case ABOVE4X4:
|
||||
@ -923,7 +923,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
if (mbmi->second_ref_frame)
|
||||
secondmv.as_int = second_abovemv.as_int;
|
||||
#ifdef VPX_MODE_COUNT
|
||||
vp8_mv_cont_count[mv_contz][1]++;
|
||||
vp9_mv_cont_count[mv_contz][1]++;
|
||||
#endif
|
||||
break;
|
||||
case ZERO4X4:
|
||||
@ -931,7 +931,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
if (mbmi->second_ref_frame)
|
||||
secondmv.as_int = 0;
|
||||
#ifdef VPX_MODE_COUNT
|
||||
vp8_mv_cont_count[mv_contz][2]++;
|
||||
vp9_mv_cont_count[mv_contz][2]++;
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
@ -1024,7 +1024,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);
|
||||
read_nmv_fp(bc, &mv->as_mv, &best_mv.as_mv, nmvc,
|
||||
xd->allow_high_precision_mv);
|
||||
vp8_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
|
||||
vp9_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
mv->as_mv.row += best_mv.as_mv.row;
|
||||
@ -1057,7 +1057,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
read_nmv(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc);
|
||||
read_nmv_fp(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc,
|
||||
xd->allow_high_precision_mv);
|
||||
vp8_increment_nmv(&mbmi->mv[1].as_mv, &best_mv_second.as_mv,
|
||||
vp9_increment_nmv(&mbmi->mv[1].as_mv, &best_mv_second.as_mv,
|
||||
&cm->fc.NMVcount, xd->allow_high_precision_mv);
|
||||
mbmi->mv[1].as_mv.row += best_mv_second.as_mv.row;
|
||||
mbmi->mv[1].as_mv.col += best_mv_second.as_mv.col;
|
||||
|
@ -78,17 +78,17 @@ void vp9_init_de_quantizer(VP8D_COMP *pbi) {
|
||||
VP8_COMMON *const pc = &pbi->common;
|
||||
|
||||
for (Q = 0; Q < QINDEX_RANGE; Q++) {
|
||||
pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q);
|
||||
pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q);
|
||||
pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q);
|
||||
pc->Y1dequant[Q][0] = (short)vp9_dc_quant(Q, pc->y1dc_delta_q);
|
||||
pc->Y2dequant[Q][0] = (short)vp9_dc2quant(Q, pc->y2dc_delta_q);
|
||||
pc->UVdequant[Q][0] = (short)vp9_dc_uv_quant(Q, pc->uvdc_delta_q);
|
||||
|
||||
/* all the ac values =; */
|
||||
for (i = 1; i < 16; i++) {
|
||||
int rc = vp8_default_zig_zag1d[i];
|
||||
|
||||
pc->Y1dequant[Q][rc] = (short)vp8_ac_yquant(Q);
|
||||
pc->Y2dequant[Q][rc] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q);
|
||||
pc->UVdequant[Q][rc] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q);
|
||||
pc->Y1dequant[Q][rc] = (short)vp9_ac_yquant(Q);
|
||||
pc->Y2dequant[Q][rc] = (short)vp9_ac2quant(Q, pc->y2ac_delta_q);
|
||||
pc->UVdequant[Q][rc] = (short)vp9_ac_uv_quant(Q, pc->uvac_delta_q);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -122,22 +122,22 @@ static void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) {
|
||||
|
||||
#if CONFIG_LOSSLESS
|
||||
if (!QIndex) {
|
||||
pbi->common.rtcd.idct.idct1 = vp8_short_inv_walsh4x4_1_x8_c;
|
||||
pbi->common.rtcd.idct.idct16 = vp8_short_inv_walsh4x4_x8_c;
|
||||
pbi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_inv_walsh_add_c;
|
||||
pbi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_lossless_c;
|
||||
pbi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_lossless_c;
|
||||
pbi->common.rtcd.idct.idct1 = vp9_short_inv_walsh4x4_1_x8_c;
|
||||
pbi->common.rtcd.idct.idct16 = vp9_short_inv_walsh4x4_x8_c;
|
||||
pbi->common.rtcd.idct.idct1_scalar_add = vp9_dc_only_inv_walsh_add_c;
|
||||
pbi->common.rtcd.idct.iwalsh1 = vp9_short_inv_walsh4x4_1_lossless_c;
|
||||
pbi->common.rtcd.idct.iwalsh16 = vp9_short_inv_walsh4x4_lossless_c;
|
||||
pbi->idct_add = vp9_dequant_idct_add_lossless_c;
|
||||
pbi->dc_idct_add = vp9_dequant_dc_idct_add_lossless_c;
|
||||
pbi->dc_idct_add_y_block = vp9_dequant_dc_idct_add_y_block_lossless_c;
|
||||
pbi->idct_add_y_block = vp9_dequant_idct_add_y_block_lossless_c;
|
||||
pbi->idct_add_uv_block = vp9_dequant_idct_add_uv_block_lossless_c;
|
||||
} else {
|
||||
pbi->common.rtcd.idct.idct1 = vp8_short_idct4x4llm_1_c;
|
||||
pbi->common.rtcd.idct.idct16 = vp8_short_idct4x4llm_c;
|
||||
pbi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
|
||||
pbi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
|
||||
pbi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
|
||||
pbi->common.rtcd.idct.idct1 = vp9_short_idct4x4llm_1_c;
|
||||
pbi->common.rtcd.idct.idct16 = vp9_short_idct4x4llm_c;
|
||||
pbi->common.rtcd.idct.idct1_scalar_add = vp9_dc_only_idct_add_c;
|
||||
pbi->common.rtcd.idct.iwalsh1 = vp9_short_inv_walsh4x4_1_c;
|
||||
pbi->common.rtcd.idct.iwalsh16 = vp9_short_inv_walsh4x4_c;
|
||||
pbi->idct_add = vp9_dequant_idct_add;
|
||||
pbi->dc_idct_add = vp9_dequant_dc_idct_add;
|
||||
pbi->dc_idct_add_y_block = vp9_dequant_dc_idct_add_y_block;
|
||||
@ -173,29 +173,29 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd) {
|
||||
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
|
||||
#if CONFIG_SUPERBLOCKS
|
||||
if (xd->mode_info_context->mbmi.encoded_as_sb) {
|
||||
vp8_build_intra_predictors_sbuv_s(xd);
|
||||
vp8_build_intra_predictors_sby_s(xd);
|
||||
vp9_build_intra_predictors_sbuv_s(xd);
|
||||
vp9_build_intra_predictors_sby_s(xd);
|
||||
} else {
|
||||
#endif
|
||||
vp8_build_intra_predictors_mbuv_s(xd);
|
||||
vp8_build_intra_predictors_mby_s(xd);
|
||||
vp9_build_intra_predictors_mbuv_s(xd);
|
||||
vp9_build_intra_predictors_mby_s(xd);
|
||||
#if CONFIG_SUPERBLOCKS
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
#if CONFIG_SUPERBLOCKS
|
||||
if (xd->mode_info_context->mbmi.encoded_as_sb) {
|
||||
vp8_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
|
||||
vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
|
||||
xd->dst.u_buffer, xd->dst.v_buffer,
|
||||
xd->dst.y_stride, xd->dst.uv_stride);
|
||||
} else {
|
||||
#endif
|
||||
vp8_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
|
||||
vp9_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
|
||||
xd->dst.u_buffer, xd->dst.v_buffer,
|
||||
xd->dst.y_stride, xd->dst.uv_stride);
|
||||
|
||||
if (xd->mode_info_context->mbmi.second_ref_frame) {
|
||||
vp8_build_2nd_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
|
||||
vp9_build_2nd_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
|
||||
xd->dst.u_buffer, xd->dst.v_buffer,
|
||||
xd->dst.y_stride, xd->dst.uv_stride);
|
||||
}
|
||||
@ -257,7 +257,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
|
||||
|
||||
//mode = xd->mode_info_context->mbmi.mode;
|
||||
if (pbi->common.frame_type != KEY_FRAME)
|
||||
vp8_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter,
|
||||
vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter,
|
||||
&pbi->common);
|
||||
|
||||
if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV
|
||||
@ -285,25 +285,25 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
|
||||
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
|
||||
#if CONFIG_SUPERBLOCKS
|
||||
if (xd->mode_info_context->mbmi.encoded_as_sb) {
|
||||
vp8_build_intra_predictors_sby_s(xd);
|
||||
vp8_build_intra_predictors_sbuv_s(xd);
|
||||
vp9_build_intra_predictors_sby_s(xd);
|
||||
vp9_build_intra_predictors_sbuv_s(xd);
|
||||
} else
|
||||
#endif
|
||||
if (mode != I8X8_PRED) {
|
||||
vp8_build_intra_predictors_mbuv(xd);
|
||||
vp9_build_intra_predictors_mbuv(xd);
|
||||
if (mode != B_PRED) {
|
||||
vp8_build_intra_predictors_mby(xd);
|
||||
vp9_build_intra_predictors_mby(xd);
}
}
} else {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
vp8_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
} else
#endif
vp8_build_inter_predictors_mb(xd);
vp9_build_inter_predictors_mb(xd);
}

/* dequantization and idct */
@ -325,7 +325,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,

b = &xd->block[ib];
i8x8mode = b->bmi.as_mode.first;
vp8_intra8x8_predict(b, i8x8mode, b->predictor);
vp9_intra8x8_predict(b, i8x8mode, b->predictor);

if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
tx_type = get_tx_type(xd, &xd->block[idx]);
@ -344,11 +344,11 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
}
}
b = &xd->block[16 + i];
vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
vp9_intra_uv4x4_predict(b, i8x8mode, b->predictor);
pbi->idct_add(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride);
b = &xd->block[20 + i];
vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
vp9_intra_uv4x4_predict(b, i8x8mode, b->predictor);
pbi->idct_add(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride);
}
@ -361,7 +361,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,

if (b_mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
vp8_intra4x4_predict(b, b_mode, b->predictor);
vp9_intra4x4_predict(b, b_mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
vp8_comp_intra4x4_predict(b, b_mode, b_mode2, b->predictor);
@ -662,7 +662,7 @@ decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd,
}
}
#endif
vp8_intra_prediction_down_copy(xd);
vp9_intra_prediction_down_copy(xd);
decode_macroblock(pbi, xd, mb_row, mb_col, bc);

/* check if the boolean decoder has suffered an error */
@ -740,13 +740,13 @@ static void init_frame(VP8D_COMP *pbi) {

if (pc->frame_type == KEY_FRAME) {
/* Various keyframe initializations */
vp8_init_mv_probs(pc);
vp9_init_mv_probs(pc);

vp8_init_mbmode_probs(pc);
vp8_default_bmode_probs(pc->fc.bmode_prob);
vp9_init_mbmode_probs(pc);
vp9_default_bmode_probs(pc->fc.bmode_prob);

vp8_default_coef_probs(pc);
vp8_kf_default_bmode_probs(pc->kf_bmode_prob);
vp9_default_coef_probs(pc);
vp9_kf_default_bmode_probs(pc->kf_bmode_prob);

// Reset the segment feature data to the default stats:
// Features disabled, 0, with delta coding (Default state).
@ -770,7 +770,7 @@ static void init_frame(VP8D_COMP *pbi) {
pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
pc->ref_frame_sign_bias[ALTREF_FRAME] = 0;

vp8_init_mode_contexts(&pbi->common);
vp9_init_mode_contexts(&pbi->common);
vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
vpx_memcpy(&pc->lfc_a, &pc->fc, sizeof(pc->fc));

@ -793,7 +793,7 @@ static void init_frame(VP8D_COMP *pbi) {
pc->mcomp_filter_type = BILINEAR;

/* To enable choice of different interpolation filters */
vp8_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
vp9_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
}

xd->mode_info_context = pc->mi;
@ -931,7 +931,7 @@ int vp9_decode_frame(VP8D_COMP *pbi) {

data += 3;

vp8_setup_version(pc);
vp9_setup_version(pc);

if (pc->frame_type == KEY_FRAME) {
const int Width = pc->Width;
@ -972,7 +972,7 @@ int vp9_decode_frame(VP8D_COMP *pbi) {
"Invalid frame height");
}

if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
if (vp9_alloc_frame_buffers(pc, pc->Width, pc->Height))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffers");
}
@ -1193,7 +1193,7 @@ int vp9_decode_frame(VP8D_COMP *pbi) {
pc->mcomp_filter_type = vp8_read_literal(&header_bc, 2);
}
/* To enable choice of different interploation filters */
vp8_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
vp9_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
}

pc->refresh_entropy_probs = vp8_read_bit(&header_bc);
@ -1262,11 +1262,11 @@ int vp9_decode_frame(VP8D_COMP *pbi) {
vpx_calloc((pc->mb_rows * pc->mb_cols), 1));

/* set up frame new frame for intra coded blocks */
vp8_setup_intra_recon(&pc->yv12_fb[pc->new_fb_idx]);
vp9_setup_intra_recon(&pc->yv12_fb[pc->new_fb_idx]);

vp8_setup_block_dptrs(xd);
vp9_setup_block_dptrs(xd);

vp8_build_block_doffsets(xd);
vp9_build_block_doffsets(xd);

/* clear out the coeff buffer */
vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
@ -1303,11 +1303,11 @@ int vp9_decode_frame(VP8D_COMP *pbi) {
"A stream must start with a complete key frame");
}

vp8_adapt_coef_probs(pc);
vp9_adapt_coef_probs(pc);
if (pc->frame_type != KEY_FRAME) {
vp8_adapt_mode_probs(pc);
vp8_adapt_nmv_probs(pc, xd->allow_high_precision_mv);
vp8_update_mode_context(&pbi->common);
vp9_adapt_mode_probs(pc);
vp9_adapt_nmv_probs(pc, xd->allow_high_precision_mv);
vp9_update_mode_context(&pbi->common);
}

/* If this was a kf or Gf note the Q used */

@ -15,15 +15,15 @@
#include "vpx_mem/vpx_mem.h"
#include "onyxd_int.h"

extern void vp8_short_idct4x4llm_c(short *input, short *output, int pitch);
extern void vp8_short_idct4x4llm_1_c(short *input, short *output, int pitch);
extern void vp8_short_idct8x8_c(short *input, short *output, int pitch);
extern void vp8_short_idct8x8_1_c(short *input, short *output, int pitch);
extern void vp9_short_idct4x4llm_c(short *input, short *output, int pitch);
extern void vp9_short_idct4x4llm_1_c(short *input, short *output, int pitch);
extern void vp9_short_idct8x8_c(short *input, short *output, int pitch);
extern void vp9_short_idct8x8_1_c(short *input, short *output, int pitch);

#if CONFIG_LOSSLESS
extern void vp8_short_inv_walsh4x4_x8_c(short *input, short *output,
extern void vp9_short_inv_walsh4x4_x8_c(short *input, short *output,
int pitch);
extern void vp8_short_inv_walsh4x4_1_x8_c(short *input, short *output,
extern void vp9_short_inv_walsh4x4_1_x8_c(short *input, short *output,
int pitch);
#endif

@ -56,7 +56,7 @@ void vp9_ht_dequant_idct_add_c(TX_TYPE tx_type, short *input, short *dq,
input[i] = dq[i] * input[i];
}

vp8_ihtllm_c(input, output, 4 << 1, tx_type, 4);
vp9_ihtllm_c(input, output, 4 << 1, tx_type, 4);

vpx_memset(input, 0, 32);

@ -94,7 +94,7 @@ void vp9_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq,
input[i] = dq[1] * input[i];
}

vp8_ihtllm_c(input, output, 16, tx_type, 8);
vp9_ihtllm_c(input, output, 16, tx_type, 8);

vpx_memset(input, 0, 128);

@ -135,7 +135,7 @@ void vp9_dequant_idct_add_c(short *input, short *dq, unsigned char *pred,
}

/* the idct halves ( >> 1) the pitch */
vp8_short_idct4x4llm_c(input, output, 4 << 1);
vp9_short_idct4x4llm_c(input, output, 4 << 1);

vpx_memset(input, 0, 32);

@ -173,7 +173,7 @@ void vp9_dequant_dc_idct_add_c(short *input, short *dq, unsigned char *pred,
}

/* the idct halves ( >> 1) the pitch */
vp8_short_idct4x4llm_c(input, output, 4 << 1);
vp9_short_idct4x4llm_c(input, output, 4 << 1);

vpx_memset(input, 0, 32);

@ -209,7 +209,7 @@ void vp9_dequant_idct_add_lossless_c(short *input, short *dq,
input[i] = dq[i] * input[i];
}

vp8_short_inv_walsh4x4_x8_c(input, output, 4 << 1);
vp9_short_inv_walsh4x4_x8_c(input, output, 4 << 1);

vpx_memset(input, 0, 32);

@ -247,7 +247,7 @@ void vp9_dequant_dc_idct_add_lossless_c(short *input, short *dq,
input[i] = dq[i] * input[i];
}

vp8_short_inv_walsh4x4_x8_c(input, output, 4 << 1);
vp9_short_inv_walsh4x4_x8_c(input, output, 4 << 1);
vpx_memset(input, 0, 32);

for (r = 0; r < 4; r++) {
@ -329,7 +329,7 @@ void vp9_dequant_idct_add_8x8_c(short *input, short *dq, unsigned char *pred,
#endif

// the idct halves ( >> 1) the pitch
vp8_short_idct8x8_c(input, output, 16);
vp9_short_idct8x8_c(input, output, 16);
#ifdef DEC_DEBUG
if (dec_debug) {
int j;
@ -418,7 +418,7 @@ void vp9_dequant_dc_idct_add_8x8_c(short *input, short *dq, unsigned char *pred,
#endif

// the idct halves ( >> 1) the pitch
vp8_short_idct8x8_c(input, output, 16);
vp9_short_idct8x8_c(input, output, 16);
#ifdef DEC_DEBUG
if (dec_debug) {
int j;
@ -482,10 +482,10 @@ void vp9_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq,
input[i] = input[i] * dq[1];

// inverse hybrid transform
vp8_ihtllm_c(input, output, 32, tx_type, 16);
vp9_ihtllm_c(input, output, 32, tx_type, 16);

// the idct halves ( >> 1) the pitch
// vp8_short_idct16x16_c(input, output, 32);
// vp9_short_idct16x16_c(input, output, 32);

vpx_memset(input, 0, 512);

@ -520,7 +520,7 @@ void vp9_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred,
input[i] = input[i] * dq[1];

// the idct halves ( >> 1) the pitch
vp8_short_idct16x16_c(input, output, 32);
vp9_short_idct16x16_c(input, output, 32);

vpx_memset(input, 0, 512);

@ -17,13 +17,13 @@ void vp9_dequant_dc_idct_add_c(short *input, short *dq, unsigned char *pred,
int Dc);
void vp9_dequant_idct_add_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride);
void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr,
void vp9_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr,
unsigned char *dst_ptr, int pitch, int stride);
#if CONFIG_LOSSLESS
void vp9_dequant_idct_add_lossless_c(short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride);
void vp8_dc_only_idct_add_lossless_c(short input_dc, unsigned char *pred_ptr,
void vp9_dc_only_idct_add_lossless_c(short input_dc, unsigned char *pred_ptr,
unsigned char *dst_ptr,
int pitch, int stride);
#endif
@ -40,7 +40,7 @@ void vp9_dequant_dc_idct_add_y_block_c(short *q, short *dq,
if (*eobs++ > 1)
vp9_dequant_dc_idct_add_c(q, dq, pre, dst, 16, stride, dc[0]);
else
vp8_dc_only_idct_add_c(dc[0], pre, dst, 16, stride);
vp9_dc_only_idct_add_c(dc[0], pre, dst, 16, stride);

q += 16;
pre += 4;
@ -64,7 +64,7 @@ void vp9_dequant_idct_add_y_block_c(short *q, short *dq,
if (*eobs++ > 1)
vp9_dequant_idct_add_c(q, dq, pre, dst, 16, stride);
else {
vp8_dc_only_idct_add_c(q[0]*dq[0], pre, dst, 16, stride);
vp9_dc_only_idct_add_c(q[0]*dq[0], pre, dst, 16, stride);
((int *)q)[0] = 0;
}

@ -88,7 +88,7 @@ void vp9_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *pre,
if (*eobs++ > 1)
vp9_dequant_idct_add_c(q, dq, pre, dstu, 8, stride);
else {
vp8_dc_only_idct_add_c(q[0]*dq[0], pre, dstu, 8, stride);
vp9_dc_only_idct_add_c(q[0]*dq[0], pre, dstu, 8, stride);
((int *)q)[0] = 0;
}

@ -106,7 +106,7 @@ void vp9_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *pre,
if (*eobs++ > 1)
vp9_dequant_idct_add_c(q, dq, pre, dstv, 8, stride);
else {
vp8_dc_only_idct_add_c(q[0]*dq[0], pre, dstv, 8, stride);
vp9_dc_only_idct_add_c(q[0]*dq[0], pre, dstv, 8, stride);
((int *)q)[0] = 0;
}

@ -207,7 +207,7 @@ void vp9_dequant_dc_idct_add_y_block_lossless_c(short *q, short *dq,
if (*eobs++ > 1)
vp9_dequant_dc_idct_add_lossless_c(q, dq, pre, dst, 16, stride, dc[0]);
else
vp8_dc_only_inv_walsh_add_c(dc[0], pre, dst, 16, stride);
vp9_dc_only_inv_walsh_add_c(dc[0], pre, dst, 16, stride);

q += 16;
pre += 4;
@ -231,7 +231,7 @@ void vp9_dequant_idct_add_y_block_lossless_c(short *q, short *dq,
if (*eobs++ > 1)
vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, 16, stride);
else {
vp8_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dst, 16, stride);
vp9_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dst, 16, stride);
((int *)q)[0] = 0;
}

@ -257,7 +257,7 @@ void vp9_dequant_idct_add_uv_block_lossless_c(short *q, short *dq,
if (*eobs++ > 1)
vp9_dequant_idct_add_lossless_c(q, dq, pre, dstu, 8, stride);
else {
vp8_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dstu, 8, stride);
vp9_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dstu, 8, stride);
((int *)q)[0] = 0;
}

@ -275,7 +275,7 @@ void vp9_dequant_idct_add_uv_block_lossless_c(short *q, short *dq,
if (*eobs++ > 1)
vp9_dequant_idct_add_lossless_c(q, dq, pre, dstv, 8, stride);
else {
vp8_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dstv, 8, stride);
vp9_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dstv, 8, stride);
((int *)q)[0] = 0;
}

@ -108,8 +108,8 @@ void vp9_initialize_dec(void) {
static int init_done = 0;

if (!init_done) {
vp8_initialize_common();
vp8_init_quant_tables();
vp9_initialize_common();
vp9_init_quant_tables();
vp8_scale_machine_specific_config();
init_done = 1;
}
@ -132,7 +132,7 @@ VP8D_PTR vp9_create_decompressor(VP8D_CONFIG *oxcf) {
pbi->common.error.setjmp = 1;
vp9_initialize_dec();

vp8_create_common(&pbi->common);
vp9_create_common(&pbi->common);

pbi->common.current_video_frame = 0;
pbi->ready_for_new_data = 1;
@ -143,7 +143,7 @@ VP8D_PTR vp9_create_decompressor(VP8D_CONFIG *oxcf) {
*/
vp9_init_de_quantizer(pbi);

vp8_loop_filter_init(&pbi->common);
vp9_loop_filter_init(&pbi->common);

pbi->common.error.setjmp = 0;

@ -162,7 +162,7 @@ void vp9_remove_decompressor(VP8D_PTR ptr) {
if (pbi->common.last_frame_seg_map != 0)
vpx_free(pbi->common.last_frame_seg_map);

vp8_remove_common(&pbi->common);
vp9_remove_common(&pbi->common);
vpx_free(pbi->mbc);
vpx_free(pbi);
}
@ -444,7 +444,7 @@ int vp9_receive_compressed_data(VP8D_PTR ptr, unsigned long size,

if (cm->filter_level) {
/* Apply the loop filter if appropriate. */
vp8_loop_filter_frame(cm, &pbi->mb);
vp9_loop_filter_frame(cm, &pbi->mb);
}
vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);
}
@ -464,7 +464,7 @@ int vp9_receive_compressed_data(VP8D_PTR ptr, unsigned long size,
(cm->mb_cols + 1) * (cm->mb_rows + 1)* sizeof(MODE_INFO));
}

/*vp8_print_modes_and_motion_vectors(cm->mi, cm->mb_rows,cm->mb_cols,
/*vp9_print_modes_and_motion_vectors(cm->mi, cm->mb_rows,cm->mb_cols,
cm->current_video_frame);*/

if (cm->show_frame)
@ -505,7 +505,7 @@ int vp9_get_raw_frame(VP8D_PTR ptr, YV12_BUFFER_CONFIG *sd,

sd->clrtype = pbi->common.clr_type;
#if CONFIG_POSTPROC
ret = vp8_post_proc_frame(&pbi->common, sd, flags);
ret = vp9_post_proc_frame(&pbi->common, sd, flags);
#else

if (pbi->common.frame_to_show) {

@ -14,8 +14,8 @@
#include "vpx_mem/vpx_mem.h"
#include "onyxd_int.h"

/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd)
* and vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd).
/* For skip_recon_mb(), add vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd)
* and vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd).
*/

void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *xd,

@ -12,17 +12,17 @@
#include "vp8/common/idct.h"
#include "vp8/decoder/dequantize.h"

void vp8_idct_dequant_dc_0_2x_sse2
void vp9_idct_dequant_dc_0_2x_sse2
(short *q, short *dq, unsigned char *pre,
unsigned char *dst, int dst_stride, short *dc);
void vp8_idct_dequant_dc_full_2x_sse2
void vp9_idct_dequant_dc_full_2x_sse2
(short *q, short *dq, unsigned char *pre,
unsigned char *dst, int dst_stride, short *dc);

void vp8_idct_dequant_0_2x_sse2
void vp9_idct_dequant_0_2x_sse2
(short *q, short *dq, unsigned char *pre,
unsigned char *dst, int dst_stride, int blk_stride);
void vp8_idct_dequant_full_2x_sse2
void vp9_idct_dequant_full_2x_sse2
(short *q, short *dq, unsigned char *pre,
unsigned char *dst, int dst_stride, int blk_stride);

@ -33,14 +33,14 @@ void vp9_dequant_dc_idct_add_y_block_sse2

for (i = 0; i < 4; i++) {
if (((short *)(eobs))[0] & 0xfefe)
vp8_idct_dequant_dc_full_2x_sse2(q, dq, pre, dst, stride, dc);
vp9_idct_dequant_dc_full_2x_sse2(q, dq, pre, dst, stride, dc);
else
vp8_idct_dequant_dc_0_2x_sse2(q, dq, pre, dst, stride, dc);
vp9_idct_dequant_dc_0_2x_sse2(q, dq, pre, dst, stride, dc);

if (((short *)(eobs))[1] & 0xfefe)
vp8_idct_dequant_dc_full_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, dc + 2);
vp9_idct_dequant_dc_full_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, dc + 2);
else
vp8_idct_dequant_dc_0_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, dc + 2);
vp9_idct_dequant_dc_0_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, dc + 2);

q += 64;
dc += 4;
@ -57,14 +57,14 @@ void vp9_dequant_idct_add_y_block_sse2

for (i = 0; i < 4; i++) {
if (((short *)(eobs))[0] & 0xfefe)
vp8_idct_dequant_full_2x_sse2(q, dq, pre, dst, stride, 16);
vp9_idct_dequant_full_2x_sse2(q, dq, pre, dst, stride, 16);
else
vp8_idct_dequant_0_2x_sse2(q, dq, pre, dst, stride, 16);
vp9_idct_dequant_0_2x_sse2(q, dq, pre, dst, stride, 16);

if (((short *)(eobs))[1] & 0xfefe)
vp8_idct_dequant_full_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, 16);
vp9_idct_dequant_full_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, 16);
else
vp8_idct_dequant_0_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, 16);
vp9_idct_dequant_0_2x_sse2(q + 32, dq, pre + 8, dst + 8, stride, 16);

q += 64;
pre += 64;
@ -77,33 +77,33 @@ void vp9_dequant_idct_add_uv_block_sse2
(short *q, short *dq, unsigned char *pre,
unsigned char *dstu, unsigned char *dstv, int stride, char *eobs) {
if (((short *)(eobs))[0] & 0xfefe)
vp8_idct_dequant_full_2x_sse2(q, dq, pre, dstu, stride, 8);
vp9_idct_dequant_full_2x_sse2(q, dq, pre, dstu, stride, 8);
else
vp8_idct_dequant_0_2x_sse2(q, dq, pre, dstu, stride, 8);
vp9_idct_dequant_0_2x_sse2(q, dq, pre, dstu, stride, 8);

q += 32;
pre += 32;
dstu += stride * 4;

if (((short *)(eobs))[1] & 0xfefe)
vp8_idct_dequant_full_2x_sse2(q, dq, pre, dstu, stride, 8);
vp9_idct_dequant_full_2x_sse2(q, dq, pre, dstu, stride, 8);
else
vp8_idct_dequant_0_2x_sse2(q, dq, pre, dstu, stride, 8);
vp9_idct_dequant_0_2x_sse2(q, dq, pre, dstu, stride, 8);

q += 32;
pre += 32;

if (((short *)(eobs))[2] & 0xfefe)
vp8_idct_dequant_full_2x_sse2(q, dq, pre, dstv, stride, 8);
vp9_idct_dequant_full_2x_sse2(q, dq, pre, dstv, stride, 8);
else
vp8_idct_dequant_0_2x_sse2(q, dq, pre, dstv, stride, 8);
vp9_idct_dequant_0_2x_sse2(q, dq, pre, dstv, stride, 8);

q += 32;
pre += 32;
dstv += stride * 4;

if (((short *)(eobs))[3] & 0xfefe)
vp8_idct_dequant_full_2x_sse2(q, dq, pre, dstv, stride, 8);
vp9_idct_dequant_full_2x_sse2(q, dq, pre, dstv, stride, 8);
else
vp8_idct_dequant_0_2x_sse2(q, dq, pre, dstv, stride, 8);
vp9_idct_dequant_0_2x_sse2(q, dq, pre, dstv, stride, 8);
}

@ -130,7 +130,7 @@ static void update_mode(
unsigned int new_b = 0, old_b = 0;
int i = 0;

vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
n--, tok, tree,
Pnew, bct, num_events,
256, 1
@ -203,7 +203,7 @@ static void update_switchable_interp_probs(VP8_COMP *cpi,
unsigned int branch_ct[32][2];
int i, j;
for (j = 0; j <= VP8_SWITCHABLE_FILTERS; ++j) {
vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
VP8_SWITCHABLE_FILTERS,
vp8_switchable_interp_encodings, vp8_switchable_interp_tree,
pc->fc.switchable_interp_prob[j], branch_ct,
@ -275,13 +275,13 @@ static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 1);
}
} else {
@ -289,13 +289,13 @@ static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 0);
}
}
@ -305,20 +305,20 @@ static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
if (x->e_mbd.allow_high_precision_mv) {
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
if (mbmi->second_ref_frame) {
mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
}
} else {
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
if (mbmi->second_ref_frame) {
mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
}
}
}
@ -1066,13 +1066,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
int_mv n1, n2;

// Only used for context just now and soon to be deprecated.
vp8_find_near_mvs(xd, m, prev_m, &n1, &n2, &best_mv, ct,
vp9_find_near_mvs(xd, m, prev_m, &n1, &n2, &best_mv, ct,
rf, cpi->common.ref_frame_sign_bias);
#if CONFIG_NEWBESTREFMV
best_mv.as_int = mi->ref_mvs[rf][0].as_int;
#endif

vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
vp9_mv_ref_probs(&cpi->common, mv_ref_p, ct);

#ifdef ENTROPY_STATS
accum_mv_refs(mode, ct);
@ -1093,7 +1093,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
{
write_mv_ref(bc, mode, mv_ref_p);
}
vp8_accum_mv_refs(&cpi->common, mode, ct);
vp9_accum_mv_refs(&cpi->common, mode, ct);
}

#if CONFIG_PRED_FILTER
@ -1125,7 +1125,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
int_mv n1, n2;

// Only used for context just now and soon to be deprecated.
vp8_find_near_mvs(xd, m, prev_m,
vp9_find_near_mvs(xd, m, prev_m,
&n1, &n2, &best_second_mv, ct,
mi->second_ref_frame,
cpi->common.ref_frame_sign_bias);
@ -1223,7 +1223,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
#endif
leftmv.as_int = left_block_mv(m, k);
abovemv.as_int = above_block_mv(m, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
mv_contz = vp9_mv_cont(&leftmv, &abovemv);

write_sub_mv_ref(bc, blockmode,
cpi->common.fc.sub_mv_ref_prob [mv_contz]);
@ -1523,7 +1523,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_coef_probs [i][j][k],
cpi->frame_branch_ct [i][j][k],
@ -1543,7 +1543,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_hybrid_coef_probs [i][j][k],
cpi->frame_hybrid_branch_ct [i][j][k],
@ -1569,7 +1569,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
// unsigned int branch_ct [ENTROPY_NODES] [2];
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_coef_probs_8x8 [i][j][k],
cpi->frame_branch_ct_8x8 [i][j][k],
@ -1593,7 +1593,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
// unsigned int branch_ct [ENTROPY_NODES] [2];
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_hybrid_coef_probs_8x8 [i][j][k],
cpi->frame_hybrid_branch_ct_8x8 [i][j][k],
@ -1616,7 +1616,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_coef_probs_16x16[i][j][k],
cpi->frame_branch_ct_16x16[i][j][k],
@ -1635,7 +1635,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp8_tree_probs_from_distribution(
vp9_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_hybrid_coef_probs_16x16[i][j][k],
cpi->frame_hybrid_branch_ct_16x16[i][j][k],
@ -1884,10 +1884,10 @@ void vp9_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,

compute_update_table();

/* vp8_kf_default_bmode_probs() is called in vp9_setup_key_frame() once
/* vp9_kf_default_bmode_probs() is called in vp9_setup_key_frame() once
* for each K frame before encode frame. pc->kf_bmode_prob doesn't get
* changed anywhere else. No need to call it again here. --yw
* vp8_kf_default_bmode_probs( pc->kf_bmode_prob);
* vp9_kf_default_bmode_probs( pc->kf_bmode_prob);
*/

/* every keyframe send startcode, width, height, scale factor, clamp
@ -2303,7 +2303,7 @@ void vp9_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
write_kfmodes(cpi, &residual_bc);
} else {
pack_inter_mode_mvs(cpi, &residual_bc);
vp8_update_mode_context(&cpi->common);
vp9_update_mode_context(&cpi->common);
}

@ -311,7 +311,7 @@ static void build_activity_map(VP8_COMP *cpi) {
recon_yoffset += 16;
#endif
// Copy current mb to a buffer
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
@ -335,7 +335,7 @@ static void build_activity_map(VP8_COMP *cpi) {

#if ALT_ACT_MEASURE
// extend the recon for intra prediction
vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
#endif

@ -592,7 +592,7 @@ static void pick_mb_modes(VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;

// Copy current MB to a work buffer
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
@ -623,7 +623,7 @@ static void pick_mb_modes(VP8_COMP *cpi,

cpi->update_context = 0; // TODO Do we need this now??

vp8_intra_prediction_down_copy(xd);
vp9_intra_prediction_down_copy(xd);

// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
@ -777,7 +777,7 @@ static void pick_sb_modes (VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if 0 // FIXME
/* Copy current MB to a work buffer */
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
@ -970,7 +970,7 @@ static void encode_sb(VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;

// Copy current MB to a work buffer
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
@ -987,7 +987,7 @@ static void encode_sb(VP8_COMP *cpi,
#if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb)
#endif
vp8_intra_prediction_down_copy(xd);
vp9_intra_prediction_down_copy(xd);

if (cm->frame_type == KEY_FRAME) {
#if CONFIG_SUPERBLOCKS
@ -1257,7 +1257,7 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi) {

// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
vp9_init_mbmode_probs(cm);

// Copy data over into macro block data structures.
x->src = * cpi->Source;
@ -1265,11 +1265,11 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi) {
xd->dst = cm->yv12_fb[cm->new_fb_idx];

// set up frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp9_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

vp9_build_block_offsets(x);

vp8_setup_block_dptrs(&x->e_mbd);
vp9_setup_block_dptrs(&x->e_mbd);

vp9_setup_block_ptrs(x);

@ -1333,7 +1333,7 @@ static void encode_frame_internal(VP8_COMP *cpi) {
totalrate = 0;

// Functions setup for all frame types so we can use MC in AltRef
vp8_setup_interp_filters(xd, cm->mcomp_filter_type, cm);
vp9_setup_interp_filters(xd, cm->mcomp_filter_type, cm);

// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
@ -1681,7 +1681,7 @@ void vp9_build_block_offsets(MACROBLOCK *x) {
int block = 0;
int br, bc;

vp8_build_block_doffsets(&x->e_mbd);
vp9_build_block_doffsets(&x->e_mbd);

// y blocks
x->thismb_ptr = &x->thismb[0];
@ -1879,8 +1879,8 @@ void vp9_encode_intra_super_block(VP8_COMP *cpi,
vp9_update_zbin_extra(cpi, x);
}

vp8_build_intra_predictors_sby_s(&x->e_mbd);
vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
vp9_build_intra_predictors_sby_s(&x->e_mbd);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd);

assert(x->e_mbd.mode_info_context->mbmi.txfm_size == TX_8X8);
for (n = 0; n < 4; n++) {
@ -1907,9 +1907,9 @@ void vp9_encode_intra_super_block(VP8_COMP *cpi,
vp9_optimize_mby_8x8(x, rtcd);
vp9_optimize_mbuv_8x8(x, rtcd);
}
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c(&x->e_mbd, dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp8_recon_mbuv_s_c(&x->e_mbd,
vp9_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp9_recon_mby_s_c(&x->e_mbd, dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp9_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);

@ -2005,7 +2005,7 @@ void vp9_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
assert(!xd->mode_info_context->mbmi.encoded_as_sb);
#endif

vp8_setup_interp_filters(xd, mbmi->interp_filter, cm);
vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
@ -2094,7 +2094,7 @@ void vp9_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
mbmi->mb_skip_coeff = 0;

} else {
vp8_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
vp9_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
@ -2238,8 +2238,8 @@ void vp9_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);

if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp8_build_intra_predictors_sby_s(&x->e_mbd);
vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
vp9_build_intra_predictors_sby_s(&x->e_mbd);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd);
} else {
int ref_fb_idx;

@ -2272,7 +2272,7 @@ void vp9_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
recon_uvoffset;
}

vp8_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
}
@ -2299,10 +2299,10 @@ void vp9_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
vp9_optimize_mby_8x8(x, rtcd);
vp9_optimize_mbuv_8x8(x, rtcd);
}
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c( &x->e_mbd,
vp9_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp9_recon_mby_s_c( &x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp8_recon_mbuv_s_c(&x->e_mbd,
vp9_recon_mbuv_s_c(&x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);

@ -61,7 +61,7 @@ void vp9_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
vp8_intra4x4_predict(b, b->bmi.as_mode.first, b->predictor);
vp9_intra4x4_predict(b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
vp8_comp_intra4x4_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
@ -75,14 +75,14 @@ void vp9_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
if (tx_type != DCT_DCT) {
vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 4);
vp9_ht_quantize_b_4x4(be, b, tx_type);
vp8_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4);
vp9_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4);
} else {
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b) ;
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
vp9_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
}

vp8_recon_b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon_b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}

void vp9_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
@ -102,7 +102,7 @@ void vp9_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
#if CONFIG_COMP_INTRA_PRED
if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
#endif
vp8_build_intra_predictors_mby(xd);
vp9_build_intra_predictors_mby(xd);
#if CONFIG_COMP_INTRA_PRED
else
vp8_build_comp_intra_predictors_mby(xd);
@ -118,29 +118,29 @@ void vp9_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp9_quantize_mby_16x16(x);
if (x->optimize)
vp9_optimize_mby_16x16(x, rtcd);
vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, tx_type, 16);
vp9_ihtllm_c(bd->dqcoeff, bd->diff, 32, tx_type, 16);
} else {
vp9_transform_mby_16x16(x);
vp9_quantize_mby_16x16(x);
if (x->optimize)
vp9_optimize_mby_16x16(x, rtcd);
vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), xd);
}
} else if (tx_size == TX_8X8) {
vp9_transform_mby_8x8(x);
vp9_quantize_mby_8x8(x);
if (x->optimize)
vp9_optimize_mby_8x8(x, rtcd);
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
} else {
vp9_transform_mby_4x4(x);
vp9_quantize_mby_4x4(x);
if (x->optimize)
vp9_optimize_mby_4x4(x, rtcd);
vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
}

vp8_recon_mby(xd);
vp9_recon_mby(xd);
}

void vp9_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
@ -150,7 +150,7 @@ void vp9_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
#if CONFIG_COMP_INTRA_PRED
if (xd->mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
vp8_build_intra_predictors_mbuv(xd);
vp9_build_intra_predictors_mbuv(xd);
#if CONFIG_COMP_INTRA_PRED
} else {
vp8_build_comp_intra_predictors_mbuv(xd);
@ -165,16 +165,16 @@ void vp9_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp9_quantize_mbuv_4x4(x);
if (x->optimize)
vp9_optimize_mbuv_4x4(x, rtcd);
vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
} else /* 16x16 or 8x8 */ {
vp9_transform_mbuv_8x8(x);
vp9_quantize_mbuv_8x8(x);
if (x->optimize)
vp9_optimize_mbuv_8x8(x, rtcd);
vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
}

vp8_recon_intra_mbuv(xd);
vp9_recon_intra_mbuv(xd);
}

void vp9_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
@ -189,7 +189,7 @@ void vp9_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
vp8_intra8x8_predict(b, b->bmi.as_mode.first, b->predictor);
vp9_intra8x8_predict(b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
vp8_comp_intra8x8_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
@ -208,7 +208,7 @@ void vp9_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
vp9_fht_c(be->src_diff, 32, (x->block + idx)->coeff,
tx_type, 8);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
vp8_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
vp9_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
tx_type, 8);
} else {
x->vp9_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
@ -222,14 +222,14 @@ void vp9_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
vp9_subtract_b(be, b, 16);
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
vp9_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
}
}

// reconstruct submacroblock
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
vp8_recon_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
vp9_recon_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
b->dst_stride);
}
}
@ -252,7 +252,7 @@ void vp9_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (second == -1) {
#endif
vp8_intra_uv4x4_predict(b, mode, b->predictor);
vp9_intra_uv4x4_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
vp8_comp_intra_uv4x4_predict(b, mode, second, b->predictor);
@ -263,9 +263,9 @@ void vp9_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,

x->vp9_short_fdct4x4(be->src_diff, be->coeff, 16);
x->quantize_b_4x4(be, b);
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
vp9_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);

vp8_recon_uv_b_c(b->predictor,b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon_uv_b_c(b->predictor,b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}

void vp9_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {

@ -892,7 +892,7 @@ void vp9_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;

vp8_build_inter_predictors_mb(xd);
vp9_build_inter_predictors_mb(xd);
vp8_subtract_mb(rtcd, x);

if (tx_size == TX_16X16) {
@ -900,7 +900,7 @@ void vp9_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp9_quantize_mb_16x16(x);
if (x->optimize)
optimize_mb_16x16(x, rtcd);
vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
} else if (tx_size == TX_8X8) {
if (xd->mode_info_context->mbmi.mode == SPLITMV) {
assert(xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4);
@ -912,24 +912,24 @@ void vp9_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp9_optimize_mby_8x8(x, rtcd);
vp9_optimize_mbuv_4x4(x, rtcd);
}
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
} else {
vp9_transform_mb_8x8(x);
vp9_quantize_mb_8x8(x);
if (x->optimize)
optimize_mb_8x8(x, rtcd);
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
}
} else {
transform_mb_4x4(x);
vp9_quantize_mb_4x4(x);
if (x->optimize)
optimize_mb_4x4(x, rtcd);
vp8_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), xd);
}

vp8_recon_mb(xd);
vp9_recon_mb(xd);
}

/* this function is used by first pass only */
@ -942,13 +942,13 @@ void vp9_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
xd->mode_info_context->mbmi.pred_filter_enabled = 0;
#endif

vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);

vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);

vp9_transform_mby_4x4(x);
vp9_quantize_mby_4x4(x);
vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
vp9_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);

vp8_recon_mby(xd);
vp9_recon_mby(xd);
}

@ -36,7 +36,7 @@ static void encode_nmv_component(vp8_writer* const bc,
vp8_write(bc, s, mvcomp->sign);
z = (s ? -v : v) - 1; /* magnitude - 1 */

c = vp8_get_mv_class(z, &o);
c = vp9_get_mv_class(z, &o);

vp8_write_token(bc, vp8_mv_class_tree, mvcomp->classes,
vp8_mv_class_encodings + c);
@ -64,7 +64,7 @@ static void encode_nmv_component_fp(vp8_writer *bc,
s = v < 0;
z = (s ? -v : v) - 1; /* magnitude - 1 */

c = vp8_get_mv_class(z, &o);
c = vp9_get_mv_class(z, &o);

d = (o >> 3); /* int mv data */
f = (o >> 1) & 3; /* fractional pel mv data */
@ -120,7 +120,7 @@ static void build_nmv_component_cost_table(int *mvcost,
for (v = 1; v <= MV_MAX; ++v) {
int z, c, o, d, e, f, cost = 0;
z = v - 1;
c = vp8_get_mv_class(z, &o);
c = vp9_get_mv_class(z, &o);
cost += class_cost[c];
d = (o >> 3); /* int mv data */
f = (o >> 1) & 3; /* fractional pel mv data */
@ -227,7 +227,7 @@ void print_nmvstats() {
unsigned int branch_ct_class0_hp[2][2];
unsigned int branch_ct_hp[2][2];
int i, j, k;
vp8_counts_to_nmv_context(&tnmvcounts, &prob, 1,
vp9_counts_to_nmv_context(&tnmvcounts, &prob, 1,
branch_ct_joint, branch_ct_sign, branch_ct_classes,
branch_ct_class0, branch_ct_bits,
branch_ct_class0_fp, branch_ct_fp,
@ -374,7 +374,7 @@ void vp9_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) {
if (!cpi->dummy_packing)
add_nmvcount(&tnmvcounts, &cpi->NMVcount);
#endif
vp8_counts_to_nmv_context(&cpi->NMVcount, &prob, usehp,
vp9_counts_to_nmv_context(&cpi->NMVcount, &prob, usehp,
branch_ct_joint, branch_ct_sign, branch_ct_classes,
branch_ct_class0, branch_ct_bits,
branch_ct_class0_fp, branch_ct_fp,
@ -510,7 +510,7 @@ void vp9_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) {

void vp9_encode_nmv(vp8_writer* const bc, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
MV_JOINT_TYPE j = vp9_get_mv_joint(*mv);
vp8_write_token(bc, vp8_mv_joint_tree, mvctx->joints,
vp8_mv_joint_encodings + j);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
@ -524,8 +524,8 @@ void vp9_encode_nmv(vp8_writer* const bc, const MV* const mv,
void vp9_encode_nmv_fp(vp8_writer* const bc, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx,
int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
usehp = usehp && vp8_use_nmv_hp(ref);
MV_JOINT_TYPE j = vp9_get_mv_joint(*mv);
usehp = usehp && vp9_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
encode_nmv_component_fp(bc, mv->row, ref->row, &mvctx->comps[0], usehp);
}

@ -484,12 +484,12 @@ void vp9_first_pass(VP8_COMP *cpi) {

vp9_build_block_offsets(x);

vp8_setup_block_dptrs(&x->e_mbd);
vp9_setup_block_dptrs(&x->e_mbd);

vp9_setup_block_ptrs(x);

// set up frame new frame for intra coded blocks
vp8_setup_intra_recon(new_yv12);
vp9_setup_intra_recon(new_yv12);
vp9_frame_init_quantizer(cpi);

// Initialise the MV cost table to the defaults
@ -497,7 +497,7 @@ void vp9_first_pass(VP8_COMP *cpi) {
// if ( 0 )
{
int flag[2] = {1, 1};
vp8_init_mv_probs(cm);
vp9_init_mv_probs(cm);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
}

@ -529,7 +529,7 @@ void vp9_first_pass(VP8_COMP *cpi) {
xd->left_available = (mb_col != 0);

// Copy current mb to a buffer
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

// do intra 16x16 prediction
this_error = vp9_encode_intra(cpi, x, use_dc_pred);
@ -692,7 +692,7 @@ void vp9_first_pass(VP8_COMP *cpi) {
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;

// extend the recon for intra prediction
vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
vp8_clear_system_state(); // __asm emms;
}

@ -769,7 +769,7 @@ void vp9_first_pass(VP8_COMP *cpi) {
cpi->twopass.sr_update_lag++;

// swap frame pointers so last frame refers to the frame we just compressed
vp8_swap_yv12_buffer(lst_yv12, new_yv12);
vp9_swap_yv12_buffer(lst_yv12, new_yv12);
vp8_yv12_extend_frame_borders(lst_yv12);

// Special case for the first frame. Copy into the GF buffer as a second reference.

@ -135,7 +135,7 @@ vp9_lookahead_push(struct lookahead_ctx *ctx,
}

// Only copy this active region.
vp8_copy_and_extend_frame_with_rect(src, &buf->img,
vp9_copy_and_extend_frame_with_rect(src, &buf->img,
row << 4,
col << 4, 16,
(active_end - col) << 4);
@ -147,7 +147,7 @@ vp9_lookahead_push(struct lookahead_ctx *ctx,
active_map += mb_cols;
}
} else {
vp8_copy_and_extend_frame(src, &buf->img);
vp9_copy_and_extend_frame(src, &buf->img);
}
buf->ts_start = ts_start;
buf->ts_end = ts_end;

@ -82,7 +82,7 @@ static unsigned int do_16x16_motion_iteration
#endif

vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
best_err = vp9_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
xd->predictor, 16, INT_MAX);

@ -213,7 +213,7 @@ static int find_best_16x16_intra
unsigned int err;

xd->mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby(xd);
vp9_build_intra_predictors_mby(xd);
err = vp9_sad16x16(xd->predictor, 16, buf->y_buffer + mb_y_offset,
buf->y_stride, best_err);
// find best

@ -47,7 +47,7 @@ int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
return ((mvjcost[vp8_get_mv_joint(v)] +
return ((mvjcost[vp9_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
Weight) >> 7;
}
@ -58,7 +58,7 @@ static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
return ((mvjcost[vp8_get_mv_joint(v)] +
return ((mvjcost[vp9_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
error_per_bit + 128) >> 8;
}
@ -72,7 +72,7 @@ static int mvsad_err_cost(int_mv *mv, int_mv *ref, DEC_MVSADCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
return ((mvjsadcost[vp8_get_mv_joint(v)] +
return ((mvjsadcost[vp9_get_mv_joint(v)] +
mvsadcost[0][v.row] + mvsadcost[1][v.col]) *
error_per_bit + 128) >> 8;
}
@ -388,7 +388,7 @@ int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}

if (xd->allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
usehp = vp9_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
@ -754,7 +754,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}

if (x->e_mbd.allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
usehp = vp9_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}

@ -68,7 +68,7 @@ extern void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
|
||||
|
||||
extern void vp9_cmachine_specific_config(VP8_COMP *cpi);
|
||||
|
||||
extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
|
||||
extern void vp9_deblock_frame(YV12_BUFFER_CONFIG *source,
|
||||
YV12_BUFFER_CONFIG *post,
|
||||
int filt_lvl, int low_var_thresh, int flag);
|
||||
|
||||
@ -325,9 +325,9 @@ void vp9_initialize_enc() {
|
||||
|
||||
if (!init_done) {
|
||||
vp8_scale_machine_specific_config();
|
||||
vp8_initialize_common();
|
||||
vp9_initialize_common();
|
||||
vp9_tokenize_initialize();
|
||||
vp8_init_quant_tables();
|
||||
vp9_init_quant_tables();
|
||||
vp9_init_me_luts();
|
||||
init_minq_luts();
|
||||
init_base_skip_probs();
|
||||
@ -388,7 +388,7 @@ static void dealloc_compressor_data(VP8_COMP *cpi) {
|
||||
vpx_free(cpi->active_map);
|
||||
cpi->active_map = 0;
|
||||
|
||||
vp8_de_alloc_frame_buffers(&cpi->common);
|
||||
vp9_de_alloc_frame_buffers(&cpi->common);
|
||||
|
||||
vp8_yv12_de_alloc_frame_buffer(&cpi->last_frame_uf);
|
||||
vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
|
||||
@ -1281,7 +1281,7 @@ void vp9_alloc_compressor_data(VP8_COMP *cpi) {
|
||||
int width = cm->Width;
|
||||
int height = cm->Height;
|
||||
|
||||
if (vp8_alloc_frame_buffers(cm, width, height))
|
||||
if (vp9_alloc_frame_buffers(cm, width, height))
|
||||
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
|
||||
"Failed to allocate frame buffers");
|
||||
|
||||
@ -1443,7 +1443,7 @@ static void init_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
|
||||
cpi->goldfreq = 7;
|
||||
|
||||
cm->version = oxcf->Version;
|
||||
vp8_setup_version(cm);
|
||||
vp9_setup_version(cm);
|
||||
|
||||
// change includes all joint functionality
|
||||
vp9_change_config(ptr, oxcf);
|
||||
@ -1492,7 +1492,7 @@ void vp9_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
|
||||
|
||||
if (cm->version != oxcf->Version) {
|
||||
cm->version = oxcf->Version;
|
||||
vp8_setup_version(cm);
|
||||
vp9_setup_version(cm);
|
||||
}
|
||||
|
||||
cpi->oxcf = *oxcf;
|
||||
@ -1530,11 +1530,11 @@ void vp9_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
|
||||
#if CONFIG_LOSSLESS
|
||||
cpi->oxcf.lossless = oxcf->lossless;
|
||||
if (cpi->oxcf.lossless) {
|
||||
cpi->common.rtcd.idct.idct1 = vp8_short_inv_walsh4x4_1_x8_c;
|
||||
cpi->common.rtcd.idct.idct16 = vp8_short_inv_walsh4x4_x8_c;
|
||||
cpi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_inv_walsh_add_c;
|
||||
cpi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
|
||||
cpi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_lossless_c;
|
||||
cpi->common.rtcd.idct.idct1 = vp9_short_inv_walsh4x4_1_x8_c;
|
||||
cpi->common.rtcd.idct.idct16 = vp9_short_inv_walsh4x4_x8_c;
|
||||
cpi->common.rtcd.idct.idct1_scalar_add = vp9_dc_only_inv_walsh_add_c;
|
||||
cpi->common.rtcd.idct.iwalsh1 = vp9_short_inv_walsh4x4_1_c;
|
||||
cpi->common.rtcd.idct.iwalsh16 = vp9_short_inv_walsh4x4_lossless_c;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -1757,7 +1757,7 @@ VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf) {
|
||||
|
||||
CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
|
||||
|
||||
vp8_create_common(&cpi->common);
|
||||
vp9_create_common(&cpi->common);
|
||||
vp9_cmachine_specific_config(cpi);
|
||||
|
||||
init_config((VP8_PTR)cpi, oxcf);
|
||||
@ -2020,7 +2020,7 @@ VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf) {
|
||||
*/
|
||||
vp9_init_quantizer(cpi);
|
||||
|
||||
vp8_loop_filter_init(cm);
|
||||
vp9_loop_filter_init(cm);
|
||||
|
||||
cpi->common.error.setjmp = 0;
|
||||
|
||||
@ -2251,7 +2251,7 @@ void vp9_remove_compressor(VP8_PTR *ptr) {
|
||||
vpx_free(cpi->mbgraph_stats[i].mb_stats);
|
||||
}
|
||||
|
||||
vp8_remove_common(&cpi->common);
|
||||
vp9_remove_common(&cpi->common);
|
||||
vpx_free(cpi);
|
||||
*ptr = 0;
|
||||
|
||||
@ -2818,7 +2818,7 @@ static void loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
|
||||
|
||||
if (cm->filter_level > 0) {
|
||||
vp9_set_alt_lf_level(cpi, cm->filter_level);
|
||||
vp8_loop_filter_frame(cm, &cpi->mb.e_mbd);
|
||||
vp9_loop_filter_frame(cm, &cpi->mb.e_mbd);
|
||||
}
|
||||
|
||||
vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);
|
||||
@ -3182,9 +3182,9 @@ static void encode_frame_to_data_rate

if (cm->frame_type == KEY_FRAME) {
vp8_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));
vp9_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));
} else {
vp8_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));
vp9_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));

src = cpi->Source->y_buffer;

@ -3636,7 +3636,7 @@ static void encode_frame_to_data_rate
vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
vp8_adapt_coef_probs(&cpi->common);
vp9_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
vp8_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
@ -3644,11 +3644,11 @@ static void encode_frame_to_data_rate
vp8_copy(cpi->common.fc.i8x8_mode_counts, cpi->i8x8_mode_count);
vp8_copy(cpi->common.fc.sub_mv_ref_counts, cpi->sub_mv_ref_count);
vp8_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
vp8_adapt_mode_probs(&cpi->common);
vp9_adapt_mode_probs(&cpi->common);

cpi->common.fc.NMVcount = cpi->NMVcount;
vp8_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
vp8_update_mode_context(&cpi->common);
vp9_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
vp9_update_mode_context(&cpi->common);
}

/* Move storing frame_type out of the above loop since it is also
@ -3777,7 +3777,7 @@ static void encode_frame_to_data_rate
(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
(int)cpi->total_actual_bits,
vp9_convert_qindex_to_q(cm->base_qindex),
(double)vp8_dc_quant(cm->base_qindex, 0) / 4.0,
(double)vp9_dc_quant(cm->base_qindex, 0) / 4.0,
vp9_convert_qindex_to_q(cpi->active_best_quality),
vp9_convert_qindex_to_q(cpi->active_worst_quality),
cpi->avg_q,
@ -3807,7 +3807,7 @@ static void encode_frame_to_data_rate
(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
(int)cpi->total_actual_bits,
vp9_convert_qindex_to_q(cm->base_qindex),
(double)vp8_dc_quant(cm->base_qindex, 0) / 4.0,
(double)vp9_dc_quant(cm->base_qindex, 0) / 4.0,
vp9_convert_qindex_to_q(cpi->active_best_quality),
vp9_convert_qindex_to_q(cpi->active_worst_quality),
cpi->avg_q,
@ -4278,7 +4278,7 @@ int vp9_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags,
double frame_psnr2, frame_ssim2 = 0;
double weight = 0;
#if CONFIG_POSTPROC
vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
#endif
vp8_clear_system_state();

@ -4357,7 +4357,7 @@ int vp9_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest,
else {
int ret;
#if CONFIG_POSTPROC
ret = vp8_post_proc_frame(&cpi->common, dest, flags);
ret = vp9_post_proc_frame(&cpi->common, dest, flags);
#else

if (cpi->common.frame_to_show) {

@ -157,7 +157,7 @@ void vp9_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
cm->sharpness_level = cpi->oxcf.Sharpness;

if (cm->sharpness_level != cm->last_sharpness_level) {
vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
vp9_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
cm->last_sharpness_level = cm->sharpness_level;
}

@ -171,7 +171,7 @@ void vp9_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
best_filt_val = filt_val;

// Get the err using the previous frame's filter value.
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
vp9_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);

best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);

@ -183,7 +183,7 @@ void vp9_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
// Search lower filter levels
while (filt_val >= min_filter_level) {
// Apply the loop filter
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
vp9_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);

// Get the err for filtered frame
filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
@ -212,7 +212,7 @@ void vp9_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {

while (filt_val < max_filter_level) {
// Apply the loop filter
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
vp9_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);

// Get the err for filtered frame
filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
@ -301,7 +301,7 @@ void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {

// Get baseline error score
vp9_set_alt_lf_level(cpi, filt_mid);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
vp9_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);

best_err = vp9_calc_ss_err(sd, cm->frame_to_show);
filt_best = filt_mid;
@ -341,7 +341,7 @@ void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
if ((filt_direction <= 0) && (filt_low != filt_mid)) {
// Get Low filter error score
vp9_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
vp9_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);

filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);

@ -376,7 +376,7 @@ void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
// Now look at filt_high
if ((filt_direction >= 0) && (filt_high != filt_mid)) {
vp9_set_alt_lf_level(cpi, filt_high);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
vp9_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);

filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);

@ -406,7 +406,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {

for (Q = 0; Q < QINDEX_RANGE; Q++) {
int qzbin_factor = (vp8_dc_quant(Q, 0) < 148) ? 84 : 80;
int qzbin_factor = (vp9_dc_quant(Q, 0) < 148) ? 84 : 80;

#if CONFIG_LOSSLESS
if (cpi->oxcf.lossless) {
@ -418,7 +418,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
#endif

// dc values
quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
quant_val = vp9_dc_quant(Q, cpi->common.y1dc_delta_q);
invert_quant(cpi->Y1quant[Q] + 0,
cpi->Y1quant_shift[Q] + 0, quant_val);
cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
@ -432,7 +432,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
cpi->zrun_zbin_boost_y1_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;

quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
quant_val = vp9_dc2quant(Q, cpi->common.y2dc_delta_q);
invert_quant(cpi->Y2quant[Q] + 0,
cpi->Y2quant_shift[Q] + 0, quant_val);
cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
@ -445,7 +445,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
cpi->zrun_zbin_boost_y2_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;

quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
quant_val = vp9_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
invert_quant(cpi->UVquant[Q] + 0,
cpi->UVquant_shift[Q] + 0, quant_val);
cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
@ -462,7 +462,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
for (i = 1; i < 16; i++) {
int rc = vp8_default_zig_zag1d[i];

quant_val = vp8_ac_yquant(Q);
quant_val = vp9_ac_yquant(Q);
invert_quant(cpi->Y1quant[Q] + rc,
cpi->Y1quant_shift[Q] + rc, quant_val);
cpi->Y1zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
@ -471,7 +471,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
cpi->zrun_zbin_boost_y1[Q][i] =
((quant_val * zbin_boost[i]) + 64) >> 7;

quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
quant_val = vp9_ac2quant(Q, cpi->common.y2ac_delta_q);
invert_quant(cpi->Y2quant[Q] + rc,
cpi->Y2quant_shift[Q] + rc, quant_val);
cpi->Y2zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
@ -480,7 +480,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
cpi->zrun_zbin_boost_y2[Q][i] =
((quant_val * zbin_boost[i]) + 64) >> 7;

quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
quant_val = vp9_ac_uv_quant(Q, cpi->common.uvac_delta_q);
invert_quant(cpi->UVquant[Q] + rc,
cpi->UVquant_shift[Q] + rc, quant_val);
cpi->UVzbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
@ -496,17 +496,17 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
for (i = 1; i < 64; i++) {
int rc = vp8_default_zig_zag1d_8x8[i];

quant_val = vp8_ac_yquant(Q);
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y1_8x8[Q][i] =
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;

quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
quant_val = vp9_ac2quant(Q, cpi->common.y2ac_delta_q);
cpi->Y2zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y2_8x8[Q][i] =
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;

quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
quant_val = vp9_ac_uv_quant(Q, cpi->common.uvac_delta_q);
cpi->UVzbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_uv_8x8[Q][i] =
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
@ -516,15 +516,15 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
for (i = 1; i < 256; i++) {
int rc = vp8_default_zig_zag1d_16x16[i];

quant_val = vp8_ac_yquant(Q);
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y1_16x16[Q][i] = ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;

quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
quant_val = vp9_ac2quant(Q, cpi->common.y2ac_delta_q);
cpi->Y2zbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y2_16x16[Q][i] = ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;

quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
quant_val = vp9_ac_uv_quant(Q, cpi->common.uvac_delta_q);
cpi->UVzbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_uv_16x16[Q][i] = ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
}

@ -90,7 +90,7 @@ static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3,
// tables if and when things settle down in the experimental bitstream
double vp9_convert_qindex_to_q(int qindex) {
// Convert the index to a real Q value (scaled down to match old Q values)
return (double)vp8_ac_yquant(qindex) / 4.0;
return (double)vp9_ac_yquant(qindex) / 4.0;
}

int vp9_gfboost_qadjust(int qindex) {
@ -240,12 +240,12 @@ void vp9_restore_coding_context(VP8_COMP *cpi) {
void vp9_setup_key_frame(VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
// Setup for Key frame:
vp8_default_coef_probs(& cpi->common);
vp8_kf_default_bmode_probs(cpi->common.kf_bmode_prob);
vp8_init_mbmode_probs(& cpi->common);
vp8_default_bmode_probs(cm->fc.bmode_prob);
vp9_default_coef_probs(& cpi->common);
vp9_kf_default_bmode_probs(cpi->common.kf_bmode_prob);
vp9_init_mbmode_probs(& cpi->common);
vp9_default_bmode_probs(cm->fc.bmode_prob);

vp8_init_mv_probs(& cpi->common);
vp9_init_mv_probs(& cpi->common);

// cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;
@ -256,7 +256,7 @@ void vp9_setup_key_frame(VP8_COMP *cpi) {
cpi->common.refresh_golden_frame = TRUE;
cpi->common.refresh_alt_ref_frame = TRUE;

vp8_init_mode_contexts(&cpi->common);
vp9_init_mode_contexts(&cpi->common);
vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
vpx_memcpy(&cpi->common.lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));

@ -264,7 +264,7 @@ void vp9_init_me_luts() {
static int compute_rd_mult(int qindex) {
int q;

q = vp8_dc_quant(qindex, 0);
q = vp9_dc_quant(qindex, 0);
return (11 * q * q) >> 6;
}

@ -313,7 +313,7 @@ void vp9_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {

vp9_set_speed_features(cpi);

q = (int)pow(vp8_dc_quant(QIndex, 0) >> 2, 1.25);
q = (int)pow(vp9_dc_quant(QIndex, 0) >> 2, 1.25);
q = q << 2;
cpi->RDMULT = cpi->RDMULT << 4;

@ -1103,7 +1103,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
vp8_intra4x4_predict(b, mode, b->predictor);
vp9_intra4x4_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
vp8_comp_intra4x4_predict(b, mode, mode2, b->predictor);
@ -1158,12 +1158,12 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,

// inverse transform
if (best_tx_type != DCT_DCT)
vp8_ihtllm_c(best_dqcoeff, b->diff, 32, best_tx_type, 4);
vp9_ihtllm_c(best_dqcoeff, b->diff, 32, best_tx_type, 4);
else
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
best_dqcoeff, b->diff, 32);

vp8_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);

return best_rd;
}
@ -1266,7 +1266,7 @@ static int64_t rd_pick_intra_sby_mode(VP8_COMP *cpi,
/* Y Search for 32x32 intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_sby_s(&x->e_mbd);
vp9_build_intra_predictors_sby_s(&x->e_mbd);

super_block_yrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
@ -1327,7 +1327,7 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
mbmi->second_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
vp8_build_intra_predictors_mby(&x->e_mbd);
vp9_build_intra_predictors_mby(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
@ -1427,7 +1427,7 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
vp8_intra8x8_predict(b, mode, b->predictor);
vp9_intra8x8_predict(b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
@ -1715,7 +1715,7 @@ static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,

static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int *skippable, int fullpixel) {
vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
vp9_build_inter4x4_predictors_mbuv(&x->e_mbd);
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);

@ -1760,7 +1760,7 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
mbmi->second_uv_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
vp8_build_intra_predictors_mbuv(&x->e_mbd);
vp9_build_intra_predictors_mbuv(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue;
@ -1825,7 +1825,7 @@ static void rd_pick_intra_mbuv_mode_8x8(VP8_COMP *cpi,
int64_t this_rd;

mbmi->uv_mode = mode;
vp8_build_intra_predictors_mbuv(&x->e_mbd);
vp9_build_intra_predictors_mbuv(&x->e_mbd);
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
vp9_transform_mbuv_8x8(x);
@ -1917,7 +1917,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP8_COMP *cpi,

for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd);

super_block_uvrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
@ -1958,7 +1958,7 @@ int vp9_cost_mv_ref(VP8_COMP *cpi,

vp8_prob p [VP8_MVREFS - 1];
assert(NEARESTMV <= m && m <= SPLITMV);
vp8_mv_ref_probs(pc, p, near_mv_ref_ct);
vp9_mv_ref_probs(pc, p, near_mv_ref_ct);
return vp8_cost_token(vp8_mv_ref_tree, p,
vp8_mv_ref_encoding_array - NEARESTMV + m);
} else
@ -2092,9 +2092,9 @@ static int64_t encode_inter_mb_segment(MACROBLOCK *x,
BLOCK *be = &x->block[i];
int thisdistortion;

vp8_build_inter_predictors_b(bd, 16, xd->subpixel_predict);
vp9_build_inter_predictors_b(bd, 16, xd->subpixel_predict);
if (xd->mode_info_context->mbmi.second_ref_frame)
vp8_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg);
vp9_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg);
vp9_subtract_b(be, bd, 16);
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, bd);
@ -2142,9 +2142,9 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
BLOCK *be = &x->block[ib], *be2 = &x->block[idx];
int thisdistortion;

vp8_build_inter_predictors4b(xd, bd, 16);
vp9_build_inter_predictors4b(xd, bd, 16);
if (xd->mode_info_context->mbmi.second_ref_frame)
vp8_build_2nd_inter_predictors4b(xd, bd, 16);
vp9_build_2nd_inter_predictors4b(xd, bd, 16);
vp9_subtract_4b_c(be, bd, 16);

if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
@ -3254,7 +3254,7 @@ static void setup_buffer_inter(VP8_COMP *cpi, MACROBLOCK *x,
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;

vp8_find_near_mvs(xd, xd->mode_info_context,
vp9_find_near_mvs(xd, xd->mode_info_context,
xd->prev_mode_info_context,
&frame_nearest_mv[frame_type], &frame_near_mv[frame_type],
&frame_best_ref_mv[frame_type], frame_mdcounts[frame_type],
@ -3421,12 +3421,12 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
*rate2 += vp9_cost_mv_ref(cpi, this_mode, mdcounts);

if (block_size == BLOCK_16X16) {
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
if (is_comp_pred)
vp8_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
} else {
#if CONFIG_SUPERBLOCKS
vp8_build_inter32x32_predictors_sb(xd,
vp9_build_inter32x32_predictors_sb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
@ -3493,10 +3493,10 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,

if (!x->skip) {
if (block_size == BLOCK_16X16) {
vp8_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
if (is_comp_pred)
vp8_build_2nd_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
vp9_build_2nd_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
inter_mode_cost(cpi, x, this_mode, rate2, distortion,
rate_y, distortion_y, rate_uv, distortion_uv,
@ -3685,7 +3685,7 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
} else {
mbmi->interp_filter = cpi->common.mcomp_filter_type;
}
vp8_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);

// Test best rd so far against threshold for trying this mode.
if (best_rd <= cpi->rd_threshes[mode_index])
@ -3780,7 +3780,7 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
case D63_PRED:
mbmi->ref_frame = INTRA_FRAME;
// FIXME compound intra prediction
vp8_build_intra_predictors_mby(&x->e_mbd);
vp9_build_intra_predictors_mby(&x->e_mbd);
macro_block_yrd(cpi, x, &rate_y, &distortion, &skippable, txfm_cache);
rate2 += rate_y;
distortion2 += distortion;

@ -59,7 +59,7 @@ static void vp8_temporal_filter_predictors_mb_c
xd->subpixel_predict16x16(yptr, stride,
(mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
} else {
vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
vp9_copy_mem16x16(yptr, stride, &pred[0], 16);
}

// U & V
@ -79,8 +79,8 @@ static void vp8_temporal_filter_predictors_mb_c
(omv_col & 15), (omv_row & 15), &pred[320], 8);
}
else {
vp8_copy_mem8x8(uptr, stride, &pred[256], 8);
vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
vp9_copy_mem8x8(uptr, stride, &pred[256], 8);
vp9_copy_mem8x8(vptr, stride, &pred[320], 8);
}
}
void vp9_temporal_filter_apply_c
Some files were not shown because too many files have changed in this diff.