Change encoder vp8_ and vp8cx_ public symbol prefixes to vp9_.

Change-Id: Ie2e3652591b010ded10c216501ce24fd95d0aec5
Author: Ronald S. Bultje
Date:   2012-10-30 12:58:42 -07:00
Parent: fe1788500c
Commit: f88558fb1d

86 changed files with 1959 additions and 1959 deletions

========== changed file ==========

@@ -11,14 +11,14 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-extern unsigned int vp8_sad16x16_sse3(
+extern unsigned int vp9_sad16x16_sse3(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
 int ref_stride,
 int max_err);
-extern void vp8_sad16x16x3_sse3(
+extern void vp9_sad16x16x3_sse3(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
@@ -43,14 +43,14 @@ extern void vp8_makemask_sse3(
 int ut,
 int vt);
-unsigned int vp8_sad16x16_unmasked_wmt(
+unsigned int vp9_sad16x16_unmasked_wmt(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
 int ref_stride,
 unsigned char *mask);
-unsigned int vp8_sad16x16_masked_wmt(
+unsigned int vp9_sad16x16_masked_wmt(
 unsigned char *src_ptr,
 int src_stride,
 unsigned char *ref_ptr,
@@ -503,7 +503,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 vp8_growmaskmb_sse3(dym, dym2);
-e = vp8_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
 if (e < beste) {
 bui = i;
@@ -529,7 +529,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 for (i = -32; i < 32; i++) {
 unsigned char *dyz = i * dyp + dy;
 for (j = -32; j < 32; j++) {
-e = vp8_sad16x16_masked_wmt(y, yp, dyz + j, dyp, dym2);
+e = vp9_sad16x16_masked_wmt(y, yp, dyz + j, dyp, dym2);
 if (e < beste) {
 bmi = i;
 bmj = j;
@@ -581,7 +581,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 vp8_growmaskmb_sse3(dym, dym2);
-obeste = vp8_sad16x16_masked_wmt(y, yp, dy + bmi * dyp + bmj, dyp, dym2);
+obeste = vp9_sad16x16_masked_wmt(y, yp, dy + bmi * dyp + bmj, dyp, dym2);
 beste = 0xffffffff;
@@ -589,7 +589,7 @@ int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char
 for (i = -32; i < 32; i++) {
 unsigned char *dyz = i * dyp + dy;
 for (j = -32; j < 32; j++) {
-e = vp8_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
 if (e < beste) {
 bui = i;
@@ -698,8 +698,8 @@ int mainz(int argc, char *argv[]) {
 vp8_growmaskmb_sse3(ym, ym3);
-a = vp8_sad16x16_masked_wmt(str, 16, sts, 16, ym3);
-b = vp8_sad16x16_unmasked_wmt(str, 16, sts, 16, ym3);
+a = vp9_sad16x16_masked_wmt(str, 16, sts, 16, ym3);
+b = vp9_sad16x16_unmasked_wmt(str, 16, sts, 16, ym3);
 vp8_masked_predictor_wmt(str, sts, 16, ym, 16, ym3);
@@ -738,7 +738,7 @@ int mainz(int argc, char *argv[]) {
 int bmi, bmj, bui, buj, bwm;
 unsigned char ym[256];
-if (vp8_sad16x16_sse3(ys + c, y_stride, yd + c, y_stride, 0xffff) == 0)
+if (vp9_sad16x16_sse3(ys + c, y_stride, yd + c, y_stride, 0xffff) == 0)
 bmi = bmj = bui = buj = bwm = 0;
 else {
 COLOR_SEG_ELEMENT cs[5];

========== changed file ==========

@@ -172,29 +172,29 @@ extern "C"
 } VP8_CONFIG;
-void vp8_initialize();
+void vp9_initialize();
-VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf);
-void vp8_remove_compressor(VP8_PTR *comp);
+VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf);
+void vp9_remove_compressor(VP8_PTR *comp);
 void vp8_init_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
-void vp8_change_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
+void vp9_change_config(VP8_PTR onyx, VP8_CONFIG *oxcf);
 // receive a frames worth of data caller can assume that a copy of this frame is made
 // and not just a copy of the pointer..
-int vp8_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
-int vp8_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush);
-int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);
+int vp9_receive_raw_frame(VP8_PTR comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp);
+int vp9_get_compressed_data(VP8_PTR comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush);
+int vp9_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags);
-int vp8_use_as_reference(VP8_PTR comp, int ref_frame_flags);
-int vp8_update_reference(VP8_PTR comp, int ref_frame_flags);
-int vp8_get_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
-int vp8_set_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
-int vp8_update_entropy(VP8_PTR comp, int update);
-int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]);
-int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols);
-int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
-int vp8_get_quantizer(VP8_PTR c);
+int vp9_use_as_reference(VP8_PTR comp, int ref_frame_flags);
+int vp9_update_reference(VP8_PTR comp, int ref_frame_flags);
+int vp9_get_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
+int vp9_set_reference(VP8_PTR comp, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd);
+int vp9_update_entropy(VP8_PTR comp, int update);
+int vp9_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]);
+int vp9_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols);
+int vp9_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+int vp9_get_quantizer(VP8_PTR c);
 #ifdef __cplusplus
 }
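For orientation: the hunk above covers the encoder's public entry points, so this one header shows the whole renamed API surface a caller sees. A minimal driving loop against exactly these signatures might look as follows; this is a sketch, where the include path, the assumption that a zero return from vp9_get_compressed_data means success, and the sizing of `buf` are all assumptions, and config/buffer setup is omitted.

    #include <stdint.h>
    #include "vp8/common/onyx.h"   /* the header diffed above; path assumed */

    /* Hypothetical driver: feeds raw frames in, drains compressed packets. */
    static void encode_stream(VP8_CONFIG *oxcf, YV12_BUFFER_CONFIG *raw,
                              unsigned char *buf, int num_frames) {
      VP8_PTR enc;
      int64_t pts, pts_end;
      unsigned long size;
      unsigned int flags = 0;
      int i;

      vp9_initialize();                    /* was vp8_initialize() */
      enc = vp9_create_compressor(oxcf);   /* was vp8_create_compressor() */
      for (i = 0; i < num_frames; i++) {
        /* the library copies the frame, per the comment in the header */
        vp9_receive_raw_frame(enc, 0, raw, i, i + 1);
        if (!vp9_get_compressed_data(enc, &flags, &size, buf,
                                     &pts, &pts_end, 0)) {
          /* assuming 0 means success: `size` bytes of `buf` are one packet,
             and `buf` is assumed large enough for a frame */
        }
      }
      vp9_remove_compressor(&enc);         /* was vp8_remove_compressor() */
    }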

========== changed file ==========

@@ -236,203 +236,203 @@ if [ "$CONFIG_VP8_ENCODER" = "yes" ]; then
 # variance
 [ $arch = "x86_64" ] && mmx_x86_64=mmx && sse2_x86_64=sse2
-prototype unsigned int vp8_variance32x32 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance32x32
+prototype unsigned int vp9_variance32x32 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance32x32
-prototype unsigned int vp8_variance16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance16x16 mmx sse2
-vp8_variance16x16_sse2=vp8_variance16x16_wmt
-vp8_variance16x16_mmx=vp8_variance16x16_mmx
+prototype unsigned int vp9_variance16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x16 mmx sse2
+vp9_variance16x16_sse2=vp9_variance16x16_wmt
+vp9_variance16x16_mmx=vp9_variance16x16_mmx
-prototype unsigned int vp8_variance16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance16x8 mmx sse2
-vp8_variance16x8_sse2=vp8_variance16x8_wmt
-vp8_variance16x8_mmx=vp8_variance16x8_mmx
+prototype unsigned int vp9_variance16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x8 mmx sse2
+vp9_variance16x8_sse2=vp9_variance16x8_wmt
+vp9_variance16x8_mmx=vp9_variance16x8_mmx
-prototype unsigned int vp8_variance8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance8x16 mmx sse2
-vp8_variance8x16_sse2=vp8_variance8x16_wmt
-vp8_variance8x16_mmx=vp8_variance8x16_mmx
+prototype unsigned int vp9_variance8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x16 mmx sse2
+vp9_variance8x16_sse2=vp9_variance8x16_wmt
+vp9_variance8x16_mmx=vp9_variance8x16_mmx
-prototype unsigned int vp8_variance8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance8x8 mmx sse2
-vp8_variance8x8_sse2=vp8_variance8x8_wmt
-vp8_variance8x8_mmx=vp8_variance8x8_mmx
+prototype unsigned int vp9_variance8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x8 mmx sse2
+vp9_variance8x8_sse2=vp9_variance8x8_wmt
+vp9_variance8x8_mmx=vp9_variance8x8_mmx
-prototype unsigned int vp8_variance4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance4x4 mmx sse2
-vp8_variance4x4_sse2=vp8_variance4x4_wmt
-vp8_variance4x4_mmx=vp8_variance4x4_mmx
+prototype unsigned int vp9_variance4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance4x4 mmx sse2
+vp9_variance4x4_sse2=vp9_variance4x4_wmt
+vp9_variance4x4_mmx=vp9_variance4x4_mmx
-prototype unsigned int vp8_sub_pixel_variance32x32 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
-specialize vp8_sub_pixel_variance32x32
+prototype unsigned int vp9_sub_pixel_variance32x32 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp9_sub_pixel_variance32x32
-prototype unsigned int vp8_sub_pixel_variance16x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
-specialize vp8_sub_pixel_variance16x16 sse2 mmx ssse3
-vp8_sub_pixel_variance16x16_sse2=vp8_sub_pixel_variance16x16_wmt
+prototype unsigned int vp9_sub_pixel_variance16x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x16 sse2 mmx ssse3
+vp9_sub_pixel_variance16x16_sse2=vp9_sub_pixel_variance16x16_wmt
-prototype unsigned int vp8_sub_pixel_variance8x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
-specialize vp8_sub_pixel_variance8x16 sse2 mmx
-vp8_sub_pixel_variance8x16_sse2=vp8_sub_pixel_variance8x16_wmt
+prototype unsigned int vp9_sub_pixel_variance8x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x16 sse2 mmx
+vp9_sub_pixel_variance8x16_sse2=vp9_sub_pixel_variance8x16_wmt
-prototype unsigned int vp8_sub_pixel_variance16x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
-specialize vp8_sub_pixel_variance16x8 sse2 mmx ssse3
-vp8_sub_pixel_variance16x8_sse2=vp8_sub_pixel_variance16x8_ssse3;
-vp8_sub_pixel_variance16x8_sse2=vp8_sub_pixel_variance16x8_wmt
+prototype unsigned int vp9_sub_pixel_variance16x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x8 sse2 mmx ssse3
+vp9_sub_pixel_variance16x8_sse2=vp9_sub_pixel_variance16x8_ssse3;
+vp9_sub_pixel_variance16x8_sse2=vp9_sub_pixel_variance16x8_wmt
-prototype unsigned int vp8_sub_pixel_variance8x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
-specialize vp8_sub_pixel_variance8x8 sse2 mmx
-vp8_sub_pixel_variance8x8_sse2=vp8_sub_pixel_variance8x8_wmt
+prototype unsigned int vp9_sub_pixel_variance8x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x8 sse2 mmx
+vp9_sub_pixel_variance8x8_sse2=vp9_sub_pixel_variance8x8_wmt
-prototype unsigned int vp8_sub_pixel_variance4x4 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
-specialize vp8_sub_pixel_variance4x4 sse2 mmx
-vp8_sub_pixel_variance4x4_sse2=vp8_sub_pixel_variance4x4_wmt
+prototype unsigned int vp9_sub_pixel_variance4x4 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp9_sub_pixel_variance4x4 sse2 mmx
+vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
-prototype unsigned int vp8_sad32x32 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
-specialize vp8_sad32x32
+prototype unsigned int vp9_sad32x32 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad32x32
-prototype unsigned int vp8_sad16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
-specialize vp8_sad16x16 mmx sse2 sse3
-vp8_sad16x16_sse2=vp8_sad16x16_wmt
+prototype unsigned int vp9_sad16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x16 mmx sse2 sse3
+vp9_sad16x16_sse2=vp9_sad16x16_wmt
-prototype unsigned int vp8_sad16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
-specialize vp8_sad16x8 mmx sse2
-vp8_sad16x8_sse2=vp8_sad16x8_wmt
+prototype unsigned int vp9_sad16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x8 mmx sse2
+vp9_sad16x8_sse2=vp9_sad16x8_wmt
-prototype unsigned int vp8_sad8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
-specialize vp8_sad8x16 mmx sse2
-vp8_sad8x16_sse2=vp8_sad8x16_wmt
+prototype unsigned int vp9_sad8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x16 mmx sse2
+vp9_sad8x16_sse2=vp9_sad8x16_wmt
-prototype unsigned int vp8_sad8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
-specialize vp8_sad8x8 mmx sse2
-vp8_sad8x8_sse2=vp8_sad8x8_wmt
+prototype unsigned int vp9_sad8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x8 mmx sse2
+vp9_sad8x8_sse2=vp9_sad8x8_wmt
-prototype unsigned int vp8_sad4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
-specialize vp8_sad4x4 mmx sse2
-vp8_sad4x4_sse2=vp8_sad4x4_wmt
+prototype unsigned int vp9_sad4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad4x4 mmx sse2
+vp9_sad4x4_sse2=vp9_sad4x4_wmt
-prototype unsigned int vp8_variance_halfpixvar16x16_h "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance_halfpixvar16x16_h mmx sse2
-vp8_variance_halfpixvar16x16_h_sse2=vp8_variance_halfpixvar16x16_h_wmt
+prototype unsigned int vp9_variance_halfpixvar16x16_h "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_h mmx sse2
+vp9_variance_halfpixvar16x16_h_sse2=vp9_variance_halfpixvar16x16_h_wmt
-prototype unsigned int vp8_variance_halfpixvar16x16_v "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance_halfpixvar16x16_v mmx sse2
-vp8_variance_halfpixvar16x16_v_sse2=vp8_variance_halfpixvar16x16_v_wmt
+prototype unsigned int vp9_variance_halfpixvar16x16_v "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_v mmx sse2
+vp9_variance_halfpixvar16x16_v_sse2=vp9_variance_halfpixvar16x16_v_wmt
-prototype unsigned int vp8_variance_halfpixvar16x16_hv "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance_halfpixvar16x16_hv mmx sse2
-vp8_variance_halfpixvar16x16_hv_sse2=vp8_variance_halfpixvar16x16_hv_wmt
+prototype unsigned int vp9_variance_halfpixvar16x16_hv "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_hv mmx sse2
+vp9_variance_halfpixvar16x16_hv_sse2=vp9_variance_halfpixvar16x16_hv_wmt
-prototype unsigned int vp8_variance_halfpixvar32x32_h "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance_halfpixvar32x32_h
+prototype unsigned int vp9_variance_halfpixvar32x32_h "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_h
-prototype unsigned int vp8_variance_halfpixvar32x32_v "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance_halfpixvar32x32_v
+prototype unsigned int vp9_variance_halfpixvar32x32_v "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_v
-prototype unsigned int vp8_variance_halfpixvar32x32_hv "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp8_variance_halfpixvar32x32_hv
+prototype unsigned int vp9_variance_halfpixvar32x32_hv "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_hv
-prototype void vp8_sad32x32x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
-specialize vp8_sad32x32x3
+prototype void vp9_sad32x32x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x32x3
-prototype void vp8_sad16x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
-specialize vp8_sad16x16x3 sse3 ssse3
+prototype void vp9_sad16x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x16x3 sse3 ssse3
-prototype void vp8_sad16x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
-specialize vp8_sad16x8x3 sse3 ssse3
+prototype void vp9_sad16x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x8x3 sse3 ssse3
-prototype void vp8_sad8x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
-specialize vp8_sad8x16x3 sse3
+prototype void vp9_sad8x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x16x3 sse3
-prototype void vp8_sad8x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
-specialize vp8_sad8x8x3 sse3
+prototype void vp9_sad8x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x8x3 sse3
-prototype void vp8_sad4x4x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
-specialize vp8_sad4x4x3 sse3
+prototype void vp9_sad4x4x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x4x3 sse3
-prototype void vp8_sad32x32x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
-specialize vp8_sad32x32x8
+prototype void vp9_sad32x32x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp9_sad32x32x8
-prototype void vp8_sad16x16x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
-specialize vp8_sad16x16x8 sse4
+prototype void vp9_sad16x16x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp9_sad16x16x8 sse4
-prototype void vp8_sad16x8x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
-specialize vp8_sad16x8x8 sse4
+prototype void vp9_sad16x8x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp9_sad16x8x8 sse4
-prototype void vp8_sad8x16x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
-specialize vp8_sad8x16x8 sse4
+prototype void vp9_sad8x16x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp9_sad8x16x8 sse4
-prototype void vp8_sad8x8x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
-specialize vp8_sad8x8x8 sse4
+prototype void vp9_sad8x8x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp9_sad8x8x8 sse4
-prototype void vp8_sad4x4x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
-specialize vp8_sad4x4x8 sse4
+prototype void vp9_sad4x4x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp9_sad4x4x8 sse4
-prototype void vp8_sad32x32x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
-specialize vp8_sad32x32x4d
+prototype void vp9_sad32x32x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x32x4d
-prototype void vp8_sad16x16x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
-specialize vp8_sad16x16x4d sse3
+prototype void vp9_sad16x16x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x16x4d sse3
-prototype void vp8_sad16x8x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
-specialize vp8_sad16x8x4d sse3
+prototype void vp9_sad16x8x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x8x4d sse3
-prototype void vp8_sad8x16x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
-specialize vp8_sad8x16x4d sse3
+prototype void vp9_sad8x16x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x16x4d sse3
-prototype void vp8_sad8x8x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
-specialize vp8_sad8x8x4d sse3
+prototype void vp9_sad8x8x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x8x4d sse3
-prototype void vp8_sad4x4x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
-specialize vp8_sad4x4x4d sse3
+prototype void vp9_sad4x4x4d "const unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x4x4d sse3
 #
 # Block copy
 #
 case $arch in
 x86*)
-prototype void vp8_copy32xn "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int n"
-specialize vp8_copy32xn sse2 sse3
+prototype void vp9_copy32xn "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int n"
+specialize vp9_copy32xn sse2 sse3
 ;;
 esac
-prototype unsigned int vp8_sub_pixel_mse16x16 "const unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const unsigned char *dst_ptr, int dst_pixels_per_line, unsigned int *sse"
-specialize vp8_sub_pixel_mse16x16 sse2 mmx
-vp8_sub_pixel_mse16x16_sse2=vp8_sub_pixel_mse16x16_wmt
+prototype unsigned int vp9_sub_pixel_mse16x16 "const unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const unsigned char *dst_ptr, int dst_pixels_per_line, unsigned int *sse"
+specialize vp9_sub_pixel_mse16x16 sse2 mmx
+vp9_sub_pixel_mse16x16_sse2=vp9_sub_pixel_mse16x16_wmt
-prototype unsigned int vp8_mse16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride, unsigned int *sse"
-specialize vp8_mse16x16 mmx sse2
-vp8_mse16x16_sse2=vp8_mse16x16_wmt
+prototype unsigned int vp9_mse16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse16x16 mmx sse2
+vp9_mse16x16_sse2=vp9_mse16x16_wmt
-prototype unsigned int vp8_sub_pixel_mse32x32 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
-specialize vp8_sub_pixel_mse32x32
+prototype unsigned int vp9_sub_pixel_mse32x32 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp9_sub_pixel_mse32x32
-prototype unsigned int vp8_get_mb_ss "const short *"
-specialize vp8_get_mb_ss mmx sse2
+prototype unsigned int vp9_get_mb_ss "const short *"
+specialize vp9_get_mb_ss mmx sse2
 # ENCODEMB INVOKE
-prototype int vp8_mbblock_error "struct macroblock *mb, int dc"
-specialize vp8_mbblock_error mmx sse2
-vp8_mbblock_error_sse2=vp8_mbblock_error_xmm
+prototype int vp9_mbblock_error "struct macroblock *mb, int dc"
+specialize vp9_mbblock_error mmx sse2
+vp9_mbblock_error_sse2=vp9_mbblock_error_xmm
-prototype int vp8_block_error "short *coeff, short *dqcoeff, int block_size"
-specialize vp8_block_error mmx sse2
-vp8_block_error_sse2=vp8_block_error_xmm
+prototype int vp9_block_error "short *coeff, short *dqcoeff, int block_size"
+specialize vp9_block_error mmx sse2
+vp9_block_error_sse2=vp9_block_error_xmm
-prototype void vp8_subtract_b "struct block *be, struct blockd *bd, int pitch"
-specialize vp8_subtract_b mmx sse2
+prototype void vp9_subtract_b "struct block *be, struct blockd *bd, int pitch"
+specialize vp9_subtract_b mmx sse2
-prototype int vp8_mbuverror "struct macroblock *mb"
-specialize vp8_mbuverror mmx sse2
-vp8_mbuverror_sse2=vp8_mbuverror_xmm
+prototype int vp9_mbuverror "struct macroblock *mb"
+specialize vp9_mbuverror mmx sse2
+vp9_mbuverror_sse2=vp9_mbuverror_xmm
-prototype void vp8_subtract_b "struct block *be, struct blockd *bd, int pitch"
-specialize vp8_subtract_b mmx sse2
+prototype void vp9_subtract_b "struct block *be, struct blockd *bd, int pitch"
+specialize vp9_subtract_b mmx sse2
-prototype void vp8_subtract_mby "short *diff, unsigned char *src, unsigned char *pred, int stride"
-specialize vp8_subtract_mby mmx sse2
+prototype void vp9_subtract_mby "short *diff, unsigned char *src, unsigned char *pred, int stride"
+specialize vp9_subtract_mby mmx sse2
-prototype void vp8_subtract_mbuv "short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride"
-specialize vp8_subtract_mbuv mmx sse2
+prototype void vp9_subtract_mbuv "short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride"
+specialize vp9_subtract_mbuv mmx sse2
 #
 # Structured Similarity (SSIM)
@@ -440,40 +440,40 @@ specialize vp8_subtract_mbuv mmx sse2
 if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then
 [ $arch = "x86_64" ] && sse2_on_x86_64=sse2
-prototype void vp8_ssim_parms_8x8 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
-specialize vp8_ssim_parms_8x8 $sse2_on_x86_64
+prototype void vp9_ssim_parms_8x8 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+specialize vp9_ssim_parms_8x8 $sse2_on_x86_64
-prototype void vp8_ssim_parms_16x16 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
-specialize vp8_ssim_parms_16x16 $sse2_on_x86_64
+prototype void vp9_ssim_parms_16x16 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+specialize vp9_ssim_parms_16x16 $sse2_on_x86_64
 fi
 # fdct functions
-prototype void vp8_short_fdct8x8 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_fdct8x8
+prototype void vp9_short_fdct8x8 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_fdct8x8
-prototype void vp8_short_fhaar2x2 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_fhaar2x2
+prototype void vp9_short_fhaar2x2 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_fhaar2x2
-prototype void vp8_short_fdct4x4 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_fdct4x4
+prototype void vp9_short_fdct4x4 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_fdct4x4
-prototype void vp8_short_fdct8x4 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_fdct8x4
+prototype void vp9_short_fdct8x4 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_fdct8x4
-prototype void vp8_short_walsh4x4 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_walsh4x4
+prototype void vp9_short_walsh4x4 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_walsh4x4
-prototype void vp8_short_fdct16x16 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_fdct16x16
+prototype void vp9_short_fdct16x16 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_fdct16x16
-prototype void vp8_short_walsh4x4_lossless "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_walsh4x4_lossless
+prototype void vp9_short_walsh4x4_lossless "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_walsh4x4_lossless
-prototype void vp8_short_walsh4x4_x8 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_walsh4x4_x8
+prototype void vp9_short_walsh4x4_x8 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_walsh4x4_x8
-prototype void vp8_short_walsh8x4_x8 "short *InputData, short *OutputData, int pitch"
-specialize vp8_short_walsh8x4_x8
+prototype void vp9_short_walsh8x4_x8 "short *InputData, short *OutputData, int pitch"
+specialize vp9_short_walsh8x4_x8
 fi
 # end encoder functions
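Each prototype/specialize pair in this file is shell, not C: a build step expands it into a run-time-dispatch header. As a rough sketch of what the vp9_sad16x16 lines above turn into (the generated names and the RTCD_EXTERN spelling are assumptions about how this script is consumed; the signature is taken verbatim from the prototype string):

    /* One declaration per ISA in `specialize vp9_sad16x16 mmx sse2 sse3`,
       plus the C fallback; per the alias line above, the sse2 entry is the
       _wmt symbol. */
    unsigned int vp9_sad16x16_c(const unsigned char *src_ptr, int source_stride,
                                const unsigned char *ref_ptr, int ref_stride,
                                unsigned int max_sad);
    unsigned int vp9_sad16x16_mmx(const unsigned char *src_ptr, int source_stride,
                                  const unsigned char *ref_ptr, int ref_stride,
                                  unsigned int max_sad);
    unsigned int vp9_sad16x16_wmt(const unsigned char *src_ptr, int source_stride,
                                  const unsigned char *ref_ptr, int ref_stride,
                                  unsigned int max_sad);
    unsigned int vp9_sad16x16_sse3(const unsigned char *src_ptr, int source_stride,
                                   const unsigned char *ref_ptr, int ref_stride,
                                   unsigned int max_sad);

    /* The name given to `prototype` becomes a function pointer that CPU
       detection aims at the best available implementation at init time. */
    RTCD_EXTERN unsigned int (*vp9_sad16x16)(const unsigned char *src_ptr,
                                             int source_stride,
                                             const unsigned char *ref_ptr,
                                             int ref_stride,
                                             unsigned int max_sad);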

========== changed file ==========

@@ -14,8 +14,8 @@
 #include "vp8/encoder/variance.h"
 #include "vp8/encoder/onyx_int.h"
-extern void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
-extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
+extern void (*vp9_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
+extern void vp9_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
 extern void vpxyv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
 void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
@@ -29,42 +29,42 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
 #if HAVE_ARMV6
 if (flags & HAS_MEDIA) {
-cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
-/*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
-cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
-cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
-cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
+cpi->rtcd.variance.sad16x16 = vp9_sad16x16_armv6;
+/*cpi->rtcd.variance.sad16x8 = vp9_sad16x8_c;
+cpi->rtcd.variance.sad8x16 = vp9_sad8x16_c;
+cpi->rtcd.variance.sad8x8 = vp9_sad8x8_c;
+cpi->rtcd.variance.sad4x4 = vp9_sad4x4_c;*/
-/*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
-cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
-/*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
-cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*/
-cpi->rtcd.variance.var16x16 = vp8_variance16x16_armv6;
+/*cpi->rtcd.variance.var4x4 = vp9_variance4x4_c;*/
+cpi->rtcd.variance.var8x8 = vp9_variance8x8_armv6;
+/*cpi->rtcd.variance.var8x16 = vp9_variance8x16_c;
+cpi->rtcd.variance.var16x8 = vp9_variance16x8_c;*/
+cpi->rtcd.variance.var16x16 = vp9_variance16x16_armv6;
-/*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
-cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
-/*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
-cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
-cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
-cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
-cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
-cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
+/*cpi->rtcd.variance.subpixvar4x4 = vp9_sub_pixel_variance4x4_c;*/
+cpi->rtcd.variance.subpixvar8x8 = vp9_sub_pixel_variance8x8_armv6;
+/*cpi->rtcd.variance.subpixvar8x16 = vp9_sub_pixel_variance8x16_c;
+cpi->rtcd.variance.subpixvar16x8 = vp9_sub_pixel_variance16x8_c;*/
+cpi->rtcd.variance.subpixvar16x16 = vp9_sub_pixel_variance16x16_armv6;
+cpi->rtcd.variance.halfpixvar16x16_h = vp9_variance_halfpixvar16x16_h_armv6;
+cpi->rtcd.variance.halfpixvar16x16_v = vp9_variance_halfpixvar16x16_v_armv6;
+cpi->rtcd.variance.halfpixvar16x16_hv = vp9_variance_halfpixvar16x16_hv_armv6;
-cpi->rtcd.variance.mse16x16 = vp8_mse16x16_armv6;
-/*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
+cpi->rtcd.variance.mse16x16 = vp9_mse16x16_armv6;
+/*cpi->rtcd.variance.getmbss = vp9_get_mb_ss_c;*/
-cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_armv6;
-cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_armv6;
-cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_armv6;
-cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_armv6;
-cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_armv6;
+cpi->rtcd.fdct.short4x4 = vp9_short_fdct4x4_armv6;
+cpi->rtcd.fdct.short8x4 = vp9_short_fdct8x4_armv6;
+cpi->rtcd.fdct.fast4x4 = vp9_short_fdct4x4_armv6;
+cpi->rtcd.fdct.fast8x4 = vp9_short_fdct8x4_armv6;
+cpi->rtcd.fdct.walsh_short4x4 = vp9_short_walsh4x4_armv6;
-/*cpi->rtcd.encodemb.berr = vp8_block_error_c;
-cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
-cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;*/
-cpi->rtcd.encodemb.subb = vp8_subtract_b_armv6;
-cpi->rtcd.encodemb.submby = vp8_subtract_mby_armv6;
-cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_armv6;
+/*cpi->rtcd.encodemb.berr = vp9_block_error_c;
+cpi->rtcd.encodemb.mberr = vp9_mbblock_error_c;
+cpi->rtcd.encodemb.mbuverr = vp9_mbuverror_c;*/
+cpi->rtcd.encodemb.subb = vp9_subtract_b_armv6;
+cpi->rtcd.encodemb.submby = vp9_subtract_mby_armv6;
+cpi->rtcd.encodemb.submbuv = vp9_subtract_mbuv_armv6;
 /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;*/
 cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_armv6;
@@ -73,42 +73,42 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
 #if HAVE_ARMV7
 if (flags & HAS_NEON) {
-cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
-cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
-cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
-cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
-cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
+cpi->rtcd.variance.sad16x16 = vp9_sad16x16_neon;
+cpi->rtcd.variance.sad16x8 = vp9_sad16x8_neon;
+cpi->rtcd.variance.sad8x16 = vp9_sad8x16_neon;
+cpi->rtcd.variance.sad8x8 = vp9_sad8x8_neon;
+cpi->rtcd.variance.sad4x4 = vp9_sad4x4_neon;
-/*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
-cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
-cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon;
-cpi->rtcd.variance.var16x8 = vp8_variance16x8_neon;
-cpi->rtcd.variance.var16x16 = vp8_variance16x16_neon;
+/*cpi->rtcd.variance.var4x4 = vp9_variance4x4_c;*/
+cpi->rtcd.variance.var8x8 = vp9_variance8x8_neon;
+cpi->rtcd.variance.var8x16 = vp9_variance8x16_neon;
+cpi->rtcd.variance.var16x8 = vp9_variance16x8_neon;
+cpi->rtcd.variance.var16x16 = vp9_variance16x16_neon;
-/*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
-cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
-/*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
-cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
-cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
-cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
-cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
-cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
+/*cpi->rtcd.variance.subpixvar4x4 = vp9_sub_pixel_variance4x4_c;*/
+cpi->rtcd.variance.subpixvar8x8 = vp9_sub_pixel_variance8x8_neon;
+/*cpi->rtcd.variance.subpixvar8x16 = vp9_sub_pixel_variance8x16_c;
+cpi->rtcd.variance.subpixvar16x8 = vp9_sub_pixel_variance16x8_c;*/
+cpi->rtcd.variance.subpixvar16x16 = vp9_sub_pixel_variance16x16_neon;
+cpi->rtcd.variance.halfpixvar16x16_h = vp9_variance_halfpixvar16x16_h_neon;
+cpi->rtcd.variance.halfpixvar16x16_v = vp9_variance_halfpixvar16x16_v_neon;
+cpi->rtcd.variance.halfpixvar16x16_hv = vp9_variance_halfpixvar16x16_hv_neon;
-cpi->rtcd.variance.mse16x16 = vp8_mse16x16_neon;
-/*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
+cpi->rtcd.variance.mse16x16 = vp9_mse16x16_neon;
+/*cpi->rtcd.variance.getmbss = vp9_get_mb_ss_c;*/
-cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_neon;
-cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_neon;
-cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_neon;
-cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_neon;
-cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_neon;
+cpi->rtcd.fdct.short4x4 = vp9_short_fdct4x4_neon;
+cpi->rtcd.fdct.short8x4 = vp9_short_fdct8x4_neon;
+cpi->rtcd.fdct.fast4x4 = vp9_short_fdct4x4_neon;
+cpi->rtcd.fdct.fast8x4 = vp9_short_fdct8x4_neon;
+cpi->rtcd.fdct.walsh_short4x4 = vp9_short_walsh4x4_neon;
-/*cpi->rtcd.encodemb.berr = vp8_block_error_c;
-cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
-cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;*/
-cpi->rtcd.encodemb.subb = vp8_subtract_b_neon;
-cpi->rtcd.encodemb.submby = vp8_subtract_mby_neon;
-cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_neon;
+/*cpi->rtcd.encodemb.berr = vp9_block_error_c;
+cpi->rtcd.encodemb.mberr = vp9_mbblock_error_c;
+cpi->rtcd.encodemb.mbuverr = vp9_mbuverror_c;*/
+cpi->rtcd.encodemb.subb = vp9_subtract_b_neon;
+cpi->rtcd.encodemb.submby = vp9_subtract_mby_neon;
+cpi->rtcd.encodemb.submbuv = vp9_subtract_mbuv_neon;
 /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;
 cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair;*/
@@ -122,7 +122,7 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
 if (flags & HAS_NEON)
 #endif
 {
-vp8_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
+vp9_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
 }
 #endif
 #endif

========== changed file ==========

@@ -13,9 +13,9 @@
 #if HAVE_ARMV6
-void vp8_short_fdct8x4_armv6(short *input, short *output, int pitch) {
-vp8_short_fdct4x4_armv6(input, output, pitch);
-vp8_short_fdct4x4_armv6(input + 4, output + 16, pitch);
+void vp9_short_fdct8x4_armv6(short *input, short *output, int pitch) {
+vp9_short_fdct4x4_armv6(input, output, pitch);
+vp9_short_fdct4x4_armv6(input + 4, output + 16, pitch);
 }
 #endif /* HAVE_ARMV6 */
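The wrapper above makes the 8x4 transform literally two 4x4 transforms over adjacent column groups: `input + 4` steps to the next four samples in the same rows, and `output + 16` is the second block's sixteen coefficients. Spelled out with assumed buffer shapes (taking `pitch` to be in bytes here, so 16 for a row of eight shorts; if the units differ, only the constant changes):

    short block[4 * 8];   /* 4 rows x 8 residual samples, pitch = 16 bytes */
    short coeffs[32];     /* two 4x4 coefficient groups of 16 shorts each */

    /* Equivalent to vp9_short_fdct8x4_armv6(block, coeffs, 16): */
    vp9_short_fdct4x4_armv6(block,     coeffs,      16);  /* left 4x4  */
    vp9_short_fdct4x4_armv6(block + 4, coeffs + 16, 16);  /* right 4x4 */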

========== changed file ==========

@@ -13,51 +13,51 @@
 #define DCT_ARM_H
 #if HAVE_ARMV6
-extern prototype_fdct(vp8_short_walsh4x4_armv6);
-extern prototype_fdct(vp8_short_fdct4x4_armv6);
-extern prototype_fdct(vp8_short_fdct8x4_armv6);
+extern prototype_fdct(vp9_short_walsh4x4_armv6);
+extern prototype_fdct(vp9_short_fdct4x4_armv6);
+extern prototype_fdct(vp9_short_fdct8x4_armv6);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_armv6
+#define vp8_fdct_walsh_short4x4 vp9_short_walsh4x4_armv6
 #undef vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_armv6
+#define vp8_fdct_short4x4 vp9_short_fdct4x4_armv6
 #undef vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_armv6
+#define vp8_fdct_short8x4 vp9_short_fdct8x4_armv6
 #undef vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_armv6
+#define vp8_fdct_fast4x4 vp9_short_fdct4x4_armv6
 #undef vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_armv6
+#define vp8_fdct_fast8x4 vp9_short_fdct8x4_armv6
 #endif
 #endif /* HAVE_ARMV6 */
 #if HAVE_ARMV7
-extern prototype_fdct(vp8_short_fdct4x4_neon);
-extern prototype_fdct(vp8_short_fdct8x4_neon);
+extern prototype_fdct(vp9_short_fdct4x4_neon);
+extern prototype_fdct(vp9_short_fdct8x4_neon);
 extern prototype_fdct(vp8_fast_fdct4x4_neon);
 extern prototype_fdct(vp8_fast_fdct8x4_neon);
-extern prototype_fdct(vp8_short_walsh4x4_neon);
+extern prototype_fdct(vp9_short_walsh4x4_neon);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_fdct_short4x4
-#define vp8_fdct_short4x4 vp8_short_fdct4x4_neon
+#define vp8_fdct_short4x4 vp9_short_fdct4x4_neon
 #undef vp8_fdct_short8x4
-#define vp8_fdct_short8x4 vp8_short_fdct8x4_neon
+#define vp8_fdct_short8x4 vp9_short_fdct8x4_neon
 #undef vp8_fdct_fast4x4
-#define vp8_fdct_fast4x4 vp8_short_fdct4x4_neon
+#define vp8_fdct_fast4x4 vp9_short_fdct4x4_neon
 #undef vp8_fdct_fast8x4
-#define vp8_fdct_fast8x4 vp8_short_fdct8x4_neon
+#define vp8_fdct_fast8x4 vp9_short_fdct8x4_neon
 #undef vp8_fdct_walsh_short4x4
-#define vp8_fdct_walsh_short4x4 vp8_short_walsh4x4_neon
+#define vp8_fdct_walsh_short4x4 vp9_short_walsh4x4_neon
 #endif
 #endif

========== changed file ==========

@@ -13,50 +13,50 @@
 #define ENCODEMB_ARM_H
 #if HAVE_ARMV6
-extern prototype_subb(vp8_subtract_b_armv6);
-extern prototype_submby(vp8_subtract_mby_armv6);
-extern prototype_submbuv(vp8_subtract_mbuv_armv6);
+extern prototype_subb(vp9_subtract_b_armv6);
+extern prototype_submby(vp9_subtract_mby_armv6);
+extern prototype_submbuv(vp9_subtract_mbuv_armv6);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_armv6
+#define vp8_encodemb_subb vp9_subtract_b_armv6
 #undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_armv6
+#define vp8_encodemb_submby vp9_subtract_mby_armv6
 #undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_armv6
+#define vp8_encodemb_submbuv vp9_subtract_mbuv_armv6
 #endif
 #endif /* HAVE_ARMV6 */
 #if HAVE_ARMV7
-// extern prototype_berr(vp8_block_error_c);
-// extern prototype_mberr(vp8_mbblock_error_c);
-// extern prototype_mbuverr(vp8_mbuverror_c);
+// extern prototype_berr(vp9_block_error_c);
+// extern prototype_mberr(vp9_mbblock_error_c);
+// extern prototype_mbuverr(vp9_mbuverror_c);
-extern prototype_subb(vp8_subtract_b_neon);
-extern prototype_submby(vp8_subtract_mby_neon);
-extern prototype_submbuv(vp8_subtract_mbuv_neon);
+extern prototype_subb(vp9_subtract_b_neon);
+extern prototype_submby(vp9_subtract_mby_neon);
+extern prototype_submbuv(vp9_subtract_mbuv_neon);
 // #undef vp8_encodemb_berr
-// #define vp8_encodemb_berr vp8_block_error_c
+// #define vp8_encodemb_berr vp9_block_error_c
 // #undef vp8_encodemb_mberr
-// #define vp8_encodemb_mberr vp8_mbblock_error_c
+// #define vp8_encodemb_mberr vp9_mbblock_error_c
 // #undef vp8_encodemb_mbuverr
-// #define vp8_encodemb_mbuverr vp8_mbuverror_c
+// #define vp8_encodemb_mbuverr vp9_mbuverror_c
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_neon
+#define vp8_encodemb_subb vp9_subtract_b_neon
 #undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_neon
+#define vp8_encodemb_submby vp9_subtract_mby_neon
 #undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_neon
+#define vp8_encodemb_submbuv vp9_subtract_mbuv_neon
 #endif
 #endif
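Note the asymmetry this stage of the rename leaves behind: the generic vp8_encodemb_* macro names keep their prefix for now, while the ISA-specific implementations they bind to move to vp9_. Under static dispatch (!CONFIG_RUNTIME_CPU_DETECT) the indirection is purely textual, as in this small illustration (the call site itself is hypothetical; the macros are taken from the hunk above):

    #undef  vp8_encodemb_subb
    #define vp8_encodemb_subb vp9_subtract_b_neon

    /* A call written against the generic name... */
    vp8_encodemb_subb(be, bd, pitch);
    /* ...now compiles directly to vp9_subtract_b_neon(be, bd, pitch). */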

========== changed file ==========

@@ -17,7 +17,7 @@
 #if HAVE_ARMV6
-unsigned int vp8_sub_pixel_variance8x8_armv6
+unsigned int vp9_sub_pixel_variance8x8_armv6
 (
 const unsigned char *src_ptr,
 int src_pixels_per_line,
@@ -40,11 +40,11 @@ unsigned int vp8_sub_pixel_variance8x8_armv6
 vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
 8, 8, 8, VFilter);
-return vp8_variance8x8_armv6(second_pass, 8, dst_ptr,
+return vp9_variance8x8_armv6(second_pass, 8, dst_ptr,
 dst_pixels_per_line, sse);
 }
-unsigned int vp8_sub_pixel_variance16x16_armv6
+unsigned int vp9_sub_pixel_variance16x16_armv6
 (
 const unsigned char *src_ptr,
 int src_pixels_per_line,
@@ -60,13 +60,13 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
 unsigned int var;
 if (xoffset == HALFNDX && yoffset == 0) {
-var = vp8_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
+var = vp9_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
 dst_ptr, dst_pixels_per_line, sse);
 } else if (xoffset == 0 && yoffset == HALFNDX) {
-var = vp8_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
+var = vp9_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
 dst_ptr, dst_pixels_per_line, sse);
 } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
-var = vp8_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
+var = vp9_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
 dst_ptr, dst_pixels_per_line, sse);
 } else {
 HFilter = vp8_bilinear_filters[xoffset];
@@ -78,7 +78,7 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
 vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
 16, 16, 16, VFilter);
-var = vp8_variance16x16_armv6(second_pass, 16, dst_ptr,
+var = vp9_variance16x16_armv6(second_pass, 16, dst_ptr,
 dst_pixels_per_line, sse);
 }
 return var;
@@ -89,7 +89,7 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
 #if HAVE_ARMV7
-unsigned int vp8_sub_pixel_variance16x16_neon
+unsigned int vp9_sub_pixel_variance16x16_neon
 (
 const unsigned char *src_ptr,
 int src_pixels_per_line,
@@ -100,13 +100,13 @@ unsigned int vp8_sub_pixel_variance16x16_neon
 unsigned int *sse
 ) {
 if (xoffset == HALFNDX && yoffset == 0)
-return vp8_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+return vp9_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
 else if (xoffset == 0 && yoffset == HALFNDX)
-return vp8_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+return vp9_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
 else if (xoffset == HALFNDX && yoffset == HALFNDX)
-return vp8_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+return vp9_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
 else
-return vp8_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+return vp9_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
 }
 #endif

========== changed file ==========

@@ -14,44 +14,44 @@
 #if HAVE_ARMV6
-extern prototype_sad(vp8_sad16x16_armv6);
-extern prototype_variance(vp8_variance16x16_armv6);
-extern prototype_variance(vp8_variance8x8_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_armv6);
-extern prototype_variance(vp8_mse16x16_armv6);
+extern prototype_sad(vp9_sad16x16_armv6);
+extern prototype_variance(vp9_variance16x16_armv6);
+extern prototype_variance(vp9_variance8x8_armv6);
+extern prototype_subpixvariance(vp9_sub_pixel_variance16x16_armv6);
+extern prototype_subpixvariance(vp9_sub_pixel_variance8x8_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_h_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_v_armv6);
+extern prototype_variance(vp9_variance_halfpixvar16x16_hv_armv6);
+extern prototype_variance(vp9_mse16x16_armv6);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_armv6
+#define vp8_variance_sad16x16 vp9_sad16x16_armv6
 #undef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_armv6
+#define vp8_variance_subpixvar16x16 vp9_sub_pixel_variance16x16_armv6
 #undef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_armv6
+#define vp8_variance_subpixvar8x8 vp9_sub_pixel_variance8x8_armv6
 #undef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_armv6
+#define vp8_variance_var16x16 vp9_variance16x16_armv6
 #undef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_armv6
+#define vp8_variance_mse16x16 vp9_mse16x16_armv6
 #undef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_armv6
+#define vp8_variance_var8x8 vp9_variance8x8_armv6
-#undef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_armv6
+#undef vp9_variance_halfpixvar16x16_h
+#define vp9_variance_halfpixvar16x16_h vp9_variance_halfpixvar16x16_h_armv6
-#undef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_armv6
+#undef vp9_variance_halfpixvar16x16_v
+#define vp9_variance_halfpixvar16x16_v vp9_variance_halfpixvar16x16_v_armv6
-#undef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_armv6
+#undef vp9_variance_halfpixvar16x16_hv
+#define vp9_variance_halfpixvar16x16_hv vp9_variance_halfpixvar16x16_hv_armv6
 #endif /* !CONFIG_RUNTIME_CPU_DETECT */
@@ -59,91 +59,91 @@ extern prototype_variance(vp8_mse16x16_armv6);
 #if HAVE_ARMV7
-extern prototype_sad(vp8_sad4x4_neon);
-extern prototype_sad(vp8_sad8x8_neon);
-extern prototype_sad(vp8_sad8x16_neon);
-extern prototype_sad(vp8_sad16x8_neon);
-extern prototype_sad(vp8_sad16x16_neon);
+extern prototype_sad(vp9_sad4x4_neon);
+extern prototype_sad(vp9_sad8x8_neon);
+extern prototype_sad(vp9_sad8x16_neon);
+extern prototype_sad(vp9_sad16x8_neon);
+extern prototype_sad(vp9_sad16x16_neon);
-// extern prototype_variance(vp8_variance4x4_c);
-extern prototype_variance(vp8_variance8x8_neon);
-extern prototype_variance(vp8_variance8x16_neon);
-extern prototype_variance(vp8_variance16x8_neon);
-extern prototype_variance(vp8_variance16x16_neon);
+// extern prototype_variance(vp9_variance4x4_c);
+extern prototype_variance(vp9_variance8x8_neon);
+extern prototype_variance(vp9_variance8x16_neon);
+extern prototype_variance(vp9_variance16x8_neon);
+extern prototype_variance(vp9_variance16x16_neon);
-// extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_c);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_neon);
-// extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
-// extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon_func);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_neon);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_neon);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
+// extern prototype_subpixvariance(vp9_sub_pixel_variance4x4_c);
+extern prototype_subpixvariance(vp9_sub_pixel_variance8x8_neon);
+// extern prototype_subpixvariance(vp9_sub_pixel_variance8x16_c);
+// extern prototype_subpixvariance(vp9_sub_pixel_variance16x8_c);
+extern prototype_subpixvariance(vp9_sub_pixel_variance16x16_neon);
+extern prototype_subpixvariance(vp9_sub_pixel_variance16x16_neon_func);
+extern prototype_variance(vp9_variance_halfpixvar16x16_h_neon);
+extern prototype_variance(vp9_variance_halfpixvar16x16_v_neon);
+extern prototype_variance(vp9_variance_halfpixvar16x16_hv_neon);
-// extern prototype_getmbss(vp8_get_mb_ss_c);
-extern prototype_variance(vp8_mse16x16_neon);
+// extern prototype_getmbss(vp9_get_mb_ss_c);
+extern prototype_variance(vp9_mse16x16_neon);
 #if !CONFIG_RUNTIME_CPU_DETECT
 #undef vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_neon
+#define vp8_variance_sad4x4 vp9_sad4x4_neon
 #undef vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_neon
+#define vp8_variance_sad8x8 vp9_sad8x8_neon
 #undef vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_neon
+#define vp8_variance_sad8x16 vp9_sad8x16_neon
 #undef vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_neon
+#define vp8_variance_sad16x8 vp9_sad16x8_neon
 #undef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_neon
+#define vp8_variance_sad16x16 vp9_sad16x16_neon
 // #undef vp8_variance_var4x4
-// #define vp8_variance_var4x4 vp8_variance4x4_c
+// #define vp8_variance_var4x4 vp9_variance4x4_c
 #undef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_neon
+#define vp8_variance_var8x8 vp9_variance8x8_neon
 #undef vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_neon
+#define vp8_variance_var8x16 vp9_variance8x16_neon
 #undef vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_neon
+#define vp8_variance_var16x8 vp9_variance16x8_neon
 #undef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_neon
+#define vp8_variance_var16x16 vp9_variance16x16_neon
 // #undef vp8_variance_subpixvar4x4
-// #define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
+// #define vp8_variance_subpixvar4x4 vp9_sub_pixel_variance4x4_c
 #undef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_neon
+#define vp8_variance_subpixvar8x8 vp9_sub_pixel_variance8x8_neon
 // #undef vp8_variance_subpixvar8x16
-// #define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
+// #define vp8_variance_subpixvar8x16 vp9_sub_pixel_variance8x16_c
 // #undef vp8_variance_subpixvar16x8
-// #define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
+// #define vp8_variance_subpixvar16x8 vp9_sub_pixel_variance16x8_c
 #undef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_neon
+#define vp8_variance_subpixvar16x16 vp9_sub_pixel_variance16x16_neon
-#undef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_neon
+#undef vp9_variance_halfpixvar16x16_h
+#define vp9_variance_halfpixvar16x16_h vp9_variance_halfpixvar16x16_h_neon
-#undef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_neon
+#undef vp9_variance_halfpixvar16x16_v
+#define vp9_variance_halfpixvar16x16_v vp9_variance_halfpixvar16x16_v_neon
-#undef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_neon
+#undef vp9_variance_halfpixvar16x16_hv
+#define vp9_variance_halfpixvar16x16_hv vp9_variance_halfpixvar16x16_hv_neon
 // #undef vp8_variance_getmbss
-// #define vp8_variance_getmbss vp8_get_mb_ss_c
+// #define vp8_variance_getmbss vp9_get_mb_ss_c
 #undef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_neon
+#define vp8_variance_mse16x16 vp9_mse16x16_neon
 #endif

========== changed file ==========

@@ -83,7 +83,7 @@ static int update_bits[255];
 static void compute_update_table() {
 int i;
 for (i = 0; i < 255; i++)
-update_bits[i] = vp8_count_term_subexp(i, SUBEXP_PARAM, 255);
+update_bits[i] = vp9_count_term_subexp(i, SUBEXP_PARAM, 255);
 }
 static int split_index(int i, int n, int modulus) {
@@ -109,7 +109,7 @@ static int remap_prob(int v, int m) {
 static void write_prob_diff_update(vp8_writer *const bc,
 vp8_prob newp, vp8_prob oldp) {
 int delp = remap_prob(newp, oldp);
-vp8_encode_term_subexp(bc, delp, SUBEXP_PARAM, 255);
+vp9_encode_term_subexp(bc, delp, SUBEXP_PARAM, 255);
 }
 static int prob_diff_update_cost(vp8_prob newp, vp8_prob oldp) {
@@ -618,12 +618,12 @@ static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
 e.row = mv->row - ref->as_mv.row;
 e.col = mv->col - ref->as_mv.col;
-vp8_encode_nmv(bc, &e, &ref->as_mv, nmvc);
-vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
+vp9_encode_nmv(bc, &e, &ref->as_mv, nmvc);
+vp9_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
 }
 #if CONFIG_NEW_MVREF
-static int vp8_cost_mv_ref_id(vp8_prob * ref_id_probs, int mv_ref_id) {
+static int vp9_cost_mv_ref_id(vp8_prob * ref_id_probs, int mv_ref_id) {
 int cost;
 // Encode the index for the MV reference.
@@ -698,8 +698,8 @@ static unsigned int pick_best_mv_ref(MACROBLOCK *x,
 MACROBLOCKD *xd = &x->e_mbd;
 int max_mv = MV_MAX;
-cost = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
-vp8_mv_bit_cost(&target_mv,
+cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
+vp9_mv_bit_cost(&target_mv,
 &mv_ref_list[0],
 XMVCOST, 96,
 xd->allow_high_precision_mv);
@@ -722,8 +722,8 @@ static unsigned int pick_best_mv_ref(MACROBLOCK *x,
 continue;
 }
-cost2 = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
-vp8_mv_bit_cost(&target_mv,
+cost2 = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
+vp9_mv_bit_cost(&target_mv,
 &mv_ref_list[i],
 XMVCOST, 96,
 xd->allow_high_precision_mv);
@@ -1820,13 +1820,13 @@ static void decide_kf_ymode_entropy(VP8_COMP *cpi) {
 int i, j;
 for (i = 0; i < 8; i++) {
-vp8_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
+vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
 cost = 0;
 for (j = 0; j < VP8_YMODES; j++) {
 cost += mode_cost[j] * cpi->ymode_count[j];
 }
 #if CONFIG_SUPERBLOCKS
-vp8_cost_tokens(mode_cost, cpi->common.sb_kf_ymode_prob[i],
+vp9_cost_tokens(mode_cost, cpi->common.sb_kf_ymode_prob[i],
 vp8_sb_ymode_tree);
 for (j = 0; j < VP8_I32X32_MODES; j++) {
 cost += mode_cost[j] * cpi->sb_ymode_count[j];
@@ -1860,7 +1860,7 @@ static void segment_reference_frames(VP8_COMP *cpi) {
 }
 }
-void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) {
+void vp9_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) {
 int i, j;
 VP8_HEADER oh;
 VP8_COMMON *const pc = &cpi->common;
@@ -1883,7 +1883,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 compute_update_table();
-// vp8_kf_default_bmode_probs() is called in vp8_setup_key_frame() once for each
+// vp8_kf_default_bmode_probs() is called in vp9_setup_key_frame() once for each
 // K frame before encode frame. pc->kf_bmode_prob doesn't get changed anywhere
 // else. No need to call it again here. --yw
 // vp8_kf_default_bmode_probs( pc->kf_bmode_prob);
@@ -1908,14 +1908,14 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 extra_bytes_packed = 7;
 cx_data += extra_bytes_packed;
-vp8_start_encode(&header_bc, cx_data);
+vp9_start_encode(&header_bc, cx_data);
 // signal clr type
 vp8_write_bit(&header_bc, pc->clr_type);
 vp8_write_bit(&header_bc, pc->clamp_type);
 } else {
-vp8_start_encode(&header_bc, cx_data);
+vp9_start_encode(&header_bc, cx_data);
 }
 // Signal whether or not Segmentation is enabled
@@ -2272,10 +2272,10 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
 vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
 #endif
-vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
+vp9_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
 }
-vp8_stop_encode(&header_bc);
+vp9_stop_encode(&header_bc);
 oh.first_partition_length_in_bytes = header_bc.pos;
@@ -2292,7 +2292,7 @@
}
*size = VP8_HEADER_SIZE + extra_bytes_packed + header_bc.pos;
vp8_start_encode(&residual_bc, cx_data + header_bc.pos);
vp9_start_encode(&residual_bc, cx_data + header_bc.pos);
if (pc->frame_type == KEY_FRAME) {
decide_kf_ymode_entropy(cpi);
@ -2303,7 +2303,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
vp8_stop_encode(&residual_bc);
vp9_stop_encode(&residual_bc);
*size += residual_bc.pos;
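
vp9_pack_bitstream above writes two arithmetic-coded partitions back to back: the header partition is closed with vp9_stop_encode, and the residual partition starts at cx_data + header_bc.pos. A usage fragment sketching that pattern against the boolhuff API declared later in this commit — not a runnable program, and the encoded fields are placeholders:

BOOL_CODER header_bc, residual_bc;
unsigned char cx_data[4096];
unsigned long size;

vp9_start_encode(&header_bc, cx_data);
vp9_encode_value(&header_bc, 1, 1);          /* placeholder header field */
vp9_stop_encode(&header_bc);                 /* header_bc.pos = bytes written */

vp9_start_encode(&residual_bc, cx_data + header_bc.pos);
vp9_encode_value(&residual_bc, 0x2a, 6);     /* placeholder residual payload */
vp9_stop_encode(&residual_bc);

size = header_bc.pos + residual_bc.pos;      /* plus any uncompressed header bytes */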

View File

@ -166,13 +166,13 @@ typedef struct macroblock {
PICK_MODE_CONTEXT sb_context[4];
#endif
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
void (*vp9_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp9_short_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);
void (*quantize_b_4x4)(BLOCK *b, BLOCKD *d);
void (*quantize_b_4x4_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
void (*vp8_short_fdct16x16)(short *input, short *output, int pitch);
void (*vp9_short_fdct8x8)(short *input, short *output, int pitch);
void (*vp9_short_fdct16x16)(short *input, short *output, int pitch);
void (*short_fhaar2x2)(short *input, short *output, int pitch);
void (*quantize_b_16x16)(BLOCK *b, BLOCKD *d);
void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
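
The renamed members above are per-macroblock function pointers, bound once at init time and invoked indirectly (x->vp9_short_fdct4x4(...)). A minimal sketch of that pattern with a hypothetical struct and stub transform:

#include <stdio.h>

typedef struct {
  void (*short_fdct4x4)(short *input, short *output, int pitch);
} toy_macroblock;

static void fdct4x4_stub(short *input, short *output, int pitch) {
  (void)pitch;
  output[0] = input[0];  /* placeholder: real code does the full 4x4 DCT */
}

int main(void) {
  toy_macroblock x;
  short in[16] = {7}, out[16];
  x.short_fdct4x4 = fdct4x4_stub;  /* bound once, e.g. per platform or config */
  x.short_fdct4x4(in, out, 32);    /* called indirectly, as the encoder does */
  printf("%d\n", out[0]);
  return 0;
}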

View File

@ -39,7 +39,7 @@ const unsigned int vp8_prob_cost[256] = {
22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1
};
void vp8_start_encode(BOOL_CODER *br, unsigned char *source) {
void vp9_start_encode(BOOL_CODER *br, unsigned char *source) {
br->lowvalue = 0;
br->range = 255;
@ -49,7 +49,7 @@ void vp8_start_encode(BOOL_CODER *br, unsigned char *source) {
br->pos = 0;
}
void vp8_stop_encode(BOOL_CODER *br) {
void vp9_stop_encode(BOOL_CODER *br) {
int i;
for (i = 0; i < 32; i++)
@ -57,7 +57,7 @@ void vp8_stop_encode(BOOL_CODER *br) {
}
void vp8_encode_value(BOOL_CODER *br, int data, int bits) {
void vp9_encode_value(BOOL_CODER *br, int data, int bits) {
int bit;
for (bit = bits - 1; bit >= 0; bit--)
@ -81,20 +81,20 @@ static int get_unsigned_bits(unsigned num_values) {
return cat;
}
void vp8_encode_uniform(BOOL_CODER *br, int v, int n) {
void vp9_encode_uniform(BOOL_CODER *br, int v, int n) {
int l = get_unsigned_bits(n);
int m;
if (l == 0) return;
m = (1 << l) - n;
if (v < m)
vp8_encode_value(br, v, l - 1);
vp9_encode_value(br, v, l - 1);
else {
vp8_encode_value(br, m + ((v - m) >> 1), l - 1);
vp8_encode_value(br, (v - m) & 1, 1);
vp9_encode_value(br, m + ((v - m) >> 1), l - 1);
vp9_encode_value(br, (v - m) & 1, 1);
}
}
int vp8_count_uniform(int v, int n) {
int vp9_count_uniform(int v, int n) {
int l = get_unsigned_bits(n);
int m;
if (l == 0) return 0;
@ -105,30 +105,30 @@ int vp8_count_uniform(int v, int n) {
return l;
}
void vp8_encode_term_subexp(BOOL_CODER *br, int word, int k, int num_syms) {
void vp9_encode_term_subexp(BOOL_CODER *br, int word, int k, int num_syms) {
int i = 0;
int mk = 0;
while (1) {
int b = (i ? k + i - 1 : k);
int a = (1 << b);
if (num_syms <= mk + 3 * a) {
vp8_encode_uniform(br, word - mk, num_syms - mk);
vp9_encode_uniform(br, word - mk, num_syms - mk);
break;
} else {
int t = (word >= mk + a);
vp8_encode_value(br, t, 1);
vp9_encode_value(br, t, 1);
if (t) {
i = i + 1;
mk += a;
} else {
vp8_encode_value(br, word - mk, b);
vp9_encode_value(br, word - mk, b);
break;
}
}
}
}
int vp8_count_term_subexp(int word, int k, int num_syms) {
int vp9_count_term_subexp(int word, int k, int num_syms) {
int count = 0;
int i = 0;
int mk = 0;
@ -136,7 +136,7 @@ int vp8_count_term_subexp(int word, int k, int num_syms) {
int b = (i ? k + i - 1 : k);
int a = (1 << b);
if (num_syms <= mk + 3 * a) {
count += vp8_count_uniform(word - mk, num_syms - mk);
count += vp9_count_uniform(word - mk, num_syms - mk);
break;
} else {
int t = (word >= mk + a);
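
The terminated subexponential code above pairs each encoder with a counter; vp9_count_term_subexp returns the exact bit cost without writing anything. A stand-alone sketch of the counting path — the get_unsigned_bits body and the per-branch bit charges are reconstructed from the surrounding encode logic, and k = 4 stands in for SUBEXP_PARAM:

#include <stdio.h>

static int get_unsigned_bits(unsigned num_values) {
  int cat = 0;
  if (num_values <= 1) return 0;
  num_values--;
  while (num_values > 0) {
    cat++;
    num_values >>= 1;
  }
  return cat;
}

static int count_uniform(int v, int n) {
  int l = get_unsigned_bits(n);
  if (l == 0) return 0;
  return (v < (1 << l) - n) ? l - 1 : l;  /* short codes for small v */
}

static int count_term_subexp(int word, int k, int num_syms) {
  int count = 0, i = 0, mk = 0;
  while (1) {
    int b = (i ? k + i - 1 : k);
    int a = (1 << b);
    if (num_syms <= mk + 3 * a) {
      count += count_uniform(word - mk, num_syms - mk);
      break;
    } else {
      count++;                  /* one continuation bit */
      if (word >= mk + a) {
        i++;
        mk += a;
      } else {
        count += b;             /* b literal bits for the remainder */
        break;
      }
    }
  }
  return count;
}

int main(void) {
  int w;
  for (w = 0; w < 255; w += 50)  /* code lengths grow with the coded value */
    printf("word %3d -> %d bits\n", w, count_term_subexp(w, 4, 255));
  return 0;
}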

View File

@ -34,16 +34,16 @@ typedef struct {
unsigned long bit_counter;
} BOOL_CODER;
extern void vp8_start_encode(BOOL_CODER *bc, unsigned char *buffer);
extern void vp9_start_encode(BOOL_CODER *bc, unsigned char *buffer);
extern void vp8_encode_value(BOOL_CODER *br, int data, int bits);
extern void vp8_stop_encode(BOOL_CODER *bc);
extern void vp9_encode_value(BOOL_CODER *br, int data, int bits);
extern void vp9_stop_encode(BOOL_CODER *bc);
extern const unsigned int vp8_prob_cost[256];
extern void vp8_encode_uniform(BOOL_CODER *bc, int v, int n);
extern void vp8_encode_term_subexp(BOOL_CODER *bc, int v, int k, int n);
extern int vp8_count_uniform(int v, int n);
extern int vp8_count_term_subexp(int v, int k, int n);
extern void vp9_encode_uniform(BOOL_CODER *bc, int v, int n);
extern void vp9_encode_term_subexp(BOOL_CODER *bc, int v, int k, int n);
extern int vp9_count_uniform(int v, int n);
extern int vp9_count_term_subexp(int v, int k, int n);
extern int vp9_recenter_nonneg(int v, int m);
DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
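
vp8_prob_cost above maps an 8-bit probability to an entropy cost in 1/256-bit units, which is what vp8_cost_zero/vp8_cost_one and the mode-cost loops consume. A sketch of the relationship — the formula here is an assumption about the table's derivation; the in-tree values are precomputed and rounded slightly differently:

#include <math.h>
#include <stdio.h>

/* Hypothetical derivation of a vp8_prob_cost-style entry; link with -lm. */
static int prob_cost(int prob) {  /* prob in 1..255 */
  return (int)(0.5 - 256.0 * log2(prob / 256.0));
}

int main(void) {
  printf("p=128 -> %d (one bit = 256 units)\n", prob_cost(128));
  printf("p=192 -> %d, p=64 -> %d\n", prob_cost(192), prob_cost(64));
  return 0;
}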

View File

@ -281,7 +281,7 @@ static const int xC7S1 = 3196;
#define IN_SHIFT (FINAL_SHIFT+1)
void vp8_short_fdct8x8_c(short *InputData, short *OutputData, int pitch) {
void vp9_short_fdct8x8_c(short *InputData, short *OutputData, int pitch) {
int loop;
int short_pitch = pitch >> 1;
int is07, is12, is34, is56;
@ -503,7 +503,7 @@ void vp8_short_fdct8x8_c(short *InputData, short *OutputData, int pitch) {
}
}
void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) { // pitch = 8
void vp9_short_fhaar2x2_c(short *input, short *output, int pitch) { // pitch = 8
/* [1 1; 1 -1] orthogonal transform */
/* use position: 0,1, 4, 8 */
int i;
@ -523,12 +523,12 @@ void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) { // pitch = 8
/* For test */
#define TEST_INT 1
#if TEST_INT
#define vp8_fht_int_c vp8_fht_c
#define vp9_fht_int_c vp9_fht_c
#else
#define vp8_fht_float_c vp8_fht_c
#define vp9_fht_float_c vp9_fht_c
#endif
void vp8_fht_float_c(const int16_t *input, int pitch, int16_t *output,
void vp9_fht_float_c(const int16_t *input, int pitch, int16_t *output,
TX_TYPE tx_type, int tx_dim) {
vp8_clear_system_state(); // Make it simd safe : __asm emms;
{
@ -650,7 +650,7 @@ void vp8_fht_float_c(const int16_t *input, int pitch, int16_t *output,
#define VERTICAL_ROUNDING ((1 << (VERTICAL_SHIFT - 1)) - 1)
#define HORIZONTAL_SHIFT 16
#define HORIZONTAL_ROUNDING ((1 << (HORIZONTAL_SHIFT - 1)) - 1)
void vp8_fht_int_c(const int16_t *input, int pitch, int16_t *output,
void vp9_fht_int_c(const int16_t *input, int pitch, int16_t *output,
TX_TYPE tx_type, int tx_dim) {
int i, j, k;
int16_t imbuf[256];
@ -728,7 +728,7 @@ void vp8_fht_int_c(const int16_t *input, int pitch, int16_t *output,
}
}
void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
void vp9_short_fdct4x4_c(short *input, short *output, int pitch) {
int i;
int a1, b1, c1, d1;
short *ip = input;
@ -769,13 +769,13 @@ void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
}
}
void vp8_short_fdct8x4_c(short *input, short *output, int pitch)
void vp9_short_fdct8x4_c(short *input, short *output, int pitch)
{
vp8_short_fdct4x4_c(input, output, pitch);
vp8_short_fdct4x4_c(input + 4, output + 16, pitch);
vp9_short_fdct4x4_c(input, output, pitch);
vp9_short_fdct4x4_c(input + 4, output + 16, pitch);
}
void vp8_short_walsh4x4_c(short *input, short *output, int pitch) {
void vp9_short_walsh4x4_c(short *input, short *output, int pitch) {
int i;
int a1, b1, c1, d1;
short *ip = input;
@ -816,7 +816,7 @@ void vp8_short_walsh4x4_c(short *input, short *output, int pitch) {
}
#if CONFIG_LOSSLESS
void vp8_short_walsh4x4_lossless_c(short *input, short *output, int pitch) {
void vp9_short_walsh4x4_lossless_c(short *input, short *output, int pitch) {
int i;
int a1, b1, c1, d1;
short *ip = input;
@ -856,7 +856,7 @@ void vp8_short_walsh4x4_lossless_c(short *input, short *output, int pitch) {
}
}
void vp8_short_walsh4x4_x8_c(short *input, short *output, int pitch) {
void vp9_short_walsh4x4_x8_c(short *input, short *output, int pitch) {
int i;
int a1, b1, c1, d1;
short *ip = input;
@ -896,9 +896,9 @@ void vp8_short_walsh4x4_x8_c(short *input, short *output, int pitch) {
}
}
void vp8_short_walsh8x4_x8_c(short *input, short *output, int pitch) {
vp8_short_walsh4x4_x8_c(input, output, pitch);
vp8_short_walsh4x4_x8_c(input + 4, output + 16, pitch);
void vp9_short_walsh8x4_x8_c(short *input, short *output, int pitch) {
vp9_short_walsh4x4_x8_c(input, output, pitch);
vp9_short_walsh4x4_x8_c(input + 4, output + 16, pitch);
}
#endif
@ -1078,7 +1078,7 @@ static void dct16x16_1d(double input[16], double output[16]) {
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
void vp8_short_fdct16x16_c(short *input, short *out, int pitch) {
void vp9_short_fdct16x16_c(short *input, short *out, int pitch) {
vp8_clear_system_state(); // Make it simd safe : __asm emms;
{
int shortpitch = pitch >> 1;
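
vp9_short_fhaar2x2_c above applies the [1 1; 1 -1] butterfly to the four second-order DC values. A toy 2x2 Hadamard on a packed four-element array — a sketch of the arithmetic only, ignoring the in-tree input positions (0, 1, 4, 8) and scaling:

#include <stdio.h>

/* 2x2 Hadamard: rows of [1 1; 1 -1] applied in both directions. */
static void haar2x2(const short in[4], short out[4]) {
  short a = in[0], b = in[1], c = in[2], d = in[3];
  out[0] = a + b + c + d;   /* DC */
  out[1] = a - b + c - d;   /* horizontal detail */
  out[2] = a + b - c - d;   /* vertical detail */
  out[3] = a - b - c + d;   /* diagonal detail */
}

int main(void) {
  short in[4] = {10, 12, 9, 11}, out[4];
  haar2x2(in, out);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 42 -4 2 0 */
  return 0;
}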

View File

@ -54,29 +54,29 @@ int enc_debug = 0;
int mb_row_debug, mb_col_debug;
#endif
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp9cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp9_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
MACROBLOCK *x,
MB_ROW_COMP *mbr_ei,
int mb_row,
int count);
int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
int64_t vp9_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
int recon_yoffset, int recon_uvoffset,
int *returnrate, int *returndistortion);
extern void vp8cx_pick_mode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
extern void vp9cx_pick_mode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
int recon_yoffset,
int recon_uvoffset, int *r, int *d);
void vp8_build_block_offsets(MACROBLOCK *x);
void vp8_setup_block_ptrs(MACROBLOCK *x);
void vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
void vp9_build_block_offsets(MACROBLOCK *x);
void vp9_setup_block_ptrs(MACROBLOCK *x);
void vp9cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled);
void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
void vp9cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset, int mb_col, int mb_row);
void vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x,
void vp9cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int output_enabled);
void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
void vp9cx_encode_intra_super_block(VP8_COMP *cpi,
MACROBLOCK *x,
TOKENEXTRA **t, int mb_col);
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
@ -96,7 +96,7 @@ unsigned int b_modes[B_MODE_COUNT];
/* activity_avg must be positive, or flat regions could get a zero weight
* (infinite lambda), which confounds analysis.
* This also avoids the need for divide by zero checks in
* vp8_activity_masking().
* vp9_activity_masking().
*/
#define VP8_ACTIVITY_AVG_MIN (64)
@ -121,7 +121,7 @@ static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) {
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
act = vp8_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
act = vp9_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
&sse);
act = act << 4;
@ -135,7 +135,7 @@ static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) {
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(VP8_COMP *cpi,
MACROBLOCK *x, int use_dc_pred) {
return vp8_encode_intra(cpi, x, use_dc_pred);
return vp9_encode_intra(cpi, x, use_dc_pred);
}
@ -347,7 +347,7 @@ static void build_activity_map(VP8_COMP *cpi) {
}
// Macroblock activity masking
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
void vp9_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
@ -593,7 +593,7 @@ static void pick_mb_modes(VP8_COMP *cpi,
x->rdmult = cpi->RDMULT;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
vp9_activity_masking(cpi, x);
// Is segmentation enabled
if (xd->segmentation_enabled) {
@ -605,7 +605,7 @@ static void pick_mb_modes(VP8_COMP *cpi,
if (mbmi->segment_id > 3)
mbmi->segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
vp9cx_mb_init_quantizer(cpi, x);
} else
// Set to Segment 0 by default
mbmi->segment_id = 0;
@ -624,12 +624,12 @@ static void pick_mb_modes(VP8_COMP *cpi,
// as a predictor for MBs that follow in the SB
if (cm->frame_type == KEY_FRAME) {
int r, d;
vp8_rd_pick_intra_mode(cpi, x, &r, &d);
vp9_rd_pick_intra_mode(cpi, x, &r, &d);
*totalrate += r;
*totaldist += d;
// Dummy encode, do not do the tokenization
vp8cx_encode_intra_macro_block(cpi, x, tp, 0);
vp9cx_encode_intra_macro_block(cpi, x, tp, 0);
// Note the encoder may have changed the segment_id
// Save the coding context
@ -650,13 +650,13 @@ static void pick_mb_modes(VP8_COMP *cpi,
cpi->seg0_progress = (((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols + i) << 16) / cm->MBs;
}
vp8cx_pick_mode_inter_macroblock(cpi, x, recon_yoffset,
vp9cx_pick_mode_inter_macroblock(cpi, x, recon_yoffset,
recon_uvoffset, &r, &d);
*totalrate += r;
*totaldist += d;
// Dummy encode, do not do the tokenization
vp8cx_encode_inter_macroblock(cpi, x, tp,
vp9cx_encode_inter_macroblock(cpi, x, tp,
recon_yoffset, recon_uvoffset, 0);
seg_id = mbmi->segment_id;
@ -777,7 +777,7 @@ static void pick_sb_modes (VP8_COMP *cpi,
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
vp9_activity_masking(cpi, x);
/* Is segmentation enabled */
if (xd->segmentation_enabled)
{
@ -797,7 +797,7 @@ static void pick_sb_modes (VP8_COMP *cpi,
if (xd->mode_info_context->mbmi.segment_id > 3)
xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
vp9cx_mb_init_quantizer(cpi, x);
}
else
/* Set to Segment 0 by default */
@ -811,7 +811,7 @@ static void pick_sb_modes (VP8_COMP *cpi,
* as a predictor for MBs that follow in the SB */
if (cm->frame_type == KEY_FRAME)
{
vp8_rd_pick_intra_mode_sb(cpi, x,
vp9_rd_pick_intra_mode_sb(cpi, x,
totalrate,
totaldist);
@ -832,7 +832,7 @@ static void pick_sb_modes (VP8_COMP *cpi,
(((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols) << 16) / cm->MBs;
}
vp8_rd_pick_inter_mode_sb(cpi, x,
vp9_rd_pick_inter_mode_sb(cpi, x,
recon_yoffset,
recon_uvoffset,
totalrate,
@ -968,11 +968,11 @@ static void encode_sb(VP8_COMP *cpi,
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
vp9_activity_masking(cpi, x);
// Is segmentation enabled
if (xd->segmentation_enabled) {
vp8cx_mb_init_quantizer(cpi, x);
vp9cx_mb_init_quantizer(cpi, x);
}
x->active_ptr = cpi->active_map + map_index;
@ -987,10 +987,10 @@ static void encode_sb(VP8_COMP *cpi,
if (cm->frame_type == KEY_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb)
vp8cx_encode_intra_super_block(cpi, x, tp, mb_col);
vp9cx_encode_intra_super_block(cpi, x, tp, mb_col);
else
#endif
vp8cx_encode_intra_macro_block(cpi, x, tp, 1);
vp9cx_encode_intra_macro_block(cpi, x, tp, 1);
// Note the encoder may have changed the segment_id
#ifdef MODE_STATS
@ -1013,10 +1013,10 @@ static void encode_sb(VP8_COMP *cpi,
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb)
vp8cx_encode_inter_superblock(cpi, x, tp, recon_yoffset, recon_uvoffset, mb_col, mb_row);
vp9cx_encode_inter_superblock(cpi, x, tp, recon_yoffset, recon_uvoffset, mb_col, mb_row);
else
#endif
vp8cx_encode_inter_macroblock(cpi, x, tp,
vp9cx_encode_inter_macroblock(cpi, x, tp,
recon_yoffset, recon_uvoffset, 1);
// Note the encoder may have changed the segment_id
@ -1261,11 +1261,11 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi) {
// set up frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp8_build_block_offsets(x);
vp9_build_block_offsets(x);
vp8_setup_block_dptrs(&x->e_mbd);
vp8_setup_block_ptrs(x);
vp9_setup_block_ptrs(x);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
@ -1360,10 +1360,10 @@ static void encode_frame_internal(VP8_COMP *cpi) {
vp8_zero(cpi->coef_counts_16x16);
vp8_zero(cpi->hybrid_coef_counts_16x16);
vp8cx_frame_init_quantizer(cpi);
vp9cx_frame_init_quantizer(cpi);
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp9cx_initialize_me_consts(cpi, cm->base_qindex);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Initialize encode frame context.
@ -1487,7 +1487,7 @@ static void reset_skip_txfm_size(VP8_COMP *cpi, TX_SIZE txfm_max) {
}
}
void vp8_encode_frame(VP8_COMP *cpi) {
void vp9_encode_frame(VP8_COMP *cpi) {
if (cpi->sf.RD) {
int i, frame_type, pred_type;
TXFM_MODE txfm_type;
@ -1640,7 +1640,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
}
void vp8_setup_block_ptrs(MACROBLOCK *x) {
void vp9_setup_block_ptrs(MACROBLOCK *x) {
int r, c;
int i;
@ -1671,7 +1671,7 @@ void vp8_setup_block_ptrs(MACROBLOCK *x) {
}
}
void vp8_build_block_offsets(MACROBLOCK *x) {
void vp9_build_block_offsets(MACROBLOCK *x) {
int block = 0;
int br, bc;
@ -1836,7 +1836,7 @@ static void update_sb_skip_coeff_state(VP8_COMP *cpi,
if (skip[n]) {
x->e_mbd.above_context = &ta[n];
x->e_mbd.left_context = &tl[n];
vp8_stuff_mb(cpi, &x->e_mbd, tp, 0);
vp9_stuff_mb(cpi, &x->e_mbd, tp, 0);
} else {
if (n_tokens[n]) {
memcpy(*tp, tokens[n], sizeof(*t[0]) * n_tokens[n]);
@ -1846,7 +1846,7 @@ static void update_sb_skip_coeff_state(VP8_COMP *cpi,
}
}
void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
void vp9cx_encode_intra_super_block(VP8_COMP *cpi,
MACROBLOCK *x,
TOKENEXTRA **t,
int mb_col) {
@ -1870,7 +1870,7 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
adjust_act_zbin(cpi, x);
vp8_update_zbin_extra(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
vp8_build_intra_predictors_sby_s(&x->e_mbd);
@ -1883,23 +1883,23 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
xd->above_context = cm->above_context + mb_col + (n & 1);
xd->left_context = cm->left_context + (n >> 1);
vp8_subtract_mby_s_c(x->src_diff,
vp9_subtract_mby_s_c(x->src_diff,
src + x_idx * 16 + y_idx * 16 * src_y_stride,
src_y_stride,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
dst_y_stride);
vp8_subtract_mbuv_s_c(x->src_diff,
vp9_subtract_mbuv_s_c(x->src_diff,
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
src_uv_stride,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
vp8_transform_mb_8x8(x);
vp8_quantize_mb_8x8(x);
vp9_transform_mb_8x8(x);
vp9_quantize_mb_8x8(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
vp9_optimize_mby_8x8(x, rtcd);
vp9_optimize_mbuv_8x8(x, rtcd);
}
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c(&x->e_mbd, dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
@ -1912,7 +1912,7 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
tp[n] = *t;
xd->mode_info_context = mi + x_idx + y_idx * cm->mode_info_stride;
vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
vp9_tokenize_mb(cpi, &x->e_mbd, t, 0);
skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
}
}
@ -1926,26 +1926,26 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
}
#endif /* CONFIG_SUPERBLOCKS */
void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
void vp9cx_encode_intra_macro_block(VP8_COMP *cpi,
MACROBLOCK *x,
TOKENEXTRA **t,
int output_enabled) {
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
adjust_act_zbin(cpi, x);
vp8_update_zbin_extra(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
if (mbmi->mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
} else if (mbmi->mode == B_PRED) {
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
} else {
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
if (mbmi->mode != I8X8_PRED) {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
}
if (output_enabled) {
@ -1953,7 +1953,7 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
// Tokenize
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
vp9_tokenize_mb(cpi, &x->e_mbd, t, 0);
if (cpi->common.txfm_mode == TX_MODE_SELECT &&
!((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
@ -1975,16 +1975,16 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
}
#if CONFIG_NEWBESTREFMV
else
vp8_tokenize_mb(cpi, &x->e_mbd, t, 1);
vp9_tokenize_mb(cpi, &x->e_mbd, t, 1);
#endif
}
#ifdef SPEEDSTATS
extern int cnt_pm;
#endif
extern void vp8_fix_contexts(MACROBLOCKD *xd);
extern void vp9_fix_contexts(MACROBLOCKD *xd);
void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
void vp9cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int recon_yoffset,
int recon_uvoffset, int output_enabled) {
VP8_COMMON *cm = &cpi->common;
@ -2023,7 +2023,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
}
}
vp8_update_zbin_extra(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
seg_ref_active = vp9_segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);
@ -2036,14 +2036,14 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
if (mbmi->ref_frame == INTRA_FRAME) {
if (mbmi->mode == B_PRED) {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
} else if (mbmi->mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
} else {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
if (output_enabled)
@ -2081,7 +2081,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
}
if (!x->skip) {
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
vp9_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
@ -2114,7 +2114,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
}
#endif
vp8_tokenize_mb(cpi, xd, t, !output_enabled);
vp9_tokenize_mb(cpi, xd, t, !output_enabled);
#ifdef ENC_DEBUG
if (enc_debug) {
@ -2132,9 +2132,9 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
mbmi->mb_skip_coeff = 1;
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp8_fix_contexts(xd);
vp9_fix_contexts(xd);
} else {
vp8_stuff_mb(cpi, xd, t, !output_enabled);
vp9_stuff_mb(cpi, xd, t, !output_enabled);
mbmi->mb_skip_coeff = 0;
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;
@ -2170,7 +2170,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
}
#if CONFIG_SUPERBLOCKS
void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
void vp9cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset, int mb_col, int mb_row) {
const int output_enabled = 1;
VP8_COMMON *cm = &cpi->common;
@ -2218,7 +2218,7 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
}
}
vp8_update_zbin_extra(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
seg_ref_active = vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME);
@ -2274,23 +2274,23 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
vp8_subtract_mby_s_c(x->src_diff,
vp9_subtract_mby_s_c(x->src_diff,
src + x_idx * 16 + y_idx * 16 * src_y_stride,
src_y_stride,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
dst_y_stride);
vp8_subtract_mbuv_s_c(x->src_diff,
vp9_subtract_mbuv_s_c(x->src_diff,
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
src_uv_stride,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
vp8_transform_mb_8x8(x);
vp8_quantize_mb_8x8(x);
vp9_transform_mb_8x8(x);
vp9_quantize_mb_8x8(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
vp9_optimize_mby_8x8(x, rtcd);
vp9_optimize_mbuv_8x8(x, rtcd);
}
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c( &x->e_mbd,
@ -2307,7 +2307,7 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
tp[n] = *t;
xd->mode_info_context = mi + x_idx + y_idx * cm->mode_info_stride;
vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
vp9_tokenize_mb(cpi, &x->e_mbd, t, 0);
skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
}
} else {
@ -2324,9 +2324,9 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
tp[n] = *t;
cpi->skip_true_count[mb_skip_context]++;
vp8_fix_contexts(xd);
vp9_fix_contexts(xd);
} else {
vp8_stuff_mb(cpi, xd, t, 0);
vp9_stuff_mb(cpi, xd, t, 0);
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
cpi->skip_false_count[mb_skip_context]++;
}
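
The superblock paths in this file walk four macroblocks per 32x32 block with x_idx = n & 1, y_idx = n >> 1, offsetting each luma pointer by x_idx * 16 + y_idx * 16 * stride. A quick stand-alone check of that indexing (the stride value is arbitrary):

#include <stdio.h>

int main(void) {
  int n, stride = 64;  /* hypothetical luma stride */
  for (n = 0; n < 4; n++) {
    int x_idx = n & 1, y_idx = n >> 1;  /* raster order within the SB */
    int offset = x_idx * 16 + y_idx * 16 * stride;
    printf("mb %d -> (%d,%d), offset %d\n", n, x_idx, y_idx, offset);
  }
  return 0;
}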

View File

@ -25,7 +25,7 @@
#define IF_RTCD(x) NULL
#endif
int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
int vp9_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
int i;
int intra_pred_var = 0;
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
@ -39,20 +39,20 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
mbmi->uv_mode = DC_PRED;
mbmi->ref_frame = INTRA_FRAME;
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
vp9_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
} else {
for (i = 0; i < 16; i++) {
x->e_mbd.block[i].bmi.as_mode.first = B_DC_PRED;
vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
vp9_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
}
}
intra_pred_var = vp8_get_mb_ss(x->src_diff);
intra_pred_var = vp9_get_mb_ss(x->src_diff);
return intra_pred_var;
}
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
void vp9_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib) {
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
@ -69,15 +69,15 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
}
#endif
vp8_subtract_b(be, b, 16);
vp9_subtract_b(be, b, 16);
tx_type = get_tx_type(&x->e_mbd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, 32, be->coeff, tx_type, 4);
vp8_ht_quantize_b_4x4(be, b, tx_type);
vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 4);
vp9_ht_quantize_b_4x4(be, b, tx_type);
vp8_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4);
} else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 32) ;
x->quantize_b_4x4(be, b) ;
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
}
@ -85,15 +85,15 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
vp8_recon_b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
void vp9_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
int i;
for (i = 0; i < 16; i++)
vp8_encode_intra4x4block(rtcd, mb, i);
vp9_encode_intra4x4block(rtcd, mb, i);
return;
}
void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
void vp9_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
@ -108,42 +108,42 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_build_comp_intra_predictors_mby(xd);
#endif
vp8_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
if (tx_size == TX_16X16) {
BLOCKD *bd = &xd->block[0];
tx_type = get_tx_type(xd, bd);
if (tx_type != DCT_DCT) {
vp8_fht_c(b->src_diff, 32, b->coeff, tx_type, 16);
vp8_quantize_mby_16x16(x);
vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 16);
vp9_quantize_mby_16x16(x);
if (x->optimize)
vp8_optimize_mby_16x16(x, rtcd);
vp9_optimize_mby_16x16(x, rtcd);
vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, tx_type, 16);
} else {
vp8_transform_mby_16x16(x);
vp8_quantize_mby_16x16(x);
vp9_transform_mby_16x16(x);
vp9_quantize_mby_16x16(x);
if (x->optimize)
vp8_optimize_mby_16x16(x, rtcd);
vp9_optimize_mby_16x16(x, rtcd);
vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), xd);
}
} else if (tx_size == TX_8X8) {
vp8_transform_mby_8x8(x);
vp8_quantize_mby_8x8(x);
vp9_transform_mby_8x8(x);
vp9_quantize_mby_8x8(x);
if (x->optimize)
vp8_optimize_mby_8x8(x, rtcd);
vp9_optimize_mby_8x8(x, rtcd);
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
} else {
vp8_transform_mby_4x4(x);
vp8_quantize_mby_4x4(x);
vp9_transform_mby_4x4(x);
vp9_quantize_mby_4x4(x);
if (x->optimize)
vp8_optimize_mby_4x4(x, rtcd);
vp9_optimize_mby_4x4(x, rtcd);
vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
}
vp8_recon_mby(xd);
}
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
void vp9_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
@ -157,27 +157,27 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
}
#endif
vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
xd->predictor, x->src.uv_stride);
if (tx_size == TX_4X4) {
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
vp9_transform_mbuv_4x4(x);
vp9_quantize_mbuv_4x4(x);
if (x->optimize)
vp8_optimize_mbuv_4x4(x, rtcd);
vp9_optimize_mbuv_4x4(x, rtcd);
vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
} else /* 16x16 or 8x8 */ {
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
vp9_transform_mbuv_8x8(x);
vp9_quantize_mbuv_8x8(x);
if (x->optimize)
vp8_optimize_mbuv_8x8(x, rtcd);
vp9_optimize_mbuv_8x8(x, rtcd);
vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
}
vp8_recon_intra_mbuv(xd);
}
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
void vp9_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCKD *b = &xd->block[ib];
@ -201,17 +201,17 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
int idx = (ib & 0x02) ? (ib + 2) : ib;
// generate residual blocks
vp8_subtract_4b_c(be, b, 16);
vp9_subtract_4b_c(be, b, 16);
tx_type = get_tx_type(xd, xd->block + idx);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, 32, (x->block + idx)->coeff,
vp9_fht_c(be->src_diff, 32, (x->block + idx)->coeff,
tx_type, 8);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
vp8_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
tx_type, 8);
} else {
x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->vp9_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
vp8_idct_idct8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
}
@ -219,8 +219,8 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
be = &x->block[ib + iblock[i]];
vp8_subtract_b(be, b, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
vp9_subtract_b(be, b, 16);
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
}
@ -234,16 +234,16 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
}
}
void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
void vp9_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int i, ib;
for (i = 0; i < 4; i++) {
ib = vp8_i8x8_block[i];
vp8_encode_intra8x8(rtcd, x, ib);
vp9_encode_intra8x8(rtcd, x, ib);
}
}
void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
void vp9_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib,
int mode, int second) {
BLOCKD *b = &x->e_mbd.block[ib];
@ -259,16 +259,16 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
}
#endif
vp8_subtract_b(be, b, 8);
vp9_subtract_b(be, b, 8);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 16);
x->quantize_b_4x4(be, b);
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
vp8_recon_uv_b_c(b->predictor,b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
void vp9_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int i, ib, mode, second;
BLOCKD *b;
@ -282,8 +282,8 @@ void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
second = -1;
#endif
/*u */
vp8_encode_intra_uv4x4(rtcd, x, i + 16, mode, second);
vp9_encode_intra_uv4x4(rtcd, x, i + 16, mode, second);
/*v */
vp8_encode_intra_uv4x4(rtcd, x, i + 20, mode, second);
vp9_encode_intra_uv4x4(rtcd, x, i + 20, mode, second);
}
}
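
vp9_encode_intra above returns vp9_get_mb_ss(x->src_diff) — the sum of squared residual samples over the macroblock — as its intra activity measure. A stand-alone equivalent, assuming a 256-entry 16x16 luma diff buffer (the in-tree version is SIMD-accelerated):

#include <stdio.h>

static unsigned int mb_sum_squares(const short *diff) {
  unsigned int ss = 0;
  int i;
  for (i = 0; i < 256; i++)  /* 16x16 luma residual */
    ss += (unsigned int)(diff[i] * diff[i]);
  return ss;
}

int main(void) {
  short diff[256] = {3, -2};  /* remaining entries zero */
  printf("%u\n", mb_sum_squares(diff));  /* 9 + 4 = 13 */
  return 0;
}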

View File

@ -13,15 +13,15 @@
#define _ENCODEINTRA_H_
#include "onyx_int.h"
int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred);
void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *, MACROBLOCK *mb);
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
int vp9_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred);
void vp9_encode_intra16x16mby(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp9_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp9_encode_intra4x4mby(const VP8_ENCODER_RTCD *, MACROBLOCK *mb);
void vp9_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib);
void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
void vp9_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp9_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp9_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib);
#endif

View File

@ -30,7 +30,7 @@
extern int enc_debug;
#endif
void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
void vp9_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
unsigned char *src_ptr = (*(be->base_src) + be->src);
short *diff_ptr = be->src_diff;
unsigned char *pred_ptr = bd->predictor;
@ -49,7 +49,7 @@ void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
}
}
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
void vp9_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
unsigned char *src_ptr = (*(be->base_src) + be->src);
short *diff_ptr = be->src_diff;
unsigned char *pred_ptr = bd->predictor;
@ -66,7 +66,7 @@ void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
}
}
void vp8_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
void vp9_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
const unsigned char *vsrc, int src_stride,
const unsigned char *upred,
const unsigned char *vpred, int dst_stride) {
@ -95,15 +95,15 @@ void vp8_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
}
}
void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc,
void vp9_subtract_mbuv_c(short *diff, unsigned char *usrc,
unsigned char *vsrc, unsigned char *pred, int stride) {
unsigned char *upred = pred + 256;
unsigned char *vpred = pred + 320;
vp8_subtract_mbuv_s_c(diff, usrc, vsrc, stride, upred, vpred, 8);
vp9_subtract_mbuv_s_c(diff, usrc, vsrc, stride, upred, vpred, 8);
}
void vp8_subtract_mby_s_c(short *diff, const unsigned char *src, int src_stride,
void vp9_subtract_mby_s_c(short *diff, const unsigned char *src, int src_stride,
const unsigned char *pred, int dst_stride) {
int r, c;
@ -118,17 +118,17 @@ void vp8_subtract_mby_s_c(short *diff, const unsigned char *src, int src_stride,
}
}
void vp8_subtract_mby_c(short *diff, unsigned char *src,
void vp9_subtract_mby_c(short *diff, unsigned char *src,
unsigned char *pred, int stride) {
vp8_subtract_mby_s_c(diff, src, stride, pred, 16);
vp9_subtract_mby_s_c(diff, src, stride, pred, 16);
}
static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
BLOCK *b = &x->block[0];
vp8_subtract_mby(x->src_diff, *(b->base_src), x->e_mbd.predictor,
vp9_subtract_mby(x->src_diff, *(b->base_src), x->e_mbd.predictor,
b->src_stride);
vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
}
@ -141,11 +141,11 @@ static void build_dcblock_4x4(MACROBLOCK *x) {
}
}
void vp8_transform_mby_4x4(MACROBLOCK *x) {
void vp9_transform_mby_4x4(MACROBLOCK *x) {
int i;
for (i = 0; i < 16; i += 2) {
x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
x->vp9_short_fdct8x4(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 32);
}
@ -159,21 +159,21 @@ void vp8_transform_mby_4x4(MACROBLOCK *x) {
}
}
void vp8_transform_mbuv_4x4(MACROBLOCK *x) {
void vp9_transform_mbuv_4x4(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i += 2) {
x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
x->vp9_short_fdct8x4(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 16);
}
}
static void transform_mb_4x4(MACROBLOCK *x) {
vp8_transform_mby_4x4(x);
vp8_transform_mbuv_4x4(x);
vp9_transform_mby_4x4(x);
vp9_transform_mbuv_4x4(x);
}
void vp8_build_dcblock_8x8(MACROBLOCK *x) {
void vp9_build_dcblock_8x8(MACROBLOCK *x) {
int16_t *src_diff_ptr = x->block[24].src_diff;
int i;
@ -186,21 +186,21 @@ void vp8_build_dcblock_8x8(MACROBLOCK *x) {
src_diff_ptr[8] = x->coeff[12 * 16];
}
void vp8_transform_mby_8x8(MACROBLOCK *x) {
void vp9_transform_mby_8x8(MACROBLOCK *x) {
int i;
for (i = 0; i < 9; i += 8) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
x->vp9_short_fdct8x8(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 32);
}
for (i = 2; i < 11; i += 8) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
x->vp9_short_fdct8x8(&x->block[i].src_diff[0],
&x->block[i + 2].coeff[0], 32);
}
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
// build dc block from 2x2 y dc values
vp8_build_dcblock_8x8(x);
vp9_build_dcblock_8x8(x);
// do 2nd order transform on the dc block
x->short_fhaar2x2(&x->block[24].src_diff[0],
@ -208,29 +208,29 @@ void vp8_transform_mby_8x8(MACROBLOCK *x) {
}
}
void vp8_transform_mbuv_8x8(MACROBLOCK *x) {
void vp9_transform_mbuv_8x8(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i += 4) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
x->vp9_short_fdct8x8(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 16);
}
}
void vp8_transform_mb_8x8(MACROBLOCK *x) {
vp8_transform_mby_8x8(x);
vp8_transform_mbuv_8x8(x);
void vp9_transform_mb_8x8(MACROBLOCK *x) {
vp9_transform_mby_8x8(x);
vp9_transform_mbuv_8x8(x);
}
void vp8_transform_mby_16x16(MACROBLOCK *x) {
void vp9_transform_mby_16x16(MACROBLOCK *x) {
vp8_clear_system_state();
x->vp8_short_fdct16x16(&x->block[0].src_diff[0],
x->vp9_short_fdct16x16(&x->block[0].src_diff[0],
&x->block[0].coeff[0], 32);
}
void vp8_transform_mb_16x16(MACROBLOCK *x) {
vp8_transform_mby_16x16(x);
vp8_transform_mbuv_8x8(x);
void vp9_transform_mb_16x16(MACROBLOCK *x) {
vp9_transform_mby_16x16(x);
vp9_transform_mbuv_8x8(x);
}
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
@ -571,7 +571,7 @@ static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd,
}
}
void vp8_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
void vp9_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
PLANE_TYPE type;
int has_2nd_order;
@ -606,7 +606,7 @@ void vp8_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
}
}
void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
void vp9_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
@ -628,11 +628,11 @@ void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
}
static void optimize_mb_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
vp8_optimize_mby_4x4(x, rtcd);
vp8_optimize_mbuv_4x4(x, rtcd);
vp9_optimize_mby_4x4(x, rtcd);
vp9_optimize_mbuv_4x4(x, rtcd);
}
void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
void vp9_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
PLANE_TYPE type;
ENTROPY_CONTEXT_PLANES t_above, t_left;
@ -665,7 +665,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
}
}
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
void vp9_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
@ -690,8 +690,8 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
}
static void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
vp9_optimize_mby_8x8(x, rtcd);
vp9_optimize_mbuv_8x8(x, rtcd);
}
static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
@ -868,7 +868,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
*a = *l = (d->eob != !type);
}
void vp8_optimize_mby_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
void vp9_optimize_mby_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
@ -884,11 +884,11 @@ void vp8_optimize_mby_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
}
static void optimize_mb_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
vp8_optimize_mby_16x16(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
vp9_optimize_mby_16x16(x, rtcd);
vp9_optimize_mbuv_8x8(x, rtcd);
}
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
void vp9_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
@ -896,34 +896,34 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_subtract_mb(rtcd, x);
if (tx_size == TX_16X16) {
vp8_transform_mb_16x16(x);
vp8_quantize_mb_16x16(x);
vp9_transform_mb_16x16(x);
vp9_quantize_mb_16x16(x);
if (x->optimize)
optimize_mb_16x16(x, rtcd);
vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
} else if (tx_size == TX_8X8) {
if (xd->mode_info_context->mbmi.mode == SPLITMV) {
assert(xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4);
vp8_transform_mby_8x8(x);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mby_8x8(x);
vp8_quantize_mbuv_4x4(x);
vp9_transform_mby_8x8(x);
vp9_transform_mbuv_4x4(x);
vp9_quantize_mby_8x8(x);
vp9_quantize_mbuv_4x4(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_4x4(x, rtcd);
vp9_optimize_mby_8x8(x, rtcd);
vp9_optimize_mbuv_4x4(x, rtcd);
}
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
} else {
vp8_transform_mb_8x8(x);
vp8_quantize_mb_8x8(x);
vp9_transform_mb_8x8(x);
vp9_quantize_mb_8x8(x);
if (x->optimize)
optimize_mb_8x8(x, rtcd);
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
}
} else {
transform_mb_4x4(x);
vp8_quantize_mb_4x4(x);
vp9_quantize_mb_4x4(x);
if (x->optimize)
optimize_mb_4x4(x, rtcd);
vp8_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), xd);
@ -933,7 +933,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
}
/* this function is used by first pass only */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
void vp9_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
@ -944,10 +944,10 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
vp8_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
vp8_transform_mby_4x4(x);
vp8_quantize_mby_4x4(x);
vp9_transform_mby_4x4(x);
vp9_quantize_mby_4x4(x);
vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
vp8_recon_mby(xd);
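
Every path in vp9_encode_inter16x16 above starts from the subtract stage, and vp9_subtract_b_c computes diff = src - pred over a 4x4 block. A stand-alone sketch of that inner loop with simplified strides (in the tree, the diff buffer advances by the prediction pitch):

#include <stdio.h>

static void subtract_4x4(short *diff, int diff_pitch,
                         const unsigned char *src, int src_stride,
                         const unsigned char *pred, int pred_pitch) {
  int r, c;
  for (r = 0; r < 4; r++) {
    for (c = 0; c < 4; c++)
      diff[c] = (short)(src[c] - pred[c]);  /* residual sample */
    diff += diff_pitch;
    src += src_stride;
    pred += pred_pitch;
  }
}

int main(void) {
  unsigned char src[16] = {10, 11, 12, 13}, pred[16] = {8};
  short diff[16];
  subtract_4x4(diff, 4, src, 4, pred, 4);
  printf("%d %d %d %d\n", diff[0], diff[1], diff[2], diff[3]);  /* 2 11 12 13 */
  return 0;
}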

View File

@ -35,36 +35,36 @@ typedef struct {
#include "onyx_int.h"
struct VP8_ENCODER_RTCD;
void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp9_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_build_dcblock(MACROBLOCK *b);
void vp8_transform_mb_4x4(MACROBLOCK *mb);
void vp8_transform_mbuv_4x4(MACROBLOCK *x);
void vp8_transform_mby_4x4(MACROBLOCK *x);
void vp9_transform_mbuv_4x4(MACROBLOCK *x);
void vp9_transform_mby_4x4(MACROBLOCK *x);
void vp8_optimize_mby_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp9_optimize_mby_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp9_optimize_mbuv_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp9_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_transform_mb_8x8(MACROBLOCK *mb);
void vp8_transform_mby_8x8(MACROBLOCK *x);
void vp8_transform_mbuv_8x8(MACROBLOCK *x);
void vp8_build_dcblock_8x8(MACROBLOCK *b);
void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp9_transform_mb_8x8(MACROBLOCK *mb);
void vp9_transform_mby_8x8(MACROBLOCK *x);
void vp9_transform_mbuv_8x8(MACROBLOCK *x);
void vp9_build_dcblock_8x8(MACROBLOCK *b);
void vp9_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp9_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_transform_mb_16x16(MACROBLOCK *mb);
void vp8_transform_mby_16x16(MACROBLOCK *x);
void vp8_optimize_mby_16x16(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp9_transform_mb_16x16(MACROBLOCK *mb);
void vp9_transform_mby_16x16(MACROBLOCK *x);
void vp9_optimize_mby_16x16(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
void vp9_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
#if CONFIG_SUPERBLOCKS
void vp8_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
void vp9_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
const unsigned char *vsrc, int src_stride,
const unsigned char *upred,
const unsigned char *vpred, int dst_stride);
void vp8_subtract_mby_s_c(short *diff, const unsigned char *src,
void vp9_subtract_mby_s_c(short *diff, const unsigned char *src,
int src_stride, const unsigned char *pred,
int dst_stride);
#endif

View File

@ -99,16 +99,16 @@ static void build_nmv_component_cost_table(int *mvcost,
sign_cost[0] = vp8_cost_zero(mvcomp->sign);
sign_cost[1] = vp8_cost_one(mvcomp->sign);
vp8_cost_tokens(class_cost, mvcomp->classes, vp8_mv_class_tree);
vp8_cost_tokens(class0_cost, mvcomp->class0, vp8_mv_class0_tree);
vp9_cost_tokens(class_cost, mvcomp->classes, vp8_mv_class_tree);
vp9_cost_tokens(class0_cost, mvcomp->class0, vp8_mv_class0_tree);
for (i = 0; i < MV_OFFSET_BITS; ++i) {
bits_cost[i][0] = vp8_cost_zero(mvcomp->bits[i]);
bits_cost[i][1] = vp8_cost_one(mvcomp->bits[i]);
}
for (i = 0; i < CLASS0_SIZE; ++i)
vp8_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp8_mv_fp_tree);
vp8_cost_tokens(fp_cost, mvcomp->fp, vp8_mv_fp_tree);
vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp8_mv_fp_tree);
vp9_cost_tokens(fp_cost, mvcomp->fp, vp8_mv_fp_tree);
if (usehp) {
class0_hp_cost[0] = vp8_cost_zero(mvcomp->class0_hp);
@ -356,7 +356,7 @@ static void add_nmvcount(nmv_context_counts* const dst,
}
#endif
void vp8_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) {
void vp9_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) {
int i, j;
nmv_context prob;
unsigned int branch_ct_joint[MV_JOINTS - 1][2];
@ -508,7 +508,7 @@ void vp8_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) {
}
}
void vp8_encode_nmv(vp8_writer* const bc, const MV* const mv,
void vp9_encode_nmv(vp8_writer* const bc, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
vp8_write_token(bc, vp8_mv_joint_tree, mvctx->joints,
@ -521,7 +521,7 @@ void vp8_encode_nmv(vp8_writer* const bc, const MV* const mv,
}
}
void vp8_encode_nmv_fp(vp8_writer* const bc, const MV* const mv,
void vp9_encode_nmv_fp(vp8_writer* const bc, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx,
int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
@ -534,14 +534,14 @@ void vp8_encode_nmv_fp(vp8_writer* const bc, const MV* const mv,
}
}
void vp8_build_nmv_cost_table(int *mvjoint,
void vp9_build_nmv_cost_table(int *mvjoint,
int *mvcost[2],
const nmv_context* const mvctx,
int usehp,
int mvc_flag_v,
int mvc_flag_h) {
vp8_clear_system_state();
vp8_cost_tokens(mvjoint, mvctx->joints, vp8_mv_joint_tree);
vp9_cost_tokens(mvjoint, mvctx->joints, vp8_mv_joint_tree);
if (mvc_flag_v)
build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp);
if (mvc_flag_h)

View File

@ -14,13 +14,13 @@
#include "onyx_int.h"
void vp8_write_nmvprobs(VP8_COMP* const, int usehp, vp8_writer* const);
void vp8_encode_nmv(vp8_writer* const w, const MV* const mv,
void vp9_write_nmvprobs(VP8_COMP* const, int usehp, vp8_writer* const);
void vp9_encode_nmv(vp8_writer* const w, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx);
void vp8_encode_nmv_fp(vp8_writer* const w, const MV* const mv,
void vp9_encode_nmv_fp(vp8_writer* const w, const MV* const mv,
const MV* const ref, const nmv_context *mvctx,
int usehp);
void vp8_build_nmv_cost_table(int *mvjoint,
void vp9_build_nmv_cost_table(int *mvjoint,
int *mvcost[2],
const nmv_context *mvctx,
int usehp,

View File

@ -39,11 +39,11 @@
#define IF_RTCD(x) NULL
#endif
extern void vp8_build_block_offsets(MACROBLOCK *x);
extern void vp8_setup_block_ptrs(MACROBLOCK *x);
extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
extern void vp8_alloc_compressor_data(VP8_COMP *cpi);
extern void vp9_build_block_offsets(MACROBLOCK *x);
extern void vp9_setup_block_ptrs(MACROBLOCK *x);
extern void vp9cx_frame_init_quantizer(VP8_COMP *cpi);
extern void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
extern void vp9_alloc_compressor_data(VP8_COMP *cpi);
#define IIFACTOR 12.5
#define IIKFACTOR1 12.5
@ -66,10 +66,10 @@ static int select_cq_level(int qindex) {
int ret_val = QINDEX_RANGE - 1;
int i;
double target_q = (vp8_convert_qindex_to_q(qindex) * 0.5847) + 1.0;
double target_q = (vp9_convert_qindex_to_q(qindex) * 0.5847) + 1.0;
for (i = 0; i < QINDEX_RANGE; i++) {
if (target_q <= vp8_convert_qindex_to_q(i)) {
if (target_q <= vp9_convert_qindex_to_q(i)) {
ret_val = i;
break;
}
@ -341,11 +341,11 @@ static int frame_max_bits(VP8_COMP *cpi) {
return max_bits;
}
void vp8_init_first_pass(VP8_COMP *cpi) {
void vp9_init_first_pass(VP8_COMP *cpi) {
zero_stats(cpi->twopass.total_stats);
}
void vp8_end_first_pass(VP8_COMP *cpi) {
void vp9_end_first_pass(VP8_COMP *cpi) {
output_stats(cpi, cpi->output_pkt_list, cpi->twopass.total_stats);
}
@ -364,7 +364,7 @@ static void zz_motion_search(VP8_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r
ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre);
vp8_mse16x16(src_ptr, src_stride, ref_ptr, ref_stride,
vp9_mse16x16(src_ptr, src_stride, ref_ptr, ref_stride,
(unsigned int *)(best_motion_err));
}
@ -388,7 +388,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int new_mv_mode_penalty = 256;
// override the default variance function to use MSE
v_fn_ptr.vf = vp8_mse16x16;
v_fn_ptr.vf = vp9_mse16x16;
// Set up pointers for this macro block recon buffer
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
@ -435,7 +435,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
}
}
void vp8_first_pass(VP8_COMP *cpi) {
void vp9_first_pass(VP8_COMP *cpi) {
int mb_row, mb_col;
MACROBLOCK *const x = &cpi->mb;
VP8_COMMON *const cm = &cpi->common;
@ -477,15 +477,15 @@ void vp8_first_pass(VP8_COMP *cpi) {
xd->mode_info_context = cm->mi;
vp8_build_block_offsets(x);
vp9_build_block_offsets(x);
vp8_setup_block_dptrs(&x->e_mbd);
vp8_setup_block_ptrs(x);
vp9_setup_block_ptrs(x);
// set up new frame for intra coded blocks
vp8_setup_intra_recon(new_yv12);
vp8cx_frame_init_quantizer(cpi);
vp9cx_frame_init_quantizer(cpi);
// Initialise the MV cost table to the defaults
// if( cm->current_video_frame == 0)
@ -493,7 +493,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
{
int flag[2] = {1, 1};
vp8_init_mv_probs(cm);
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
}
// for each macroblock row in image
@ -527,7 +527,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// do intra 16x16 prediction
this_error = vp8_encode_intra(cpi, x, use_dc_pred);
this_error = vp9_encode_intra(cpi, x, use_dc_pred);
// "intrapenalty" below deals with situations where the intra and inter error scores are very low (eg a plain black frame)
// We do not have special cases in first pass for 0,0 and nearest etc so all inter modes carry an overhead cost estimate fot the mv.
@ -618,9 +618,9 @@ void vp8_first_pass(VP8_COMP *cpi) {
mv.as_mv.row <<= 3;
mv.as_mv.col <<= 3;
this_error = motion_error;
vp8_set_mbmode_and_mvs(x, NEWMV, &mv);
vp9_set_mbmode_and_mvs(x, NEWMV, &mv);
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
vp9_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
sum_mvr += mv.as_mv.row;
sum_mvr_abs += abs(mv.as_mv.row);
sum_mvc += mv.as_mv.col;
@ -820,11 +820,11 @@ static long long estimate_modemvcost(VP8_COMP *cpi,
intra_cost = bitcost(av_intra);
// Estimate of extra bits per mv overhead for mbs
// << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb
// << 9 is the normalization to the (bits * 512) used in vp9_bits_per_mb
mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9;
// Crude estimate of overhead cost from modes
// << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
// << 9 is the normalization to (bits * 512) used in vp9_bits_per_mb
mode_cost =
(int)((((av_pct_inter - av_pct_motion) * zz_cost) +
(av_pct_motion * motion_cost) +
@ -845,7 +845,7 @@ static double calc_correction_factor(double err_per_mb,
double correction_factor;
// Adjustment based on actual quantizer to power term.
power_term = (vp8_convert_qindex_to_q(Q) * 0.01) + pt_low;
power_term = (vp9_convert_qindex_to_q(Q) * 0.01) + pt_low;
power_term = (power_term > pt_high) ? pt_high : power_term;
// Adjustments to error term
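The clamped power term above presumably feeds a pow() of the per-MB error ratio. A hedged reconstruction of how calc_correction_factor() likely continues; err_divisor and the clamp bounds are assumptions, since the rest of the function is not shown in this hunk.

/* Assumed continuation of calc_correction_factor() */
correction_factor = pow(err_per_mb / err_divisor, power_term);

/* Clamp range is an assumption */
if (correction_factor < 0.05)
  correction_factor = 0.05;
else if (correction_factor > 5.0)
  correction_factor = 5.0;
return correction_factor;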
@ -875,7 +875,7 @@ static void adjust_maxq_qrange(VP8_COMP *cpi) {
cpi->twopass.maxq_max_limit = cpi->worst_quality;
for (i = cpi->best_quality; i <= cpi->worst_quality; i++) {
cpi->twopass.maxq_max_limit = i;
if (vp8_convert_qindex_to_q(i) >= q)
if (vp9_convert_qindex_to_q(i) >= q)
break;
}
@ -884,7 +884,7 @@ static void adjust_maxq_qrange(VP8_COMP *cpi) {
cpi->twopass.maxq_min_limit = cpi->best_quality;
for (i = cpi->worst_quality; i >= cpi->best_quality; i--) {
cpi->twopass.maxq_min_limit = i;
if (vp8_convert_qindex_to_q(i) <= q)
if (vp9_convert_qindex_to_q(i) <= q)
break;
}
}
@ -978,7 +978,7 @@ static int estimate_max_q(VP8_COMP *cpi,
err_correction_factor = 5.0;
bits_per_mb_at_this_q =
vp8_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
vp9_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
bits_per_mb_at_this_q = (int)(.5 + err_correction_factor *
(double)bits_per_mb_at_this_q);
@ -1084,7 +1084,7 @@ static int estimate_cq(VP8_COMP *cpi,
err_correction_factor = 5.0;
bits_per_mb_at_this_q =
vp8_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
vp9_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
bits_per_mb_at_this_q = (int)(.5 + err_correction_factor *
(double)bits_per_mb_at_this_q);
@ -1111,9 +1111,9 @@ static int estimate_cq(VP8_COMP *cpi,
}
extern void vp8_new_frame_rate(VP8_COMP *cpi, double framerate);
extern void vp9_new_frame_rate(VP8_COMP *cpi, double framerate);
void vp8_init_second_pass(VP8_COMP *cpi) {
void vp9_init_second_pass(VP8_COMP *cpi) {
FIRSTPASS_STATS this_frame;
FIRSTPASS_STATS *start_pos;
@ -1138,7 +1138,7 @@ void vp8_init_second_pass(VP8_COMP *cpi) {
// encoded in the second pass is a guess. However, the sum duration is not.
// It's calculated based on the actual durations of all frames from the first
// pass.
vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats->count / cpi->twopass.total_stats->duration);
vp9_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats->count / cpi->twopass.total_stats->duration);
cpi->output_frame_rate = cpi->oxcf.frame_rate;
cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0);
@ -1191,7 +1191,7 @@ void vp8_init_second_pass(VP8_COMP *cpi) {
}
}
void vp8_end_second_pass(VP8_COMP *cpi) {
void vp9_end_second_pass(VP8_COMP *cpi) {
}
// This function gives an estimate of how badly we believe
@ -1727,7 +1727,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
int gf_bits;
boost = (cpi->gfu_boost * vp8_gfboost_qadjust(Q)) / 100;
boost = (cpi->gfu_boost * vp9_gfboost_qadjust(Q)) / 100;
// Set max and minimum boost and hence minimum allocation
if (boost > ((cpi->baseline_gf_interval + 1) * 200))
@ -1919,21 +1919,21 @@ static int adjust_active_maxq(int old_maxqi, int new_maxqi) {
double new_q;
double target_q;
old_q = vp8_convert_qindex_to_q(old_maxqi);
new_q = vp8_convert_qindex_to_q(new_maxqi);
old_q = vp9_convert_qindex_to_q(old_maxqi);
new_q = vp9_convert_qindex_to_q(new_maxqi);
target_q = ((old_q * 7.0) + new_q) / 8.0;
if (target_q > old_q) {
for (i = old_maxqi; i <= new_maxqi; i++) {
if (vp8_convert_qindex_to_q(i) >= target_q) {
if (vp9_convert_qindex_to_q(i) >= target_q) {
ret_val = i;
break;
}
}
} else {
for (i = old_maxqi; i >= new_maxqi; i--) {
if (vp8_convert_qindex_to_q(i) <= target_q) {
if (vp9_convert_qindex_to_q(i) <= target_q) {
ret_val = i;
break;
}
@ -1943,7 +1943,7 @@ static int adjust_active_maxq(int old_maxqi, int new_maxqi) {
return ret_val;
}
void vp8_second_pass(VP8_COMP *cpi) {
void vp9_second_pass(VP8_COMP *cpi) {
int tmp_q;
int frames_left = (int)(cpi->twopass.total_stats->count - cpi->common.current_video_frame);
@ -2059,7 +2059,7 @@ void vp8_second_pass(VP8_COMP *cpi) {
cpi->active_worst_quality = tmp_q;
cpi->ni_av_qi = tmp_q;
cpi->avg_q = vp8_convert_qindex_to_q(tmp_q);
cpi->avg_q = vp9_convert_qindex_to_q(tmp_q);
// Limit the maxq value returned subsequently.
// This increases the risk of overspend or underspend if the initial


@ -12,13 +12,13 @@
#if !defined __INC_FIRSTPASS_H
#define __INC_FIRSTPASS_H
extern void vp8_init_first_pass(VP8_COMP *cpi);
extern void vp8_first_pass(VP8_COMP *cpi);
extern void vp8_end_first_pass(VP8_COMP *cpi);
extern void vp9_init_first_pass(VP8_COMP *cpi);
extern void vp9_first_pass(VP8_COMP *cpi);
extern void vp9_end_first_pass(VP8_COMP *cpi);
extern void vp8_init_second_pass(VP8_COMP *cpi);
extern void vp8_second_pass(VP8_COMP *cpi);
extern void vp8_end_second_pass(VP8_COMP *cpi);
extern void vp9_init_second_pass(VP8_COMP *cpi);
extern void vp9_second_pass(VP8_COMP *cpi);
extern void vp9_end_second_pass(VP8_COMP *cpi);
extern size_t vp8_firstpass_stats_sz(unsigned int mb_count);
#endif


@ -14,26 +14,26 @@
#include "vp8/encoder/onyx_int.h"
void vp8_arch_x86_encoder_init(VP8_COMP *cpi);
void vp9_arch_x86_encoder_init(VP8_COMP *cpi);
void vp8_arch_arm_encoder_init(VP8_COMP *cpi);
void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
void (*vp9_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp9_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
void vp8_cmachine_specific_config(VP8_COMP *cpi) {
void vp9_cmachine_specific_config(VP8_COMP *cpi) {
#if CONFIG_RUNTIME_CPU_DETECT
cpi->rtcd.common = &cpi->common.rtcd;
cpi->rtcd.search.full_search = vp8_full_search_sad;
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
cpi->rtcd.search.full_search = vp9_full_search_sad;
cpi->rtcd.search.refining_search = vp9_refining_search_sad;
cpi->rtcd.search.diamond_search = vp9_diamond_search_sad;
cpi->rtcd.temporal.apply = vp9_temporal_filter_apply_c;
#endif
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
vp9_yv12_copy_partial_frame_ptr = vp9_yv12_copy_partial_frame;
#if ARCH_X86 || ARCH_X86_64
vp8_arch_x86_encoder_init(cpi);
vp9_arch_x86_encoder_init(cpi);
#endif
#if ARCH_ARM


@ -40,7 +40,7 @@ pop(struct lookahead_ctx *ctx,
void
vp8_lookahead_destroy(struct lookahead_ctx *ctx) {
vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
if (ctx) {
if (ctx->buf) {
int i;
@ -55,7 +55,7 @@ vp8_lookahead_destroy(struct lookahead_ctx *ctx) {
struct lookahead_ctx *
vp8_lookahead_init(unsigned int width,
vp9_lookahead_init(unsigned int width,
unsigned int height,
unsigned int depth) {
struct lookahead_ctx *ctx = NULL;
@ -85,13 +85,13 @@ vp8_lookahead_init(unsigned int width,
}
return ctx;
bail:
vp8_lookahead_destroy(ctx);
vp9_lookahead_destroy(ctx);
return NULL;
}
int
vp8_lookahead_push(struct lookahead_ctx *ctx,
vp9_lookahead_push(struct lookahead_ctx *ctx,
YV12_BUFFER_CONFIG *src,
int64_t ts_start,
int64_t ts_end,
@ -157,7 +157,7 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
struct lookahead_entry *
vp8_lookahead_pop(struct lookahead_ctx *ctx,
vp9_lookahead_pop(struct lookahead_ctx *ctx,
int drain) {
struct lookahead_entry *buf = NULL;
@ -170,7 +170,7 @@ vp8_lookahead_pop(struct lookahead_ctx *ctx,
struct lookahead_entry *
vp8_lookahead_peek(struct lookahead_ctx *ctx,
vp9_lookahead_peek(struct lookahead_ctx *ctx,
int index) {
struct lookahead_entry *buf = NULL;
@ -186,6 +186,6 @@ vp8_lookahead_peek(struct lookahead_ctx *ctx,
unsigned int
vp8_lookahead_depth(struct lookahead_ctx *ctx) {
vp9_lookahead_depth(struct lookahead_ctx *ctx) {
return ctx->sz;
}


@ -29,7 +29,7 @@ struct lookahead_ctx;
*
*
*/
struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
struct lookahead_ctx *vp9_lookahead_init(unsigned int width,
unsigned int height,
unsigned int depth
);
@ -38,7 +38,7 @@ struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
/**\brief Destroys the lookahead stage
*
*/
void vp8_lookahead_destroy(struct lookahead_ctx *ctx);
void vp9_lookahead_destroy(struct lookahead_ctx *ctx);
/**\brief Enqueue a source buffer
@ -57,7 +57,7 @@ void vp8_lookahead_destroy(struct lookahead_ctx *ctx);
* \param[in] active_map Map that specifies which macroblock is active
*/
int
vp8_lookahead_push(struct lookahead_ctx *ctx,
vp9_lookahead_push(struct lookahead_ctx *ctx,
YV12_BUFFER_CONFIG *src,
int64_t ts_start,
int64_t ts_end,
@ -77,7 +77,7 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
*
*/
struct lookahead_entry *
vp8_lookahead_pop(struct lookahead_ctx *ctx,
vp9_lookahead_pop(struct lookahead_ctx *ctx,
int drain);
@ -90,7 +90,7 @@ vp8_lookahead_pop(struct lookahead_ctx *ctx,
*
*/
struct lookahead_entry *
vp8_lookahead_peek(struct lookahead_ctx *ctx,
vp9_lookahead_peek(struct lookahead_ctx *ctx,
int index);
@ -99,7 +99,7 @@ vp8_lookahead_peek(struct lookahead_ctx *ctx,
* \param[in] ctx Pointer to the lookahead context
*/
unsigned int
vp8_lookahead_depth(struct lookahead_ctx *ctx);
vp9_lookahead_depth(struct lookahead_ctx *ctx);
#endif
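Taken together, the declarations above describe a small frame queue. A minimal usage sketch against the signatures shown in this diff; the raw buffer, the timestamps, and the img field of lookahead_entry are assumptions about surrounding code, not part of this commit.

YV12_BUFFER_CONFIG raw;             /* assumed: filled by the capture layer */
int64_t ts_start = 0, ts_end = 33;  /* assumed: arbitrary timestamps */

struct lookahead_ctx *la = vp9_lookahead_init(640, 480, 25 /* depth */);
if (la != NULL) {
  /* enqueue one source frame: flags = 0, no active map */
  vp9_lookahead_push(la, &raw, ts_start, ts_end, 0, NULL);

  /* drain the queue in order */
  while (vp9_lookahead_depth(la) > 0) {
    struct lookahead_entry *e = vp9_lookahead_pop(la, 1 /* drain */);
    /* ... encode e->img here (field name assumed) ... */
  }
  vp9_lookahead_destroy(la);
}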


@ -47,13 +47,13 @@ static unsigned int do_16x16_motion_iteration
further_steps = 0;
}
vp8_clamp_mv_min_max(x, ref_mv);
vp9_clamp_mv_min_max(x, ref_mv);
ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
/*cpi->sf.search_method == HEX*/
best_err = vp8_hex_search(
best_err = vp9_hex_search(
x, b, d,
&ref_full, dst_mv,
step_param,
@ -81,9 +81,9 @@ static unsigned int do_16x16_motion_iteration
xd->mode_info_context->mbmi.pred_filter_enabled = 0;
#endif
vp8_set_mbmode_and_mvs(x, NEWMV, dst_mv);
vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
best_err = vp8_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
best_err = vp9_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
xd->predictor, 16, INT_MAX);
/* restore UMV window */
@ -128,7 +128,7 @@ static int do_16x16_motion_search
// FIXME should really use something like near/nearest MV and/or MV prediction
xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
xd->pre.y_stride = ref->y_stride;
err = vp8_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
err = vp9_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
dst_mv->as_int = 0;
@ -188,7 +188,7 @@ static int do_16x16_zerozero_search
xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
xd->pre.y_stride = ref->y_stride;
// VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
err = vp8_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
err = vp9_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
dst_mv->as_int = 0;
@ -214,7 +214,7 @@ static int find_best_16x16_intra
xd->mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby(xd);
err = vp8_sad16x16(xd->predictor, 16, buf->y_buffer + mb_y_offset,
err = vp9_sad16x16(xd->predictor, 16, buf->y_buffer + mb_y_offset,
buf->y_stride, best_err);
// find best
if (err < best_err) {
@ -428,22 +428,22 @@ static void separate_arf_mbs(VP8_COMP *cpi) {
cpi->static_mb_pct = 0;
cpi->seg0_cnt = ncnt[0];
vp8_enable_segmentation((VP8_PTR) cpi);
vp9_enable_segmentation((VP8_PTR) cpi);
} else {
cpi->static_mb_pct = 0;
vp8_disable_segmentation((VP8_PTR) cpi);
vp9_disable_segmentation((VP8_PTR) cpi);
}
// Free locally allocated storage
vpx_free(arf_not_zz);
}
void vp8_update_mbgraph_stats
void vp9_update_mbgraph_stats
(
VP8_COMP *cpi
) {
VP8_COMMON *const cm = &cpi->common;
int i, n_frames = vp8_lookahead_depth(cpi->lookahead);
int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
YV12_BUFFER_CONFIG *golden_ref = &cm->yv12_fb[cm->gld_fb_idx];
// we need to look ahead beyond where the ARF transitions into
@ -469,7 +469,7 @@ void vp8_update_mbgraph_stats
for (i = 0; i < n_frames; i++) {
MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
struct lookahead_entry *q_cur =
vp8_lookahead_peek(cpi->lookahead, i);
vp9_lookahead_peek(cpi->lookahead, i);
assert(q_cur != NULL);


@ -11,6 +11,6 @@
#ifndef __INC_MBGRAPH_H__
#define __INC_MBGRAPH_H__ 1
extern void vp8_update_mbgraph_stats(VP8_COMP *cpi);
extern void vp9_update_mbgraph_stats(VP8_COMP *cpi);
#endif /* __INC_MBGRAPH_H__ */


@ -23,7 +23,7 @@ static int mv_ref_ct [31] [4] [2];
static int mv_mode_cts [4] [2];
#endif
void vp8_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv) {
void vp9_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv) {
int col_min = (ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL +
((ref_mv->as_mv.col & 7) ? 1 : 0);
int row_min = (ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL +
@ -42,7 +42,7 @@ void vp8_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv) {
x->mv_row_max = row_max;
}
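The ((col & 7) ? 1 : 0) terms above correct for the flooring behaviour of the arithmetic right shift on 1/8-pel motion vectors. A worked illustration, not library code; 64 is a placeholder for MAX_FULL_PEL_VAL.

int col = -13;                       /* 1/8-pel units, i.e. -1.625 full pels */
int col_min = (col >> 3)             /* arithmetic shift floors to -2 */
              - 64                   /* placeholder search range */
              + ((col & 7) ? 1 : 0); /* nonzero fraction: round up */
/* col_min == ceil(-1.625) - 64 == -65, matching the intent of the code
 * above: the window is anchored at the rounded-up full-pel position. */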
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
int Weight, int ishp) {
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
@ -79,7 +79,7 @@ static int mvsad_err_cost(int_mv *mv, int_mv *ref, DEC_MVSADCOSTS,
return 0;
}
void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
int Len;
int search_site_count = 0;
@ -125,7 +125,7 @@ void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
x->searches_per_step = 4;
}
void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
void vp9_init3smotion_compensation(MACROBLOCK *x, int stride) {
int Len;
int search_site_count = 0;
@ -244,7 +244,7 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
@ -443,7 +443,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#undef MIN
#undef MAX
int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int vp9_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
@ -923,7 +923,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#undef SP
int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int vp9_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
@ -1099,7 +1099,7 @@ static const MV next_chkpts[6][3] = {
{{ -1, 2}, { -2, 0}, { -1, -2}}
};
int vp8_hex_search
int vp9_hex_search
(
MACROBLOCK *x,
BLOCK *b,
@ -1255,7 +1255,7 @@ cal_neighbors:
#undef CHECK_POINT
#undef CHECK_BETTER
int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int vp9_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv,
int search_param, int sad_per_bit, int *num00,
vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
@ -1364,7 +1364,7 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
xd->allow_high_precision_mv);
}
int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int vp9_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv, int search_param,
int sad_per_bit, int *num00,
vp8_variance_fn_ptr_t *fn_ptr,
@ -1512,7 +1512,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
point as the best match, we will do a final 1-away diamond
refining search */
int vp8_full_pixel_diamond(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *b,
int vp9_full_pixel_diamond(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *b,
BLOCKD *d, int_mv *mvp_full, int step_param,
int sadpb, int further_steps,
int do_refine, vp8_variance_fn_ptr_t *fn_ptr,
@ -1568,7 +1568,7 @@ int vp8_full_pixel_diamond(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *b,
return bestsme;
}
int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp9_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
int_mv *center_mv) {
@ -1662,7 +1662,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return INT_MAX;
}
int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp9_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
int_mv *center_mv) {
@ -1789,7 +1789,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return INT_MAX;
}
int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp9_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp8_variance_fn_ptr_t *fn_ptr,
DEC_MVCOSTS,
@ -1943,7 +1943,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return INT_MAX;
}
int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp9_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int error_per_bit, int search_range,
vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
int_mv *center_mv) {
@ -2019,7 +2019,7 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return INT_MAX;
}
int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int vp9_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int error_per_bit,
int search_range, vp8_variance_fn_ptr_t *fn_ptr,
DEC_MVCOSTS, int_mv *center_mv) {


@ -32,20 +32,20 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) // Max full pel mv specified in 1 pel units
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units
extern void vp8_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv);
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
extern void vp9_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv);
extern int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
int Weight, int ishp);
extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
extern void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride);
extern void vp9_init3smotion_compensation(MACROBLOCK *x, int stride);
// Runs a sequence of diamond searches in smaller steps for RD
struct VP8_COMP;
int vp8_full_pixel_diamond(struct VP8_COMP *cpi, MACROBLOCK *x, BLOCK *b,
int vp9_full_pixel_diamond(struct VP8_COMP *cpi, MACROBLOCK *x, BLOCK *b,
BLOCKD *d, int_mv *mvp_full, int step_param,
int sadpb, int further_steps, int do_refine,
vp8_variance_fn_ptr_t *fn_ptr,
int_mv *ref_mv, int_mv *dst_mv);
extern int vp8_hex_search
extern int vp9_hex_search
(
MACROBLOCK *x,
BLOCK *b,
@ -64,9 +64,9 @@ typedef int (fractional_mv_step_fp)
(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv, int_mv *ref_mv,
int error_per_bit, const vp8_variance_fn_ptr_t *vfp, DEC_MVCOSTS,
int *distortion, unsigned int *sse);
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step;
extern fractional_mv_step_fp vp8_find_best_half_pixel_step;
extern fractional_mv_step_fp vp9_find_best_sub_pixel_step_iteratively;
extern fractional_mv_step_fp vp9_find_best_sub_pixel_step;
extern fractional_mv_step_fp vp9_find_best_half_pixel_step;
extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
#define prototype_full_search_sad(sym)\
@ -118,30 +118,30 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
#endif
typedef prototype_full_search_sad(*vp8_full_search_fn_t);
extern prototype_full_search_sad(vp8_full_search_sad);
extern prototype_full_search_sad(vp8_full_search_sadx3);
extern prototype_full_search_sad(vp8_full_search_sadx8);
extern prototype_full_search_sad(vp9_full_search_sad);
extern prototype_full_search_sad(vp9_full_search_sadx3);
extern prototype_full_search_sad(vp9_full_search_sadx8);
typedef prototype_refining_search_sad(*vp8_refining_search_fn_t);
extern prototype_refining_search_sad(vp8_refining_search_sad);
extern prototype_refining_search_sad(vp8_refining_search_sadx4);
extern prototype_refining_search_sad(vp9_refining_search_sad);
extern prototype_refining_search_sad(vp9_refining_search_sadx4);
typedef prototype_diamond_search_sad(*vp8_diamond_search_fn_t);
extern prototype_diamond_search_sad(vp8_diamond_search_sad);
extern prototype_diamond_search_sad(vp8_diamond_search_sadx4);
extern prototype_diamond_search_sad(vp9_diamond_search_sad);
extern prototype_diamond_search_sad(vp9_diamond_search_sadx4);
#ifndef vp8_search_full_search
#define vp8_search_full_search vp8_full_search_sad
#define vp8_search_full_search vp9_full_search_sad
#endif
extern prototype_full_search_sad(vp8_search_full_search);
#ifndef vp8_search_refining_search
#define vp8_search_refining_search vp8_refining_search_sad
#define vp8_search_refining_search vp9_refining_search_sad
#endif
extern prototype_refining_search_sad(vp8_search_refining_search);
#ifndef vp8_search_diamond_search
#define vp8_search_diamond_search vp8_diamond_search_sad
#define vp8_search_diamond_search vp9_diamond_search_sad
#endif
extern prototype_diamond_search_sad(vp8_search_diamond_search);


@ -15,7 +15,7 @@
#include "vp8/common/entropymode.h"
void vp8_init_mode_costs(VP8_COMP *c) {
void vp9_init_mode_costs(VP8_COMP *c) {
VP8_COMMON *x = &c->common;
{
const vp8_tree_p T = vp8_bmode_tree;
@ -26,30 +26,30 @@ void vp8_init_mode_costs(VP8_COMP *c) {
int j = 0;
do {
vp8_cost_tokens((int *)c->mb.bmode_costs[i][j], x->kf_bmode_prob[i][j], T);
vp9_cost_tokens((int *)c->mb.bmode_costs[i][j], x->kf_bmode_prob[i][j], T);
} while (++j < VP8_BINTRAMODES);
} while (++i < VP8_BINTRAMODES);
vp8_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T);
vp9_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T);
}
vp8_cost_tokens((int *)c->mb.inter_bmode_costs,
vp9_cost_tokens((int *)c->mb.inter_bmode_costs,
x->fc.sub_mv_ref_prob[0], vp8_sub_mv_ref_tree);
vp8_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree);
vp8_cost_tokens(c->mb.mbmode_cost[0],
vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree);
vp9_cost_tokens(c->mb.mbmode_cost[0],
x->kf_ymode_prob[c->common.kf_ymode_probs_index],
vp8_kf_ymode_tree);
vp8_cost_tokens(c->mb.intra_uv_mode_cost[1],
vp9_cost_tokens(c->mb.intra_uv_mode_cost[1],
x->fc.uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
vp8_cost_tokens(c->mb.intra_uv_mode_cost[0],
vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
x->kf_uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
vp8_cost_tokens(c->mb.i8x8_mode_costs,
vp9_cost_tokens(c->mb.i8x8_mode_costs,
x->fc.i8x8_mode_prob, vp8_i8x8_mode_tree);
{
int i;
for (i = 0; i <= VP8_SWITCHABLE_FILTERS; ++i)
vp8_cost_tokens((int *)c->mb.switchable_interp_costs[i],
vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],
x->fc.switchable_interp_prob[i],
vp8_switchable_interp_tree);
}


@ -12,6 +12,6 @@
#ifndef __INC_MODECOSTS_H
#define __INC_MODECOSTS_H
void vp8_init_mode_costs(VP8_COMP *x);
void vp9_init_mode_costs(VP8_COMP *x);
#endif


@ -60,11 +60,11 @@
#define RTCD(x) NULL
#endif
extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
extern void vp9cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
extern void vp9cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
extern void vp9cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
extern void vp8_cmachine_specific_config(VP8_COMP *cpi);
extern void vp9_cmachine_specific_config(VP8_COMP *cpi);
extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag);
extern void print_parms(VP8_CONFIG *ocf, char *filenam);
extern unsigned int vp8_get_processor_freq();
@ -76,9 +76,9 @@ extern void vp8_yv12_copy_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFF
extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
#endif
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
extern void vp9_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
static void set_default_lf_deltas(VP8_COMP *cpi);
@ -101,12 +101,12 @@ extern const int vp8_gf_interval_table[101];
#if CONFIG_INTERNAL_STATS
#include "math.h"
extern double vp8_calc_ssim(YV12_BUFFER_CONFIG *source,
extern double vp9_calc_ssim(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *dest, int lumamask,
double *weight);
extern double vp8_calc_ssimg(YV12_BUFFER_CONFIG *source,
extern double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *dest, double *ssim_y,
double *ssim_u, double *ssim_v);
@ -166,7 +166,7 @@ extern unsigned int inter_uv_modes[VP8_UV_MODES];
extern unsigned int inter_b_modes[B_MODE_COUNT];
#endif
extern void vp8cx_init_quantizer(VP8_COMP *cpi);
extern void vp9cx_init_quantizer(VP8_COMP *cpi);
int vp8cx_base_skip_false_prob[QINDEX_RANGE][3];
@ -196,8 +196,8 @@ static int calculate_minq_index(double maxq,
minqtarget = maxq;
for (i = 0; i < QINDEX_RANGE; i++) {
thisq = vp8_convert_qindex_to_q(i);
if (minqtarget <= vp8_convert_qindex_to_q(i))
thisq = vp9_convert_qindex_to_q(i);
if (minqtarget <= vp9_convert_qindex_to_q(i))
return i;
}
return QINDEX_RANGE - 1;
@ -208,7 +208,7 @@ static void init_minq_luts(void) {
double maxq;
for (i = 0; i < QINDEX_RANGE; i++) {
maxq = vp8_convert_qindex_to_q(i);
maxq = vp9_convert_qindex_to_q(i);
kf_low_motion_minq[i] = calculate_minq_index(maxq,
@ -246,7 +246,7 @@ static void init_base_skip_probs(void) {
int skip_prob, t;
for (i = 0; i < QINDEX_RANGE; i++) {
q = vp8_convert_qindex_to_q(i);
q = vp9_convert_qindex_to_q(i);
// Exponential decay calculation of baseline skip prob with clamping
// Based on crude best fit of old table.
@ -306,15 +306,15 @@ static void update_base_skip_probs(VP8_COMP *cpi) {
}
void vp8_initialize() {
void vp9_initialize() {
static int init_done = 0;
if (!init_done) {
vp8_scale_machine_specific_config();
vp8_initialize_common();
vp8_tokenize_initialize();
vp9_tokenize_initialize();
vp8_init_quant_tables();
vp8_init_me_luts();
vp9_init_me_luts();
init_minq_luts();
init_base_skip_probs();
init_done = 1;
@ -381,7 +381,7 @@ static void dealloc_compressor_data(VP8_COMP *cpi) {
#if VP8_TEMPORAL_ALT_REF
vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
#endif
vp8_lookahead_destroy(cpi->lookahead);
vp9_lookahead_destroy(cpi->lookahead);
vpx_free(cpi->tok);
cpi->tok = 0;
@ -420,14 +420,14 @@ static int compute_qdelta(VP8_COMP *cpi, double qstart, double qtarget) {
// Convert the average q value to an index.
for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
start_index = i;
if (vp8_convert_qindex_to_q(i) >= qstart)
if (vp9_convert_qindex_to_q(i) >= qstart)
break;
}
// Convert the q target to an index
for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
target_index = i;
if (vp8_convert_qindex_to_q(i) >= qtarget)
if (vp9_convert_qindex_to_q(i) >= qtarget)
break;
}
@ -450,7 +450,7 @@ static void init_seg_features(VP8_COMP *cpi) {
cpi->static_mb_pct = 0;
// Disable segmentation
vp8_disable_segmentation((VP8_PTR)cpi);
vp9_disable_segmentation((VP8_PTR)cpi);
// Clear down the segment features.
vp9_clearall_segfeatures(xd);
@ -465,12 +465,12 @@ static void init_seg_features(VP8_COMP *cpi) {
cpi->static_mb_pct = 0;
// Disable segmentation and individual segment features by default
vp8_disable_segmentation((VP8_PTR)cpi);
vp9_disable_segmentation((VP8_PTR)cpi);
vp9_clearall_segfeatures(xd);
// Scan frames from current to arf frame.
// This function re-enables segmentation if appropriate.
vp8_update_mbgraph_stats(cpi);
vp9_update_mbgraph_stats(cpi);
// If segmentation was enabled set those features needed for the
// arf itself.
@ -526,7 +526,7 @@ static void init_seg_features(VP8_COMP *cpi) {
// Disable segmentation and clear down features if alt ref
// is not active for this group
else {
vp8_disable_segmentation((VP8_PTR)cpi);
vp9_disable_segmentation((VP8_PTR)cpi);
vpx_memset(cpi->segmentation_map, 0,
(cm->mb_rows * cm->mb_cols));
@ -660,7 +660,7 @@ static void set_default_lf_deltas(VP8_COMP *cpi) {
cpi->mb.e_mbd.mode_lf_deltas[3] = 4; // Split mv
}
void vp8_set_speed_features(VP8_COMP *cpi) {
void vp9_set_speed_features(VP8_COMP *cpi) {
SPEED_FEATURES *sf = &cpi->sf;
int Mode = cpi->compressor_speed;
int Speed = cpi->Speed;
@ -1172,48 +1172,48 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
}
if (cpi->sf.search_method == NSTEP) {
vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
vp9_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
} else if (cpi->sf.search_method == DIAMOND) {
vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
vp9_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
}
cpi->mb.vp8_short_fdct16x16 = vp8_short_fdct16x16;
cpi->mb.vp8_short_fdct8x8 = vp8_short_fdct8x8;
cpi->mb.vp8_short_fdct8x4 = vp8_short_fdct8x4;
cpi->mb.vp8_short_fdct4x4 = vp8_short_fdct4x4;
cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
cpi->mb.short_fhaar2x2 = vp8_short_fhaar2x2;
cpi->mb.vp9_short_fdct16x16 = vp9_short_fdct16x16;
cpi->mb.vp9_short_fdct8x8 = vp9_short_fdct8x8;
cpi->mb.vp9_short_fdct8x4 = vp9_short_fdct8x4;
cpi->mb.vp9_short_fdct4x4 = vp9_short_fdct4x4;
cpi->mb.short_walsh4x4 = vp9_short_walsh4x4;
cpi->mb.short_fhaar2x2 = vp9_short_fhaar2x2;
#if CONFIG_LOSSLESS
if (cpi->oxcf.lossless) {
cpi->mb.vp8_short_fdct8x4 = vp8_short_walsh8x4_x8;
cpi->mb.vp8_short_fdct4x4 = vp8_short_walsh4x4_x8;
cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
cpi->mb.short_fhaar2x2 = vp8_short_fhaar2x2;
cpi->mb.short_walsh4x4 = vp8_short_walsh4x4_lossless;
cpi->mb.vp9_short_fdct8x4 = vp9_short_walsh8x4_x8;
cpi->mb.vp9_short_fdct4x4 = vp9_short_walsh4x4_x8;
cpi->mb.short_walsh4x4 = vp9_short_walsh4x4;
cpi->mb.short_fhaar2x2 = vp9_short_fhaar2x2;
cpi->mb.short_walsh4x4 = vp9_short_walsh4x4_lossless;
}
#endif
cpi->mb.quantize_b_4x4 = vp8_regular_quantize_b_4x4;
cpi->mb.quantize_b_4x4_pair = vp8_regular_quantize_b_4x4_pair;
cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
cpi->mb.quantize_b_16x16 = vp8_regular_quantize_b_16x16;
cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
cpi->mb.quantize_b_4x4 = vp9_regular_quantize_b_4x4;
cpi->mb.quantize_b_4x4_pair = vp9_regular_quantize_b_4x4_pair;
cpi->mb.quantize_b_8x8 = vp9_regular_quantize_b_8x8;
cpi->mb.quantize_b_16x16 = vp9_regular_quantize_b_16x16;
cpi->mb.quantize_b_2x2 = vp9_regular_quantize_b_2x2;
vp8cx_init_quantizer(cpi);
vp9cx_init_quantizer(cpi);
#if CONFIG_RUNTIME_CPU_DETECT
cpi->mb.e_mbd.rtcd = &cpi->common.rtcd;
#endif
if (cpi->sf.iterative_sub_pixel == 1) {
cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_step_iteratively;
} else if (cpi->sf.quarter_pixel_search) {
cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_step;
} else if (cpi->sf.half_pixel_search) {
cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
cpi->find_fractional_mv_step = vp9_find_best_half_pixel_step;
}
if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1)
@ -1229,7 +1229,7 @@ static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
int width = (cpi->oxcf.Width + 15) & ~15;
int height = (cpi->oxcf.Height + 15) & ~15;
cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
cpi->lookahead = vp9_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
cpi->oxcf.lag_in_frames);
if (!cpi->lookahead)
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
@ -1259,7 +1259,7 @@ static int vp8_alloc_partition_data(VP8_COMP *cpi) {
return 0;
}
void vp8_alloc_compressor_data(VP8_COMP *cpi) {
void vp9_alloc_compressor_data(VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
int width = cm->Width;
@ -1363,7 +1363,7 @@ static const int q_trans[] = {
224, 228, 232, 236, 240, 244, 249, 255,
};
int vp8_reverse_trans(int x) {
int vp9_reverse_trans(int x) {
int i;
for (i = 0; i < 64; i++)
@ -1372,7 +1372,7 @@ int vp8_reverse_trans(int x) {
return 63;
};
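q_trans[] maps the user-level quantizer (0..63) onto the internal qindex scale, and vp9_reverse_trans() scans back to the first matching entry. A round-trip illustration, assuming q_trans[] is strictly increasing (the loop body is cut off by the hunk boundary above):

#include <assert.h>

int user_q = 32;                        /* user scale: 0..63 */
int qindex = q_trans[user_q];           /* internal scale: 0..255 */
assert(vp9_reverse_trans(qindex) == user_q);  /* inverse lookup */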
void vp8_new_frame_rate(VP8_COMP *cpi, double framerate) {
void vp9_new_frame_rate(VP8_COMP *cpi, double framerate) {
if (framerate < .1)
framerate = 30;
@ -1430,7 +1430,7 @@ static void init_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
vp8_setup_version(cm);
// change includes all joint functionality
vp8_change_config(ptr, oxcf);
vp9_change_config(ptr, oxcf);
// Initialize active best and worst q and average q values.
cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
@ -1464,7 +1464,7 @@ static void init_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
}
void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
void vp9_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
VP8_COMMON *cm = &cpi->common;
@ -1576,7 +1576,7 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
cpi->oxcf.target_bandwidth, 1000);
// Set up frame rate and related parameters rate control values.
vp8_new_frame_rate(cpi, cpi->oxcf.frame_rate);
vp9_new_frame_rate(cpi, cpi->oxcf.frame_rate);
// Set absolute upper and lower quality limits
cpi->worst_quality = cpi->oxcf.worst_allowed_q;
@ -1639,7 +1639,7 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
cm->yv12_fb[cm->lst_fb_idx].y_height ||
cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
alloc_raw_frame_buffers(cpi);
vp8_alloc_compressor_data(cpi);
vp9_alloc_compressor_data(cpi);
}
if (cpi->oxcf.fixed_q >= 0) {
@ -1710,7 +1710,7 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
} while (++i <= MV_MAX);
}
VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf) {
int i;
volatile union {
VP8_COMP *cpi;
@ -1733,7 +1733,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
VP8_PTR ptr = ctx.ptr;
ctx.cpi->common.error.setjmp = 0;
vp8_remove_compressor(&ptr);
vp9_remove_compressor(&ptr);
return 0;
}
@ -1742,7 +1742,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
vp8_create_common(&cpi->common);
vp8_cmachine_specific_config(cpi);
vp9_cmachine_specific_config(cpi);
init_config((VP8_PTR)cpi, oxcf);
@ -1883,7 +1883,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->ni_tot_qi = 0;
cpi->ni_frames = 0;
cpi->tot_q = 0.0;
cpi->avg_q = vp8_convert_qindex_to_q(cpi->oxcf.worst_allowed_q);
cpi->avg_q = vp9_convert_qindex_to_q(cpi->oxcf.worst_allowed_q);
cpi->total_byte_count = 0;
cpi->rate_correction_factor = 1.0;
@ -1923,7 +1923,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->output_pkt_list = oxcf->output_pkt_list;
if (cpi->pass == 1) {
vp8_init_first_pass(cpi);
vp9_init_first_pass(cpi);
} else if (cpi->pass == 2) {
size_t packet_sz = sizeof(FIRSTPASS_STATS);
int packets = oxcf->two_pass_stats_in.sz / packet_sz;
@ -1932,10 +1932,10 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end = (void *)((char *)cpi->twopass.stats_in
+ (packets - 1) * packet_sz);
vp8_init_second_pass(cpi);
vp9_init_second_pass(cpi);
}
vp8_set_speed_features(cpi);
vp9_set_speed_features(cpi);
// Set starting values of RD threshold multipliers (128 = *1)
for (i = 0; i < MAX_MODES; i++) {
@ -1959,35 +1959,35 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
#if CONFIG_SUPERBLOCKS
BFP(BLOCK_32X32, vp8_sad32x32, vp8_variance32x32, vp8_sub_pixel_variance32x32,
vp8_variance_halfpixvar32x32_h, vp8_variance_halfpixvar32x32_v,
vp8_variance_halfpixvar32x32_hv, vp8_sad32x32x3, vp8_sad32x32x8,
vp8_sad32x32x4d)
BFP(BLOCK_32X32, vp9_sad32x32, vp9_variance32x32, vp9_sub_pixel_variance32x32,
vp9_variance_halfpixvar32x32_h, vp9_variance_halfpixvar32x32_v,
vp9_variance_halfpixvar32x32_hv, vp9_sad32x32x3, vp9_sad32x32x8,
vp9_sad32x32x4d)
#endif
BFP(BLOCK_16X16, vp8_sad16x16, vp8_variance16x16, vp8_sub_pixel_variance16x16,
vp8_variance_halfpixvar16x16_h, vp8_variance_halfpixvar16x16_v,
vp8_variance_halfpixvar16x16_hv, vp8_sad16x16x3, vp8_sad16x16x8,
vp8_sad16x16x4d)
BFP(BLOCK_16X16, vp9_sad16x16, vp9_variance16x16, vp9_sub_pixel_variance16x16,
vp9_variance_halfpixvar16x16_h, vp9_variance_halfpixvar16x16_v,
vp9_variance_halfpixvar16x16_hv, vp9_sad16x16x3, vp9_sad16x16x8,
vp9_sad16x16x4d)
BFP(BLOCK_16X8, vp8_sad16x8, vp8_variance16x8, vp8_sub_pixel_variance16x8,
NULL, NULL, NULL, vp8_sad16x8x3, vp8_sad16x8x8, vp8_sad16x8x4d)
BFP(BLOCK_16X8, vp9_sad16x8, vp9_variance16x8, vp9_sub_pixel_variance16x8,
NULL, NULL, NULL, vp9_sad16x8x3, vp9_sad16x8x8, vp9_sad16x8x4d)
BFP(BLOCK_8X16, vp8_sad8x16, vp8_variance8x16, vp8_sub_pixel_variance8x16,
NULL, NULL, NULL, vp8_sad8x16x3, vp8_sad8x16x8, vp8_sad8x16x4d)
BFP(BLOCK_8X16, vp9_sad8x16, vp9_variance8x16, vp9_sub_pixel_variance8x16,
NULL, NULL, NULL, vp9_sad8x16x3, vp9_sad8x16x8, vp9_sad8x16x4d)
BFP(BLOCK_8X8, vp8_sad8x8, vp8_variance8x8, vp8_sub_pixel_variance8x8,
NULL, NULL, NULL, vp8_sad8x8x3, vp8_sad8x8x8, vp8_sad8x8x4d)
BFP(BLOCK_8X8, vp9_sad8x8, vp9_variance8x8, vp9_sub_pixel_variance8x8,
NULL, NULL, NULL, vp9_sad8x8x3, vp9_sad8x8x8, vp9_sad8x8x4d)
BFP(BLOCK_4X4, vp8_sad4x4, vp8_variance4x4, vp8_sub_pixel_variance4x4,
NULL, NULL, NULL, vp8_sad4x4x3, vp8_sad4x4x8, vp8_sad4x4x4d)
BFP(BLOCK_4X4, vp9_sad4x4, vp9_variance4x4, vp9_sub_pixel_variance4x4,
NULL, NULL, NULL, vp9_sad4x4x3, vp9_sad4x4x8, vp9_sad4x4x4d)
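The BFP() invocations above populate cpi->fn_ptr[] per block size. A hedged sketch of what the macro presumably expands to; the field names are inferred from the argument order and the .copymem assignments below, not confirmed by this diff.

#define BFP(BT, SDF, VF, SVF, SVFHH, SVFHV, SVFHHV, SDX3F, SDX8F, SDX4DF) \
  cpi->fn_ptr[BT].sdf            = SDF;    /* sum of absolute differences */\
  cpi->fn_ptr[BT].vf             = VF;     /* variance */                   \
  cpi->fn_ptr[BT].svf            = SVF;    /* sub-pel variance */           \
  cpi->fn_ptr[BT].svf_halfpix_h  = SVFHH;                                   \
  cpi->fn_ptr[BT].svf_halfpix_v  = SVFHV;                                   \
  cpi->fn_ptr[BT].svf_halfpix_hv = SVFHHV;                                  \
  cpi->fn_ptr[BT].sdx3f          = SDX3F;  /* batched SAD, 3 candidates */  \
  cpi->fn_ptr[BT].sdx8f          = SDX8F;                                   \
  cpi->fn_ptr[BT].sdx4df         = SDX4DF; /* SAD against 4 refs at once */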
#if ARCH_X86 || ARCH_X86_64
cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_16X16].copymem = vp9_copy32xn;
cpi->fn_ptr[BLOCK_16X8].copymem = vp9_copy32xn;
cpi->fn_ptr[BLOCK_8X16].copymem = vp9_copy32xn;
cpi->fn_ptr[BLOCK_8X8].copymem = vp9_copy32xn;
cpi->fn_ptr[BLOCK_4X4].copymem = vp9_copy32xn;
#endif
cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
@ -1997,9 +1997,9 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
// make sure frame 1 is okay
cpi->error_bins[0] = cpi->common.MBs;
// vp8cx_init_quantizer() is first called here. Add check in vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only called later
// when needed. This will avoid unnecessary calls of vp8cx_init_quantizer() for every frame.
vp8cx_init_quantizer(cpi);
// vp9cx_init_quantizer() is first called here. Add check in vp9cx_frame_init_quantizer() so that vp9cx_init_quantizer is only called later
// when needed. This will avoid unnecessary calls of vp9cx_init_quantizer() for every frame.
vp9cx_init_quantizer(cpi);
vp8_loop_filter_init(cm);
@ -2010,7 +2010,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
return (VP8_PTR) cpi;
}
void vp8_remove_compressor(VP8_PTR *ptr) {
void vp9_remove_compressor(VP8_PTR *ptr) {
VP8_COMP *cpi = (VP8_COMP *)(*ptr);
int i;
@ -2019,7 +2019,7 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
if (cpi && (cpi->common.current_video_frame > 0)) {
if (cpi->pass == 2) {
vp8_end_second_pass(cpi);
vp9_end_second_pass(cpi);
}
#ifdef ENTROPY_STATS
@ -2051,8 +2051,8 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
if (cpi->b_calculate_psnr) {
YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
double samples = 3.0 / 2 * cpi->count * lst_yv12->y_width * lst_yv12->y_height;
double total_psnr = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error);
double total_psnr2 = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error2);
double total_psnr = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error);
double total_psnr2 = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error2);
double total_ssim = 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\tVPXSSIM\t Time(ms)\n");
@ -2270,7 +2270,7 @@ static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
for (col = 0; col + 16 <= cols; col += 16) {
unsigned int sse;
vp8_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
vp9_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
total_sse += sse;
}
@ -2348,14 +2348,14 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
pkt.data.psnr.samples[3] = width * height;
for (i = 0; i < 4; i++)
pkt.data.psnr.psnr[i] = vp8_mse2psnr(pkt.data.psnr.samples[i], 255.0,
pkt.data.psnr.psnr[i] = vp9_mse2psnr(pkt.data.psnr.samples[i], 255.0,
pkt.data.psnr.sse[i]);
vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
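vp9_mse2psnr(), used in both hunks above, is the standard MSE-to-PSNR conversion. A hedged sketch; the 100 dB cap for the zero-error case is an assumption.

#include <math.h>

static double mse2psnr_sketch(double samples, double peak, double sse) {
  double psnr = 100.0;                 /* assumed cap when sse == 0 */
  if (sse > 0.0)
    /* PSNR = 10 * log10(peak^2 / MSE), with MSE = sse / samples */
    psnr = 10.0 * log10(samples * peak * peak / sse);
  return psnr > 100.0 ? 100.0 : psnr;
}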
int vp8_use_as_reference(VP8_PTR ptr, int ref_frame_flags) {
int vp9_use_as_reference(VP8_PTR ptr, int ref_frame_flags) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
if (ref_frame_flags > 7)
@ -2364,7 +2364,7 @@ int vp8_use_as_reference(VP8_PTR ptr, int ref_frame_flags) {
cpi->ref_frame_flags = ref_frame_flags;
return 0;
}
int vp8_update_reference(VP8_PTR ptr, int ref_frame_flags) {
int vp9_update_reference(VP8_PTR ptr, int ref_frame_flags) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
if (ref_frame_flags > 7)
@ -2386,7 +2386,7 @@ int vp8_update_reference(VP8_PTR ptr, int ref_frame_flags) {
return 0;
}
int vp8_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) {
int vp9_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
VP8_COMMON *cm = &cpi->common;
int ref_fb_idx;
@ -2404,7 +2404,7 @@ int vp8_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONF
return 0;
}
int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) {
int vp9_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
VP8_COMMON *cm = &cpi->common;
@ -2423,7 +2423,7 @@ int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONF
return 0;
}
int vp8_update_entropy(VP8_PTR comp, int update) {
int vp9_update_entropy(VP8_PTR comp, int update) {
VP8_COMP *cpi = (VP8_COMP *) comp;
VP8_COMMON *cm = &cpi->common;
cm->refresh_entropy_probs = update;
@ -2574,7 +2574,7 @@ static int find_fp_qindex() {
int i;
for (i = 0; i < QINDEX_RANGE; i++) {
if (vp8_convert_qindex_to_q(i) >= 30.0) {
if (vp9_convert_qindex_to_q(i) >= 30.0) {
break;
}
}
@ -2591,8 +2591,8 @@ static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
(void) frame_flags;
vp8_set_quantizer(cpi, find_fp_qindex());
vp8_first_pass(cpi);
vp9_set_quantizer(cpi, find_fp_qindex());
vp9_first_pass(cpi);
}
#define WRITE_RECON_BUFFER 0
@ -2786,17 +2786,17 @@ static void loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
vpx_usec_timer_start(&timer);
if (cpi->sf.auto_filter == 0)
vp8cx_pick_filter_level_fast(cpi->Source, cpi);
vp9cx_pick_filter_level_fast(cpi->Source, cpi);
else
vp8cx_pick_filter_level(cpi->Source, cpi);
vp9cx_pick_filter_level(cpi->Source, cpi);
vpx_usec_timer_mark(&timer);
cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
}
if (cm->filter_level > 0) {
vp8cx_set_alt_lf_level(cpi, cm->filter_level);
vp9cx_set_alt_lf_level(cpi, cm->filter_level);
vp8_loop_filter_frame(cm, &cpi->mb.e_mbd);
}
@ -2981,7 +2981,7 @@ static void encode_frame_to_data_rate
init_seg_features(cpi);
// Decide how big to make the frame
vp8_pick_frame_size(cpi);
vp9_pick_frame_size(cpi);
vp8_clear_system_state();
@ -3098,7 +3098,7 @@ static void encode_frame_to_data_rate
Q = cpi->last_boosted_qindex;
} else {
// Determine initial Q to try
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
Q = vp9_regulate_q(cpi, cpi->this_frame_target);
}
last_zbin_oq = cpi->zbin_over_quant;
@ -3110,7 +3110,7 @@ static void encode_frame_to_data_rate
else
zbin_oq_high = ZBIN_OQ_MAX;
vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
vp9_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
// Limit Q range for the adaptive loop.
bottom_index = cpi->active_best_quality;
@ -3195,7 +3195,7 @@ static void encode_frame_to_data_rate
do {
vp8_clear_system_state(); // __asm emms;
vp8_set_quantizer(cpi, Q);
vp9_set_quantizer(cpi, Q);
this_q = Q;
if (loop_count == 0) {
@ -3245,14 +3245,14 @@ static void encode_frame_to_data_rate
// Set up entropy depending on frame type.
if (cm->frame_type == KEY_FRAME)
vp8_setup_key_frame(cpi);
vp9_setup_key_frame(cpi);
else
vp8_setup_inter_frame(cpi);
vp9_setup_inter_frame(cpi);
}
// transform / motion compensation build reconstruction frame
vp8_encode_frame(cpi);
vp9_encode_frame(cpi);
// Update the skip mb flag probabilities based on the distribution
// seen in the last encoder iteration.
@ -3270,11 +3270,11 @@ static void encode_frame_to_data_rate
// Dummy pack of the bitstream using up to date stats to get an
// accurate estimate of output frame size to determine if we need
// to recode.
vp8_save_coding_context(cpi);
vp9_save_coding_context(cpi);
cpi->dummy_packing = 1;
vp8_pack_bitstream(cpi, dest, size);
vp9_pack_bitstream(cpi, dest, size);
cpi->projected_frame_size = (*size) << 3;
vp8_restore_coding_context(cpi);
vp9_restore_coding_context(cpi);
if (frame_over_shoot_limit == 0)
frame_over_shoot_limit = 1;
@ -3283,7 +3283,7 @@ static void encode_frame_to_data_rate
// Special case handling for forced key frames
if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
int last_q = Q;
int kf_err = vp8_calc_ss_err(cpi->Source,
int kf_err = vp9_calc_ss_err(cpi->Source,
&cm->yv12_fb[cm->new_fb_idx]);
int high_err_target = cpi->ambient_err;
@ -3347,7 +3347,7 @@ static void encode_frame_to_data_rate
if (undershoot_seen || (loop_count > 1)) {
// Update rate_correction_factor unless cpi->active_worst_quality has changed.
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 1);
vp9_update_rate_correction_factors(cpi, 1);
Q = (q_high + q_low + 1) / 2;
@ -3361,13 +3361,13 @@ static void encode_frame_to_data_rate
} else {
// Update rate_correction_factor unless cpi->active_worst_quality has changed.
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 0);
vp9_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
Q = vp9_regulate_q(cpi, cpi->this_frame_target);
while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10)) {
vp8_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
vp9_update_rate_correction_factors(cpi, 0);
Q = vp9_regulate_q(cpi, cpi->this_frame_target);
Retries++;
}
}
@ -3384,7 +3384,7 @@ static void encode_frame_to_data_rate
if (overshoot_seen || (loop_count > 1)) {
// Update rate_correction_factor unless cpi->active_worst_quality has changed.
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 1);
vp9_update_rate_correction_factors(cpi, 1);
Q = (q_high + q_low) / 2;
@ -3396,9 +3396,9 @@ static void encode_frame_to_data_rate
} else {
// Update rate_correction_factor unless cpi->active_worst_quality has changed.
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 0);
vp9_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
Q = vp9_regulate_q(cpi, cpi->this_frame_target);
// Special case reset for qlow for constrained quality.
// This should only trigger where there is very substantial
@ -3410,8 +3410,8 @@ static void encode_frame_to_data_rate
}
while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10)) {
vp8_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
vp9_update_rate_correction_factors(cpi, 0);
Q = vp9_regulate_q(cpi, cpi->this_frame_target);
Retries++;
}
}
@ -3472,7 +3472,7 @@ static void encode_frame_to_data_rate
if (Loop == FALSE && cm->frame_type != KEY_FRAME && sf->search_best_filter) {
if (mcomp_filter_index < mcomp_filters) {
INT64 err = vp8_calc_ss_err(cpi->Source,
INT64 err = vp9_calc_ss_err(cpi->Source,
&cm->yv12_fb[cm->new_fb_idx]);
INT64 rate = cpi->projected_frame_size << 8;
mcomp_filter_cost[mcomp_filter_index] =
@ -3534,7 +3534,7 @@ static void encode_frame_to_data_rate
// fixed interval. Note the reconstruction error if it is the frame before
// the forced key frame
if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
cpi->ambient_err = vp9_calc_ss_err(cpi->Source,
&cm->yv12_fb[cm->new_fb_idx]);
}
@ -3564,7 +3564,7 @@ static void encode_frame_to_data_rate
// Update the GF usage maps.
// This is done after completing the compression of a frame when all modes
// etc. are finalized but before loop filter
vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
vp9_update_gf_useage_maps(cpi, cm, &cpi->mb);
if (cm->frame_type == KEY_FRAME)
cm->refresh_last_frame = 1;
@ -3593,7 +3593,7 @@ static void encode_frame_to_data_rate
// build the bitstream
cpi->dummy_packing = 0;
vp8_pack_bitstream(cpi, dest, size);
vp9_pack_bitstream(cpi, dest, size);
if (cpi->mb.e_mbd.update_mb_segmentation_map) {
update_reference_segmentation_map(cpi);
@ -3641,7 +3641,7 @@ static void encode_frame_to_data_rate
cpi->projected_frame_size = (*size) << 3;
if (!active_worst_qchanged)
vp8_update_rate_correction_factors(cpi, 2);
vp9_update_rate_correction_factors(cpi, 2);
cpi->last_q[cm->frame_type] = cm->base_qindex;
@ -3659,7 +3659,7 @@ static void encode_frame_to_data_rate
}
if (cm->frame_type == KEY_FRAME) {
vp8_adjust_key_frame_context(cpi);
vp9_adjust_key_frame_context(cpi);
}
// Keep a record of ambient average Q.
@ -3669,7 +3669,7 @@ static void encode_frame_to_data_rate
// Keep a record from which we can calculate the average Q excluding GF updates and key frames
if ((cm->frame_type != KEY_FRAME) && !cm->refresh_golden_frame && !cm->refresh_alt_ref_frame) {
cpi->ni_frames++;
cpi->tot_q += vp8_convert_qindex_to_q(Q);
cpi->tot_q += vp9_convert_qindex_to_q(Q);
cpi->avg_q = cpi->tot_q / (double)cpi->ni_frames;
// Calculate the average Q for normal inter frames (not key or GFU
@ -3740,7 +3740,7 @@ static void encode_frame_to_data_rate
vp8_clear_system_state(); // __asm emms;
recon_err = vp8_calc_ss_err(cpi->Source,
recon_err = vp9_calc_ss_err(cpi->Source,
&cm->yv12_fb[cm->new_fb_idx]);
if (cpi->twopass.total_left_stats->coded_error != 0.0)
@ -3754,13 +3754,13 @@ static void encode_frame_to_data_rate
(int)cpi->total_target_vs_actual,
(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
(int)cpi->total_actual_bits,
vp8_convert_qindex_to_q(cm->base_qindex),
vp9_convert_qindex_to_q(cm->base_qindex),
(double)vp8_dc_quant(cm->base_qindex, 0) / 4.0,
vp8_convert_qindex_to_q(cpi->active_best_quality),
vp8_convert_qindex_to_q(cpi->active_worst_quality),
vp9_convert_qindex_to_q(cpi->active_best_quality),
vp9_convert_qindex_to_q(cpi->active_worst_quality),
cpi->avg_q,
vp8_convert_qindex_to_q(cpi->ni_av_qi),
vp8_convert_qindex_to_q(cpi->cq_target_quality),
vp9_convert_qindex_to_q(cpi->ni_av_qi),
vp9_convert_qindex_to_q(cpi->cq_target_quality),
cpi->zbin_over_quant,
// cpi->avg_frame_qindex, cpi->zbin_over_quant,
cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
@ -3784,13 +3784,13 @@ static void encode_frame_to_data_rate
(int)cpi->total_target_vs_actual,
(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
(int)cpi->total_actual_bits,
vp8_convert_qindex_to_q(cm->base_qindex),
vp9_convert_qindex_to_q(cm->base_qindex),
(double)vp8_dc_quant(cm->base_qindex, 0) / 4.0,
vp8_convert_qindex_to_q(cpi->active_best_quality),
vp8_convert_qindex_to_q(cpi->active_worst_quality),
vp9_convert_qindex_to_q(cpi->active_best_quality),
vp9_convert_qindex_to_q(cpi->active_worst_quality),
cpi->avg_q,
vp8_convert_qindex_to_q(cpi->ni_av_qi),
vp8_convert_qindex_to_q(cpi->cq_target_quality),
vp9_convert_qindex_to_q(cpi->ni_av_qi),
vp9_convert_qindex_to_q(cpi->cq_target_quality),
cpi->zbin_over_quant,
// cpi->avg_frame_qindex, cpi->zbin_over_quant,
cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
@ -3929,7 +3929,7 @@ static void encode_frame_to_data_rate
static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags) {
if (!cpi->common.refresh_alt_ref_frame)
vp8_second_pass(cpi);
vp9_second_pass(cpi);
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
cpi->twopass.bits_left -= 8 * *size;
@ -3953,7 +3953,7 @@ extern void vp8_pop_neon(int64_t *store);
#endif
int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time) {
int vp9_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time) {
#if HAVE_ARMV7
int64_t store_reg[8];
#endif
@ -3972,7 +3972,7 @@ int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CON
#endif
vpx_usec_timer_start(&timer);
if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
if (vp9_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL))
res = -1;
cm->clr_type = sd->clrtype;
@ -4005,7 +4005,7 @@ static int frame_is_reference(const VP8_COMP *cpi) {
}
int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush) {
int vp9_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush) {
#if HAVE_ARMV7
int64_t store_reg[8];
#endif
@ -4034,11 +4034,11 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
// Should we code an alternate reference frame
if (cpi->oxcf.play_alternate &&
cpi->source_alt_ref_pending) {
if ((cpi->source = vp8_lookahead_peek(cpi->lookahead,
if ((cpi->source = vp9_lookahead_peek(cpi->lookahead,
cpi->frames_till_gf_update_due))) {
cpi->alt_ref_source = cpi->source;
if (cpi->oxcf.arnr_max_frames > 0) {
vp8_temporal_filter_prepare_c(cpi,
vp9_temporal_filter_prepare_c(cpi,
cpi->frames_till_gf_update_due);
force_src_buffer = &cpi->alt_ref_buffer;
}
@@ -4053,7 +4053,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
}
if (!cpi->source) {
if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
if ((cpi->source = vp9_lookahead_pop(cpi->lookahead, flush))) {
cm->show_frame = 1;
cpi->is_src_frame_alt_ref = cpi->alt_ref_source
@@ -4073,7 +4073,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
} else {
*size = 0;
if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
vp8_end_first_pass(cpi); /* get last stats packet */
vp9_end_first_pass(cpi); /* get last stats packet */
cpi->twopass.first_pass_done = 1;
}
@@ -4114,7 +4114,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
if (this_duration) {
if (step)
vp8_new_frame_rate(cpi, 10000000.0 / this_duration);
vp9_new_frame_rate(cpi, 10000000.0 / this_duration);
else {
double avg_duration, interval;
@@ -4130,7 +4130,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
avg_duration *= (interval - avg_duration + this_duration);
avg_duration /= interval;
vp8_new_frame_rate(cpi, 10000000.0 / avg_duration);
vp9_new_frame_rate(cpi, 10000000.0 / avg_duration);
}
}
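(A note on the hunk above: the two `avg_duration` lines implement a damped running average. Rearranged, avg ← avg · (interval − avg + d) / interval = avg + avg · (d − avg) / interval, so each new source duration d pulls the average toward itself with gain avg/interval; the rate handed to `vp9_new_frame_rate` is 10000000 / avg because timestamps are counted in 10 MHz ticks.)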
@@ -4240,11 +4240,11 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
sq_error = ye + ue + ve;
frame_psnr = vp8_mse2psnr(t_samples, 255.0, sq_error);
frame_psnr = vp9_mse2psnr(t_samples, 255.0, sq_error);
cpi->total_y += vp8_mse2psnr(y_samples, 255.0, ye);
cpi->total_u += vp8_mse2psnr(uv_samples, 255.0, ue);
cpi->total_v += vp8_mse2psnr(uv_samples, 255.0, ve);
cpi->total_y += vp9_mse2psnr(y_samples, 255.0, ye);
cpi->total_u += vp9_mse2psnr(uv_samples, 255.0, ue);
cpi->total_v += vp9_mse2psnr(uv_samples, 255.0, ve);
cpi->total_sq_error += sq_error;
cpi->total += frame_psnr;
{
@@ -4269,15 +4269,15 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
sq_error = ye + ue + ve;
frame_psnr2 = vp8_mse2psnr(t_samples, 255.0, sq_error);
frame_psnr2 = vp9_mse2psnr(t_samples, 255.0, sq_error);
cpi->totalp_y += vp8_mse2psnr(y_samples, 255.0, ye);
cpi->totalp_u += vp8_mse2psnr(uv_samples, 255.0, ue);
cpi->totalp_v += vp8_mse2psnr(uv_samples, 255.0, ve);
cpi->totalp_y += vp9_mse2psnr(y_samples, 255.0, ye);
cpi->totalp_u += vp9_mse2psnr(uv_samples, 255.0, ue);
cpi->totalp_v += vp9_mse2psnr(uv_samples, 255.0, ve);
cpi->total_sq_error2 += sq_error;
cpi->totalp += frame_psnr2;
frame_ssim2 = vp8_calc_ssim(cpi->Source,
frame_ssim2 = vp9_calc_ssim(cpi->Source,
&cm->post_proc_buffer, 1, &weight);
cpi->summed_quality += frame_ssim2 * weight;
@@ -4296,7 +4296,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
if (cpi->b_calculate_ssimg) {
double y, u, v, frame_all;
frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show,
&y, &u, &v);
cpi->total_ssimg_y += y;
cpi->total_ssimg_u += u;
@@ -4321,7 +4321,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
return 0;
}
int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags) {
int vp9_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags) {
VP8_COMP *cpi = (VP8_COMP *) comp;
if (cpi->common.refresh_alt_ref_frame)
@@ -4348,7 +4348,7 @@ int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflag
}
}
int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]) {
int vp9_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]) {
VP8_COMP *cpi = (VP8_COMP *) comp;
signed char feature_data[SEG_LVL_MAX][MAX_MB_SEGMENTS];
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -4358,15 +4358,15 @@ int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned
return -1;
if (!map) {
vp8_disable_segmentation((VP8_PTR)cpi);
vp9_disable_segmentation((VP8_PTR)cpi);
return 0;
}
// Set the segmentation Map
vp8_set_segmentation_map((VP8_PTR)cpi, map);
vp9_set_segmentation_map((VP8_PTR)cpi, map);
// Activate segmentation.
vp8_enable_segmentation((VP8_PTR)cpi);
vp9_enable_segmentation((VP8_PTR)cpi);
// Set up the quant segment data
feature_data[SEG_LVL_ALT_Q][0] = delta_q[0];
@@ -4400,12 +4400,12 @@ int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned
// Initialise the feature data structure
// SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
vp8_set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
vp9_set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
return 0;
}
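For orientation, a hypothetical caller of the renamed ROI entry point, using only the signature visible in this hunk; the segment layout, delta values, and `enc` handle are illustrative assumptions, not part of this commit:

```c
#include <stdlib.h>

/* Hypothetical: mark one macroblock row as segment 1 with a finer quantizer.
 * VP8_PTR comes from the encoder headers; mb_rows/mb_cols must match the
 * encoder's macroblock dimensions or vp9_set_roimap returns -1. */
static int set_roi_example(VP8_PTR enc, unsigned int mb_rows, unsigned int mb_cols) {
  unsigned char *map = calloc(mb_rows * mb_cols, 1);  /* all segment 0 */
  int delta_q[4]  = { 0, -8, 0, 0 };                  /* segment 1: lower Q */
  int delta_lf[4] = { 0,  0, 0, 0 };
  unsigned int threshold[4] = { 0, 0, 0, 0 };
  unsigned int c;
  int res;
  if (!map)
    return -1;
  for (c = 0; c < mb_cols; c++)
    map[c] = 1;                                       /* first MB row -> segment 1 */
  res = vp9_set_roimap(enc, map, mb_rows, mb_cols, delta_q, delta_lf, threshold);
  free(map);
  return res;
}
```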
int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols) {
int vp9_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols) {
VP8_COMP *cpi = (VP8_COMP *) comp;
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
@@ -4422,7 +4422,7 @@ int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsi
}
}
int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
int vp9_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
VP8_COMP *cpi = (VP8_COMP *) comp;
if (horiz_mode <= ONETWO)
@@ -4440,7 +4440,7 @@ int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
int i, j;
int Total = 0;
@@ -4451,7 +4451,7 @@ int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
for (i = 0; i < source->y_height; i += 16) {
for (j = 0; j < source->y_width; j += 16) {
unsigned int sse;
Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
Total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
&sse);
}
@@ -4463,7 +4463,7 @@ int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
}
int vp8_get_quantizer(VP8_PTR c) {
int vp9_get_quantizer(VP8_PTR c) {
VP8_COMP *cpi = (VP8_COMP *) c;
return cpi->common.base_qindex;
}
View File
@@ -759,18 +759,18 @@ typedef struct VP8_COMP {
void control_data_rate(VP8_COMP *cpi);
void vp8_encode_frame(VP8_COMP *cpi);
void vp9_encode_frame(VP8_COMP *cpi);
void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size);
void vp9_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size);
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x);
void vp9_activity_masking(VP8_COMP *cpi, MACROBLOCK *x);
int rd_cost_intra_mb(MACROBLOCKD *x);
void vp8_tokenize_mb(VP8_COMP *, MACROBLOCKD *, TOKENEXTRA **, int dry_run);
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp9_tokenize_mb(VP8_COMP *, MACROBLOCKD *, TOKENEXTRA **, int dry_run);
void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_set_speed_features(VP8_COMP *cpi);
void vp9_set_speed_features(VP8_COMP *cpi);
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval,expr) do {\
View File
@@ -21,7 +21,7 @@
#include "vpx_ports/arm.h"
#endif
extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source,
extern int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *dest);
#if HAVE_ARMV7
extern void vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
@@ -34,7 +34,7 @@ extern void vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(YV12_BUFFER_C
#endif
extern void
(*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc,
(*vp9_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc,
YV12_BUFFER_CONFIG *dst_ybc,
int Fraction);
@@ -47,7 +47,7 @@ extern void vp8_loop_filter_frame_segment
);
void
vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction) {
vp9_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction) {
unsigned char *src_y, *dst_y;
int yheight;
int ystride;
@@ -98,7 +98,7 @@ static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
for (i = 0; i < linestocopy; i += 16) {
for (j = 0; j < source->y_width; j += 16) {
unsigned int sse;
Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
Total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
&sse);
}
@@ -112,7 +112,7 @@ static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
// Enforce a minimum filter level based upon baseline Q
static int get_min_filter_level(VP8_COMP *cpi, int base_qindex) {
int min_filter_level;
/*int q = (int) vp8_convert_qindex_to_q(base_qindex);
/*int q = (int) vp9_convert_qindex_to_q(base_qindex);
if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && !cpi->common.refresh_alt_ref_frame)
min_filter_level = 0;
@@ -146,7 +146,7 @@ static int get_max_filter_level(VP8_COMP *cpi, int base_qindex) {
return max_filter_level;
}
void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
void vp9cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
int best_err = 0;
@@ -157,7 +157,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
int best_filt_val = cm->filter_level;
// Make a copy of the unfiltered / processed recon buffer
vp8_yv12_copy_partial_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf, 3);
vp9_yv12_copy_partial_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf, 3);
if (cm->frame_type == KEY_FRAME)
cm->sharpness_level = 0;
@@ -184,7 +184,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
// Re-instate the unfiltered frame
vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
vp9_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
filt_val -= (1 + ((filt_val > 10) ? 1 : 0));
@@ -197,7 +197,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
// Re-instate the unfiltered frame
vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
vp9_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
// Update the best case record or exit loop.
@@ -226,7 +226,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
// Re-instate the unfiltered frame
vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
vp9_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
// Update the best case record or exit loop.
if (filt_err < best_err) {
@@ -252,10 +252,10 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
}
// Stub function for now Alt LF not used
void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val) {
void vp9cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val) {
}
void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
void vp9cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
int best_err = 0;
@@ -307,10 +307,10 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
// Get baseline error score
vp8cx_set_alt_lf_level(cpi, filt_mid);
vp9cx_set_alt_lf_level(cpi, filt_mid);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
best_err = vp8_calc_ss_err(sd, cm->frame_to_show);
best_err = vp9_calc_ss_err(sd, cm->frame_to_show);
filt_best = filt_mid;
// Re-instate the unfiltered frame
@@ -347,10 +347,10 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
if ((filt_direction <= 0) && (filt_low != filt_mid)) {
// Get Low filter error score
vp8cx_set_alt_lf_level(cpi, filt_low);
vp9cx_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);
// Re-instate the unfiltered frame
#if HAVE_ARMV7
@@ -382,10 +382,10 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
// Now look at filt_high
if ((filt_direction >= 0) && (filt_high != filt_mid)) {
vp8cx_set_alt_lf_level(cpi, filt_high);
vp9cx_set_alt_lf_level(cpi, filt_high);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);
// Re-instate the unfiltered frame
#if HAVE_ARMV7
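The hunks above only show the seams of `vp9cx_pick_filter_level`; its overall shape is a coarse-to-fine probe around `filt_mid` that halves `filter_step` until it converges. A condensed sketch, where `eval_filter_level` is a hypothetical stand-in for the vp9cx_set_alt_lf_level / vp8_loop_filter_frame_yonly / vp9_calc_ss_err / frame-restore sequence:

```c
/* Hypothetical helper: filter frame_to_show at `level`, score it against the
 * source with vp9_calc_ss_err, then restore the unfiltered frame. */
extern int eval_filter_level(int level);

static int pick_filter_level_sketch(int filt_mid, int filter_step,
                                    int min_level, int max_level) {
  int filt_best = filt_mid;
  int best_err = eval_filter_level(filt_mid);          /* baseline score */
  while (filter_step > 0) {
    int filt_low  = filt_mid - filter_step;
    int filt_high = filt_mid + filter_step;
    if (filt_low < min_level) filt_low = min_level;
    if (filt_high > max_level) filt_high = max_level;
    if (filt_low != filt_mid) {
      int err = eval_filter_level(filt_low);
      if (err < best_err) { best_err = err; filt_best = filt_low; }
    }
    if (filt_high != filt_mid) {
      int err = eval_filter_level(filt_high);
      if (err < best_err) { best_err = err; filt_best = filt_high; }
    }
    if (filt_best == filt_mid)
      filter_step >>= 1;                               /* narrow the probe */
    else
      filt_mid = filt_best;                            /* recentre and repeat */
  }
  return filt_best;
}
```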
View File
@@ -12,53 +12,53 @@
#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"
SADFunction *vp8_sad16x16;
SADFunction *vp8_sad16x8;
SADFunction *vp8_sad8x16;
SADFunction *vp8_sad8x8;
SADFunction *vp8_sad4x4;
SADFunction *vp9_sad16x16;
SADFunction *vp9_sad16x8;
SADFunction *vp9_sad8x16;
SADFunction *vp9_sad8x8;
SADFunction *vp9_sad4x4;
variance_function *vp8_variance4x4;
variance_function *vp8_variance8x8;
variance_function *vp8_variance8x16;
variance_function *vp8_variance16x8;
variance_function *vp8_variance16x16;
variance_function *vp9_variance4x4;
variance_function *vp9_variance8x8;
variance_function *vp9_variance8x16;
variance_function *vp9_variance16x8;
variance_function *vp9_variance16x16;
variance_function *vp8_mse16x16;
variance_function *vp9_mse16x16;
sub_pixel_variance_function *vp8_sub_pixel_variance4x4;
sub_pixel_variance_function *vp8_sub_pixel_variance8x8;
sub_pixel_variance_function *vp8_sub_pixel_variance8x16;
sub_pixel_variance_function *vp8_sub_pixel_variance16x8;
sub_pixel_variance_function *vp8_sub_pixel_variance16x16;
sub_pixel_variance_function *vp9_sub_pixel_variance4x4;
sub_pixel_variance_function *vp9_sub_pixel_variance8x8;
sub_pixel_variance_function *vp9_sub_pixel_variance8x16;
sub_pixel_variance_function *vp9_sub_pixel_variance16x8;
sub_pixel_variance_function *vp9_sub_pixel_variance16x16;
int (*vp8_block_error)(short *coeff, short *dqcoeff);
int (*vp8_mbblock_error)(MACROBLOCK *mb, int dc);
int (*vp9_block_error)(short *coeff, short *dqcoeff);
int (*vp9_mbblock_error)(MACROBLOCK *mb, int dc);
int (*vp8_mbuverror)(MACROBLOCK *mb);
unsigned int (*vp8_get_mb_ss)(short *);
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
int (*vp9_mbuverror)(MACROBLOCK *mb);
unsigned int (*vp9_get_mb_ss)(short *);
void (*vp9_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp9_short_fdct8x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);
void (*vp8_subtract_b)(BLOCK *be, BLOCKD *bd, int pitch);
void (*vp8_subtract_mby)(short *diff, unsigned char *src, unsigned char *pred, int stride);
void (*vp8_subtract_mbuv)(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
void (*vp9_subtract_b)(BLOCK *be, BLOCKD *bd, int pitch);
void (*vp9_subtract_mby)(short *diff, unsigned char *src, unsigned char *pred, int stride);
void (*vp9_subtract_mbuv)(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);
// c imports
extern int block_error_c(short *coeff, short *dqcoeff);
extern int vp8_mbblock_error_c(MACROBLOCK *mb, int dc);
extern int vp9_mbblock_error_c(MACROBLOCK *mb, int dc);
extern int vp8_mbuverror_c(MACROBLOCK *mb);
extern int vp9_mbuverror_c(MACROBLOCK *mb);
extern unsigned int vp8_get8x8var_c(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern void short_fdct4x4_c(short *input, short *output, int pitch);
extern void short_fdct8x4_c(short *input, short *output, int pitch);
extern void vp8_short_walsh4x4_c(short *input, short *output, int pitch);
extern void vp9_short_walsh4x4_c(short *input, short *output, int pitch);
extern void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch);
extern void vp9_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch);
extern void subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);
@@ -82,74 +82,74 @@ extern sub_pixel_variance_function sub_pixel_variance8x16_c;
extern sub_pixel_variance_function sub_pixel_variance16x8_c;
extern sub_pixel_variance_function sub_pixel_variance16x16_c;
extern unsigned int vp8_get_mb_ss_c(short *);
extern unsigned int vp9_get_mb_ss_c(short *);
// ppc
extern int vp8_block_error_ppc(short *coeff, short *dqcoeff);
extern int vp9_block_error_ppc(short *coeff, short *dqcoeff);
extern void vp8_short_fdct4x4_ppc(short *input, short *output, int pitch);
extern void vp8_short_fdct8x4_ppc(short *input, short *output, int pitch);
extern void vp9_short_fdct4x4_ppc(short *input, short *output, int pitch);
extern void vp9_short_fdct8x4_ppc(short *input, short *output, int pitch);
extern void vp8_subtract_mby_ppc(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void vp8_subtract_mbuv_ppc(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
extern void vp9_subtract_mby_ppc(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void vp9_subtract_mbuv_ppc(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
extern SADFunction vp8_sad16x16_ppc;
extern SADFunction vp8_sad16x8_ppc;
extern SADFunction vp8_sad8x16_ppc;
extern SADFunction vp8_sad8x8_ppc;
extern SADFunction vp8_sad4x4_ppc;
extern SADFunction vp9_sad16x16_ppc;
extern SADFunction vp9_sad16x8_ppc;
extern SADFunction vp9_sad8x16_ppc;
extern SADFunction vp9_sad8x8_ppc;
extern SADFunction vp9_sad4x4_ppc;
extern variance_function vp8_variance16x16_ppc;
extern variance_function vp8_variance8x16_ppc;
extern variance_function vp8_variance16x8_ppc;
extern variance_function vp8_variance8x8_ppc;
extern variance_function vp8_variance4x4_ppc;
extern variance_function vp8_mse16x16_ppc;
extern variance_function vp9_variance16x16_ppc;
extern variance_function vp9_variance8x16_ppc;
extern variance_function vp9_variance16x8_ppc;
extern variance_function vp9_variance8x8_ppc;
extern variance_function vp9_variance4x4_ppc;
extern variance_function vp9_mse16x16_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance4x4_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x16_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x16_ppc;
extern sub_pixel_variance_function vp9_sub_pixel_variance4x4_ppc;
extern sub_pixel_variance_function vp9_sub_pixel_variance8x8_ppc;
extern sub_pixel_variance_function vp9_sub_pixel_variance8x16_ppc;
extern sub_pixel_variance_function vp9_sub_pixel_variance16x8_ppc;
extern sub_pixel_variance_function vp9_sub_pixel_variance16x16_ppc;
extern unsigned int vp8_get8x8var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern unsigned int vp8_get16x16var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
void vp8_cmachine_specific_config(void) {
void vp9_cmachine_specific_config(void) {
// Pure C:
vp8_mbuverror = vp8_mbuverror_c;
vp9_mbuverror = vp9_mbuverror_c;
vp8_fast_quantize_b = vp8_fast_quantize_b_c;
vp8_short_fdct4x4 = vp8_short_fdct4x4_ppc;
vp8_short_fdct8x4 = vp8_short_fdct8x4_ppc;
vp8_fast_fdct4x4 = vp8_short_fdct4x4_ppc;
vp8_fast_fdct8x4 = vp8_short_fdct8x4_ppc;
short_walsh4x4 = vp8_short_walsh4x4_c;
vp9_short_fdct4x4 = vp9_short_fdct4x4_ppc;
vp9_short_fdct8x4 = vp9_short_fdct8x4_ppc;
vp8_fast_fdct4x4 = vp9_short_fdct4x4_ppc;
vp8_fast_fdct8x4 = vp9_short_fdct8x4_ppc;
short_walsh4x4 = vp9_short_walsh4x4_c;
vp8_variance4x4 = vp8_variance4x4_ppc;
vp8_variance8x8 = vp8_variance8x8_ppc;
vp8_variance8x16 = vp8_variance8x16_ppc;
vp8_variance16x8 = vp8_variance16x8_ppc;
vp8_variance16x16 = vp8_variance16x16_ppc;
vp8_mse16x16 = vp8_mse16x16_ppc;
vp9_variance4x4 = vp9_variance4x4_ppc;
vp9_variance8x8 = vp9_variance8x8_ppc;
vp9_variance8x16 = vp9_variance8x16_ppc;
vp9_variance16x8 = vp9_variance16x8_ppc;
vp9_variance16x16 = vp9_variance16x16_ppc;
vp9_mse16x16 = vp9_mse16x16_ppc;
vp8_sub_pixel_variance4x4 = vp8_sub_pixel_variance4x4_ppc;
vp8_sub_pixel_variance8x8 = vp8_sub_pixel_variance8x8_ppc;
vp8_sub_pixel_variance8x16 = vp8_sub_pixel_variance8x16_ppc;
vp8_sub_pixel_variance16x8 = vp8_sub_pixel_variance16x8_ppc;
vp8_sub_pixel_variance16x16 = vp8_sub_pixel_variance16x16_ppc;
vp9_sub_pixel_variance4x4 = vp9_sub_pixel_variance4x4_ppc;
vp9_sub_pixel_variance8x8 = vp9_sub_pixel_variance8x8_ppc;
vp9_sub_pixel_variance8x16 = vp9_sub_pixel_variance8x16_ppc;
vp9_sub_pixel_variance16x8 = vp9_sub_pixel_variance16x8_ppc;
vp9_sub_pixel_variance16x16 = vp9_sub_pixel_variance16x16_ppc;
vp8_get_mb_ss = vp8_get_mb_ss_c;
vp9_get_mb_ss = vp9_get_mb_ss_c;
vp8_sad16x16 = vp8_sad16x16_ppc;
vp8_sad16x8 = vp8_sad16x8_ppc;
vp8_sad8x16 = vp8_sad8x16_ppc;
vp8_sad8x8 = vp8_sad8x8_ppc;
vp8_sad4x4 = vp8_sad4x4_ppc;
vp9_sad16x16 = vp9_sad16x16_ppc;
vp9_sad16x8 = vp9_sad16x8_ppc;
vp9_sad8x16 = vp9_sad8x16_ppc;
vp9_sad8x8 = vp9_sad8x8_ppc;
vp9_sad4x4 = vp9_sad4x4_ppc;
vp8_block_error = vp8_block_error_ppc;
vp8_mbblock_error = vp8_mbblock_error_c;
vp9_block_error = vp9_block_error_ppc;
vp9_mbblock_error = vp9_mbblock_error_c;
vp8_subtract_b = vp8_subtract_b_c;
vp8_subtract_mby = vp8_subtract_mby_ppc;
vp8_subtract_mbuv = vp8_subtract_mbuv_ppc;
vp9_subtract_b = vp9_subtract_b_c;
vp9_subtract_mby = vp9_subtract_mby_ppc;
vp9_subtract_mbuv = vp9_subtract_mbuv_ppc;
}
View File
@@ -15,7 +15,7 @@
#define MAX_PSNR 100
double vp8_mse2psnr(double Samples, double Peak, double Mse) {
double vp9_mse2psnr(double Samples, double Peak, double Mse) {
double psnr;
if ((double)Mse > 0.0)
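The diff cuts the body off at the `if`; for reference, the conversion is the textbook sum-of-squares PSNR, capped at MAX_PSNR. A sketch matching the signature above (callers in onyx_if.c pass a raw SSE, so `Samples` folds the mean into the ratio):

```c
#include <math.h>

#define MAX_PSNR 100  /* as defined a few lines up */

/* Sketch of the elided body: 10*log10(Peak^2 * Samples / SSE), clamped. */
static double mse2psnr_sketch(double Samples, double Peak, double Sse) {
  double psnr = (double)MAX_PSNR;
  if (Sse > 0.0)
    psnr = 10.0 * log10(Peak * Peak * Samples / Sse);
  return psnr > MAX_PSNR ? MAX_PSNR : psnr;
}
```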
View File
@@ -12,6 +12,6 @@
#ifndef __INC_PSNR_H
#define __INC_PSNR_H
extern double vp8_mse2psnr(double Samples, double Peak, double Mse);
extern double vp9_mse2psnr(double Samples, double Peak, double Mse);
#endif
View File
@@ -21,7 +21,7 @@
extern int enc_debug;
#endif
void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
void vp9_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -85,7 +85,7 @@ void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
d->eob = eob + 1;
}
void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
void vp9_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -134,7 +134,7 @@ void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
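The quantizer bodies are elided throughout this file's hunks, but every `*_quantize_b_*` variant shares one zero-bin skeleton. A schematic version (the zbin boost, the second `quant_shift` stage, and per-MB zbin_extra are deliberately dropped; treat constants and rounding as assumptions, not the committed code):

```c
/* Schematic of the zero-bin step shared by the *_quantize_b_* bodies.
 * z: input coefficient, sz: its sign mask, x: |z| then magnitude,
 * rc: raster position from the scan order, eob: last nonzero index. */
static void quantize_block_sketch(const short *coeff, const short *zbin,
                                  const short *round, const short *quant,
                                  const short *dequant, const int *scan,
                                  short *qcoeff, short *dqcoeff,
                                  int n, int *eob_out) {
  int i, eob = -1;
  for (i = 0; i < n; i++) {
    const int rc = scan[i];            /* zig-zag position */
    int z = coeff[rc];
    int sz = z >> 31;                  /* sign mask: 0 or -1 */
    int x = (z ^ sz) - sz;             /* abs(z) */
    qcoeff[rc] = dqcoeff[rc] = 0;
    if (x >= zbin[rc]) {               /* dead-zone: small values stay 0 */
      int y = ((x + round[rc]) * quant[rc]) >> 16;  /* multiply-shift divide */
      if (y) {
        qcoeff[rc]  = (short)((y ^ sz) - sz);       /* re-apply sign */
        dqcoeff[rc] = (short)(qcoeff[rc] * dequant[rc]);
        eob = i;                       /* last nonzero in scan order */
      }
    }
  }
  *eob_out = eob + 1;                  /* matches d->eob = eob + 1 above */
}
```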
void vp8_quantize_mby_4x4_c(MACROBLOCK *x) {
void vp9_quantize_mby_4x4_c(MACROBLOCK *x) {
int i;
int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
@@ -145,19 +145,19 @@ void vp8_quantize_mby_4x4_c(MACROBLOCK *x) {
x->quantize_b_4x4(&x->block[24], &x->e_mbd.block[24]);
}
void vp8_quantize_mbuv_4x4_c(MACROBLOCK *x) {
void vp9_quantize_mbuv_4x4_c(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i++)
x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
}
void vp8_quantize_mb_4x4_c(MACROBLOCK *x) {
vp8_quantize_mby_4x4_c(x);
vp8_quantize_mbuv_4x4_c(x);
void vp9_quantize_mb_4x4_c(MACROBLOCK *x) {
vp9_quantize_mby_4x4_c(x);
vp9_quantize_mbuv_4x4_c(x);
}
void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
void vp9_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -207,7 +207,7 @@ void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
void vp9_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -255,7 +255,7 @@ void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
void vp8_quantize_mby_8x8(MACROBLOCK *x) {
void vp9_quantize_mby_8x8(MACROBLOCK *x) {
int i;
int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
@@ -270,7 +270,7 @@ void vp8_quantize_mby_8x8(MACROBLOCK *x) {
x->quantize_b_2x2(&x->block[24], &x->e_mbd.block[24]);
}
void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
void vp9_quantize_mbuv_8x8(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i ++)
@@ -279,12 +279,12 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
void vp8_quantize_mb_8x8(MACROBLOCK *x) {
vp8_quantize_mby_8x8(x);
vp8_quantize_mbuv_8x8(x);
void vp9_quantize_mb_8x8(MACROBLOCK *x) {
vp9_quantize_mby_8x8(x);
vp9_quantize_mbuv_8x8(x);
}
void vp8_quantize_mby_16x16(MACROBLOCK *x) {
void vp9_quantize_mby_16x16(MACROBLOCK *x) {
int i;
for (i = 0; i < 16; i++)
@@ -293,12 +293,12 @@ void vp8_quantize_mby_16x16(MACROBLOCK *x) {
x->quantize_b_16x16(&x->block[0], &x->e_mbd.block[0]);
}
void vp8_quantize_mb_16x16(MACROBLOCK *x) {
vp8_quantize_mby_16x16(x);
vp8_quantize_mbuv_8x8(x);
void vp9_quantize_mb_16x16(MACROBLOCK *x) {
vp9_quantize_mby_16x16(x);
vp9_quantize_mbuv_8x8(x);
}
void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -349,9 +349,9 @@ void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
* these two C functions if corresponding optimized routine is not available.
* NEON optimized version implements currently the fast quantization for pair
* of blocks. */
void vp8_regular_quantize_b_4x4_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
vp8_regular_quantize_b_4x4(b1, d1);
vp8_regular_quantize_b_4x4(b2, d2);
void vp9_regular_quantize_b_4x4_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
vp9_regular_quantize_b_4x4(b1, d1);
vp9_regular_quantize_b_4x4(b2, d2);
}
static void invert_quant(short *quant,
@@ -366,7 +366,7 @@ static void invert_quant(short *quant,
*shift = l;
}
void vp8cx_init_quantizer(VP8_COMP *cpi) {
void vp9cx_init_quantizer(VP8_COMP *cpi) {
int i;
int quant_val;
int Q;
@@ -530,7 +530,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
}
}
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
void vp9cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
int i;
int QIndex;
MACROBLOCKD *xd = &x->e_mbd;
@@ -650,11 +650,11 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[24].eob_max_offset_8x8 = 4;
}
/* save this macroblock QIndex for vp8_update_zbin_extra() */
/* save this macroblock QIndex for vp9_update_zbin_extra() */
x->e_mbd.q_index = QIndex;
}
void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
void vp9_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
int i;
int QIndex = x->e_mbd.q_index;
int zbin_extra;
@@ -687,15 +687,15 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[24].zbin_extra = (short)zbin_extra;
}
void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
void vp9cx_frame_init_quantizer(VP8_COMP *cpi) {
// Clear Zbin mode boost for default case
cpi->zbin_mode_boost = 0;
// MB level quantizer setup
vp8cx_mb_init_quantizer(cpi, &cpi->mb);
vp9cx_mb_init_quantizer(cpi, &cpi->mb);
}
void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
void vp9_set_quantizer(struct VP8_COMP *cpi, int Q) {
VP8_COMMON *cm = &cpi->common;
cm->base_qindex = Q;
@@ -711,5 +711,5 @@ void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
// quantizer has to be reinitialized if any delta_q changes.
// As there are not any here for now this is inactive code.
// if(update)
// vp8cx_init_quantizer(cpi);
// vp9cx_init_quantizer(cpi);
}
View File
@@ -32,61 +32,61 @@
#define prototype_quantize_block_type(sym) \
void (sym)(BLOCK *b, BLOCKD *d, TX_TYPE type)
extern prototype_quantize_block_type(vp8_ht_quantize_b_4x4);
extern prototype_quantize_block_type(vp9_ht_quantize_b_4x4);
#ifndef vp8_quantize_quantb_4x4
#define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4
#define vp8_quantize_quantb_4x4 vp9_regular_quantize_b_4x4
#endif
extern prototype_quantize_block(vp8_quantize_quantb_4x4);
#ifndef vp8_quantize_quantb_4x4_pair
#define vp8_quantize_quantb_4x4_pair vp8_regular_quantize_b_4x4_pair
#define vp8_quantize_quantb_4x4_pair vp9_regular_quantize_b_4x4_pair
#endif
extern prototype_quantize_block_pair(vp8_quantize_quantb_4x4_pair);
#ifndef vp8_quantize_quantb_8x8
#define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8
#define vp8_quantize_quantb_8x8 vp9_regular_quantize_b_8x8
#endif
extern prototype_quantize_block(vp8_quantize_quantb_8x8);
#ifndef vp8_quantize_quantb_16x16
#define vp8_quantize_quantb_16x16 vp8_regular_quantize_b_16x16
#define vp8_quantize_quantb_16x16 vp9_regular_quantize_b_16x16
#endif
extern prototype_quantize_block(vp8_quantize_quantb_16x16);
#ifndef vp8_quantize_quantb_2x2
#define vp8_quantize_quantb_2x2 vp8_regular_quantize_b_2x2
#define vp8_quantize_quantb_2x2 vp9_regular_quantize_b_2x2
#endif
extern prototype_quantize_block(vp8_quantize_quantb_2x2);
#ifndef vp8_quantize_mb_4x4
#define vp8_quantize_mb_4x4 vp8_quantize_mb_4x4_c
#ifndef vp9_quantize_mb_4x4
#define vp9_quantize_mb_4x4 vp9_quantize_mb_4x4_c
#endif
extern prototype_quantize_mb(vp8_quantize_mb_4x4);
void vp8_quantize_mb_8x8(MACROBLOCK *x);
extern prototype_quantize_mb(vp9_quantize_mb_4x4);
void vp9_quantize_mb_8x8(MACROBLOCK *x);
#ifndef vp8_quantize_mbuv_4x4
#define vp8_quantize_mbuv_4x4 vp8_quantize_mbuv_4x4_c
#ifndef vp9_quantize_mbuv_4x4
#define vp9_quantize_mbuv_4x4 vp9_quantize_mbuv_4x4_c
#endif
extern prototype_quantize_mb(vp8_quantize_mbuv_4x4);
extern prototype_quantize_mb(vp9_quantize_mbuv_4x4);
#ifndef vp8_quantize_mby_4x4
#define vp8_quantize_mby_4x4 vp8_quantize_mby_4x4_c
#ifndef vp9_quantize_mby_4x4
#define vp9_quantize_mby_4x4 vp9_quantize_mby_4x4_c
#endif
extern prototype_quantize_mb(vp8_quantize_mby_4x4);
extern prototype_quantize_mb(vp9_quantize_mby_4x4);
extern prototype_quantize_mb(vp8_quantize_mby_8x8);
extern prototype_quantize_mb(vp8_quantize_mbuv_8x8);
extern prototype_quantize_mb(vp9_quantize_mby_8x8);
extern prototype_quantize_mb(vp9_quantize_mbuv_8x8);
void vp8_quantize_mb_16x16(MACROBLOCK *x);
void vp9_quantize_mb_16x16(MACROBLOCK *x);
extern prototype_quantize_block(vp8_quantize_quantb_16x16);
extern prototype_quantize_mb(vp8_quantize_mby_16x16);
extern prototype_quantize_mb(vp9_quantize_mby_16x16);
struct VP8_COMP;
extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);
extern void vp8_update_zbin_extra(struct VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8cx_mb_init_quantizer(struct VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8cx_init_quantizer(struct VP8_COMP *cpi);
extern void vp9_set_quantizer(struct VP8_COMP *cpi, int Q);
extern void vp9cx_frame_init_quantizer(struct VP8_COMP *cpi);
extern void vp9_update_zbin_extra(struct VP8_COMP *cpi, MACROBLOCK *x);
extern void vp9cx_mb_init_quantizer(struct VP8_COMP *cpi, MACROBLOCK *x);
extern void vp9cx_init_quantizer(struct VP8_COMP *cpi);
#endif
View File
@@ -88,16 +88,16 @@ static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3,
// These functions use formulaic calculations to make playing with the
// quantizer tables easier. If necessary they can be replaced by lookup
// tables if and when things settle down in the experimental bitstream
double vp8_convert_qindex_to_q(int qindex) {
double vp9_convert_qindex_to_q(int qindex) {
// Convert the index to a real Q value (scaled down to match old Q values)
return (double)vp8_ac_yquant(qindex) / 4.0;
}
int vp8_gfboost_qadjust(int qindex) {
int vp9_gfboost_qadjust(int qindex) {
int retval;
double q;
q = vp8_convert_qindex_to_q(qindex);
q = vp9_convert_qindex_to_q(qindex);
retval = (int)((0.00000828 * q * q * q) +
(-0.0055 * q * q) +
(1.32 * q) + 79.3);
@@ -108,28 +108,28 @@ static int kfboost_qadjust(int qindex) {
int retval;
double q;
q = vp8_convert_qindex_to_q(qindex);
q = vp9_convert_qindex_to_q(qindex);
retval = (int)((0.00000973 * q * q * q) +
(-0.00613 * q * q) +
(1.316 * q) + 121.2);
return retval;
}
int vp8_bits_per_mb(FRAME_TYPE frame_type, int qindex) {
int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex) {
if (frame_type == KEY_FRAME)
return (int)(4500000 / vp8_convert_qindex_to_q(qindex));
return (int)(4500000 / vp9_convert_qindex_to_q(qindex));
else
return (int)(2850000 / vp8_convert_qindex_to_q(qindex));
return (int)(2850000 / vp9_convert_qindex_to_q(qindex));
}
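Worked example for the two helpers above: at a quantizer index where the AC table entry is 64, `vp9_convert_qindex_to_q` reports Q = 64/4 = 16.0, so a key frame is budgeted 4500000/16 = 281250 per-MB units and an inter frame 2850000/16 = 178125; callers such as estimate_bits_at_q later scale these by a rate-correction factor and normalize by BPER_MB_NORMBITS before multiplying by the macroblock count.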
void vp8_save_coding_context(VP8_COMP *cpi) {
void vp9_save_coding_context(VP8_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
// Stores a snapshot of key state variables which can subsequently be
// restored with a call to vp8_restore_coding_context. These functions are
// restored with a call to vp9_restore_coding_context. These functions are
// intended for use in a re-code loop in vp8_compress_frame where the
// quantizer value is adjusted between loop iterations.
@@ -180,13 +180,13 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
}
void vp8_restore_coding_context(VP8_COMP *cpi) {
void vp9_restore_coding_context(VP8_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
// Restore key state variables to the snapshot state stored in the
// previous call to vp8_save_coding_context.
// previous call to vp9_save_coding_context.
cm->fc.nmvc = cc->nmvc;
vp8_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
@@ -237,7 +237,7 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
}
void vp8_setup_key_frame(VP8_COMP *cpi) {
void vp9_setup_key_frame(VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
// Setup for Key frame:
vp8_default_coef_probs(& cpi->common);
@@ -269,7 +269,7 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
vp9_update_mode_info_in_image(cm, cm->mi);
}
void vp8_setup_inter_frame(VP8_COMP *cpi) {
void vp9_setup_inter_frame(VP8_COMP *cpi) {
if (cpi->common.refresh_alt_ref_frame) {
vpx_memcpy(&cpi->common.fc,
&cpi->common.lfc_a,
@@ -290,7 +290,7 @@ void vp8_setup_inter_frame(VP8_COMP *cpi) {
static int estimate_bits_at_q(int frame_kind, int Q, int MBs,
double correction_factor) {
int Bpm = (int)(.5 + correction_factor * vp8_bits_per_mb(frame_kind, Q));
int Bpm = (int)(.5 + correction_factor * vp9_bits_per_mb(frame_kind, Q));
/* Attempt to retain reasonable accuracy without overflow. The cutoff is
* chosen such that the maximum product of Bpm and MBs fits 31 bits. The
@@ -408,7 +408,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
}
void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
void vp9_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
int Q = cpi->common.base_qindex;
int correction_factor = 100;
double rate_correction_factor;
@@ -432,7 +432,7 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
// Stay in double to avoid int overflow when values are large
projected_size_based_on_q =
(int)(((.5 + rate_correction_factor *
vp8_bits_per_mb(cpi->common.frame_type, Q)) *
vp9_bits_per_mb(cpi->common.frame_type, Q)) *
cpi->common.MBs) / (1 << BPER_MB_NORMBITS));
// Make some allowance for cpi->zbin_over_quant
@@ -504,7 +504,7 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
}
int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
int vp9_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
int Q = cpi->active_worst_quality;
int i;
@@ -537,7 +537,7 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
do {
bits_per_mb_at_this_q =
(int)(.5 + correction_factor *
vp8_bits_per_mb(cpi->common.frame_type, i));
vp9_bits_per_mb(cpi->common.frame_type, i));
if (bits_per_mb_at_this_q <= target_bits_per_mb) {
if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error)
@@ -641,7 +641,7 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi) {
}
void vp8_adjust_key_frame_context(VP8_COMP *cpi) {
void vp9_adjust_key_frame_context(VP8_COMP *cpi) {
// Clear down mmx registers to allow floating point in what follows
vp8_clear_system_state();
@@ -650,7 +650,7 @@ void vp8_adjust_key_frame_context(VP8_COMP *cpi) {
}
void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit) {
void vp9_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit) {
// Set-up bounds on acceptable frame size:
if (cpi->oxcf.fixed_q >= 0) {
// Fixed Q scenario: frame size never outranges target (there is no target!)
@@ -688,7 +688,7 @@ void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit,
// return of 0 means drop frame
int vp8_pick_frame_size(VP8_COMP *cpi) {
int vp9_pick_frame_size(VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
if (cm->frame_type == KEY_FRAME)
View File
@@ -15,21 +15,21 @@
#define FRAME_OVERHEAD_BITS 200
extern void vp8_save_coding_context(VP8_COMP *cpi);
extern void vp8_restore_coding_context(VP8_COMP *cpi);
extern void vp9_save_coding_context(VP8_COMP *cpi);
extern void vp9_restore_coding_context(VP8_COMP *cpi);
extern void vp8_setup_key_frame(VP8_COMP *cpi);
extern void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var);
extern int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame);
extern void vp8_adjust_key_frame_context(VP8_COMP *cpi);
extern void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit);
extern void vp9_setup_key_frame(VP8_COMP *cpi);
extern void vp9_update_rate_correction_factors(VP8_COMP *cpi, int damp_var);
extern int vp9_regulate_q(VP8_COMP *cpi, int target_bits_per_frame);
extern void vp9_adjust_key_frame_context(VP8_COMP *cpi);
extern void vp9_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit);
// return of 0 means drop frame
extern int vp8_pick_frame_size(VP8_COMP *cpi);
extern int vp9_pick_frame_size(VP8_COMP *cpi);
extern double vp8_convert_qindex_to_q(int qindex);
extern int vp8_gfboost_qadjust(int qindex);
extern int vp8_bits_per_mb(FRAME_TYPE frame_type, int qindex);
void vp8_setup_inter_frame(VP8_COMP *cpi);
extern double vp9_convert_qindex_to_q(int qindex);
extern int vp9_gfboost_qadjust(int qindex);
extern int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex);
void vp9_setup_inter_frame(VP8_COMP *cpi);
#endif
View File
@@ -52,8 +52,8 @@
#define IF_RTCD(x) NULL
#endif
extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp9cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp9_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
@@ -226,11 +226,11 @@ static void fill_token_costs(
for (j = 0; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
if (k == 0 && ((j > 0 && i > 0) || (j > 1 && i == 0)))
vp8_cost_tokens_skip((int *)(c [i][j][k]),
vp9_cost_tokens_skip((int *)(c [i][j][k]),
p [i][j][k],
vp8_coef_tree);
else
vp8_cost_tokens((int *)(c [i][j][k]),
vp9_cost_tokens((int *)(c [i][j][k]),
p [i][j][k],
vp8_coef_tree);
}
@@ -249,7 +249,7 @@ static int rd_iifactor [ 32 ] = { 4, 4, 3, 2, 1, 0, 0, 0,
static int sad_per_bit16lut[QINDEX_RANGE];
static int sad_per_bit4lut[QINDEX_RANGE];
void vp8_init_me_luts() {
void vp9_init_me_luts() {
int i;
// Initialize the sad lut tables using a formulaic calculation for now
@@ -257,8 +257,8 @@ void vp8_init_me_luts() {
// to the quantizer tables.
for (i = 0; i < QINDEX_RANGE; i++) {
sad_per_bit16lut[i] =
(int)((0.0418 * vp8_convert_qindex_to_q(i)) + 2.4107);
sad_per_bit4lut[i] = (int)((0.063 * vp8_convert_qindex_to_q(i)) + 2.742);
(int)((0.0418 * vp9_convert_qindex_to_q(i)) + 2.4107);
sad_per_bit4lut[i] = (int)((0.063 * vp9_convert_qindex_to_q(i)) + 2.742);
}
}
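Plugging numbers into the formulas above: at an index where `vp9_convert_qindex_to_q` returns 32.0, sad_per_bit16lut[i] = (int)(0.0418 · 32 + 2.4107) = (int)3.7483 = 3 and sad_per_bit4lut[i] = (int)(0.063 · 32 + 2.742) = (int)4.758 = 4 — the weights the motion search uses to convert motion-vector bit costs into SAD-comparable units.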
@@ -269,13 +269,13 @@ static int compute_rd_mult(int qindex) {
return (11 * q * q) >> 6;
}
void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex) {
void vp9cx_initialize_me_consts(VP8_COMP *cpi, int QIndex) {
cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex];
cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
}
void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
void vp9_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
int q, i;
vp8_clear_system_state(); // __asm emms;
@@ -312,7 +312,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
cpi->mb.errorperbit = (cpi->RDMULT / 110);
cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);
vp8_set_speed_features(cpi);
vp9_set_speed_features(cpi);
q = (int)pow(vp8_dc_quant(QIndex, 0) >> 2, 1.25);
q = q << 2;
@@ -380,11 +380,11 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
/*rough estimate for costing*/
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
vp8_init_mode_costs(cpi);
vp9_init_mode_costs(cpi);
if (cpi->common.frame_type != KEY_FRAME)
{
vp8_build_nmv_cost_table(
vp9_build_nmv_cost_table(
cpi->mb.nmvjointcost,
cpi->mb.e_mbd.allow_high_precision_mv ?
cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
@@ -393,7 +393,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
}
}
void vp8_auto_select_speed(VP8_COMP *cpi) {
void vp9_auto_select_speed(VP8_COMP *cpi) {
int milliseconds_for_compress = (int)(1000000 / cpi->oxcf.frame_rate);
milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
@@ -446,7 +446,7 @@ void vp8_auto_select_speed(VP8_COMP *cpi) {
}
}
int vp8_block_error_c(short *coeff, short *dqcoeff, int block_size) {
int vp9_block_error_c(short *coeff, short *dqcoeff, int block_size) {
int i, error = 0;
for (i = 0; i < block_size; i++) {
@@ -457,7 +457,7 @@ int vp8_block_error_c(short *coeff, short *dqcoeff, int block_size) {
return error;
}
int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) {
int vp9_mbblock_error_c(MACROBLOCK *mb, int dc) {
BLOCK *be;
BLOCKD *bd;
int i, j;
@@ -480,7 +480,7 @@ int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) {
return error;
}
int vp8_mbuverror_c(MACROBLOCK *mb) {
int vp9_mbuverror_c(MACROBLOCK *mb) {
BLOCK *be;
BLOCKD *bd;
@@ -490,13 +490,13 @@ int vp8_mbuverror_c(MACROBLOCK *mb) {
be = &mb->block[i];
bd = &mb->e_mbd.block[i];
error += vp8_block_error_c(be->coeff, bd->dqcoeff, 16);
error += vp9_block_error_c(be->coeff, bd->dqcoeff, 16);
}
return error;
}
int vp8_uvsse(MACROBLOCK *x) {
int vp9_uvsse(MACROBLOCK *x) {
unsigned char *uptr, *vptr;
unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
@@ -527,14 +527,14 @@ int vp8_uvsse(MACROBLOCK *x) {
vptr = x->e_mbd.pre.v_buffer + offset;
if ((mv_row | mv_col) & 7) {
vp8_sub_pixel_variance8x8(uptr, pre_stride, (mv_col & 7) << 1,
vp9_sub_pixel_variance8x8(uptr, pre_stride, (mv_col & 7) << 1,
(mv_row & 7) << 1, upred_ptr, uv_stride, &sse2);
vp8_sub_pixel_variance8x8(vptr, pre_stride, (mv_col & 7) << 1,
vp9_sub_pixel_variance8x8(vptr, pre_stride, (mv_col & 7) << 1,
(mv_row & 7) << 1, vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
} else {
vp8_variance8x8(uptr, pre_stride, upred_ptr, uv_stride, &sse2);
vp8_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1);
vp9_variance8x8(uptr, pre_stride, upred_ptr, uv_stride, &sse2);
vp9_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
}
return sse2;
@@ -709,12 +709,12 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
BLOCK *beptr;
int d;
vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
vp9_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
mb->block[0].src_stride);
// Fdct and building the 2nd order block
for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) {
mb->vp8_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
mb->vp9_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
*Y2DCPtr++ = beptr->coeff[0];
*Y2DCPtr++ = beptr->coeff[16];
}
@@ -731,9 +731,9 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
mb->quantize_b_4x4(mb_y2, x_y2);
// Distortion
d = vp8_mbblock_error(mb, 1);
d = vp9_mbblock_error(mb, 1);
d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
d += vp9_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
*Distortion = (d >> 2);
// rate
@@ -780,11 +780,11 @@ static void macro_block_yrd_8x8(MACROBLOCK *mb,
BLOCKD *const x_y2 = xd->block + 24;
int d;
vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
vp9_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
mb->block[0].src_stride);
vp8_transform_mby_8x8(mb);
vp8_quantize_mby_8x8(mb);
vp9_transform_mby_8x8(mb);
vp9_quantize_mby_8x8(mb);
/* remove 1st order dc to properly combine 1st/2nd order distortion */
mb->coeff[0] = 0;
@@ -796,8 +796,8 @@ static void macro_block_yrd_8x8(MACROBLOCK *mb,
xd->dqcoeff[128] = 0;
xd->dqcoeff[192] = 0;
d = vp8_mbblock_error(mb, 0);
d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
d = vp9_mbblock_error(mb, 0);
d += vp9_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
*Distortion = (d >> 2);
// rate
@@ -829,23 +829,23 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
BLOCK *be = &mb->block[0];
TX_TYPE tx_type;
vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), mb->e_mbd.predictor,
vp9_subtract_mby(mb->src_diff, *(mb->block[0].base_src), mb->e_mbd.predictor,
mb->block[0].src_stride);
tx_type = get_tx_type_16x16(xd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, 32, be->coeff, tx_type, 16);
vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 16);
} else
vp8_transform_mby_16x16(mb);
vp9_transform_mby_16x16(mb);
vp8_quantize_mby_16x16(mb);
vp9_quantize_mby_16x16(mb);
// TODO(jingning) is it possible to quickly determine whether to force
// trailing coefficients to be zero, instead of running trellis
// optimization in the rate-distortion optimization loop?
if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
vp8_optimize_mby_16x16(mb, rtcd);
vp9_optimize_mby_16x16(mb, rtcd);
d = vp8_mbblock_error(mb, 0);
d = vp9_mbblock_error(mb, 0);
*Distortion = (d >> 2);
// rate
@@ -1001,13 +1001,13 @@ static void super_block_yrd_8x8(MACROBLOCK *x,
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
vp8_subtract_mby_s_c(x->src_diff,
vp9_subtract_mby_s_c(x->src_diff,
src + x_idx * 16 + y_idx * 16 * src_y_stride,
src_y_stride,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
dst_y_stride);
vp8_transform_mby_8x8(x);
vp8_quantize_mby_8x8(x);
vp9_transform_mby_8x8(x);
vp9_quantize_mby_8x8(x);
/* remove 1st order dc to properly combine 1st/2nd order distortion */
x->coeff[ 0] = 0;
@@ -1019,8 +1019,8 @@ static void super_block_yrd_8x8(MACROBLOCK *x,
xd->dqcoeff[128] = 0;
xd->dqcoeff[192] = 0;
d += vp8_mbblock_error(x, 0);
d += vp8_block_error(by2->coeff, bdy2->dqcoeff, 16);
d += vp9_mbblock_error(x, 0);
d += vp9_block_error(by2->coeff, bdy2->dqcoeff, 16);
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += vp8_rdcost_mby_8x8(x, 0);
@@ -1111,15 +1111,15 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
rate += bmode_costs[mode2];
}
#endif
vp8_subtract_b(be, b, 16);
vp9_subtract_b(be, b, 16);
b->bmi.as_mode.first = mode;
tx_type = get_tx_type_4x4(xd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, 32, be->coeff, tx_type, 4);
vp8_ht_quantize_b_4x4(be, b, tx_type);
vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 4);
vp9_ht_quantize_b_4x4(be, b, tx_type);
} else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
}
@@ -1128,7 +1128,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4);
rate += ratey;
distortion = vp8_block_error(be->coeff, b->dqcoeff, 16) >> 2;
distortion = vp9_block_error(be->coeff, b->dqcoeff, 16) >> 2;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
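`RDCOST`, used here and throughout the remaining hunks, is the standard Lagrangian trade-off J = λ·rate + distortion in fixed point; `x->rdmult` plays λ and `x->rddiv` scales distortion. The macro itself lives elsewhere in the encoder; a sketch of the usual libvpx form (the exact rounding is an assumption):

```c
/* Lagrangian RD cost sketch: RM ~ lambda in Q8 fixed point, DM scales
 * distortion; lower is better. */
#define RDCOST_SKETCH(RM, DM, R, D) ((((R) * (RM) + 128) >> 8) + (DM) * (D))
```

rd_pick_intra4x4block keeps whichever prediction mode minimizes this cost.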
@@ -1436,18 +1436,18 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
}
#endif
vp8_subtract_4b_c(be, b, 16);
vp9_subtract_4b_c(be, b, 16);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
TX_TYPE tx_type = get_tx_type_8x8(xd, b);
if (tx_type != DCT_DCT)
vp8_fht_c(be->src_diff, 32, (x->block + idx)->coeff, tx_type, 8);
vp9_fht_c(be->src_diff, 32, (x->block + idx)->coeff, tx_type, 8);
else
x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->vp9_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
// compute quantization mse of 8x8 block
distortion = vp8_block_error_c((x->block + idx)->coeff,
distortion = vp9_block_error_c((x->block + idx)->coeff,
(xd->block + idx)->dqcoeff, 64);
ta0 = a[vp8_block2above_8x8[idx]];
tl0 = l[vp8_block2left_8x8[idx]];
@@ -1459,21 +1459,21 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
ta1 = ta0;
tl1 = tl0;
} else {
x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
x->vp8_short_fdct8x4((be + 4)->src_diff, (be + 4)->coeff, 32);
x->vp9_short_fdct8x4(be->src_diff, be->coeff, 32);
x->vp9_short_fdct8x4((be + 4)->src_diff, (be + 4)->coeff, 32);
x->quantize_b_4x4_pair(x->block + ib, x->block + ib + 1,
xd->block + ib, xd->block + ib + 1);
x->quantize_b_4x4_pair(x->block + ib + 4, x->block + ib + 5,
xd->block + ib + 4, xd->block + ib + 5);
distortion = vp8_block_error_c((x->block + ib)->coeff,
distortion = vp9_block_error_c((x->block + ib)->coeff,
(xd->block + ib)->dqcoeff, 16);
distortion += vp8_block_error_c((x->block + ib + 1)->coeff,
distortion += vp9_block_error_c((x->block + ib + 1)->coeff,
(xd->block + ib + 1)->dqcoeff, 16);
distortion += vp8_block_error_c((x->block + ib + 4)->coeff,
distortion += vp9_block_error_c((x->block + ib + 4)->coeff,
(xd->block + ib + 4)->dqcoeff, 16);
distortion += vp8_block_error_c((x->block + ib + 5)->coeff,
distortion += vp9_block_error_c((x->block + ib + 5)->coeff,
(xd->block + ib + 5)->dqcoeff, 16);
ta0 = a[vp8_block2above[ib]];
@@ -1518,7 +1518,7 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
#if CONFIG_COMP_INTRA_PRED
b->bmi.as_mode.second = (*best_second_mode);
#endif
vp8_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
vp9_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
a[vp8_block2above_8x8[idx]] = besta0;
@@ -1610,14 +1610,14 @@ static int rd_cost_mbuv(MACROBLOCK *mb) {
static int64_t rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel, int *skip) {
vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
vp9_transform_mbuv_4x4(x);
vp9_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
*distortion = vp8_mbuverror(x) / 4;
*distortion = vp9_mbuverror(x) / 4;
*skip = vp9_mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
@@ -1668,7 +1668,7 @@ static int64_t rd_inter32x32_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
vp8_subtract_mbuv_s_c(x->src_diff,
vp9_subtract_mbuv_s_c(x->src_diff,
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
src_uv_stride,
@@ -1676,13 +1676,13 @@ static int64_t rd_inter32x32_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
vp9_transform_mbuv_8x8(x);
vp9_quantize_mbuv_8x8(x);
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += rd_cost_mbuv_8x8(x, 0);
d += vp8_mbuverror(x) / 4;
d += vp9_mbuverror(x) / 4;
skippable = skippable && vp9_mbuv_is_skippable_8x8(xd);
}
@@ -1700,14 +1700,14 @@ static int64_t rd_inter32x32_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel, int *skip) {
vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
vp9_transform_mbuv_8x8(x);
vp9_quantize_mbuv_8x8(x);
*rate = rd_cost_mbuv_8x8(x, 1);
*distortion = vp8_mbuverror(x) / 4;
*distortion = vp9_mbuverror(x) / 4;
*skip = vp9_mbuv_is_skippable_8x8(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
@@ -1717,14 +1717,14 @@ static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int *skippable, int fullpixel) {
vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
vp9_transform_mbuv_4x4(x);
vp9_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
*distortion = vp8_mbuverror(x) / 4;
*distortion = vp9_mbuverror(x) / 4;
*skippable = vp9_mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
@@ -1769,16 +1769,16 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
}
#endif
vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
vp9_transform_mbuv_4x4(x);
vp9_quantize_mbuv_4x4(x);
rate_to = rd_cost_mbuv(x);
rate = rate_to
+ x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
distortion = vp8_mbuverror(x) / 4;
distortion = vp9_mbuverror(x) / 4;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
@@ -1827,16 +1827,16 @@ static void rd_pick_intra_mbuv_mode_8x8(VP8_COMP *cpi,
mbmi->uv_mode = mode;
vp8_build_intra_predictors_mbuv(&x->e_mbd);
vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_8x8(x);
vp9_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
vp9_quantize_mbuv_8x8(x);
rate_to = rd_cost_mbuv_8x8(x, 1);
rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
distortion = vp8_mbuverror(x) / 4;
distortion = vp9_mbuverror(x) / 4;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
if (this_rd < best_rd) {
@@ -1875,18 +1875,18 @@ static void super_block_uvrd_8x8(MACROBLOCK *x,
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
vp8_subtract_mbuv_s_c(x->src_diff,
vp9_subtract_mbuv_s_c(x->src_diff,
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
src_uv_stride,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
vp9_transform_mbuv_8x8(x);
vp9_quantize_mbuv_8x8(x);
s &= vp9_mbuv_is_skippable_8x8(xd);
d += vp8_mbuverror(x) >> 2;
d += vp9_mbuverror(x) >> 2;
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += rd_cost_mbuv_8x8(x, 0);
@@ -1943,7 +1943,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP8_COMP *cpi,
}
#endif
int vp8_cost_mv_ref(VP8_COMP *cpi,
int vp9_cost_mv_ref(VP8_COMP *cpi,
MB_PREDICTION_MODE m,
const int near_mv_ref_ct[4]) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1966,7 +1966,7 @@ int vp8_cost_mv_ref(VP8_COMP *cpi,
return 0;
}
void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
x->e_mbd.mode_info_context->mbmi.mode = mb;
x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
}
@@ -2014,10 +2014,10 @@ static int labels2mode(
seg_mvs[mbmi->second_ref_frame - 1].as_int;
}
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, MVCOSTS,
thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, MVCOSTS,
102, xd->allow_high_precision_mv);
if (mbmi->second_ref_frame) {
thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv,
thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
MVCOSTS, 102,
xd->allow_high_precision_mv);
}
@@ -2096,10 +2096,10 @@ static int64_t encode_inter_mb_segment(MACROBLOCK *x,
vp8_build_inter_predictors_b(bd, 16, xd->subpixel_predict);
if (xd->mode_info_context->mbmi.second_ref_frame)
vp8_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg);
vp8_subtract_b(be, bd, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
vp9_subtract_b(be, bd, 16);
x->vp9_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, bd);
thisdistortion = vp8_block_error(be->coeff, bd->dqcoeff, 16);
thisdistortion = vp9_block_error(be->coeff, bd->dqcoeff, 16);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above[i],
@ -2146,13 +2146,13 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
vp8_build_inter_predictors4b(xd, bd, 16);
if (xd->mode_info_context->mbmi.second_ref_frame)
vp8_build_2nd_inter_predictors4b(xd, bd, 16);
vp8_subtract_4b_c(be, bd, 16);
vp9_subtract_4b_c(be, bd, 16);
if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
if (otherrd) {
x->vp8_short_fdct8x8(be->src_diff, be2->coeff, 32);
x->vp9_short_fdct8x8(be->src_diff, be2->coeff, 32);
x->quantize_b_8x8(be2, bd2);
thisdistortion = vp8_block_error_c(be2->coeff, bd2->dqcoeff, 64);
thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
otherdist += thisdistortion;
othercost += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
tacp + vp8_block2above_8x8[idx],
@ -2161,9 +2161,9 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
for (j = 0; j < 4; j += 2) {
bd = &xd->block[ib + iblock[j]];
be = &x->block[ib + iblock[j]];
x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
x->vp9_short_fdct8x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4_pair(be, be + 1, bd, bd + 1);
thisdistortion = vp8_block_error_c(be->coeff, bd->dqcoeff, 32);
thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above[ib + iblock[j]],
@ -2179,9 +2179,9 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
for (j = 0; j < 4; j += 2) {
BLOCKD *bd3 = &xd->block[ib + iblock[j]];
BLOCK *be3 = &x->block[ib + iblock[j]];
x->vp8_short_fdct8x4(be3->src_diff, be3->coeff, 32);
x->vp9_short_fdct8x4(be3->src_diff, be3->coeff, 32);
x->quantize_b_4x4_pair(be3, be3 + 1, bd3, bd3 + 1);
thisdistortion = vp8_block_error_c(be3->coeff, bd3->dqcoeff, 32);
thisdistortion = vp9_block_error_c(be3->coeff, bd3->dqcoeff, 32);
otherdist += thisdistortion;
othercost += cost_coeffs(x, bd3, PLANE_TYPE_Y_WITH_DC,
tacp + vp8_block2above[ib + iblock[j]],
@ -2193,9 +2193,9 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
TX_4X4);
}
}
x->vp8_short_fdct8x8(be->src_diff, be2->coeff, 32);
x->vp9_short_fdct8x8(be->src_diff, be2->coeff, 32);
x->quantize_b_8x8(be2, bd2);
thisdistortion = vp8_block_error_c(be2->coeff, bd2->dqcoeff, 64);
thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above_8x8[idx],
@ -2296,7 +2296,7 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x,
// Segmentation method overheads
rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs,
vp8_mbsplit_encodings + segmentation);
rate += vp8_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
rate += vp9_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
br += rate;
other_segment_rd = this_segment_rd;
@ -2371,7 +2371,7 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x,
c = &x->block[n];
e = &x->e_mbd.block[n];
bestsme = vp8_full_pixel_diamond(cpi, x, c, e, &mvp_full, step_param,
bestsme = vp9_full_pixel_diamond(cpi, x, c, e, &mvp_full, step_param,
sadpb, further_steps, 0, v_fn_ptr,
bsi->ref_mv, &mode_mv[NEW4X4]);
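
Editor's note: vp9_full_pixel_diamond starts at mvp_full with a probe pattern whose size is set by step_param and shrinks over further_steps. A minimal sketch of the underlying idea, with sad_at as a hypothetical callback returning the SAD at an integer-pel position; the real search also adds the MV rate (sadpb) and respects the mv_row/mv_col limits.

typedef unsigned int (*sad_at_fn)(int row, int col, void *ctx);

/* Probe the four diamond neighbors at the current step size, recenter
 * on any improvement, and halve the step when none improves. */
static unsigned int diamond_search_sketch(int *row, int *col, int step,
                                          sad_at_fn sad_at, void *ctx) {
  unsigned int best = sad_at(*row, *col, ctx);
  while (step > 0) {
    static const int dr[4] = { -1, 0, 0, 1 };
    static const int dc[4] = { 0, -1, 1, 0 };
    int i, moved = 0;
    for (i = 0; i < 4; i++) {
      const unsigned int s = sad_at(*row + dr[i] * step,
                                    *col + dc[i] * step, ctx);
      if (s < best) {
        best = s;
        *row += dr[i] * step;
        *col += dc[i] * step;
        moved = 1;
        break;
      }
    }
    if (!moved)
      step >>= 1;  /* shrink the diamond */
  }
  return best;
}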
@ -2679,7 +2679,7 @@ static int rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
int tmp_row_min = x->mv_row_min;
int tmp_row_max = x->mv_row_max;
vp8_clamp_mv_min_max(x, best_ref_mv);
vp9_clamp_mv_min_max(x, best_ref_mv);
/* Get 8x8 result */
bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
@ -2817,7 +2817,7 @@ static void insertsortsad(int arr[], int idx[], int len) {
}
// The improved MV prediction
void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
void vp9_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
int_mv *mvp, int refframe, int *ref_frame_sign_bias,
int *sr, int near_sadidx[]) {
const MODE_INFO *above = here - xd->mode_info_stride;
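
Editor's note: vp9_mv_pred builds a starting predictor for the motion search from the vectors of neighboring and co-located blocks, ranked by the SADs computed in cal_sad (near_sadidx). A minimal sketch of the selection idea, with illustrative structures that are not the encoder's.

typedef struct { int row, col; } mv_sketch;

/* Pick the candidate MV whose originating neighbor has the lowest SAD,
 * on the theory that the best-matching neighbor is the best predictor. */
static mv_sketch pick_mv_pred_sketch(const mv_sketch *cand,
                                     const unsigned int *neighbor_sad,
                                     int num_candidates) {
  int best = 0, i;
  for (i = 1; i < num_candidates; i++)
    if (neighbor_sad[i] < neighbor_sad[best])
      best = i;
  return cand[best];
}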
@ -3313,11 +3313,11 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
if (frame_mv[NEWMV][refs[0]].as_int == INVALID_MV ||
frame_mv[NEWMV][refs[1]].as_int == INVALID_MV)
return INT64_MAX;
*rate2 += vp8_mv_bit_cost(&frame_mv[NEWMV][refs[0]],
*rate2 += vp9_mv_bit_cost(&frame_mv[NEWMV][refs[0]],
&frame_best_ref_mv[refs[0]],
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
*rate2 += vp8_mv_bit_cost(&frame_mv[NEWMV][refs[1]],
*rate2 += vp9_mv_bit_cost(&frame_mv[NEWMV][refs[1]],
&frame_best_ref_mv[refs[1]],
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
@ -3334,14 +3334,14 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
int tmp_row_min = x->mv_row_min;
int tmp_row_max = x->mv_row_max;
vp8_clamp_mv_min_max(x, &frame_best_ref_mv[refs[0]]);
vp9_clamp_mv_min_max(x, &frame_best_ref_mv[refs[0]]);
if (!*saddone) {
cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0], block_size);
*saddone = 1;
}
vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
vp9_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
mbmi->ref_frame, cpi->common.ref_frame_sign_bias,
&sr, &near_sadidx[0]);
@ -3354,7 +3354,7 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
// Further step/diamond searches as necessary
further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
bestsme = vp8_full_pixel_diamond(cpi, x, b, d, &mvp_full, step_param,
bestsme = vp9_full_pixel_diamond(cpi, x, b, d, &mvp_full, step_param,
sadpb, further_steps, 1,
&cpi->fn_ptr[block_size],
&frame_best_ref_mv[refs[0]], &tmp_mv);
@ -3377,7 +3377,7 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv.first.as_int;
// Add the new motion vector cost to our rolling cost variable
*rate2 += vp8_mv_bit_cost(&tmp_mv, &frame_best_ref_mv[refs[0]],
*rate2 += vp9_mv_bit_cost(&tmp_mv, &frame_best_ref_mv[refs[0]],
XMVCOST, 96, xd->allow_high_precision_mv);
}
break;
@ -3419,7 +3419,7 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
* if the first is known */
*compmode_cost = vp8_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP),
is_comp_pred);
*rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
*rate2 += vp9_cost_mv_ref(cpi, this_mode, mdcounts);
if (block_size == BLOCK_16X16) {
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
@ -3447,11 +3447,11 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
threshold = x->encode_breakout;
if (block_size == BLOCK_16X16) {
var = vp8_variance16x16(*(b->base_src), b->src_stride,
var = vp9_variance16x16(*(b->base_src), b->src_stride,
xd->predictor, 16, &sse);
} else {
#if CONFIG_SUPERBLOCKS
var = vp8_variance32x32(*(b->base_src), b->src_stride,
var = vp9_variance32x32(*(b->base_src), b->src_stride,
xd->dst.y_buffer, xd->dst.y_stride, &sse);
#endif
}
@ -3466,12 +3466,12 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
int sse2;
if (block_size == BLOCK_16X16) {
sse2 = vp8_uvsse(x);
sse2 = vp9_uvsse(x);
} else {
unsigned int sse2u, sse2v;
var = vp8_variance16x16(x->src.u_buffer, x->src.uv_stride,
var = vp9_variance16x16(x->src.u_buffer, x->src.uv_stride,
xd->dst.u_buffer, xd->dst.uv_stride, &sse2u);
var = vp8_variance16x16(x->src.v_buffer, x->src.uv_stride,
var = vp9_variance16x16(x->src.v_buffer, x->src.uv_stride,
xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
sse2 = sse2u + sse2v;
}
@ -3530,7 +3530,7 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
return this_rd; // if 0, this will be re-calculated by caller
}
void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset,
void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset,
int *returnrate, int *returndistortion, int64_t *returnintra) {
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
@ -3632,7 +3632,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
/* Initialize zbin mode boost for uv costing */
cpi->zbin_mode_boost = 0;
vp8_update_zbin_extra(cpi, x);
vp9_update_zbin_extra(cpi, x);
rd_pick_intra_mbuv_mode(cpi, x, &uv_intra_rate,
&uv_intra_rate_tokenonly, &uv_intra_distortion,
@ -3760,7 +3760,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
vp8_update_zbin_extra(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
// Intra
@ -4277,7 +4277,7 @@ end:
}
#if CONFIG_SUPERBLOCKS
void vp8_rd_pick_intra_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
void vp9_rd_pick_intra_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
int *returnrate,
int *returndist) {
VP8_COMMON *cm = &cpi->common;
@ -4308,7 +4308,7 @@ void vp8_rd_pick_intra_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
}
#endif
void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
void vp9_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
int *returnrate, int *returndist) {
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
@ -4465,7 +4465,7 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
}
#if CONFIG_SUPERBLOCKS
int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
int64_t vp9_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
int recon_yoffset, int recon_uvoffset,
int *returnrate, int *returndistortion) {
VP8_COMMON *cm = &cpi->common;
@ -4815,7 +4815,7 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
}
#endif
void vp8cx_pick_mode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
void vp9cx_pick_mode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
int recon_yoffset,
int recon_uvoffset,
int *totalrate, int *totaldist) {
@ -4835,7 +4835,7 @@ void vp8cx_pick_mode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
{
int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
vp9_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
&distortion, &intra_error);
/* restore cpi->zbin_mode_boost_enabled */


@ -15,13 +15,13 @@
#define RDCOST(RM,DM,R,D) ( ((128+((int64_t)R)*(RM)) >> 8) + ((int64_t)DM)*(D) )
#define RDCOST_8x8(RM,DM,R,D) ( ((128+((int64_t)R)*(RM)) >> 8) + ((int64_t)DM)*(D) )
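
Editor's note: RDCOST is the Lagrangian cost J = lambda*R + D, with lambda carried in rdmult at 1/256 scale (hence the +128 rounding and >> 8) and distortion weighted by rddiv. A worked example with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

#define RDCOST(RM,DM,R,D) ( ((128+((int64_t)R)*(RM)) >> 8) + ((int64_t)DM)*(D) )

int main(void) {
  const int rate = 1000;       /* hypothetical rate, internal units  */
  const int distortion = 300;  /* hypothetical distortion, SSE units */
  /* rdmult 512 weights rate by 512/256 = 2:
   * (128 + 1000*512) >> 8 = 2000, plus 1*300 = 300, total 2300. */
  printf("%lld\n", (long long)RDCOST(512, 1, rate, distortion));
  return 0;
}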
extern void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue);
extern void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset,
extern void vp9_initialize_rd_consts(VP8_COMP *cpi, int Qvalue);
extern void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset,
int *returnrate, int *returndistortion, int64_t *returnintra);
extern void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *r, int *d);
extern void vp8_rd_pick_intra_mode_sb(VP8_COMP *cpi, MACROBLOCK *x, int *r, int *d);
extern void vp9_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *r, int *d);
extern void vp9_rd_pick_intra_mode_sb(VP8_COMP *cpi, MACROBLOCK *x, int *r, int *d);
extern void vp8_mv_pred
extern void vp9_mv_pred
(
VP8_COMP *cpi,
MACROBLOCKD *xd,
@ -32,6 +32,6 @@ extern void vp8_mv_pred
int *sr,
int near_sadidx[]
);
extern void vp8_init_me_luts();
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
extern void vp9_init_me_luts();
extern void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
#endif


@ -14,7 +14,7 @@
#include "vpx_ports/config.h"
#include "vpx/vpx_integer.h"
unsigned int vp8_sad32x32_c(const unsigned char *src_ptr,
unsigned int vp9_sad32x32_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
@ -22,7 +22,7 @@ unsigned int vp8_sad32x32_c(const unsigned char *src_ptr,
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 32, 32);
}
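
Editor's note: all of the fixed-size wrappers in this file delegate to sad_mx_n_c, whose body falls outside this diff. A generic kernel consistent with these call sites (m = width, n = height), offered as a sketch rather than the file's actual definition:

#include <stdlib.h>

/* Sum of absolute differences over an m-wide, n-tall block. */
static unsigned int sad_mx_n_sketch(const unsigned char *src_ptr, int src_stride,
                                    const unsigned char *ref_ptr, int ref_stride,
                                    int m, int n) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < n; r++) {
    for (c = 0; c < m; c++)
      sad += abs(src_ptr[c] - ref_ptr[c]);
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
  return sad;
}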
unsigned int vp8_sad16x16_c(const unsigned char *src_ptr,
unsigned int vp9_sad16x16_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
@ -30,7 +30,7 @@ unsigned int vp8_sad16x16_c(const unsigned char *src_ptr,
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 16);
}
unsigned int vp8_sad8x8_c(
unsigned int vp9_sad8x8_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
@ -41,7 +41,7 @@ unsigned int vp8_sad8x8_c(
}
unsigned int vp8_sad16x8_c(
unsigned int vp9_sad16x8_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
@ -53,7 +53,7 @@ unsigned int vp8_sad16x8_c(
}
unsigned int vp8_sad8x16_c(
unsigned int vp9_sad8x16_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
@ -64,7 +64,7 @@ unsigned int vp8_sad8x16_c(
}
unsigned int vp8_sad4x4_c(
unsigned int vp9_sad4x4_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
@ -74,257 +74,257 @@ unsigned int vp8_sad4x4_c(
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 4);
}
void vp8_sad32x32x3_c(const unsigned char *src_ptr,
void vp9_sad32x32x3_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad32x32_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad32x32_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
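
Editor's note: the x3 variants evaluate three horizontally adjacent reference positions per call, which lets SIMD implementations share overlapping loads during refinement. An illustrative driver follows; the buffer sizes and strides are made up, and the prototype simply restates the definition above.

#include <string.h>

extern void vp9_sad32x32x3_c(const unsigned char *, int,
                             const unsigned char *, int, unsigned int *);

static unsigned char src_blk[32 * 32];
static unsigned char ref_blk[32 * 34];  /* 2 spare columns for ref+1, ref+2 */

static void sad_x3_usage_sketch(void) {
  unsigned int sads[3];
  memset(src_blk, 0, sizeof(src_blk));
  memset(ref_blk, 1, sizeof(ref_blk));
  vp9_sad32x32x3_c(src_blk, 32, ref_blk, 34, sads);
  /* sads[k] now holds the SAD against ref_blk + k, for k = 0, 1, 2. */
}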
void vp8_sad32x32x8_c(const unsigned char *src_ptr,
void vp9_sad32x32x8_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned short *sad_array
) {
sad_array[0] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp8_sad32x32_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
sad_array[0] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp9_sad32x32_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad16x16x3_c(
void vp9_sad16x16x3_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad16x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad16x16x8_c(
void vp9_sad16x16x8_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned short *sad_array
) {
sad_array[0] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
sad_array[0] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp9_sad16x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad16x8x3_c(
void vp9_sad16x8x3_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad16x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad16x8x8_c(
void vp9_sad16x8x8_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned short *sad_array
) {
sad_array[0] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
sad_array[0] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp9_sad16x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad8x8x3_c(
void vp9_sad8x8x3_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad8x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad8x8x8_c(
void vp9_sad8x8x8_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned short *sad_array
) {
sad_array[0] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
sad_array[0] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp9_sad8x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad8x16x3_c(
void vp9_sad8x16x3_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad8x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad8x16x8_c(
void vp9_sad8x16x8_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned short *sad_array
) {
sad_array[0] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
sad_array[0] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp9_sad8x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad4x4x3_c(
void vp9_sad4x4x3_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad4x4_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad4x4x8_c(
void vp9_sad4x4x8_c(
const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned short *sad_array
) {
sad_array[0] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
sad_array[0] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
sad_array[3] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
sad_array[4] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
sad_array[5] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
sad_array[6] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
sad_array[7] = (unsigned short)vp9_sad4x4_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad32x32x4d_c(const unsigned char *src_ptr,
void vp9_sad32x32x4d_c(const unsigned char *src_ptr,
int src_stride,
unsigned char *ref_ptr[],
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad32x32_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad32x32_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad32x32_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp8_sad32x32_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad32x32_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad32x32_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad32x32_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp9_sad32x32_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad16x16x4d_c(
void vp9_sad16x16x4d_c(
const unsigned char *src_ptr,
int src_stride,
unsigned char *ref_ptr[],
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad16x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad16x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad16x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp9_sad16x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
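
Editor's note: unlike the x3/x8 variants, the x4d variants take four independent reference pointers rather than consecutive offsets, which matches the four points probed in one diamond-search iteration. A usage sketch with illustrative candidate positions; the prototype restates the definition above.

extern void vp9_sad16x16x4d_c(const unsigned char *, int,
                              unsigned char *[], int, unsigned int *);

/* Price the four diamond candidates around a center pointer in one call;
 * src, center, and the strides are assumed set up by the caller, and
 * step is the current diamond radius. */
static void sad_x4d_usage_sketch(const unsigned char *src, int src_stride,
                                 const unsigned char *center, int ref_stride,
                                 int step, unsigned int sads[4]) {
  unsigned char *refs[4];
  refs[0] = (unsigned char *)center - step * ref_stride;  /* up    */
  refs[1] = (unsigned char *)center - step;               /* left  */
  refs[2] = (unsigned char *)center + step;               /* right */
  refs[3] = (unsigned char *)center + step * ref_stride;  /* down  */
  vp9_sad16x16x4d_c(src, src_stride, refs, ref_stride, sads);
}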
void vp8_sad16x8x4d_c(
void vp9_sad16x8x4d_c(
const unsigned char *src_ptr,
int src_stride,
unsigned char *ref_ptr[],
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad16x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad16x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad16x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp9_sad16x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad8x8x4d_c(
void vp9_sad8x8x4d_c(
const unsigned char *src_ptr,
int src_stride,
unsigned char *ref_ptr[],
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad8x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad8x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad8x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp9_sad8x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad8x16x4d_c(
void vp9_sad8x16x4d_c(
const unsigned char *src_ptr,
int src_stride,
unsigned char *ref_ptr[],
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad8x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad8x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad8x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp9_sad8x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad4x4x4d_c(
void vp9_sad4x4x4d_c(
const unsigned char *src_ptr,
int src_stride,
unsigned char *ref_ptr[],
int ref_stride,
unsigned int *sad_array
) {
sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
sad_array[0] = vp9_sad4x4_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad4x4_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad4x4_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp9_sad4x4_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
/* Copy 2 macroblocks to a buffer */
void vp8_copy32xn_c(
void vp9_copy32xn_c(
unsigned char *src_ptr,
int src_stride,
unsigned char *dst_ptr,


@ -11,7 +11,7 @@
#include <stdlib.h>
#include "vpx_ports/mem.h"
#include "./vpx_rtcd.h"
unsigned int vp8_satd16x16_c(const unsigned char *src_ptr,
unsigned int vp9_satd16x16_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
@ -33,7 +33,7 @@ unsigned int vp8_satd16x16_c(const unsigned char *src_ptr,
in = diff_in;
for (r = 0; r < 16; r += 4) {
for (c = 0; c < 16; c += 4) {
vp8_short_walsh4x4_c(in + c, diff_out, 32);
vp9_short_walsh4x4_c(in + c, diff_out, 32);
for (i = 0; i < 16; i++)
satd += abs(diff_out[i]);
}
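
Editor's note: the loop above computes a sum of absolute transformed differences: the 16x16 residual is covered with 4x4 blocks, each is passed through the Walsh-Hadamard transform, and the absolute transform coefficients are summed. As a formula, with W denoting the 4x4 Walsh transform used here:

\mathrm{SATD} = \sum_{B \in \text{4}\times\text{4 blocks}} \sum_{i=0}^{15} \bigl| W(B)_i \bigr|

Measuring the error in the transform domain tracks the eventual coding cost more closely than plain SAD, at the price of the extra transforms.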


@ -14,7 +14,7 @@
#include "segmentation.h"
#include "vp8/common/pred_common.h"
void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
void vp9_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
int mb_row, mb_col;
MODE_INFO *this_mb_mode_info = cm->mi;
@ -58,7 +58,7 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
}
}
void vp8_enable_segmentation(VP8_PTR ptr) {
void vp9_enable_segmentation(VP8_PTR ptr) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Set the appropriate feature bit
@ -67,14 +67,14 @@ void vp8_enable_segmentation(VP8_PTR ptr) {
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
void vp8_disable_segmentation(VP8_PTR ptr) {
void vp9_disable_segmentation(VP8_PTR ptr) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Clear the appropriate feature bit
cpi->mb.e_mbd.segmentation_enabled = 0;
}
void vp8_set_segmentation_map(VP8_PTR ptr,
void vp9_set_segmentation_map(VP8_PTR ptr,
unsigned char *segmentation_map) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);
@ -87,7 +87,7 @@ void vp8_set_segmentation_map(VP8_PTR ptr,
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
void vp8_set_segment_data(VP8_PTR ptr,
void vp9_set_segment_data(VP8_PTR ptr,
signed char *feature_data,
unsigned char abs_delta) {
VP8_COMP *cpi = (VP8_COMP *)(ptr);


@ -16,14 +16,14 @@
#ifndef __INC_SEGMENTATION_H__
#define __INC_SEGMENTATION_H__ 1
extern void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x);
extern void vp9_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x);
extern void vp8_enable_segmentation(VP8_PTR ptr);
extern void vp8_disable_segmentation(VP8_PTR ptr);
extern void vp9_enable_segmentation(VP8_PTR ptr);
extern void vp9_disable_segmentation(VP8_PTR ptr);
// Valid values for a segment are 0 to 3
// Segmentation map is arranged as [Rows][Columns]
extern void vp8_set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map);
extern void vp9_set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map);
// The values given for each segment can be either deltas (from the default
// value chosen for the frame) or absolute values.
@ -36,7 +36,7 @@ extern void vp8_set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_ma
// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
// the absolute values given).
//
extern void vp8_set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned char abs_delta);
extern void vp9_set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned char abs_delta);
extern void vp9_choose_segmap_coding_method(VP8_COMP *cpi);
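
Editor's note: taken together, the declarations above form the segmentation setup API. A usage sketch, assuming a caller that owns a rows-by-columns map of segment ids (0 to 3) and a per-segment table of delta values; SEGMENT_DELTADATA is the delta interpretation described in the comment above.

/* Enable segmentation, install the map, and provide per-segment deltas. */
static void segmentation_setup_sketch(VP8_PTR ptr,
                                      unsigned char *segment_map,
                                      signed char *feature_deltas) {
  vp9_enable_segmentation(ptr);
  vp9_set_segmentation_map(ptr, segment_map);
  vp9_set_segment_data(ptr, feature_deltas, SEGMENT_DELTADATA);
}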


@ -11,7 +11,7 @@
#include "onyx_int.h"
void vp8_ssim_parms_16x16_c(unsigned char *s, int sp, unsigned char *r,
void vp9_ssim_parms_16x16_c(unsigned char *s, int sp, unsigned char *r,
int rp, unsigned long *sum_s, unsigned long *sum_r,
unsigned long *sum_sq_s, unsigned long *sum_sq_r,
unsigned long *sum_sxr) {
@ -26,7 +26,7 @@ void vp8_ssim_parms_16x16_c(unsigned char *s, int sp, unsigned char *r,
}
}
}
void vp8_ssim_parms_8x8_c(unsigned char *s, int sp, unsigned char *r, int rp,
void vp9_ssim_parms_8x8_c(unsigned char *s, int sp, unsigned char *r, int rp,
unsigned long *sum_s, unsigned long *sum_r,
unsigned long *sum_sq_s, unsigned long *sum_sq_r,
unsigned long *sum_sxr) {
@ -67,13 +67,13 @@ static double similarity(unsigned long sum_s, unsigned long sum_r,
static double ssim_16x16(unsigned char *s, int sp, unsigned char *r, int rp) {
unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
vp9_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
&sum_sxr);
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
}
static double ssim_8x8(unsigned char *s, int sp, unsigned char *r, int rp) {
unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
vp8_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
vp9_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
&sum_sxr);
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
}
@ -81,7 +81,7 @@ static double ssim_8x8(unsigned char *s, int sp, unsigned char *r, int rp) {
// We are using an 8x8 moving window with the starting location of each 8x8 window
// on the 4x4 pixel grid. Such an arrangement allows the windows to overlap
// block boundaries to penalize blocking artifacts.
double vp8_ssim2(unsigned char *img1, unsigned char *img2, int stride_img1,
double vp9_ssim2(unsigned char *img1, unsigned char *img2, int stride_img1,
int stride_img2, int width, int height) {
int i, j;
int samples = 0;
@ -98,20 +98,20 @@ double vp8_ssim2(unsigned char *img1, unsigned char *img2, int stride_img1,
ssim_total /= samples;
return ssim_total;
}
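
Editor's note: similarity() combines the window sums into the SSIM index (the call sites above pass window sizes of 256 and 64 samples). Assuming it follows the standard Wang et al. definition, with C1 and C2 the usual stabilizing constants and the sums sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr supplying the means, variances, and covariance:

\mathrm{SSIM}(s, r) = \frac{(2\mu_s\mu_r + C_1)(2\sigma_{sr} + C_2)}{(\mu_s^2 + \mu_r^2 + C_1)(\sigma_s^2 + \sigma_r^2 + C_2)}

vp9_ssim2 then averages this index over every 8x8 window placed on the 4x4 grid, as the comment above describes.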
double vp8_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
double vp9_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
int lumamask, double *weight) {
double a, b, c;
double ssimv;
a = vp8_ssim2(source->y_buffer, dest->y_buffer,
a = vp9_ssim2(source->y_buffer, dest->y_buffer,
source->y_stride, dest->y_stride, source->y_width,
source->y_height);
b = vp8_ssim2(source->u_buffer, dest->u_buffer,
b = vp9_ssim2(source->u_buffer, dest->u_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
source->uv_height);
c = vp8_ssim2(source->v_buffer, dest->v_buffer,
c = vp9_ssim2(source->v_buffer, dest->v_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
source->uv_height);
@ -122,20 +122,20 @@ double vp8_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
return ssimv;
}
double vp8_calc_ssimg(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
double *ssim_y, double *ssim_u, double *ssim_v) {
double ssim_all = 0;
double a, b, c;
a = vp8_ssim2(source->y_buffer, dest->y_buffer,
a = vp9_ssim2(source->y_buffer, dest->y_buffer,
source->y_stride, dest->y_stride, source->y_width,
source->y_height);
b = vp8_ssim2(source->u_buffer, dest->u_buffer,
b = vp9_ssim2(source->u_buffer, dest->u_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
source->uv_height);
c = vp8_ssim2(source->v_buffer, dest->v_buffer,
c = vp9_ssim2(source->v_buffer, dest->v_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
source->uv_height);
*ssim_y = a;


@ -83,7 +83,7 @@ static void vp8_temporal_filter_predictors_mb_c
vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
}
}
void vp8_temporal_filter_apply_c
void vp9_temporal_filter_apply_c
(
unsigned char *frame1,
unsigned int stride,
@ -185,7 +185,7 @@ static int vp8_temporal_filter_find_matching_mb_c
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
// Ignore mv costing by sending NULL pointer instead of cost arrays
bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv.first,
bestsme = vp9_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv.first,
step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
NULLMVCOST, NULLMVCOST,
&best_ref_mv1);
@ -410,7 +410,7 @@ static void vp8_temporal_filter_iterate_c
mbd->pre.v_buffer = v_buffer;
}
void vp8_temporal_filter_prepare_c
void vp9_temporal_filter_prepare_c
(
VP8_COMP *cpi,
int distance
@ -431,7 +431,7 @@ void vp8_temporal_filter_prepare_c
int max_frames = cpi->active_arnr_frames;
num_frames_backward = distance;
num_frames_forward = vp8_lookahead_depth(cpi->lookahead)
num_frames_forward = vp9_lookahead_depth(cpi->lookahead)
- (num_frames_backward + 1);
switch (blur_type) {
@ -503,7 +503,7 @@ void vp8_temporal_filter_prepare_c
vpx_memset(cpi->frames, 0, max_frames * sizeof(YV12_BUFFER_CONFIG *));
for (frame = 0; frame < frames_to_blur; frame++) {
int which_buffer = start_frame - frame;
struct lookahead_entry *buf = vp8_lookahead_peek(cpi->lookahead,
struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead,
which_buffer);
cpi->frames[frames_to_blur - 1 - frame] = &buf->img;
}
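
Editor's note: the body of vp9_temporal_filter_apply_c falls outside this diff. A per-pixel sketch of this style of motion-compensated filter follows; the 3x multiplier, 16-step weight range, and strength shift mirror the usual libvpx scheme but should be read as illustrative, not as the function's exact code.

/* Blend one motion-compensated prediction pixel into the accumulator,
 * weighting it down as its squared difference from the source grows. */
static void temporal_filter_pixel_sketch(unsigned char orig,
                                         unsigned char pred,
                                         int strength,       /* e.g. 0..6 */
                                         int filter_weight,  /* e.g. 0..2 */
                                         unsigned int *accumulator,
                                         unsigned short *count) {
  const int diff = (int)orig - (int)pred;
  int modifier = diff * diff * 3;
  if (strength > 0)
    modifier += 1 << (strength - 1);  /* round before the shift */
  modifier >>= strength;
  if (modifier > 16)
    modifier = 16;
  modifier = 16 - modifier;           /* close matches get full weight */
  modifier *= filter_weight;
  *count += (unsigned short)modifier;
  *accumulator += (unsigned int)modifier * pred;
}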


@ -29,10 +29,10 @@
#include "x86/temporal_filter_x86.h"
#endif
#ifndef vp8_temporal_filter_apply
#define vp8_temporal_filter_apply vp8_temporal_filter_apply_c
#ifndef vp9_temporal_filter_apply
#define vp9_temporal_filter_apply vp9_temporal_filter_apply_c
#endif
extern prototype_apply(vp8_temporal_filter_apply);
extern prototype_apply(vp9_temporal_filter_apply);
typedef struct {
prototype_apply(*apply);


@ -48,8 +48,8 @@ extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
#endif /* ENTROPY_STATS */
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_fix_contexts(MACROBLOCKD *xd);
void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp9_fix_contexts(MACROBLOCKD *xd);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
const TOKENVALUE *vp8_dct_value_tokens_ptr;
@ -286,7 +286,7 @@ static int mb_is_skippable_16x16(MACROBLOCKD *xd) {
return (vp9_mby_is_skippable_16x16(xd) & vp9_mbuv_is_skippable_8x8(xd));
}
void vp8_tokenize_mb(VP8_COMP *cpi,
void vp9_tokenize_mb(VP8_COMP *cpi,
MACROBLOCKD *xd,
TOKENEXTRA **t,
int dry_run) {
@ -337,9 +337,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (!dry_run)
cpi->skip_true_count[mb_skip_context] += skip_inc;
if (!cpi->common.mb_no_coeff_skip) {
vp8_stuff_mb(cpi, xd, t, dry_run);
vp9_stuff_mb(cpi, xd, t, dry_run);
} else {
vp8_fix_contexts(xd);
vp9_fix_contexts(xd);
}
if (dry_run)
*t = t_backup;
@ -663,7 +663,7 @@ void print_context_counters() {
}
#endif
void vp8_tokenize_initialize() {
void vp9_tokenize_initialize() {
fill_value_tokens();
}
@ -730,7 +730,7 @@ static __inline void stuff_b(VP8_COMP *cpi,
}
}
static void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
static void vp9_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
@ -765,7 +765,7 @@ static void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
}
}
static void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
static void vp9_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
@ -784,7 +784,7 @@ static void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
vpx_memset(&L[8], 0, sizeof(L[8]));
}
static void vp8_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
static void vp9_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
@ -811,7 +811,7 @@ static void vp8_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
L + vp8_block2left[b], TX_4X4, dry_run);
}
static void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
static void vp9_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
@ -830,21 +830,21 @@ static void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
L + vp8_block2left[b], TX_4X4, dry_run);
}
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
TOKENEXTRA * const t_backup = *t;
if (tx_size == TX_16X16) {
vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
vp9_stuff_mb_16x16(cpi, xd, t, dry_run);
} else if (tx_size == TX_8X8) {
if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) {
vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
vp9_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
} else {
vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
vp9_stuff_mb_8x8(cpi, xd, t, dry_run);
}
} else {
vp8_stuff_mb_4x4(cpi, xd, t, dry_run);
vp9_stuff_mb_4x4(cpi, xd, t, dry_run);
}
if (dry_run) {
@ -852,7 +852,7 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
}
}
void vp8_fix_contexts(MACROBLOCKD *xd) {
void vp9_fix_contexts(MACROBLOCKD *xd) {
/* Clear entropy contexts for Y2 blocks */
if ((xd->mode_info_context->mbmi.mode != B_PRED
&& xd->mode_info_context->mbmi.mode != I8X8_PRED


@ -15,7 +15,7 @@
#include "vp8/common/entropy.h"
#include "block.h"
void vp8_tokenize_initialize();
void vp9_tokenize_initialize();
typedef struct {
short Token;


@ -30,10 +30,10 @@ static void cost(
cost(C, T, P, j, d);
} while (++i & 1);
}
void vp8_cost_tokens(int *c, const vp8_prob *p, vp8_tree t) {
void vp9_cost_tokens(int *c, const vp8_prob *p, vp8_tree t) {
cost(c, t, p, 0, 0);
}
void vp8_cost_tokens_skip(int *c, const vp8_prob *p, vp8_tree t) {
void vp9_cost_tokens_skip(int *c, const vp8_prob *p, vp8_tree t) {
cost(c, t, p, 2, 0);
}
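
Editor's note: cost() walks the token tree recursively, accumulating per-branch bit costs until it reaches each leaf; vp9_cost_tokens starts the walk at the root (index 0) and vp9_cost_tokens_skip at index 2, skipping the first branch. A self-contained sketch of that walk; the real code uses a fixed-point lookup table (vp8_cost_bit), where this sketch computes -log2(p) directly, and the narrow index type is illustrative.

#include <math.h>

typedef signed char tree_index_sketch;  /* entries <= 0 encode leaves */

/* Cost of coding bit b under probability prob/256, in 1/256-bit units;
 * prob is assumed to lie in [1, 255]. */
static int bit_cost_sketch(unsigned char prob, int b) {
  const double p = (b ? 256 - prob : prob) / 256.0;
  return (int)(-log2(p) * 256.0 + 0.5);
}

static void cost_tokens_sketch(int *costs, const tree_index_sketch *tree,
                               const unsigned char *probs, int i, int c) {
  int b;
  for (b = 0; b <= 1; ++b) {
    const int cc = c + bit_cost_sketch(probs[i >> 1], b);
    const tree_index_sketch ii = tree[i + b];
    if (ii <= 0)
      costs[-ii] = cc;  /* leaf: -ii is the token index */
    else
      cost_tokens_sketch(costs, tree, probs, ii, cc);
  }
}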


@ -22,7 +22,7 @@
typedef BOOL_CODER vp8_writer;
#define vp8_write vp8_encode_bool
#define vp8_write_literal vp8_encode_value
#define vp8_write_literal vp9_encode_value
#define vp8_write_bit( W, V) vp8_write( W, V, vp8_prob_half)
#define vp8bc_write vp8bc_write_bool
@ -113,10 +113,10 @@ static __inline int vp8_cost_token
/* Fill array of costs for all possible token values. */
void vp8_cost_tokens(
void vp9_cost_tokens(
int *Costs, const vp8_prob *, vp8_tree
);
void vp8_cost_tokens_skip(int *c, const vp8_prob *p, vp8_tree t);
void vp9_cost_tokens_skip(int *c, const vp8_prob *p, vp8_tree t);
#endif


@ -18,7 +18,7 @@ typedef unsigned int(*vp8_sad_fn_t)(const unsigned char *src_ptr,
int ref_stride,
unsigned int max_sad);
typedef void (*vp8_copy32xn_fn_t)(const unsigned char *src_ptr,
typedef void (*vp9_copy32xn_fn_t)(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int ref_stride,
@ -78,7 +78,7 @@ typedef struct variance_vtable {
vp8_sad_multi_fn_t sdx3f;
vp8_sad_multi1_fn_t sdx8f;
vp8_sad_multi_d_fn_t sdx4df;
vp8_copy32xn_fn_t copymem;
vp9_copy32xn_fn_t copymem;
} vp8_variance_fn_ptr_t;
#endif
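
Editor's note: vp8_variance_fn_ptr_t bundles the per-block-size kernels so the search code can run block-size and ISA agnostic (e.g. cpi->fn_ptr[BLOCK_16X16] earlier in this change). A usage sketch, assuming the table also carries an sdf member of type vp8_sad_fn_t, as in libvpx; that member is elided from the hunk above.

#include <limits.h>

/* Call the plain SAD kernel through the vtable; fn is whichever entry
 * (C or SIMD) was installed for the current block size. */
static unsigned int vtable_sad_sketch(const vp8_variance_fn_ptr_t *fn,
                                      const unsigned char *src, int src_stride,
                                      const unsigned char *ref, int ref_stride) {
  return fn->sdf(src, src_stride, ref, ref_stride, UINT_MAX);
}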


@ -13,7 +13,7 @@
#include "vp8/common/filter.h"
unsigned int vp8_get_mb_ss_c
unsigned int vp9_get_mb_ss_c
(
const short *src_ptr
) {
@ -56,7 +56,7 @@ static void variance(
}
#if CONFIG_SUPERBLOCKS
unsigned int vp8_variance32x32_c(const unsigned char *src_ptr,
unsigned int vp9_variance32x32_c(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
@ -70,7 +70,7 @@ unsigned int vp8_variance32x32_c(const unsigned char *src_ptr,
}
#endif
unsigned int vp8_variance16x16_c(
unsigned int vp9_variance16x16_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -85,7 +85,7 @@ unsigned int vp8_variance16x16_c(
return (var - ((avg * avg) >> 8));
}
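
Editor's note: the helper variance() (partially elided above) accumulates var = sum of d^2 and avg = sum of d over the pixel differences d = src - ref; the return line applies the textbook decomposition, with the shift encoding the pixel count N (>> 8 for N = 256 here, >> 7 for the 128-pixel 8x16 and 16x8 blocks below, >> 6 for 8x8):

N\sigma^2 = \sum_i d_i^2 - \frac{\left(\sum_i d_i\right)^2}{N}, \qquad d_i = s_i - r_i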
unsigned int vp8_variance8x16_c(
unsigned int vp9_variance8x16_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -100,7 +100,7 @@ unsigned int vp8_variance8x16_c(
return (var - ((avg * avg) >> 7));
}
unsigned int vp8_variance16x8_c(
unsigned int vp9_variance16x8_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -116,7 +116,7 @@ unsigned int vp8_variance16x8_c(
}
unsigned int vp8_variance8x8_c(
unsigned int vp9_variance8x8_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -131,7 +131,7 @@ unsigned int vp8_variance8x8_c(
return (var - ((avg * avg) >> 6));
}
unsigned int vp8_variance4x4_c(
unsigned int vp9_variance4x4_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -147,7 +147,7 @@ unsigned int vp8_variance4x4_c(
}
unsigned int vp8_mse16x16_c(
unsigned int vp9_mse16x16_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -275,7 +275,7 @@ static void var_filter_block2d_bil_second_pass
}
unsigned int vp8_sub_pixel_variance4x4_c
unsigned int vp9_sub_pixel_variance4x4_c
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -298,11 +298,11 @@ unsigned int vp8_sub_pixel_variance4x4_c
// Now filter Vertically
var_filter_block2d_bil_second_pass(FData3, temp2, 4, 4, 4, 4, VFilter);
return vp8_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
return vp9_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
}
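
Editor's note: the sub-pel variance functions interpolate the source in two separable passes, horizontal then vertical, before measuring variance against the destination; the first pass reads one extra row (9 for a height-4 or height-8 block) so the vertical pass has data. A sketch of the horizontal pass, assuming two-tap filters whose weights sum to 128 (hence the +64 rounding and >> 7); the body is illustrative, not the file's definition.

/* Two-tap horizontal bilinear pass: each intermediate pixel is a
 * rounded, weighted average of two horizontal neighbors. */
static void bil_first_pass_sketch(const unsigned char *src, unsigned short *out,
                                  int src_stride, int out_height, int out_width,
                                  const short filter[2]) {
  int r, c;
  for (r = 0; r < out_height; r++) {
    for (c = 0; c < out_width; c++)
      out[c] = (unsigned short)((src[c] * filter[0] +
                                 src[c + 1] * filter[1] + 64) >> 7);
    src += src_stride;
    out += out_width;
  }
}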
unsigned int vp8_sub_pixel_variance8x8_c
unsigned int vp9_sub_pixel_variance8x8_c
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -322,10 +322,10 @@ unsigned int vp8_sub_pixel_variance8x8_c
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
return vp8_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
return vp9_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp8_sub_pixel_variance16x16_c
unsigned int vp9_sub_pixel_variance16x16_c
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -345,11 +345,11 @@ unsigned int vp8_sub_pixel_variance16x16_c
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
return vp8_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
return vp9_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
#if CONFIG_SUPERBLOCKS
unsigned int vp8_sub_pixel_variance32x32_c(const unsigned char *src_ptr,
unsigned int vp9_sub_pixel_variance32x32_c(const unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
@ -366,75 +366,75 @@ unsigned int vp8_sub_pixel_variance32x32_c(const unsigned char *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 33, 32, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 32, 32, 32, 32, VFilter);
return vp8_variance32x32_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
return vp9_variance32x32_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}
#endif
unsigned int vp8_variance_halfpixvar16x16_h_c(
unsigned int vp9_variance_halfpixvar16x16_h_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
ref_ptr, recon_stride, sse);
}
#if CONFIG_SUPERBLOCKS
unsigned int vp8_variance_halfpixvar32x32_h_c(const unsigned char *src_ptr,
unsigned int vp9_variance_halfpixvar32x32_h_c(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 0,
return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 0,
ref_ptr, recon_stride, sse);
}
#endif
unsigned int vp8_variance_halfpixvar16x16_v_c(const unsigned char *src_ptr,
unsigned int vp9_variance_halfpixvar16x16_v_c(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
ref_ptr, recon_stride, sse);
}
#if CONFIG_SUPERBLOCKS
unsigned int vp8_variance_halfpixvar32x32_v_c(
unsigned int vp9_variance_halfpixvar32x32_v_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance32x32_c(src_ptr, source_stride, 0, 8,
return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 0, 8,
ref_ptr, recon_stride, sse);
}
#endif
unsigned int vp8_variance_halfpixvar16x16_hv_c(
unsigned int vp9_variance_halfpixvar16x16_hv_c(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
ref_ptr, recon_stride, sse);
}
#if CONFIG_SUPERBLOCKS
unsigned int vp8_variance_halfpixvar32x32_hv_c(const unsigned char *src_ptr,
unsigned int vp9_variance_halfpixvar32x32_hv_c(const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 8,
return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 8,
ref_ptr, recon_stride, sse);
}
#endif
unsigned int vp8_sub_pixel_mse16x16_c
unsigned int vp9_sub_pixel_mse16x16_c
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -444,24 +444,24 @@ unsigned int vp8_sub_pixel_mse16x16_c
int dst_pixels_per_line,
unsigned int *sse
) {
vp8_sub_pixel_variance16x16_c(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
vp9_sub_pixel_variance16x16_c(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
return *sse;
}
#if CONFIG_SUPERBLOCKS
unsigned int vp8_sub_pixel_mse32x32_c(const unsigned char *src_ptr,
unsigned int vp9_sub_pixel_mse32x32_c(const unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
const unsigned char *dst_ptr,
int dst_pixels_per_line,
unsigned int *sse) {
vp8_sub_pixel_variance32x32_c(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
vp9_sub_pixel_variance32x32_c(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
return *sse;
}
#endif
unsigned int vp8_sub_pixel_variance16x8_c
unsigned int vp9_sub_pixel_variance16x8_c
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -481,10 +481,10 @@ unsigned int vp8_sub_pixel_variance16x8_c
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
return vp8_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
return vp9_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp8_sub_pixel_variance8x16_c
unsigned int vp9_sub_pixel_variance8x16_c
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -506,7 +506,7 @@ unsigned int vp8_sub_pixel_variance8x16_c
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
return vp8_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
return vp9_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
#if CONFIG_NEWBESTREFMV
unsigned int vp8_variance2x16_c(


@ -11,9 +11,9 @@
%include "vpx_ports/x86_abi_support.asm"
;void vp8_short_fdct4x4_mmx(short *input, short *output, int pitch)
global sym(vp8_short_fdct4x4_mmx)
sym(vp8_short_fdct4x4_mmx):
;void vp9_short_fdct4x4_mmx(short *input, short *output, int pitch)
global sym(vp9_short_fdct4x4_mmx)
sym(vp9_short_fdct4x4_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 3

View File

@ -60,9 +60,9 @@
ret
%endmacro
;void vp8_short_fdct4x4_sse2(short *input, short *output, int pitch)
global sym(vp8_short_fdct4x4_sse2)
sym(vp8_short_fdct4x4_sse2):
;void vp9_short_fdct4x4_sse2(short *input, short *output, int pitch)
global sym(vp9_short_fdct4x4_sse2)
sym(vp9_short_fdct4x4_sse2):
STACK_FRAME_CREATE
@ -165,9 +165,9 @@ sym(vp8_short_fdct4x4_sse2):
STACK_FRAME_DESTROY
;void vp8_short_fdct8x4_sse2(short *input, short *output, int pitch)
global sym(vp8_short_fdct8x4_sse2)
sym(vp8_short_fdct8x4_sse2):
;void vp9_short_fdct8x4_sse2(short *input, short *output, int pitch)
global sym(vp9_short_fdct8x4_sse2)
sym(vp9_short_fdct8x4_sse2):
STACK_FRAME_CREATE
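
The short_fdct symbols are the forward 4x4 transform applied to prediction residuals; the 8x4 entry point just handles two 4x4 blocks per call. As a rough schematic of one separable pass, with placeholder constants c1/c2 and no rounding terms, not the codec's actual values:

/* One 4-point forward-transform row pass, schematically: butterflies on
   the outer and inner pairs, then a rotation for the odd outputs.  The
   real kernel's constants, scaling, and rounding are not reproduced here. */
static void fdct4_row_sketch(const short *in, short *out, int c1, int c2) {
  int a = in[0] + in[3], b = in[1] + in[2];
  int c = in[1] - in[2], d = in[0] - in[3];
  out[0] = (short)(a + b);
  out[2] = (short)(a - b);
  out[1] = (short)((c * c1 + d * c2) >> 12);
  out[3] = (short)((d * c1 - c * c2) >> 12);
}
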

View File

@ -11,9 +11,9 @@
%include "vpx_ports/x86_abi_support.asm"
;int vp8_block_error_xmm(short *coeff_ptr, short *dcoef_ptr)
global sym(vp8_block_error_xmm)
sym(vp8_block_error_xmm):
;int vp9_block_error_xmm(short *coeff_ptr, short *dcoef_ptr)
global sym(vp9_block_error_xmm)
sym(vp9_block_error_xmm):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
@ -59,9 +59,9 @@ sym(vp8_block_error_xmm):
pop rbp
ret
;int vp8_block_error_mmx(short *coeff_ptr, short *dcoef_ptr)
global sym(vp8_block_error_mmx)
sym(vp8_block_error_mmx):
;int vp9_block_error_mmx(short *coeff_ptr, short *dcoef_ptr)
global sym(vp9_block_error_mmx)
sym(vp9_block_error_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
@ -125,9 +125,9 @@ sym(vp8_block_error_mmx):
ret
;int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
global sym(vp8_mbblock_error_mmx_impl)
sym(vp8_mbblock_error_mmx_impl):
;int vp9_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
global sym(vp9_mbblock_error_mmx_impl)
sym(vp9_mbblock_error_mmx_impl):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 3
@ -202,9 +202,9 @@ sym(vp8_mbblock_error_mmx_impl):
ret
;int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
global sym(vp8_mbblock_error_xmm_impl)
sym(vp8_mbblock_error_xmm_impl):
;int vp9_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
global sym(vp9_mbblock_error_xmm_impl)
sym(vp9_mbblock_error_xmm_impl):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 3
@ -272,9 +272,9 @@ sym(vp8_mbblock_error_xmm_impl):
ret
;int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
global sym(vp8_mbuverror_mmx_impl)
sym(vp8_mbuverror_mmx_impl):
;int vp9_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
global sym(vp9_mbuverror_mmx_impl)
sym(vp9_mbuverror_mmx_impl):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
@ -329,9 +329,9 @@ sym(vp8_mbuverror_mmx_impl):
ret
;int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
global sym(vp8_mbuverror_xmm_impl)
sym(vp8_mbuverror_xmm_impl):
;int vp9_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
global sym(vp9_mbuverror_xmm_impl)
sym(vp9_mbuverror_xmm_impl):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
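
The block_error/mbblock_error/mbuverror family all reduce to a sum of squared differences between original and dequantized coefficients; the dc argument lets the caller include or exclude coefficient 0 (the exact convention is not shown in this excerpt). A minimal C reference with illustrative names:

/* Sum of squared coefficient differences; optionally skip the DC term
   when the caller accounts for it separately.  Sketch of these kernels. */
static int block_error_sketch(const short *coeff, const short *dqcoeff,
                              int n, int skip_dc) {
  int i, err = 0;
  for (i = skip_dc ? 1 : 0; i < n; i++) {
    int d = coeff[i] - dqcoeff[i];
    err += d * d;
  }
  return err;
}
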

View File

@ -11,9 +11,9 @@
%include "vpx_ports/x86_abi_support.asm"
;void vp8_short_walsh4x4_sse2(short *input, short *output, int pitch)
global sym(vp8_short_walsh4x4_sse2)
sym(vp8_short_walsh4x4_sse2):
;void vp9_short_walsh4x4_sse2(short *input, short *output, int pitch)
global sym(vp9_short_walsh4x4_sse2)
sym(vp9_short_walsh4x4_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 3
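
short_walsh4x4 is the second-order Walsh-Hadamard transform run over a macroblock's 16 DC coefficients. A generic, unnormalized 4-point WHT butterfly for orientation; the codec's version adds pairing, scaling, and rounding choices not shown here:

/* One unnormalized 4-point Walsh-Hadamard pass; applying it to the rows
   and then the columns gives the full 4x4 transform.  Scaling omitted. */
static void wht4_sketch(const int in[4], int out[4]) {
  int a = in[0] + in[1], b = in[2] + in[3];
  int c = in[0] - in[1], d = in[2] - in[3];
  out[0] = a + b;  out[1] = c + d;
  out[2] = a - b;  out[3] = c - d;
}
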

View File

@ -16,13 +16,13 @@
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_search_full_search
#define vp8_search_full_search vp8_full_search_sadx3
#define vp8_search_full_search vp9_full_search_sadx3
#undef vp8_search_refining_search
#define vp8_search_refining_search vp8_refining_search_sadx4
#define vp8_search_refining_search vp9_refining_search_sadx4
#undef vp8_search_diamond_search
#define vp8_search_diamond_search vp8_diamond_search_sadx4
#define vp8_search_diamond_search vp9_diamond_search_sadx4
#endif
#endif
@ -31,7 +31,7 @@
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_search_full_search
#define vp8_search_full_search vp8_full_search_sadx8
#define vp8_search_full_search vp9_full_search_sadx8
#endif
#endif
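
This is the standard RTCD-off binding: with CONFIG_RUNTIME_CPU_DETECT disabled, the generic vp8_search_* hook names are re-pointed at the best ISA-specific implementation at preprocessing time, so calls compile to direct calls; with it enabled, an init routine fills a function-pointer table instead. Schematically, with hypothetical names:

/* With runtime detection off, the dispatch macro collapses to a direct
   call; with it on, calls go through a function-pointer table. */
#if !CONFIG_RUNTIME_CPU_DETECT
#define search_full(ctx, ...) full_search_sadx3(ctx, __VA_ARGS__)
#else
#define search_full(ctx, ...) (ctx)->rtcd.full_search((ctx), __VA_ARGS__)
#endif
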

View File

@ -11,12 +11,12 @@
%include "vpx_ports/x86_abi_support.asm"
;int vp8_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr,
;int vp9_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr,
; short *qcoeff_ptr,short *dequant_ptr,
; short *scan_mask, short *round_ptr,
; short *quant_ptr, short *dqcoeff_ptr);
global sym(vp8_fast_quantize_b_impl_mmx)
sym(vp8_fast_quantize_b_impl_mmx):
global sym(vp9_fast_quantize_b_impl_mmx)
sym(vp9_fast_quantize_b_impl_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 8

View File

@ -12,12 +12,12 @@
%include "asm_enc_offsets.asm"
; void vp8_regular_quantize_b_sse2 | arg
; void vp9_regular_quantize_b_sse2 | arg
; (BLOCK *b, | 0
; BLOCKD *d) | 1
global sym(vp8_regular_quantize_b_sse2)
sym(vp8_regular_quantize_b_sse2):
global sym(vp9_regular_quantize_b_sse2)
sym(vp9_regular_quantize_b_sse2):
push rbp
mov rbp, rsp
SAVE_XMM 7
@ -233,12 +233,12 @@ ZIGZAG_LOOP 15
pop rbp
ret
; void vp8_fast_quantize_b_sse2 | arg
; void vp9_fast_quantize_b_sse2 | arg
; (BLOCK *b, | 0
; BLOCKD *d) | 1
global sym(vp8_fast_quantize_b_sse2)
sym(vp8_fast_quantize_b_sse2):
global sym(vp9_fast_quantize_b_sse2)
sym(vp9_fast_quantize_b_sse2):
push rbp
mov rbp, rsp
GET_GOT rbx
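
Both quantizer flavors map transform coefficients to quantizer-step multiples; the regular path applies a dead-zone (zbin) test per scan position, while the fast path quantizes unconditionally. A heavily schematic scalar sketch; the parameter names are placeholders rather than the BLOCK/BLOCKD layout, and eob tracking over the zig-zag order is omitted:

/* Schematic scalar quantizer: dead-zone test, quantize, dequantize. */
static void quantize_sketch(const short *coeff, short *qcoeff, short *dqcoeff,
                            const short *zbin, const short *round,
                            const short *quant, const short *dequant, int n) {
  int i;
  for (i = 0; i < n; i++) {
    int z = coeff[i], sign = z >> 31;      /* arithmetic shift: 0 or -1 */
    int x = (z ^ sign) - sign;             /* abs(z) */
    int y = 0;
    if (x >= zbin[i])
      y = ((x + round[i]) * quant[i]) >> 16;
    qcoeff[i]  = (short)((y ^ sign) - sign);
    dqcoeff[i] = (short)(qcoeff[i] * dequant[i]);
  }
}
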

View File

@ -12,12 +12,12 @@
%include "asm_enc_offsets.asm"
; void vp8_regular_quantize_b_sse4 | arg
; void vp9_regular_quantize_b_sse4 | arg
; (BLOCK *b, | 0
; BLOCKD *d) | 1
global sym(vp8_regular_quantize_b_sse4)
sym(vp8_regular_quantize_b_sse4):
global sym(vp9_regular_quantize_b_sse4)
sym(vp9_regular_quantize_b_sse4):
%if ABI_IS_32BIT
push rbp

View File

@ -12,13 +12,13 @@
%include "asm_enc_offsets.asm"
; void vp8_fast_quantize_b_ssse3 | arg
; void vp9_fast_quantize_b_ssse3 | arg
; (BLOCK *b, | 0
; BLOCKD *d) | 1
;
global sym(vp8_fast_quantize_b_ssse3)
sym(vp8_fast_quantize_b_ssse3):
global sym(vp9_fast_quantize_b_ssse3)
sym(vp9_fast_quantize_b_ssse3):
push rbp
mov rbp, rsp
GET_GOT rbx

View File

@ -23,23 +23,23 @@
#if HAVE_SSE2
extern prototype_quantize_block(vp8_regular_quantize_b_sse2);
extern prototype_quantize_block(vp9_regular_quantize_b_sse2);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_quantize_quantb
#define vp8_quantize_quantb vp8_regular_quantize_b_sse2
#define vp8_quantize_quantb vp9_regular_quantize_b_sse2
#endif /* !CONFIG_RUNTIME_CPU_DETECT */
#endif /* HAVE_SSE2 */
#if HAVE_SSE4_1
extern prototype_quantize_block(vp8_regular_quantize_b_sse4);
extern prototype_quantize_block(vp9_regular_quantize_b_sse4);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_quantize_quantb
#define vp8_quantize_quantb vp8_regular_quantize_b_sse4
#define vp8_quantize_quantb vp9_regular_quantize_b_sse4
#endif /* !CONFIG_RUNTIME_CPU_DETECT */

View File

@ -11,18 +11,18 @@
%include "vpx_ports/x86_abi_support.asm"
global sym(vp8_sad16x16_mmx)
global sym(vp8_sad8x16_mmx)
global sym(vp8_sad8x8_mmx)
global sym(vp8_sad4x4_mmx)
global sym(vp8_sad16x8_mmx)
global sym(vp9_sad16x16_mmx)
global sym(vp9_sad8x16_mmx)
global sym(vp9_sad8x8_mmx)
global sym(vp9_sad4x4_mmx)
global sym(vp9_sad16x8_mmx)
;unsigned int vp8_sad16x16_mmx(
;unsigned int vp9_sad16x16_mmx(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
sym(vp8_sad16x16_mmx):
sym(vp9_sad16x16_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -109,12 +109,12 @@ sym(vp8_sad16x16_mmx):
ret
;unsigned int vp8_sad8x16_mmx(
;unsigned int vp9_sad8x16_mmx(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
sym(vp8_sad8x16_mmx):
sym(vp9_sad8x16_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -181,12 +181,12 @@ sym(vp8_sad8x16_mmx):
ret
;unsigned int vp8_sad8x8_mmx(
;unsigned int vp9_sad8x8_mmx(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
sym(vp8_sad8x8_mmx):
sym(vp9_sad8x8_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -251,12 +251,12 @@ sym(vp8_sad8x8_mmx):
ret
;unsigned int vp8_sad4x4_mmx(
;unsigned int vp9_sad4x4_mmx(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
sym(vp8_sad4x4_mmx):
sym(vp9_sad4x4_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -340,12 +340,12 @@ sym(vp8_sad4x4_mmx):
ret
;unsigned int vp8_sad16x8_mmx(
;unsigned int vp9_sad16x8_mmx(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
sym(vp8_sad16x8_mmx):
sym(vp9_sad16x8_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
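
Every sad{W}x{H} kernel in these files computes the same scalar; the MMX/SSE variants differ only in how many absolute differences they fold per instruction. The C reference shape, for comparison (name and W/H parameters illustrative):

/* Sum of absolute differences over a W x H block. */
static unsigned int sad_sketch(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride,
                               int W, int H) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < H; r++) {
    for (c = 0; c < W; c++)
      sad += (unsigned int)(src[c] > ref[c] ? src[c] - ref[c]
                                            : ref[c] - src[c]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}
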

View File

@ -11,13 +11,13 @@
%include "vpx_ports/x86_abi_support.asm"
;unsigned int vp8_sad16x16_wmt(
;unsigned int vp9_sad16x16_wmt(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
global sym(vp8_sad16x16_wmt)
sym(vp8_sad16x16_wmt):
global sym(vp9_sad16x16_wmt)
sym(vp9_sad16x16_wmt):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -84,14 +84,14 @@ sym(vp8_sad16x16_wmt):
pop rbp
ret
;unsigned int vp8_sad8x16_wmt(
;unsigned int vp9_sad8x16_wmt(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int max_err)
global sym(vp8_sad8x16_wmt)
sym(vp8_sad8x16_wmt):
global sym(vp9_sad8x16_wmt)
sym(vp9_sad8x16_wmt):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -148,13 +148,13 @@ sym(vp8_sad8x16_wmt):
ret
;unsigned int vp8_sad8x8_wmt(
;unsigned int vp9_sad8x8_wmt(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
global sym(vp8_sad8x8_wmt)
sym(vp8_sad8x8_wmt):
global sym(vp9_sad8x8_wmt)
sym(vp9_sad8x8_wmt):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -201,13 +201,13 @@ sym(vp8_sad8x8_wmt):
pop rbp
ret
;unsigned int vp8_sad4x4_wmt(
;unsigned int vp9_sad4x4_wmt(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
global sym(vp8_sad4x4_wmt)
sym(vp8_sad4x4_wmt):
global sym(vp9_sad4x4_wmt)
sym(vp9_sad4x4_wmt):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -256,13 +256,13 @@ sym(vp8_sad4x4_wmt):
ret
;unsigned int vp8_sad16x8_wmt(
;unsigned int vp9_sad16x8_wmt(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride)
global sym(vp8_sad16x8_wmt)
sym(vp8_sad16x8_wmt):
global sym(vp9_sad16x8_wmt)
sym(vp9_sad16x8_wmt):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -329,14 +329,14 @@ sym(vp8_sad16x8_wmt):
pop rbp
ret
;void vp8_copy32xn_sse2(
;void vp9_copy32xn_sse2(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *dst_ptr,
; int dst_stride,
; int height);
global sym(vp8_copy32xn_sse2)
sym(vp8_copy32xn_sse2):
global sym(vp9_copy32xn_sse2)
sym(vp9_copy32xn_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5

View File

@ -374,14 +374,14 @@
%endmacro
;void vp8_sad16x16x3_sse3(
;void vp9_sad16x16x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad16x16x3_sse3)
sym(vp8_sad16x16x3_sse3):
global sym(vp9_sad16x16x3_sse3)
sym(vp9_sad16x16x3_sse3):
STACK_FRAME_CREATE_X3
@ -416,14 +416,14 @@ sym(vp8_sad16x16x3_sse3):
STACK_FRAME_DESTROY_X3
;void vp8_sad16x8x3_sse3(
;void vp9_sad16x8x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad16x8x3_sse3)
sym(vp8_sad16x8x3_sse3):
global sym(vp9_sad16x8x3_sse3)
sym(vp9_sad16x8x3_sse3):
STACK_FRAME_CREATE_X3
@ -454,14 +454,14 @@ sym(vp8_sad16x8x3_sse3):
STACK_FRAME_DESTROY_X3
;void vp8_sad8x16x3_sse3(
;void vp9_sad8x16x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad8x16x3_sse3)
sym(vp8_sad8x16x3_sse3):
global sym(vp9_sad8x16x3_sse3)
sym(vp9_sad8x16x3_sse3):
STACK_FRAME_CREATE_X3
@ -483,14 +483,14 @@ sym(vp8_sad8x16x3_sse3):
STACK_FRAME_DESTROY_X3
;void vp8_sad8x8x3_sse3(
;void vp9_sad8x8x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad8x8x3_sse3)
sym(vp8_sad8x8x3_sse3):
global sym(vp9_sad8x8x3_sse3)
sym(vp9_sad8x8x3_sse3):
STACK_FRAME_CREATE_X3
@ -508,14 +508,14 @@ sym(vp8_sad8x8x3_sse3):
STACK_FRAME_DESTROY_X3
;void vp8_sad4x4x3_sse3(
;void vp9_sad4x4x3_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad4x4x3_sse3)
sym(vp8_sad4x4x3_sse3):
global sym(vp9_sad4x4x3_sse3)
sym(vp9_sad4x4x3_sse3):
STACK_FRAME_CREATE_X3
@ -582,22 +582,22 @@ sym(vp8_sad4x4x3_sse3):
STACK_FRAME_DESTROY_X3
;unsigned int vp8_sad16x16_sse3(
;unsigned int vp9_sad16x16_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int max_err)
;%define lddqu movdqu
global sym(vp8_sad16x16_sse3)
sym(vp8_sad16x16_sse3):
global sym(vp9_sad16x16_sse3)
sym(vp9_sad16x16_sse3):
STACK_FRAME_CREATE_X3
mov end_ptr, 4
pxor xmm7, xmm7
.vp8_sad16x16_sse3_loop:
.vp9_sad16x16_sse3_loop:
movdqa xmm0, XMMWORD PTR [src_ptr]
movdqu xmm1, XMMWORD PTR [ref_ptr]
movdqa xmm2, XMMWORD PTR [src_ptr+src_stride]
@ -627,7 +627,7 @@ sym(vp8_sad16x16_sse3):
paddw xmm7, xmm6
sub end_ptr, 1
jne .vp8_sad16x16_sse3_loop
jne .vp9_sad16x16_sse3_loop
movq xmm0, xmm7
psrldq xmm7, 8
@ -636,14 +636,14 @@ sym(vp8_sad16x16_sse3):
STACK_FRAME_DESTROY_X3
;void vp8_copy32xn_sse3(
;void vp9_copy32xn_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *dst_ptr,
; int dst_stride,
; int height);
global sym(vp8_copy32xn_sse3)
sym(vp8_copy32xn_sse3):
global sym(vp9_copy32xn_sse3)
sym(vp9_copy32xn_sse3):
STACK_FRAME_CREATE_X3
@ -697,14 +697,14 @@ sym(vp8_copy32xn_sse3):
.copy_is_done:
STACK_FRAME_DESTROY_X3
;void vp8_sad16x16x4d_sse3(
;void vp9_sad16x16x4d_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr_base,
; int ref_stride,
; int *results)
global sym(vp8_sad16x16x4d_sse3)
sym(vp8_sad16x16x4d_sse3):
global sym(vp9_sad16x16x4d_sse3)
sym(vp9_sad16x16x4d_sse3):
STACK_FRAME_CREATE_X4
@ -748,14 +748,14 @@ sym(vp8_sad16x16x4d_sse3):
STACK_FRAME_DESTROY_X4
;void vp8_sad16x8x4d_sse3(
;void vp9_sad16x8x4d_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr_base,
; int ref_stride,
; int *results)
global sym(vp8_sad16x8x4d_sse3)
sym(vp8_sad16x8x4d_sse3):
global sym(vp9_sad16x8x4d_sse3)
sym(vp9_sad16x8x4d_sse3):
STACK_FRAME_CREATE_X4
@ -795,14 +795,14 @@ sym(vp8_sad16x8x4d_sse3):
STACK_FRAME_DESTROY_X4
;void vp8_sad8x16x4d_sse3(
;void vp9_sad8x16x4d_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad8x16x4d_sse3)
sym(vp8_sad8x16x4d_sse3):
global sym(vp9_sad8x16x4d_sse3)
sym(vp9_sad8x16x4d_sse3):
STACK_FRAME_CREATE_X4
@ -828,14 +828,14 @@ sym(vp8_sad8x16x4d_sse3):
STACK_FRAME_DESTROY_X4
;void vp8_sad8x8x4d_sse3(
;void vp9_sad8x8x4d_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad8x8x4d_sse3)
sym(vp8_sad8x8x4d_sse3):
global sym(vp9_sad8x8x4d_sse3)
sym(vp9_sad8x8x4d_sse3):
STACK_FRAME_CREATE_X4
@ -857,14 +857,14 @@ sym(vp8_sad8x8x4d_sse3):
STACK_FRAME_DESTROY_X4
;void vp8_sad4x4x4d_sse3(
;void vp9_sad4x4x4d_sse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad4x4x4d_sse3)
sym(vp8_sad4x4x4d_sse3):
global sym(vp9_sad4x4x4d_sse3)
sym(vp9_sad4x4x4d_sse3):
STACK_FRAME_CREATE_X4
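
The x3 variants return SADs at three horizontally adjacent reference positions (ref, ref+1, ref+2), consumed together by the motion search; the x4d variants score four independent candidates per call (the C prototypes take an array of four reference pointers, which ref_ptr_base presumably mirrors). Sketch, reusing sad_sketch from the earlier aside:

/* Three adjacent candidate positions in one call; the x4d variants are
   analogous but take four unrelated reference pointers. */
static void sad_x3_sketch(const unsigned char *src, int src_stride,
                          const unsigned char *ref, int ref_stride,
                          int W, int H, unsigned int results[3]) {
  int k;
  for (k = 0; k < 3; k++)
    results[k] = sad_sketch(src, src_stride, ref + k, ref_stride, W, H);
}
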

View File

@ -155,14 +155,14 @@
%endmacro
;void vp8_sad16x16x8_sse4(
;void vp9_sad16x16x8_sse4(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array);
global sym(vp8_sad16x16x8_sse4)
sym(vp8_sad16x16x8_sse4):
global sym(vp9_sad16x16x8_sse4)
sym(vp9_sad16x16x8_sse4):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -196,15 +196,15 @@ sym(vp8_sad16x16x8_sse4):
ret
;void vp8_sad16x8x8_sse4(
;void vp9_sad16x8x8_sse4(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
global sym(vp8_sad16x8x8_sse4)
sym(vp8_sad16x8x8_sse4):
global sym(vp9_sad16x8x8_sse4)
sym(vp9_sad16x8x8_sse4):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -234,15 +234,15 @@ sym(vp8_sad16x8x8_sse4):
ret
;void vp8_sad8x8x8_sse4(
;void vp9_sad8x8x8_sse4(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
global sym(vp8_sad8x8x8_sse4)
sym(vp8_sad8x8x8_sse4):
global sym(vp9_sad8x8x8_sse4)
sym(vp9_sad8x8x8_sse4):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -272,15 +272,15 @@ sym(vp8_sad8x8x8_sse4):
ret
;void vp8_sad8x16x8_sse4(
;void vp9_sad8x16x8_sse4(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
global sym(vp8_sad8x16x8_sse4)
sym(vp8_sad8x16x8_sse4):
global sym(vp9_sad8x16x8_sse4)
sym(vp9_sad8x16x8_sse4):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -313,15 +313,15 @@ sym(vp8_sad8x16x8_sse4):
ret
;void vp8_sad4x4x8_sse4(
;void vp9_sad4x4x8_sse4(
; const unsigned char *src_ptr,
; int src_stride,
; const unsigned char *ref_ptr,
; int ref_stride,
; unsigned short *sad_array
;);
global sym(vp8_sad4x4x8_sse4)
sym(vp8_sad4x4x8_sse4):
global sym(vp9_sad4x4x8_sse4)
sym(vp9_sad4x4x8_sse4):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
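
The SSE4 x8 kernels widen the same idea to eight consecutive reference columns per call, presumably built on mpsadbw, which would also explain the unsigned short result array. Sketch, again reusing sad_sketch:

/* Eight adjacent candidate positions per call. */
static void sad_x8_sketch(const unsigned char *src, int src_stride,
                          const unsigned char *ref, int ref_stride,
                          int W, int H, unsigned short results[8]) {
  int k;
  for (k = 0; k < 8; k++)
    results[k] = (unsigned short)sad_sketch(src, src_stride,
                                            ref + k, ref_stride, W, H);
}
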

View File

@ -146,14 +146,14 @@
%endmacro
;void vp8_sad16x16x3_ssse3(
;void vp9_sad16x16x3_ssse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad16x16x3_ssse3)
sym(vp8_sad16x16x3_ssse3):
global sym(vp9_sad16x16x3_ssse3)
sym(vp9_sad16x16x3_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -169,31 +169,31 @@ sym(vp8_sad16x16x3_ssse3):
mov rdx, 0xf
and rdx, rdi
jmp .vp8_sad16x16x3_ssse3_skiptable
.vp8_sad16x16x3_ssse3_jumptable:
dd .vp8_sad16x16x3_ssse3_aligned_by_0 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_1 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_2 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_3 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_4 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_5 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_6 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_7 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_8 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_9 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_10 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_11 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_12 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_13 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_14 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_15 - .vp8_sad16x16x3_ssse3_do_jump
.vp8_sad16x16x3_ssse3_skiptable:
jmp .vp9_sad16x16x3_ssse3_skiptable
.vp9_sad16x16x3_ssse3_jumptable:
dd .vp9_sad16x16x3_ssse3_aligned_by_0 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_1 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_2 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_3 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_4 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_5 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_6 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_7 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_8 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_9 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_10 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_11 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_12 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_13 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_14 - .vp9_sad16x16x3_ssse3_do_jump
dd .vp9_sad16x16x3_ssse3_aligned_by_15 - .vp9_sad16x16x3_ssse3_do_jump
.vp9_sad16x16x3_ssse3_skiptable:
call .vp8_sad16x16x3_ssse3_do_jump
.vp8_sad16x16x3_ssse3_do_jump:
call .vp9_sad16x16x3_ssse3_do_jump
.vp9_sad16x16x3_ssse3_do_jump:
pop rcx ; get the address of do_jump
mov rax, .vp8_sad16x16x3_ssse3_jumptable - .vp8_sad16x16x3_ssse3_do_jump
add rax, rcx ; get the absolute address of vp8_sad16x16x3_ssse3_jumptable
mov rax, .vp9_sad16x16x3_ssse3_jumptable - .vp9_sad16x16x3_ssse3_do_jump
add rax, rcx ; get the absolute address of vp9_sad16x16x3_ssse3_jumptable
movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
add rcx, rax
@ -203,23 +203,23 @@ sym(vp8_sad16x16x3_ssse3):
jmp rcx
PROCESS_16X16X3_OFFSET 0, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 1, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 2, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 3, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 4, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 5, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 6, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 7, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 8, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 9, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 10, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 11, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 12, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 13, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 14, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 0, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 1, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 2, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 3, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 4, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 5, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 6, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 7, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 8, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 9, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 10, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 11, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 12, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 13, .vp9_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 14, .vp9_sad16x16x3_ssse3
.vp8_sad16x16x3_ssse3_aligned_by_15:
.vp9_sad16x16x3_ssse3_aligned_by_15:
PROCESS_16X2X3 1
PROCESS_16X2X3 0
PROCESS_16X2X3 0
@ -229,7 +229,7 @@ sym(vp8_sad16x16x3_ssse3):
PROCESS_16X2X3 0
PROCESS_16X2X3 0
.vp8_sad16x16x3_ssse3_store_off:
.vp9_sad16x16x3_ssse3_store_off:
mov rdi, arg(4) ;Results
movq xmm0, xmm5
@ -259,14 +259,14 @@ sym(vp8_sad16x16x3_ssse3):
pop rbp
ret
;void vp8_sad16x8x3_ssse3(
;void vp9_sad16x8x3_ssse3(
; unsigned char *src_ptr,
; int src_stride,
; unsigned char *ref_ptr,
; int ref_stride,
; int *results)
global sym(vp8_sad16x8x3_ssse3)
sym(vp8_sad16x8x3_ssse3):
global sym(vp9_sad16x8x3_ssse3)
sym(vp9_sad16x8x3_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -282,31 +282,31 @@ sym(vp8_sad16x8x3_ssse3):
mov rdx, 0xf
and rdx, rdi
jmp .vp8_sad16x8x3_ssse3_skiptable
.vp8_sad16x8x3_ssse3_jumptable:
dd .vp8_sad16x8x3_ssse3_aligned_by_0 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_1 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_2 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_3 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_4 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_5 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_6 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_7 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_8 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_9 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_10 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_11 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_12 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_13 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_14 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_15 - .vp8_sad16x8x3_ssse3_do_jump
.vp8_sad16x8x3_ssse3_skiptable:
jmp .vp9_sad16x8x3_ssse3_skiptable
.vp9_sad16x8x3_ssse3_jumptable:
dd .vp9_sad16x8x3_ssse3_aligned_by_0 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_1 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_2 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_3 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_4 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_5 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_6 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_7 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_8 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_9 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_10 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_11 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_12 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_13 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_14 - .vp9_sad16x8x3_ssse3_do_jump
dd .vp9_sad16x8x3_ssse3_aligned_by_15 - .vp9_sad16x8x3_ssse3_do_jump
.vp9_sad16x8x3_ssse3_skiptable:
call .vp8_sad16x8x3_ssse3_do_jump
.vp8_sad16x8x3_ssse3_do_jump:
call .vp9_sad16x8x3_ssse3_do_jump
.vp9_sad16x8x3_ssse3_do_jump:
pop rcx ; get the address of do_jump
mov rax, .vp8_sad16x8x3_ssse3_jumptable - .vp8_sad16x8x3_ssse3_do_jump
add rax, rcx ; get the absolute address of vp8_sad16x8x3_ssse3_jumptable
mov rax, .vp9_sad16x8x3_ssse3_jumptable - .vp9_sad16x8x3_ssse3_do_jump
add rax, rcx ; get the absolute address of vp9_sad16x8x3_ssse3_jumptable
movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
add rcx, rax
@ -316,30 +316,30 @@ sym(vp8_sad16x8x3_ssse3):
jmp rcx
PROCESS_16X8X3_OFFSET 0, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 1, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 2, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 3, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 4, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 5, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 6, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 7, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 8, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 9, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 10, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 11, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 12, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 13, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 14, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 0, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 1, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 2, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 3, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 4, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 5, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 6, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 7, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 8, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 9, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 10, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 11, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 12, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 13, .vp9_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 14, .vp9_sad16x8x3_ssse3
.vp8_sad16x8x3_ssse3_aligned_by_15:
.vp9_sad16x8x3_ssse3_aligned_by_15:
PROCESS_16X2X3 1
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
.vp8_sad16x8x3_ssse3_store_off:
.vp9_sad16x8x3_ssse3_store_off:
mov rdi, arg(4) ;Results
movq xmm0, xmm5

View File

@ -61,8 +61,8 @@
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
global sym(vp8_ssim_parms_16x16_sse2)
sym(vp8_ssim_parms_16x16_sse2):
global sym(vp9_ssim_parms_16x16_sse2)
sym(vp9_ssim_parms_16x16_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
@ -151,8 +151,8 @@ sym(vp8_ssim_parms_16x16_sse2):
; or pavgb At this point this is just meant to be first pass for calculating
; all the parms needed for 16x16 ssim so we can play with dssim as distortion
; in mode selection code.
global sym(vp8_ssim_parms_8x8_sse2)
sym(vp8_ssim_parms_8x8_sse2):
global sym(vp9_ssim_parms_8x8_sse2)
sym(vp9_ssim_parms_8x8_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
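
The ssim_parms kernels only gather moments; SSIM (or dssim) is computed later from five sums. In C, the accumulated quantities are (illustrative name and types):

/* Accumulate the five sums SSIM needs over an N x N window:
   sum(s), sum(r), sum(s^2), sum(r^2), sum(s*r). */
static void ssim_parms_sketch(const unsigned char *s, int sp,
                              const unsigned char *r, int rp, int N,
                              unsigned long *sum_s, unsigned long *sum_r,
                              unsigned long *sum_sq_s, unsigned long *sum_sq_r,
                              unsigned long *sum_sxr) {
  int i, j;
  for (i = 0; i < N; i++, s += sp, r += rp)
    for (j = 0; j < N; j++) {
      *sum_s    += s[j];
      *sum_r    += r[j];
      *sum_sq_s += s[j] * s[j];
      *sum_sq_r += r[j] * r[j];
      *sum_sxr  += s[j] * r[j];
    }
}
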

View File

@ -11,11 +11,11 @@
%include "vpx_ports/x86_abi_support.asm"
;void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
;void vp9_subtract_b_mmx_impl(unsigned char *z, int src_stride,
; short *diff, unsigned char *Predictor,
; int pitch);
global sym(vp8_subtract_b_mmx_impl)
sym(vp8_subtract_b_mmx_impl):
global sym(vp9_subtract_b_mmx_impl)
sym(vp9_subtract_b_mmx_impl):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -73,9 +73,9 @@ sym(vp8_subtract_b_mmx_impl):
pop rbp
ret
;void vp8_subtract_mby_mmx(short *diff, unsigned char *src, unsigned char *pred, int stride)
global sym(vp8_subtract_mby_mmx)
sym(vp8_subtract_mby_mmx):
;void vp9_subtract_mby_mmx(short *diff, unsigned char *src, unsigned char *pred, int stride)
global sym(vp9_subtract_mby_mmx)
sym(vp9_subtract_mby_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -149,9 +149,9 @@ sym(vp8_subtract_mby_mmx):
ret
;void vp8_subtract_mbuv_mmx(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
global sym(vp8_subtract_mbuv_mmx)
sym(vp8_subtract_mbuv_mmx):
;void vp9_subtract_mbuv_mmx(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
global sym(vp9_subtract_mbuv_mmx)
sym(vp9_subtract_mbuv_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5

View File

@ -11,11 +11,11 @@
%include "vpx_ports/x86_abi_support.asm"
;void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
;void vp9_subtract_b_sse2_impl(unsigned char *z, int src_stride,
; short *diff, unsigned char *Predictor,
; int pitch);
global sym(vp8_subtract_b_sse2_impl)
sym(vp8_subtract_b_sse2_impl):
global sym(vp9_subtract_b_sse2_impl)
sym(vp9_subtract_b_sse2_impl):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
@ -71,9 +71,9 @@ sym(vp8_subtract_b_sse2_impl):
ret
;void vp8_subtract_mby_sse2(short *diff, unsigned char *src, unsigned char *pred, int stride)
global sym(vp8_subtract_mby_sse2)
sym(vp8_subtract_mby_sse2):
;void vp9_subtract_mby_sse2(short *diff, unsigned char *src, unsigned char *pred, int stride)
global sym(vp9_subtract_mby_sse2)
sym(vp9_subtract_mby_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -145,9 +145,9 @@ sym(vp8_subtract_mby_sse2):
ret
;void vp8_subtract_mbuv_sse2(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
global sym(vp8_subtract_mbuv_sse2)
sym(vp8_subtract_mbuv_sse2):
;void vp9_subtract_mbuv_sse2(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
global sym(vp9_subtract_mbuv_sse2)
sym(vp9_subtract_mbuv_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
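
All subtract kernels compute the residual diff = source - prediction: mby covers the 16x16 luma plane, mbuv the two 8x8 chroma planes, and the _b variant a single 4x4 block. C reference shape, with illustrative names and a W-wide, row-major diff buffer:

/* Residual for one W x H block: signed 16-bit differences. */
static void subtract_sketch(short *diff, const unsigned char *src, int src_stride,
                            const unsigned char *pred, int pred_stride,
                            int W, int H) {
  int r, c;
  for (r = 0; r < H; r++) {
    for (c = 0; c < W; c++)
      diff[c] = (short)(src[c] - pred[c]);
    diff += W;
    src += src_stride;
    pred += pred_stride;
  }
}
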

View File

@ -11,7 +11,7 @@
%include "vpx_ports/x86_abi_support.asm"
; void vp8_temporal_filter_apply_sse2 | arg
; void vp9_temporal_filter_apply_sse2 | arg
; (unsigned char *frame1, | 0
; unsigned int stride, | 1
; unsigned char *frame2, | 2
@ -20,8 +20,8 @@
; int filter_weight, | 5
; unsigned int *accumulator, | 6
; unsigned short *count) | 7
global sym(vp8_temporal_filter_apply_sse2)
sym(vp8_temporal_filter_apply_sse2):
global sym(vp9_temporal_filter_apply_sse2)
sym(vp9_temporal_filter_apply_sse2):
push rbp
mov rbp, rsp

View File

@ -13,12 +13,12 @@
#define __INC_VP8_TEMPORAL_FILTER_X86_H
#if HAVE_SSE2
extern prototype_apply(vp8_temporal_filter_apply_sse2);
extern prototype_apply(vp9_temporal_filter_apply_sse2);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_temporal_filter_apply
#define vp8_temporal_filter_apply vp8_temporal_filter_apply_sse2
#undef vp9_temporal_filter_apply
#define vp9_temporal_filter_apply vp9_temporal_filter_apply_sse2
#endif
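
temporal_filter_apply blends a motion-compensated frame into a running average for altref generation: each pixel gets a weight derived from its squared difference and the strength parameter, weight x pixel is added to accumulator and weight to count, and a later divide yields the filtered frame. A schematic sketch; the real modifier curve and its rounding differ:

/* Schematic per-pixel accumulation for the temporal filter. */
static void temporal_filter_apply_sketch(const unsigned char *orig,
                                         const unsigned char *pred, int n,
                                         int strength, int filter_weight,
                                         unsigned int *accumulator,
                                         unsigned short *count) {
  int i;
  for (i = 0; i < n; i++) {
    int diff = orig[i] - pred[i];
    int modifier = (diff * diff * 3) >> strength;   /* simplified curve */
    if (modifier > 16) modifier = 16;
    modifier = (16 - modifier) * filter_weight;
    accumulator[i] += (unsigned int)(modifier * pred[i]);
    count[i] += (unsigned short)modifier;
  }
}
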

View File

@ -11,9 +11,9 @@
%include "vpx_ports/x86_abi_support.asm"
;unsigned int vp8_get_mb_ss_mmx( short *src_ptr )
global sym(vp8_get_mb_ss_mmx)
sym(vp8_get_mb_ss_mmx):
;unsigned int vp9_get_mb_ss_mmx( short *src_ptr )
global sym(vp9_get_mb_ss_mmx)
sym(vp9_get_mb_ss_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -63,7 +63,7 @@ sym(vp8_get_mb_ss_mmx):
ret
;unsigned int vp8_get8x8var_mmx
;unsigned int vp9_get8x8var_mmx
;(
; unsigned char *src_ptr,
; int source_stride,
@ -72,8 +72,8 @@ sym(vp8_get_mb_ss_mmx):
; unsigned int *SSE,
; int *Sum
;)
global sym(vp8_get8x8var_mmx)
sym(vp8_get8x8var_mmx):
global sym(vp9_get8x8var_mmx)
sym(vp9_get8x8var_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -311,7 +311,7 @@ sym(vp8_get8x8var_mmx):
;unsigned int
;vp8_get4x4var_mmx
;vp9_get4x4var_mmx
;(
; unsigned char *src_ptr,
; int source_stride,
@ -320,8 +320,8 @@ sym(vp8_get8x8var_mmx):
; unsigned int *SSE,
; int *Sum
;)
global sym(vp8_get4x4var_mmx)
sym(vp8_get4x4var_mmx):
global sym(vp9_get4x4var_mmx)
sym(vp9_get4x4var_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -426,15 +426,15 @@ sym(vp8_get4x4var_mmx):
;unsigned int
;vp8_get4x4sse_cs_mmx
;vp9_get4x4sse_cs_mmx
;(
; unsigned char *src_ptr,
; int source_stride,
; unsigned char *ref_ptr,
; int recon_stride
;)
global sym(vp8_get4x4sse_cs_mmx)
sym(vp8_get4x4sse_cs_mmx):
global sym(vp9_get4x4sse_cs_mmx)
sym(vp9_get4x4sse_cs_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
@ -511,7 +511,7 @@ sym(vp8_get4x4sse_cs_mmx):
%define mmx_filter_shift 7
;void vp8_filter_block2d_bil4x4_var_mmx
;void vp9_filter_block2d_bil4x4_var_mmx
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -522,8 +522,8 @@ sym(vp8_get4x4sse_cs_mmx):
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_filter_block2d_bil4x4_var_mmx)
sym(vp8_filter_block2d_bil4x4_var_mmx):
global sym(vp9_filter_block2d_bil4x4_var_mmx)
sym(vp9_filter_block2d_bil4x4_var_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 8
@ -655,7 +655,7 @@ sym(vp8_filter_block2d_bil4x4_var_mmx):
;void vp8_filter_block2d_bil_var_mmx
;void vp9_filter_block2d_bil_var_mmx
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -667,8 +667,8 @@ sym(vp8_filter_block2d_bil4x4_var_mmx):
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_filter_block2d_bil_var_mmx)
sym(vp8_filter_block2d_bil_var_mmx):
global sym(vp9_filter_block2d_bil_var_mmx)
sym(vp9_filter_block2d_bil_var_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9

View File

@ -13,12 +13,12 @@
%define xmm_filter_shift 7
;unsigned int vp8_get_mb_ss_sse2
;unsigned int vp9_get_mb_ss_sse2
;(
; short *src_ptr
;)
global sym(vp8_get_mb_ss_sse2)
sym(vp8_get_mb_ss_sse2):
global sym(vp9_get_mb_ss_sse2)
sym(vp9_get_mb_ss_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 1
@ -71,7 +71,7 @@ sym(vp8_get_mb_ss_sse2):
ret
;unsigned int vp8_get16x16var_sse2
;unsigned int vp9_get16x16var_sse2
;(
; unsigned char * src_ptr,
; int source_stride,
@ -80,8 +80,8 @@ sym(vp8_get_mb_ss_sse2):
; unsigned int * SSE,
; int * Sum
;)
global sym(vp8_get16x16var_sse2)
sym(vp8_get16x16var_sse2):
global sym(vp9_get16x16var_sse2)
sym(vp9_get16x16var_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -215,7 +215,7 @@ sym(vp8_get16x16var_sse2):
;unsigned int vp8_get8x8var_sse2
;unsigned int vp9_get8x8var_sse2
;(
; unsigned char * src_ptr,
; int source_stride,
@ -224,8 +224,8 @@ sym(vp8_get16x16var_sse2):
; unsigned int * SSE,
; int * Sum
;)
global sym(vp8_get8x8var_sse2)
sym(vp8_get8x8var_sse2):
global sym(vp9_get8x8var_sse2)
sym(vp9_get8x8var_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 6
@ -400,7 +400,7 @@ sym(vp8_get8x8var_sse2):
pop rbp
ret
;void vp8_filter_block2d_bil_var_sse2
;void vp9_filter_block2d_bil_var_sse2
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -413,8 +413,8 @@ sym(vp8_get8x8var_sse2):
; unsigned int *sumsquared;;
;
;)
global sym(vp8_filter_block2d_bil_var_sse2)
sym(vp8_filter_block2d_bil_var_sse2):
global sym(vp9_filter_block2d_bil_var_sse2)
sym(vp9_filter_block2d_bil_var_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
@ -680,7 +680,7 @@ filter_block2d_bil_variance:
ret
;void vp8_half_horiz_vert_variance8x_h_sse2
;void vp9_half_horiz_vert_variance8x_h_sse2
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -690,8 +690,8 @@ filter_block2d_bil_variance:
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_half_horiz_vert_variance8x_h_sse2)
sym(vp8_half_horiz_vert_variance8x_h_sse2):
global sym(vp9_half_horiz_vert_variance8x_h_sse2)
sym(vp9_half_horiz_vert_variance8x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -802,7 +802,7 @@ vp8_half_horiz_vert_variance8x_h_1:
pop rbp
ret
;void vp8_half_horiz_vert_variance16x_h_sse2
;void vp9_half_horiz_vert_variance16x_h_sse2
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -812,8 +812,8 @@ vp8_half_horiz_vert_variance8x_h_1:
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_half_horiz_vert_variance16x_h_sse2)
sym(vp8_half_horiz_vert_variance16x_h_sse2):
global sym(vp9_half_horiz_vert_variance16x_h_sse2)
sym(vp9_half_horiz_vert_variance16x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -918,7 +918,7 @@ vp8_half_horiz_vert_variance16x_h_1:
ret
;void vp8_half_vert_variance8x_h_sse2
;void vp9_half_vert_variance8x_h_sse2
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -928,8 +928,8 @@ vp8_half_horiz_vert_variance16x_h_1:
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_half_vert_variance8x_h_sse2)
sym(vp8_half_vert_variance8x_h_sse2):
global sym(vp9_half_vert_variance8x_h_sse2)
sym(vp9_half_vert_variance8x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -1025,7 +1025,7 @@ vp8_half_vert_variance8x_h_1:
pop rbp
ret
;void vp8_half_vert_variance16x_h_sse2
;void vp9_half_vert_variance16x_h_sse2
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -1035,8 +1035,8 @@ vp8_half_vert_variance8x_h_1:
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_half_vert_variance16x_h_sse2)
sym(vp8_half_vert_variance16x_h_sse2):
global sym(vp9_half_vert_variance16x_h_sse2)
sym(vp9_half_vert_variance16x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -1133,7 +1133,7 @@ vp8_half_vert_variance16x_h_1:
ret
;void vp8_half_horiz_variance8x_h_sse2
;void vp9_half_horiz_variance8x_h_sse2
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -1143,8 +1143,8 @@ vp8_half_vert_variance16x_h_1:
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_half_horiz_variance8x_h_sse2)
sym(vp8_half_horiz_variance8x_h_sse2):
global sym(vp9_half_horiz_variance8x_h_sse2)
sym(vp9_half_horiz_variance8x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7
@ -1238,7 +1238,7 @@ vp8_half_horiz_variance8x_h_1:
pop rbp
ret
;void vp8_half_horiz_variance16x_h_sse2
;void vp9_half_horiz_variance16x_h_sse2
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -1248,8 +1248,8 @@ vp8_half_horiz_variance8x_h_1:
; int *sum,
; unsigned int *sumsquared
;)
global sym(vp8_half_horiz_variance16x_h_sse2)
sym(vp8_half_horiz_variance16x_h_sse2):
global sym(vp9_half_horiz_variance16x_h_sse2)
sym(vp9_half_horiz_variance16x_h_sse2):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 7

View File

@ -14,7 +14,7 @@
%define xmm_filter_shift 7
;void vp8_filter_block2d_bil_var_ssse3
;void vp9_filter_block2d_bil_var_ssse3
;(
; unsigned char *ref_ptr,
; int ref_pixels_per_line,
@ -29,8 +29,8 @@
;)
;Note: The filter coefficient at offset=0 is 128. Since the second register
;for Pmaddubsw is signed bytes, we must calculate zero offset separately.
global sym(vp8_filter_block2d_bil_var_ssse3)
sym(vp8_filter_block2d_bil_var_ssse3):
global sym(vp9_filter_block2d_bil_var_ssse3)
sym(vp9_filter_block2d_bil_var_ssse3):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 9
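
The note above is an instruction-set constraint rather than a codec one: pmaddubsw multiplies unsigned bytes by signed bytes, and a signed byte tops out at 127, so the 128 coefficient of the zero-offset filter cannot be encoded and that case is handled without the multiply. Illustratively:

/* Why offset 0 is special for pmaddubsw-based filtering: the filter pair
   sums to 128, and (128, 0) cannot be packed into signed int8 operands. */
static int fits_in_int8(int coeff) { return coeff >= -128 && coeff <= 127; }
/* fits_in_int8(128) == 0, so the offset-0 path skips the multiply. */
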

View File

@ -34,8 +34,8 @@ extern void filter_block1d_v6_mmx
short *vp7_filter
);
extern unsigned int vp8_get_mb_ss_mmx(const short *src_ptr);
extern unsigned int vp8_get8x8var_mmx
extern unsigned int vp9_get_mb_ss_mmx(const short *src_ptr);
extern unsigned int vp9_get8x8var_mmx
(
const unsigned char *src_ptr,
int source_stride,
@ -44,7 +44,7 @@ extern unsigned int vp8_get8x8var_mmx
unsigned int *SSE,
int *Sum
);
extern unsigned int vp8_get4x4var_mmx
extern unsigned int vp9_get4x4var_mmx
(
const unsigned char *src_ptr,
int source_stride,
@ -53,7 +53,7 @@ extern unsigned int vp8_get4x4var_mmx
unsigned int *SSE,
int *Sum
);
extern void vp8_filter_block2d_bil4x4_var_mmx
extern void vp9_filter_block2d_bil4x4_var_mmx
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -64,7 +64,7 @@ extern void vp8_filter_block2d_bil4x4_var_mmx
int *sum,
unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_mmx
extern void vp9_filter_block2d_bil_var_mmx
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -78,7 +78,7 @@ extern void vp8_filter_block2d_bil_var_mmx
);
unsigned int vp8_variance4x4_mmx(
unsigned int vp9_variance4x4_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -87,13 +87,13 @@ unsigned int vp8_variance4x4_mmx(
unsigned int var;
int avg;
vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
vp9_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 4));
}
unsigned int vp8_variance8x8_mmx(
unsigned int vp9_variance8x8_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -102,14 +102,14 @@ unsigned int vp8_variance8x8_mmx(
unsigned int var;
int avg;
vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 6));
}
unsigned int vp8_mse16x16_mmx(
unsigned int vp9_mse16x16_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -119,10 +119,10 @@ unsigned int vp8_mse16x16_mmx(
int sum0, sum1, sum2, sum3;
vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
var = sse0 + sse1 + sse2 + sse3;
*sse = var;
@ -130,7 +130,7 @@ unsigned int vp8_mse16x16_mmx(
}
unsigned int vp8_variance16x16_mmx(
unsigned int vp9_variance16x16_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -140,10 +140,10 @@ unsigned int vp8_variance16x16_mmx(
int sum0, sum1, sum2, sum3, avg;
vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
var = sse0 + sse1 + sse2 + sse3;
avg = sum0 + sum1 + sum2 + sum3;
@ -151,7 +151,7 @@ unsigned int vp8_variance16x16_mmx(
return (var - ((avg * avg) >> 8));
}
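
The 16x16 MMX variance is assembled from four 8x8 calls, presumably to keep the MMX accumulators narrow; the quadrant SSEs and sums add linearly and the mean term is applied once at the end (>> 8 since N = 256). The composition, as a sketch mirroring the shift pattern above:

/* Combine per-quadrant (sse, sum) pairs into one 16x16 variance. */
static unsigned int combine_variance_sketch(const unsigned int sse[4],
                                            const int sum[4],
                                            unsigned int *sse_out) {
  unsigned int var = sse[0] + sse[1] + sse[2] + sse[3];
  int avg = sum[0] + sum[1] + sum[2] + sum[3];
  *sse_out = var;
  return var - (unsigned int)((avg * avg) >> 8);
}
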
unsigned int vp8_variance16x8_mmx(
unsigned int vp9_variance16x8_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -160,8 +160,8 @@ unsigned int vp8_variance16x8_mmx(
unsigned int sse0, sse1, var;
int sum0, sum1, avg;
vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
var = sse0 + sse1;
avg = sum0 + sum1;
@ -171,7 +171,7 @@ unsigned int vp8_variance16x8_mmx(
}
unsigned int vp8_variance8x16_mmx(
unsigned int vp9_variance8x16_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -180,8 +180,8 @@ unsigned int vp8_variance8x16_mmx(
unsigned int sse0, sse1, var;
int sum0, sum1, avg;
vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
var = sse0 + sse1;
avg = sum0 + sum1;
@ -217,7 +217,7 @@ DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = {
{ 8, 8, 8, 8, 120, 120, 120, 120 }
};
unsigned int vp8_sub_pixel_variance4x4_mmx
unsigned int vp9_sub_pixel_variance4x4_mmx
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -230,7 +230,7 @@ unsigned int vp8_sub_pixel_variance4x4_mmx
{
int xsum;
unsigned int xxsum;
vp8_filter_block2d_bil4x4_var_mmx(
vp9_filter_block2d_bil4x4_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
@ -241,7 +241,7 @@ unsigned int vp8_sub_pixel_variance4x4_mmx
}
unsigned int vp8_sub_pixel_variance8x8_mmx
unsigned int vp9_sub_pixel_variance8x8_mmx
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -254,7 +254,7 @@ unsigned int vp8_sub_pixel_variance8x8_mmx
int xsum;
unsigned int xxsum;
vp8_filter_block2d_bil_var_mmx(
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
@ -264,7 +264,7 @@ unsigned int vp8_sub_pixel_variance8x8_mmx
return (xxsum - ((xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_mmx
unsigned int vp9_sub_pixel_variance16x16_mmx
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -278,14 +278,14 @@ unsigned int vp8_sub_pixel_variance16x16_mmx
int xsum0, xsum1;
unsigned int xxsum0, xxsum1;
vp8_filter_block2d_bil_var_mmx(
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
&xsum0, &xxsum0
);
vp8_filter_block2d_bil_var_mmx(
vp9_filter_block2d_bil_var_mmx(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 16,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
@ -301,7 +301,7 @@ unsigned int vp8_sub_pixel_variance16x16_mmx
}
unsigned int vp8_sub_pixel_mse16x16_mmx(
unsigned int vp9_sub_pixel_mse16x16_mmx(
const unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
@ -310,11 +310,11 @@ unsigned int vp8_sub_pixel_mse16x16_mmx(
int dst_pixels_per_line,
unsigned int *sse
) {
vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
vp9_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
return *sse;
}
unsigned int vp8_sub_pixel_variance16x8_mmx
unsigned int vp9_sub_pixel_variance16x8_mmx
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -328,7 +328,7 @@ unsigned int vp8_sub_pixel_variance16x8_mmx
unsigned int xxsum0, xxsum1;
vp8_filter_block2d_bil_var_mmx(
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
@ -336,7 +336,7 @@ unsigned int vp8_sub_pixel_variance16x8_mmx
);
vp8_filter_block2d_bil_var_mmx(
vp9_filter_block2d_bil_var_mmx(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 8,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
@ -350,7 +350,7 @@ unsigned int vp8_sub_pixel_variance16x8_mmx
return (xxsum0 - ((xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_mmx
unsigned int vp9_sub_pixel_variance8x16_mmx
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -362,7 +362,7 @@ unsigned int vp8_sub_pixel_variance8x16_mmx
) {
int xsum;
unsigned int xxsum;
vp8_filter_block2d_bil_var_mmx(
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
@ -373,34 +373,34 @@ unsigned int vp8_sub_pixel_variance8x16_mmx
}
unsigned int vp8_variance_halfpixvar16x16_h_mmx(
unsigned int vp9_variance_halfpixvar16x16_h_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 0,
return vp9_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 0,
ref_ptr, recon_stride, sse);
}
unsigned int vp8_variance_halfpixvar16x16_v_mmx(
unsigned int vp9_variance_halfpixvar16x16_v_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 8,
return vp9_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 8,
ref_ptr, recon_stride, sse);
}
unsigned int vp8_variance_halfpixvar16x16_hv_mmx(
unsigned int vp9_variance_halfpixvar16x16_hv_mmx(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 8,
return vp9_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 8,
ref_ptr, recon_stride, sse);
}

View File

@ -20,7 +20,7 @@ extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_pt
extern void filter_block1d8_h6_sse2(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
extern void filter_block1d8_v6_sse2(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
extern void vp8_filter_block2d_bil4x4_var_mmx
extern void vp9_filter_block2d_bil4x4_var_mmx
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -32,7 +32,7 @@ extern void vp8_filter_block2d_bil4x4_var_mmx
unsigned int *sumsquared
);
extern unsigned int vp8_get4x4var_mmx
extern unsigned int vp9_get4x4var_mmx
(
const unsigned char *src_ptr,
int source_stride,
@ -42,11 +42,11 @@ extern unsigned int vp8_get4x4var_mmx
int *Sum
);
unsigned int vp8_get_mb_ss_sse2
unsigned int vp9_get_mb_ss_sse2
(
const short *src_ptr
);
unsigned int vp8_get16x16var_sse2
unsigned int vp9_get16x16var_sse2
(
const unsigned char *src_ptr,
int source_stride,
@ -55,7 +55,7 @@ unsigned int vp8_get16x16var_sse2
unsigned int *SSE,
int *Sum
);
unsigned int vp8_get8x8var_sse2
unsigned int vp9_get8x8var_sse2
(
const unsigned char *src_ptr,
int source_stride,
@ -64,7 +64,7 @@ unsigned int vp8_get8x8var_sse2
unsigned int *SSE,
int *Sum
);
void vp8_filter_block2d_bil_var_sse2
void vp9_filter_block2d_bil_var_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -76,7 +76,7 @@ void vp8_filter_block2d_bil_var_sse2
int *sum,
unsigned int *sumsquared
);
void vp8_half_horiz_vert_variance8x_h_sse2
void vp9_half_horiz_vert_variance8x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -86,7 +86,7 @@ void vp8_half_horiz_vert_variance8x_h_sse2
int *sum,
unsigned int *sumsquared
);
void vp8_half_horiz_vert_variance16x_h_sse2
void vp9_half_horiz_vert_variance16x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -96,7 +96,7 @@ void vp8_half_horiz_vert_variance16x_h_sse2
int *sum,
unsigned int *sumsquared
);
void vp8_half_horiz_variance8x_h_sse2
void vp9_half_horiz_variance8x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -106,7 +106,7 @@ void vp8_half_horiz_variance8x_h_sse2
int *sum,
unsigned int *sumsquared
);
void vp8_half_horiz_variance16x_h_sse2
void vp9_half_horiz_variance16x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -116,7 +116,7 @@ void vp8_half_horiz_variance16x_h_sse2
int *sum,
unsigned int *sumsquared
);
void vp8_half_vert_variance8x_h_sse2
void vp9_half_vert_variance8x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -126,7 +126,7 @@ void vp8_half_vert_variance8x_h_sse2
int *sum,
unsigned int *sumsquared
);
void vp8_half_vert_variance16x_h_sse2
void vp9_half_vert_variance16x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -139,7 +139,7 @@ void vp8_half_vert_variance16x_h_sse2
DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[16][8]);
unsigned int vp8_variance4x4_wmt(
unsigned int vp9_variance4x4_wmt(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -148,13 +148,13 @@ unsigned int vp8_variance4x4_wmt(
unsigned int var;
int avg;
vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
vp9_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 4));
}
unsigned int vp8_variance8x8_wmt
unsigned int vp9_variance8x8_wmt
(
const unsigned char *src_ptr,
int source_stride,
@ -164,14 +164,14 @@ unsigned int vp8_variance8x8_wmt
unsigned int var;
int avg;
vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
vp9_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
*sse = var;
return (var - ((avg * avg) >> 6));
}
unsigned int vp8_variance16x16_wmt
unsigned int vp9_variance16x16_wmt
(
const unsigned char *src_ptr,
int source_stride,
@ -182,11 +182,11 @@ unsigned int vp8_variance16x16_wmt
int sum0;
vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
*sse = sse0;
return (sse0 - ((sum0 * sum0) >> 8));
}
unsigned int vp8_mse16x16_wmt(
unsigned int vp9_mse16x16_wmt(
const unsigned char *src_ptr,
int source_stride,
const unsigned char *ref_ptr,
@ -195,14 +195,14 @@ unsigned int vp8_mse16x16_wmt(
unsigned int sse0;
int sum0;
vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
*sse = sse0;
return sse0;
}
unsigned int vp8_variance16x8_wmt
unsigned int vp9_variance16x8_wmt
(
const unsigned char *src_ptr,
int source_stride,
@ -212,8 +212,8 @@ unsigned int vp8_variance16x8_wmt
unsigned int sse0, sse1, var;
int sum0, sum1, avg;
vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp8_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
vp9_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
var = sse0 + sse1;
avg = sum0 + sum1;
@ -222,7 +222,7 @@ unsigned int vp8_variance16x8_wmt
}
unsigned int vp8_variance8x16_wmt
unsigned int vp9_variance8x16_wmt
(
const unsigned char *src_ptr,
int source_stride,
@ -232,8 +232,8 @@ unsigned int vp8_variance8x16_wmt
unsigned int sse0, sse1, var;
int sum0, sum1, avg;
vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp8_get8x8var_sse2(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
vp9_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
vp9_get8x8var_sse2(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
var = sse0 + sse1;
avg = sum0 + sum1;
@ -242,7 +242,7 @@ unsigned int vp8_variance8x16_wmt
}
unsigned int vp8_sub_pixel_variance4x4_wmt
unsigned int vp9_sub_pixel_variance4x4_wmt
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -254,7 +254,7 @@ unsigned int vp8_sub_pixel_variance4x4_wmt
) {
int xsum;
unsigned int xxsum;
vp8_filter_block2d_bil4x4_var_mmx(
vp9_filter_block2d_bil4x4_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line,
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
@ -265,7 +265,7 @@ unsigned int vp8_sub_pixel_variance4x4_wmt
}
unsigned int vp8_sub_pixel_variance8x8_wmt
unsigned int vp9_sub_pixel_variance8x8_wmt
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -279,22 +279,22 @@ unsigned int vp8_sub_pixel_variance8x8_wmt
unsigned int xxsum;
if (xoffset == HALFNDX && yoffset == 0) {
vp8_half_horiz_variance8x_h_sse2(
vp9_half_horiz_variance8x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum, &xxsum);
} else if (xoffset == 0 && yoffset == HALFNDX) {
vp8_half_vert_variance8x_h_sse2(
vp9_half_vert_variance8x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum, &xxsum);
} else if (xoffset == HALFNDX && yoffset == HALFNDX) {
vp8_half_horiz_vert_variance8x_h_sse2(
vp9_half_horiz_vert_variance8x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum, &xxsum);
} else {
vp8_filter_block2d_bil_var_sse2(
vp9_filter_block2d_bil_var_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
xoffset, yoffset,
@ -305,7 +305,7 @@ unsigned int vp8_sub_pixel_variance8x8_wmt
return (xxsum - ((xsum * xsum) >> 6));
}
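
The HALFNDX special cases exist because the bilinear filters are two-tap weight pairs summing to 128 (Q7); at the half-pel index both taps are 64, so the filter collapses to a rounded average and the dedicated half-pel kernels can drop the multiplies. A scalar illustration only (the Q7 layout is an assumption drawn from the filter tables, which this hunk does not show):

static unsigned char bilin_tap(unsigned char a, unsigned char b, int w) {
  /* general two-tap bilinear: weight w on the second sample, Q7 rounding */
  return (unsigned char)((a * (128 - w) + b * w + 64) >> 7);
}

static unsigned char bilin_half(unsigned char a, unsigned char b) {
  /* w == 64 degenerates to a rounded average -- the half-pel fast path */
  return (unsigned char)((a + b + 1) >> 1);
}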
unsigned int vp8_sub_pixel_variance16x16_wmt
unsigned int vp9_sub_pixel_variance16x16_wmt
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -322,29 +322,29 @@ unsigned int vp8_sub_pixel_variance16x16_wmt
// note we could avoid these if statements if the calling function
// just called the appropriate functions inside.
if (xoffset == HALFNDX && yoffset == 0) {
vp8_half_horiz_variance16x_h_sse2(
vp9_half_horiz_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
} else if (xoffset == 0 && yoffset == HALFNDX) {
vp8_half_vert_variance16x_h_sse2(
vp9_half_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
} else if (xoffset == HALFNDX && yoffset == HALFNDX) {
vp8_half_horiz_vert_variance16x_h_sse2(
vp9_half_horiz_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
} else {
vp8_filter_block2d_bil_var_sse2(
vp9_filter_block2d_bil_var_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
xoffset, yoffset,
&xsum0, &xxsum0
);
vp8_filter_block2d_bil_var_sse2(
vp9_filter_block2d_bil_var_sse2(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 16,
xoffset, yoffset,
@ -358,7 +358,7 @@ unsigned int vp8_sub_pixel_variance16x16_wmt
return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
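
On the "avoid these if statements" note above: the vp9_variance_halfpixvar16x16_{h,v,hv}_wmt wrappers later in this file are exactly those direct entry points. A caller that already knows its offsets could resolve the choice once, e.g. through a small table (the table itself is hypothetical; the function names and signatures are the ones defined below):

typedef unsigned int (*halfpix_fn)(const unsigned char *src_ptr,
                                   int src_pixels_per_line,
                                   const unsigned char *dst_ptr,
                                   int dst_pixels_per_line,
                                   unsigned int *sse);

static const halfpix_fn halfpix16x16_wmt[3] = {
  vp9_variance_halfpixvar16x16_h_wmt,   /* xoffset == HALFNDX, yoffset == 0 */
  vp9_variance_halfpixvar16x16_v_wmt,   /* xoffset == 0, yoffset == HALFNDX */
  vp9_variance_halfpixvar16x16_hv_wmt,  /* both offsets at HALFNDX */
};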
unsigned int vp8_sub_pixel_mse16x16_wmt(
unsigned int vp9_sub_pixel_mse16x16_wmt(
const unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
@ -367,11 +367,11 @@ unsigned int vp8_sub_pixel_mse16x16_wmt(
int dst_pixels_per_line,
unsigned int *sse
) {
vp8_sub_pixel_variance16x16_wmt(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
vp9_sub_pixel_variance16x16_wmt(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
return *sse;
}
unsigned int vp8_sub_pixel_variance16x8_wmt
unsigned int vp9_sub_pixel_variance16x8_wmt
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -386,28 +386,28 @@ unsigned int vp8_sub_pixel_variance16x8_wmt
unsigned int xxsum0, xxsum1;
if (xoffset == HALFNDX && yoffset == 0) {
vp8_half_horiz_variance16x_h_sse2(
vp9_half_horiz_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum0, &xxsum0);
} else if (xoffset == 0 && yoffset == HALFNDX) {
vp8_half_vert_variance16x_h_sse2(
vp9_half_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum0, &xxsum0);
} else if (xoffset == HALFNDX && yoffset == HALFNDX) {
vp8_half_horiz_vert_variance16x_h_sse2(
vp9_half_horiz_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum0, &xxsum0);
} else {
vp8_filter_block2d_bil_var_sse2(
vp9_filter_block2d_bil_var_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
xoffset, yoffset,
&xsum0, &xxsum0);
vp8_filter_block2d_bil_var_sse2(
vp9_filter_block2d_bil_var_sse2(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 8,
xoffset, yoffset,
@ -420,7 +420,7 @@ unsigned int vp8_sub_pixel_variance16x8_wmt
return (xxsum0 - ((xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_wmt
unsigned int vp9_sub_pixel_variance8x16_wmt
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -434,22 +434,22 @@ unsigned int vp8_sub_pixel_variance8x16_wmt
unsigned int xxsum;
if (xoffset == HALFNDX && yoffset == 0) {
vp8_half_horiz_variance8x_h_sse2(
vp9_half_horiz_variance8x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum, &xxsum);
} else if (xoffset == 0 && yoffset == HALFNDX) {
vp8_half_vert_variance8x_h_sse2(
vp9_half_vert_variance8x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum, &xxsum);
} else if (xoffset == HALFNDX && yoffset == HALFNDX) {
vp8_half_horiz_vert_variance8x_h_sse2(
vp9_half_horiz_vert_variance8x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum, &xxsum);
} else {
vp8_filter_block2d_bil_var_sse2(
vp9_filter_block2d_bil_var_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
xoffset, yoffset,
@ -461,7 +461,7 @@ unsigned int vp8_sub_pixel_variance8x16_wmt
}
unsigned int vp8_variance_halfpixvar16x16_h_wmt(
unsigned int vp9_variance_halfpixvar16x16_h_wmt(
const unsigned char *src_ptr,
int src_pixels_per_line,
const unsigned char *dst_ptr,
@ -470,7 +470,7 @@ unsigned int vp8_variance_halfpixvar16x16_h_wmt(
int xsum0;
unsigned int xxsum0;
vp8_half_horiz_variance16x_h_sse2(
vp9_half_horiz_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
@ -480,7 +480,7 @@ unsigned int vp8_variance_halfpixvar16x16_h_wmt(
}
unsigned int vp8_variance_halfpixvar16x16_v_wmt(
unsigned int vp9_variance_halfpixvar16x16_v_wmt(
const unsigned char *src_ptr,
int src_pixels_per_line,
const unsigned char *dst_ptr,
@ -488,7 +488,7 @@ unsigned int vp8_variance_halfpixvar16x16_v_wmt(
unsigned int *sse) {
int xsum0;
unsigned int xxsum0;
vp8_half_vert_variance16x_h_sse2(
vp9_half_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
@ -498,7 +498,7 @@ unsigned int vp8_variance_halfpixvar16x16_v_wmt(
}
unsigned int vp8_variance_halfpixvar16x16_hv_wmt(
unsigned int vp9_variance_halfpixvar16x16_hv_wmt(
const unsigned char *src_ptr,
int src_pixels_per_line,
const unsigned char *dst_ptr,
@ -507,7 +507,7 @@ unsigned int vp8_variance_halfpixvar16x16_hv_wmt(
int xsum0;
unsigned int xxsum0;
vp8_half_horiz_vert_variance16x_h_sse2(
vp9_half_horiz_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);


@ -15,7 +15,7 @@
#define HALFNDX 8
extern unsigned int vp8_get16x16var_sse2
extern unsigned int vp9_get16x16var_sse2
(
const unsigned char *src_ptr,
int source_stride,
@ -24,7 +24,7 @@ extern unsigned int vp8_get16x16var_sse2
unsigned int *SSE,
int *Sum
);
extern void vp8_half_horiz_vert_variance16x_h_sse2
extern void vp9_half_horiz_vert_variance16x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -34,7 +34,7 @@ extern void vp8_half_horiz_vert_variance16x_h_sse2
int *sum,
unsigned int *sumsquared
);
extern void vp8_half_horiz_variance16x_h_sse2
extern void vp9_half_horiz_variance16x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -44,7 +44,7 @@ extern void vp8_half_horiz_variance16x_h_sse2
int *sum,
unsigned int *sumsquared
);
extern void vp8_half_vert_variance16x_h_sse2
extern void vp9_half_vert_variance16x_h_sse2
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -54,7 +54,7 @@ extern void vp8_half_vert_variance16x_h_sse2
int *sum,
unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_ssse3
extern void vp9_filter_block2d_bil_var_ssse3
(
const unsigned char *ref_ptr,
int ref_pixels_per_line,
@ -67,7 +67,7 @@ extern void vp8_filter_block2d_bil_var_ssse3
unsigned int *sumsquared
);
unsigned int vp8_sub_pixel_variance16x16_ssse3
unsigned int vp9_sub_pixel_variance16x16_ssse3
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -83,22 +83,22 @@ unsigned int vp8_sub_pixel_variance16x16_ssse3
// note we could avoid these if statements if the calling function
// just called the appropriate functions inside.
if (xoffset == HALFNDX && yoffset == 0) {
vp8_half_horiz_variance16x_h_sse2(
vp9_half_horiz_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
} else if (xoffset == 0 && yoffset == HALFNDX) {
vp8_half_vert_variance16x_h_sse2(
vp9_half_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
} else if (xoffset == HALFNDX && yoffset == HALFNDX) {
vp8_half_horiz_vert_variance16x_h_sse2(
vp9_half_horiz_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
&xsum0, &xxsum0);
} else {
vp8_filter_block2d_bil_var_ssse3(
vp9_filter_block2d_bil_var_ssse3(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
xoffset, yoffset,
@ -109,7 +109,7 @@ unsigned int vp8_sub_pixel_variance16x16_ssse3
return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_variance16x8_ssse3
unsigned int vp9_sub_pixel_variance16x8_ssse3
(
const unsigned char *src_ptr,
int src_pixels_per_line,
@ -124,22 +124,22 @@ unsigned int vp8_sub_pixel_variance16x8_ssse3
unsigned int xxsum0;
if (xoffset == HALFNDX && yoffset == 0) {
vp8_half_horiz_variance16x_h_sse2(
vp9_half_horiz_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum0, &xxsum0);
} else if (xoffset == 0 && yoffset == HALFNDX) {
vp8_half_vert_variance16x_h_sse2(
vp9_half_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum0, &xxsum0);
} else if (xoffset == HALFNDX && yoffset == HALFNDX) {
vp8_half_horiz_vert_variance16x_h_sse2(
vp9_half_horiz_vert_variance16x_h_sse2(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
&xsum0, &xxsum0);
} else {
vp8_filter_block2d_bil_var_ssse3(
vp9_filter_block2d_bil_var_ssse3(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
xoffset, yoffset,


@ -16,67 +16,67 @@
#if HAVE_MMX
void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch) {
vp8_short_fdct4x4_mmx(input, output, pitch);
vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
void vp9_short_fdct8x4_mmx(short *input, short *output, int pitch) {
vp9_short_fdct4x4_mmx(input, output, pitch);
vp9_short_fdct4x4_mmx(input + 4, output + 16, pitch);
}
int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc) {
int vp9_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
int vp9_mbblock_error_mmx(MACROBLOCK *mb, int dc) {
short *coeff_ptr = mb->block[0].coeff;
short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
return vp8_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, dc);
return vp9_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, dc);
}
int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
int vp8_mbuverror_mmx(MACROBLOCK *mb) {
int vp9_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
int vp9_mbuverror_mmx(MACROBLOCK *mb) {
short *s_ptr = &mb->coeff[256];
short *d_ptr = &mb->e_mbd.dqcoeff[256];
return vp8_mbuverror_mmx_impl(s_ptr, d_ptr);
return vp9_mbuverror_mmx_impl(s_ptr, d_ptr);
}
void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
void vp9_subtract_b_mmx_impl(unsigned char *z, int src_stride,
short *diff, unsigned char *predictor,
int pitch);
void vp8_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch) {
void vp9_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch) {
unsigned char *z = *(be->base_src) + be->src;
unsigned int src_stride = be->src_stride;
short *diff = &be->src_diff[0];
unsigned char *predictor = &bd->predictor[0];
vp8_subtract_b_mmx_impl(z, src_stride, diff, predictor, pitch);
vp9_subtract_b_mmx_impl(z, src_stride, diff, predictor, pitch);
}
#endif
#if HAVE_SSE2
int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc) {
int vp9_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
int vp9_mbblock_error_xmm(MACROBLOCK *mb, int dc) {
short *coeff_ptr = mb->block[0].coeff;
short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
return vp8_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc);
return vp9_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc);
}
int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
int vp8_mbuverror_xmm(MACROBLOCK *mb) {
int vp9_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
int vp9_mbuverror_xmm(MACROBLOCK *mb) {
short *s_ptr = &mb->coeff[256];
short *d_ptr = &mb->e_mbd.dqcoeff[256];
return vp8_mbuverror_xmm_impl(s_ptr, d_ptr);
return vp9_mbuverror_xmm_impl(s_ptr, d_ptr);
}
void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
void vp9_subtract_b_sse2_impl(unsigned char *z, int src_stride,
short *diff, unsigned char *predictor,
int pitch);
void vp8_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch) {
void vp9_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch) {
unsigned char *z = *(be->base_src) + be->src;
unsigned int src_stride = be->src_stride;
short *diff = &be->src_diff[0];
unsigned char *predictor = &bd->predictor[0];
vp8_subtract_b_sse2_impl(z, src_stride, diff, predictor, pitch);
vp9_subtract_b_sse2_impl(z, src_stride, diff, predictor, pitch);
}
#endif
void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
void vp9_arch_x86_encoder_init(VP8_COMP *cpi) {
#if CONFIG_RUNTIME_CPU_DETECT
int flags = x86_simd_caps();
@ -90,23 +90,23 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
/* Override default functions with fastest ones for this CPU. */
#if HAVE_SSE2
if (flags & HAS_SSE2) {
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
cpi->rtcd.temporal.apply = vp9_temporal_filter_apply_sse2;
}
#endif
#if HAVE_SSE3
if (flags & HAS_SSE3) {
cpi->rtcd.search.full_search = vp8_full_search_sadx3;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sadx4;
cpi->rtcd.search.refining_search = vp8_refining_search_sadx4;
cpi->rtcd.search.full_search = vp9_full_search_sadx3;
cpi->rtcd.search.diamond_search = vp9_diamond_search_sadx4;
cpi->rtcd.search.refining_search = vp9_refining_search_sadx4;
}
#endif
#if HAVE_SSE4_1
if (flags & HAS_SSE4_1) {
cpi->rtcd.search.full_search = vp8_full_search_sadx8;
cpi->rtcd.search.full_search = vp9_full_search_sadx8;
}
#endif
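
vp9_arch_x86_encoder_init() above is the usual libvpx runtime-dispatch pattern: start from the C defaults, then overwrite RTCD entries that a capability bit from x86_simd_caps() unlocks. A minimal sketch of the same idea (the sad16x16 names and the bare function pointer are hypothetical stand-ins; HAS_SSE2 is the capability flag from vpx_ports/x86.h):

typedef unsigned int (*sad_fn)(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride);

unsigned int sad16x16_c(const unsigned char *src, int src_stride,
                        const unsigned char *ref, int ref_stride);
unsigned int sad16x16_sse2(const unsigned char *src, int src_stride,
                           const unsigned char *ref, int ref_stride);

static void example_rtcd_init(sad_fn *sad16x16, int flags) {
  *sad16x16 = sad16x16_c;        /* portable default */
  if (flags & HAS_SSE2)
    *sad16x16 = sad16x16_sse2;   /* override when the CPU supports it */
}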


@ -357,14 +357,14 @@ static vpx_codec_err_t vp8e_set_config(vpx_codec_alg_priv_t *ctx,
if (!res) {
ctx->cfg = *cfg;
set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
vp8_change_config(ctx->cpi, &ctx->oxcf);
vp9_change_config(ctx->cpi, &ctx->oxcf);
}
return res;
}
int vp8_reverse_trans(int);
int vp9_reverse_trans(int);
static vpx_codec_err_t get_param(vpx_codec_alg_priv_t *ctx,
@ -378,8 +378,8 @@ static vpx_codec_err_t get_param(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_INVALID_PARAM;
switch (ctrl_id) {
MAP(VP8E_GET_LAST_QUANTIZER, vp8_get_quantizer(ctx->cpi));
MAP(VP8E_GET_LAST_QUANTIZER_64, vp8_reverse_trans(vp8_get_quantizer(ctx->cpi)));
MAP(VP8E_GET_LAST_QUANTIZER, vp9_get_quantizer(ctx->cpi));
MAP(VP8E_GET_LAST_QUANTIZER_64, vp9_reverse_trans(vp9_get_quantizer(ctx->cpi)));
}
return VPX_CODEC_OK;
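
From the application side nothing changes here: the public control ids still route through vpx_codec_control(), only the internal helpers were renamed. A usage sketch (codec is an initialized vpx_codec_ctx_t; setup elided):

int q = 0;
/* quantizer used for the most recently encoded frame, on the 0..63 scale */
vpx_codec_control(&codec, VP8E_GET_LAST_QUANTIZER_64, &q);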
@ -418,7 +418,7 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx,
if (!res) {
ctx->vp8_cfg = xcfg;
set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
vp8_change_config(ctx->cpi, &ctx->oxcf);
vp9_change_config(ctx->cpi, &ctx->oxcf);
}
return res;
@ -482,7 +482,7 @@ static vpx_codec_err_t vp8e_common_init(vpx_codec_ctx_t *ctx,
priv->deprecated_mode = NO_MODE_SET;
vp8_initialize();
vp9_initialize();
res = validate_config(priv, &priv->cfg, &priv->vp8_cfg);
@ -490,7 +490,7 @@ static vpx_codec_err_t vp8e_common_init(vpx_codec_ctx_t *ctx,
set_vp8e_config(&ctx->priv->alg_priv->oxcf,
ctx->priv->alg_priv->cfg,
ctx->priv->alg_priv->vp8_cfg);
optr = vp8_create_compressor(&ctx->priv->alg_priv->oxcf);
optr = vp9_create_compressor(&ctx->priv->alg_priv->oxcf);
if (!optr)
res = VPX_CODEC_MEM_ERROR;
@ -518,7 +518,7 @@ static vpx_codec_err_t vp8e_exp_init(vpx_codec_ctx_t *ctx) {
static vpx_codec_err_t vp8e_destroy(vpx_codec_alg_priv_t *ctx) {
free(ctx->cx_data);
vp8_remove_compressor(&ctx->cpi);
vp9_remove_compressor(&ctx->cpi);
free(ctx);
return VPX_CODEC_OK;
}
@ -563,7 +563,7 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
if (ctx->oxcf.Mode != new_qc) {
ctx->oxcf.Mode = new_qc;
vp8_change_config(ctx->cpi, &ctx->oxcf);
vp9_change_config(ctx->cpi, &ctx->oxcf);
}
}
@ -602,7 +602,7 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
if (flags & VP8_EFLAG_NO_REF_ARF)
ref ^= VP8_ALT_FLAG;
vp8_use_as_reference(ctx->cpi, ref);
vp9_use_as_reference(ctx->cpi, ref);
}
if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF
@ -619,11 +619,11 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
if (flags & VP8_EFLAG_NO_UPD_ARF)
upd ^= VP8_ALT_FLAG;
vp8_update_reference(ctx->cpi, upd);
vp9_update_reference(ctx->cpi, upd);
}
if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
vp8_update_entropy(ctx->cpi, 0);
vp9_update_entropy(ctx->cpi, 0);
}
/* Handle fixed keyframe intervals */
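
The truncated hunks above all follow one pattern: the per-frame VP8_EFLAG_NO_REF_* / VP8_EFLAG_NO_UPD_* bits switch entries off in masks that start fully enabled before the renamed helpers are called. Condensed for the reference mask (VP8_LAST_FLAG/VP8_GOLD_FLAG/VP8_ALT_FLAG are the internal flag bits; their use for ref matches the surrounding code):

int ref = VP8_LAST_FLAG | VP8_GOLD_FLAG | VP8_ALT_FLAG;  /* all references on */
if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VP8_LAST_FLAG;
if (flags & VP8_EFLAG_NO_REF_GF)   ref ^= VP8_GOLD_FLAG;
if (flags & VP8_EFLAG_NO_REF_ARF)  ref ^= VP8_ALT_FLAG;
vp9_use_as_reference(ctx->cpi, ref);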
@ -660,7 +660,7 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
if (img != NULL) {
res = image2yuvconfig(img, &sd);
if (vp8_receive_raw_frame(ctx->cpi, ctx->next_frame_flag | lib_flags,
if (vp9_receive_raw_frame(ctx->cpi, ctx->next_frame_flag | lib_flags,
&sd, dst_time_stamp, dst_end_time_stamp)) {
VP8_COMP *cpi = (VP8_COMP *)ctx->cpi;
res = update_error_state(ctx, &cpi->common.error);
@ -675,7 +675,7 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
lib_flags = 0;
while (cx_data_sz >= ctx->cx_data_sz / 2
&& -1 != vp8_get_compressed_data(ctx->cpi, &lib_flags, &size, cx_data, &dst_time_stamp, &dst_end_time_stamp, !img)) {
&& -1 != vp9_get_compressed_data(ctx->cpi, &lib_flags, &size, cx_data, &dst_time_stamp, &dst_end_time_stamp, !img)) {
if (size) {
vpx_codec_pts_t round, delta;
vpx_codec_cx_pkt_t pkt;
@ -766,7 +766,7 @@ static vpx_codec_err_t vp8e_set_reference(vpx_codec_alg_priv_t *ctx,
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
vp8_set_reference(ctx->cpi, frame->frame_type, &sd);
vp9_set_reference(ctx->cpi, frame->frame_type, &sd);
return VPX_CODEC_OK;
} else
return VPX_CODEC_INVALID_PARAM;
@ -784,7 +784,7 @@ static vpx_codec_err_t vp8e_get_reference(vpx_codec_alg_priv_t *ctx,
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
vp8_get_reference(ctx->cpi, frame->frame_type, &sd);
vp9_get_reference(ctx->cpi, frame->frame_type, &sd);
return VPX_CODEC_OK;
} else
return VPX_CODEC_INVALID_PARAM;
@ -822,7 +822,7 @@ static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx) {
flags.noise_level = ctx->preview_ppcfg.noise_level;
}
if (0 == vp8_get_preview_raw_frame(ctx->cpi, &sd, &flags)) {
if (0 == vp9_get_preview_raw_frame(ctx->cpi, &sd, &flags)) {
/*
vpx_img_wrap(&ctx->preview_img, VPX_IMG_FMT_YV12,
@ -865,7 +865,7 @@ static vpx_codec_err_t vp8e_update_entropy(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
int update = va_arg(args, int);
vp8_update_entropy(ctx->cpi, update);
vp9_update_entropy(ctx->cpi, update);
return VPX_CODEC_OK;
}
@ -874,7 +874,7 @@ static vpx_codec_err_t vp8e_update_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
int update = va_arg(args, int);
vp8_update_reference(ctx->cpi, update);
vp9_update_reference(ctx->cpi, update);
return VPX_CODEC_OK;
}
@ -882,7 +882,7 @@ static vpx_codec_err_t vp8e_use_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
int reference_flag = va_arg(args, int);
vp8_use_as_reference(ctx->cpi, reference_flag);
vp9_use_as_reference(ctx->cpi, reference_flag);
return VPX_CODEC_OK;
}
@ -894,7 +894,7 @@ static vpx_codec_err_t vp8e_set_roi_map(vpx_codec_alg_priv_t *ctx,
if (data) {
vpx_roi_map_t *roi = (vpx_roi_map_t *)data;
if (!vp8_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols, roi->delta_q, roi->delta_lf, roi->static_threshold))
if (!vp9_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols, roi->delta_q, roi->delta_lf, roi->static_threshold))
return VPX_CODEC_OK;
else
return VPX_CODEC_INVALID_PARAM;
@ -912,7 +912,7 @@ static vpx_codec_err_t vp8e_set_activemap(vpx_codec_alg_priv_t *ctx,
vpx_active_map_t *map = (vpx_active_map_t *)data;
if (!vp8_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols))
if (!vp9_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols))
return VPX_CODEC_OK;
else
return VPX_CODEC_INVALID_PARAM;
@ -929,7 +929,7 @@ static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
if (data) {
int res;
vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data;
res = vp8_set_internal_size(ctx->cpi, scalemode.h_scaling_mode, scalemode.v_scaling_mode);
res = vp9_set_internal_size(ctx->cpi, scalemode.h_scaling_mode, scalemode.v_scaling_mode);
if (!res) {
/* force next frame to be a key frame to effect the scaling mode */
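
A control-call sketch for the scaling path (vpx_scaling_mode_t and the VP8E_* one-dimensional modes are the public types from vp8cx.h; VP8E_FOURFIVE scales that dimension to 4/5):

vpx_scaling_mode_t mode = { VP8E_FOURFIVE, VP8E_NORMAL };
vpx_codec_control(&codec, VP8E_SET_SCALEMODE, &mode);
/* the handler above then forces a key frame so the new size takes effect */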


@ -530,7 +530,7 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
}
static vpx_codec_err_t vp8_set_reference(vpx_codec_alg_priv_t *ctx,
static vpx_codec_err_t vp9_set_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
@ -548,7 +548,7 @@ static vpx_codec_err_t vp8_set_reference(vpx_codec_alg_priv_t *ctx,
}
static vpx_codec_err_t vp8_get_reference(vpx_codec_alg_priv_t *ctx,
static vpx_codec_err_t vp9_get_reference(vpx_codec_alg_priv_t *ctx,
int ctr_id,
va_list args) {
@ -639,8 +639,8 @@ static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
}
vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] = {
{VP8_SET_REFERENCE, vp8_set_reference},
{VP8_COPY_REFERENCE, vp8_get_reference},
{VP8_SET_REFERENCE, vp9_set_reference},
{VP8_COPY_REFERENCE, vp9_get_reference},
{VP8_SET_POSTPROC, vp8_set_postproc},
{VP8_SET_DBG_COLOR_REF_FRAME, vp8_set_dbg_options},
{VP8_SET_DBG_COLOR_MB_MODES, vp8_set_dbg_options},