6035da5448
This is a code snapshot of experimental work currently ongoing for a next-generation codec. The codebase has been cut down considerably from the libvpx baseline. For example, we are currently only supporting VBR 2-pass rate control and have removed most of the code relating to coding speed, threading, error resilience, partitions and various other features. This is in part to make the codebase easier to work on and experiment with, but also because we want to have an open discussion about how the bitstream will be structured and partitioned and not have that conversation constrained by past work. Our basic working pattern has been to initially encapsulate experiments using configure options linked to #if CONFIG_XXX statements in the code. Once experiments have matured and we are reasonably happy that they give benefit and can be merged without breaking other experiments, we remove the conditional compile statements and merge them in. Current changes include: * Temporal coding experiment for segments (though still only 4 max, it will likely be increased). * Segment feature experiment - to allow various bits of information to be coded at the segment level. Features tested so far include mode and reference frame information, limiting end of block offset and transform size, alongside Q and loop filter parameters, but this set is very fluid. * Support for 8x8 transform - 8x8 dct with 2nd order 2x2 haar is used in MBs using 16x16 prediction modes within inter frames. * Compound prediction (combination of signals from existing predictors to create a new predictor). * 8 tap interpolation filters and 1/8th pel motion vectors. * Loop filter modifications. * Various entropy modifications and changes to how entropy contexts and updates are handled. * Extended quantizer range matched to transform precision improvements.
There are also ongoing further experiments that we hope to merge in the near future: For example, coding of motion and other aspects of the prediction signal to better support larger image formats, use of larger block sizes (e.g. 32x32 and up) and lossless non-transform based coding options (especially for key frames). It is our hope that we will be able to make regular updates and we will warmly welcome community contributions. Please be warned that, at this stage, the codebase is currently slower than VP8 stable branch as most new code has not been optimized, and even the 'C' has been deliberately written to be simple and obvious, not fast. The following graphs have the initial test results, numbers in the tables measure the compression improvement in terms of percentage. The build has the following optional experiments configured: --enable-experimental --enable-enhanced_interp --enable-uvintra --enable-high_precision_mv --enable-sixteenth_subpel_uv CIF Size clips: http://getwebm.org/tmp/cif/ HD size clips: http://getwebm.org/tmp/hd/ (stable_20120309 represents encoding results of WebM master branch build as of commit#7a15907) They were encoded using the following encode parameters: --good --cpu-used=0 -t 0 --lag-in-frames=25 --min-q=0 --max-q=63 --end-usage=0 --auto-alt-ref=1 -p 2 --pass=2 --kf-max-dist=9999 --kf-min-dist=0 --drop-frame=0 --static-thresh=0 --bias-pct=50 --minsection-pct=0 --maxsection-pct=800 --sharpness=0 --arnr-maxframes=7 --arnr-strength=3(for HD,6 for CIF) --arnr-type=3 Change-Id: I5c62ed09cfff5815a2bb34e7820d6a810c23183c
329 lines
10 KiB
C
329 lines
10 KiB
C
/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VARIANCE_X86_H
#define VARIANCE_X86_H

/* Note:
 *
 * This platform is commonly built for runtime CPU detection. If you modify
 * any of the function mappings present in this file, be sure to also update
 * them in the function pointer initialization code
 */
#if HAVE_MMX
/* MMX implementations of the SAD, variance, sub-pixel variance, MSE and
 * sum-of-squares primitives for every supported block size.  The prototype_*
 * macros (declared in the generic variance header) expand to the function
 * declarations for each symbol.
 */
extern prototype_sad(vp8_sad4x4_mmx);
extern prototype_sad(vp8_sad8x8_mmx);
extern prototype_sad(vp8_sad8x16_mmx);
extern prototype_sad(vp8_sad16x8_mmx);
extern prototype_sad(vp8_sad16x16_mmx);
extern prototype_variance(vp8_variance4x4_mmx);
extern prototype_variance(vp8_variance8x8_mmx);
extern prototype_variance(vp8_variance8x16_mmx);
extern prototype_variance(vp8_variance16x8_mmx);
extern prototype_variance(vp8_variance16x16_mmx);
extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_mmx);
extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_mmx);
extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_mmx);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_mmx);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_mmx);
extern prototype_variance(vp8_variance_halfpixvar16x16_h_mmx);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_mmx);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_mmx);
extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_mmx);
extern prototype_getmbss(vp8_get_mb_ss_mmx);
extern prototype_variance(vp8_mse16x16_mmx);
extern prototype_variance2(vp8_get8x8var_mmx);

#if !CONFIG_RUNTIME_CPU_DETECT
/* Static dispatch: when runtime CPU detection is disabled, rebind the
 * generic vp8_variance_* names directly to the MMX versions at compile time.
 */
#undef vp8_variance_sad4x4
#define vp8_variance_sad4x4 vp8_sad4x4_mmx

#undef vp8_variance_sad8x8
#define vp8_variance_sad8x8 vp8_sad8x8_mmx

#undef vp8_variance_sad8x16
#define vp8_variance_sad8x16 vp8_sad8x16_mmx

#undef vp8_variance_sad16x8
#define vp8_variance_sad16x8 vp8_sad16x8_mmx

#undef vp8_variance_sad16x16
#define vp8_variance_sad16x16 vp8_sad16x16_mmx

#undef vp8_variance_var4x4
#define vp8_variance_var4x4 vp8_variance4x4_mmx

#undef vp8_variance_var8x8
#define vp8_variance_var8x8 vp8_variance8x8_mmx

#undef vp8_variance_var8x16
#define vp8_variance_var8x16 vp8_variance8x16_mmx

#undef vp8_variance_var16x8
#define vp8_variance_var16x8 vp8_variance16x8_mmx

#undef vp8_variance_var16x16
#define vp8_variance_var16x16 vp8_variance16x16_mmx

#undef vp8_variance_subpixvar4x4
#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_mmx

#undef vp8_variance_subpixvar8x8
#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_mmx

#undef vp8_variance_subpixvar8x16
#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_mmx

#undef vp8_variance_subpixvar16x8
#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_mmx

#undef vp8_variance_subpixvar16x16
#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_mmx

#undef vp8_variance_halfpixvar16x16_h
#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_mmx

#undef vp8_variance_halfpixvar16x16_v
#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_mmx

#undef vp8_variance_halfpixvar16x16_hv
#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_mmx

#undef vp8_variance_subpixmse16x16
#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_mmx

#undef vp8_variance_getmbss
#define vp8_variance_getmbss vp8_get_mb_ss_mmx

#undef vp8_variance_mse16x16
#define vp8_variance_mse16x16 vp8_mse16x16_mmx

#endif
#endif

#if HAVE_SSE2
/* SSE2 ("wmt") implementations.  This section also declares the SSE2-only
 * helpers: the 32-wide copy, the 16x16 variance accumulator and the SSIM
 * parameter-gathering kernels.
 */
extern prototype_sad(vp8_sad4x4_wmt);
extern prototype_sad(vp8_sad8x8_wmt);
extern prototype_sad(vp8_sad8x16_wmt);
extern prototype_sad(vp8_sad16x8_wmt);
extern prototype_sad(vp8_sad16x16_wmt);
extern prototype_sad(vp8_copy32xn_sse2);
extern prototype_variance(vp8_variance4x4_wmt);
extern prototype_variance(vp8_variance8x8_wmt);
extern prototype_variance(vp8_variance8x16_wmt);
extern prototype_variance(vp8_variance16x8_wmt);
extern prototype_variance(vp8_variance16x16_wmt);
extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_wmt);
extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_wmt);
extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_wmt);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_wmt);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_wmt);
extern prototype_variance(vp8_variance_halfpixvar16x16_h_wmt);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_wmt);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_wmt);
extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_wmt);
extern prototype_getmbss(vp8_get_mb_ss_sse2);
extern prototype_variance(vp8_mse16x16_wmt);
extern prototype_variance2(vp8_get8x8var_sse2);
extern prototype_variance2(vp8_get16x16var_sse2);
/* Semicolons added below: the original declarations lacked them, which is
 * inconsistent with every other extern in this file.  NOTE(review): confirm
 * that prototype_ssimpf does not already expand with a trailing semicolon.
 */
extern prototype_ssimpf(vp8_ssim_parms_8x8_sse2);
extern prototype_ssimpf(vp8_ssim_parms_16x16_sse2);

#if !CONFIG_RUNTIME_CPU_DETECT
/* Static dispatch: rebind the generic names to the SSE2 versions.  Because
 * this section follows the MMX one, these take precedence when both are
 * built without runtime CPU detection.
 */
#undef vp8_variance_sad4x4
#define vp8_variance_sad4x4 vp8_sad4x4_wmt

#undef vp8_variance_sad8x8
#define vp8_variance_sad8x8 vp8_sad8x8_wmt

#undef vp8_variance_sad8x16
#define vp8_variance_sad8x16 vp8_sad8x16_wmt

#undef vp8_variance_sad16x8
#define vp8_variance_sad16x8 vp8_sad16x8_wmt

#undef vp8_variance_sad16x16
#define vp8_variance_sad16x16 vp8_sad16x16_wmt

#undef vp8_variance_copy32xn
#define vp8_variance_copy32xn vp8_copy32xn_sse2

#undef vp8_variance_var4x4
#define vp8_variance_var4x4 vp8_variance4x4_wmt

#undef vp8_variance_var8x8
#define vp8_variance_var8x8 vp8_variance8x8_wmt

#undef vp8_variance_var8x16
#define vp8_variance_var8x16 vp8_variance8x16_wmt

#undef vp8_variance_var16x8
#define vp8_variance_var16x8 vp8_variance16x8_wmt

#undef vp8_variance_var16x16
#define vp8_variance_var16x16 vp8_variance16x16_wmt

#undef vp8_variance_subpixvar4x4
#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_wmt

#undef vp8_variance_subpixvar8x8
#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_wmt

#undef vp8_variance_subpixvar8x16
#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_wmt

#undef vp8_variance_subpixvar16x8
#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_wmt

#undef vp8_variance_subpixvar16x16
#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_wmt

#undef vp8_variance_halfpixvar16x16_h
#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_wmt

#undef vp8_variance_halfpixvar16x16_v
#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_wmt

#undef vp8_variance_halfpixvar16x16_hv
#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_wmt

#undef vp8_variance_subpixmse16x16
#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_wmt

#undef vp8_variance_getmbss
#define vp8_variance_getmbss vp8_get_mb_ss_sse2

#undef vp8_variance_mse16x16
#define vp8_variance_mse16x16 vp8_mse16x16_wmt

#if ARCH_X86_64
/* The SSIM parameter kernels are only mapped on x86-64 builds. */
#undef vp8_ssimpf_8x8
#define vp8_ssimpf_8x8 vp8_ssim_parms_8x8_sse2

#undef vp8_ssimpf_16x16
#define vp8_ssimpf_16x16 vp8_ssim_parms_16x16_sse2
#endif

#endif
#endif

#if HAVE_SSE3
/* SSE3 implementations.  The x3 variants compute SADs against three
 * candidate blocks sharing one base address; the x4d variants take four
 * independent reference addresses.
 */
extern prototype_sad(vp8_sad16x16_sse3);
extern prototype_sad(vp8_sad16x8_sse3);
extern prototype_sad_multi_same_address(vp8_sad16x16x3_sse3);
extern prototype_sad_multi_same_address(vp8_sad16x8x3_sse3);
extern prototype_sad_multi_same_address(vp8_sad8x16x3_sse3);
extern prototype_sad_multi_same_address(vp8_sad8x8x3_sse3);
extern prototype_sad_multi_same_address(vp8_sad4x4x3_sse3);

extern prototype_sad_multi_dif_address(vp8_sad16x16x4d_sse3);
extern prototype_sad_multi_dif_address(vp8_sad16x8x4d_sse3);
extern prototype_sad_multi_dif_address(vp8_sad8x16x4d_sse3);
extern prototype_sad_multi_dif_address(vp8_sad8x8x4d_sse3);
extern prototype_sad_multi_dif_address(vp8_sad4x4x4d_sse3);
extern prototype_sad(vp8_copy32xn_sse3);

#if !CONFIG_RUNTIME_CPU_DETECT
/* Static dispatch: rebind the generic names to the SSE3 versions.
 * (vp8_sad16x8_sse3 is declared above but intentionally not mapped here.)
 */
#undef vp8_variance_sad16x16
#define vp8_variance_sad16x16 vp8_sad16x16_sse3

#undef vp8_variance_sad16x16x3
#define vp8_variance_sad16x16x3 vp8_sad16x16x3_sse3

#undef vp8_variance_sad16x8x3
#define vp8_variance_sad16x8x3 vp8_sad16x8x3_sse3

#undef vp8_variance_sad8x16x3
#define vp8_variance_sad8x16x3 vp8_sad8x16x3_sse3

#undef vp8_variance_sad8x8x3
#define vp8_variance_sad8x8x3 vp8_sad8x8x3_sse3

#undef vp8_variance_sad4x4x3
#define vp8_variance_sad4x4x3 vp8_sad4x4x3_sse3

#undef vp8_variance_sad16x16x4d
#define vp8_variance_sad16x16x4d vp8_sad16x16x4d_sse3

#undef vp8_variance_sad16x8x4d
#define vp8_variance_sad16x8x4d vp8_sad16x8x4d_sse3

#undef vp8_variance_sad8x16x4d
#define vp8_variance_sad8x16x4d vp8_sad8x16x4d_sse3

#undef vp8_variance_sad8x8x4d
#define vp8_variance_sad8x8x4d vp8_sad8x8x4d_sse3

#undef vp8_variance_sad4x4x4d
#define vp8_variance_sad4x4x4d vp8_sad4x4x4d_sse3

#undef vp8_variance_copy32xn
#define vp8_variance_copy32xn vp8_copy32xn_sse3

#endif
#endif

#if HAVE_SSSE3
/* SSSE3 implementations of the 3-candidate SAD and the 16-wide sub-pixel
 * variance kernels.
 */
extern prototype_sad_multi_same_address(vp8_sad16x16x3_ssse3);
extern prototype_sad_multi_same_address(vp8_sad16x8x3_ssse3);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_ssse3);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_ssse3);

#if !CONFIG_RUNTIME_CPU_DETECT
/* Static dispatch: rebind the generic names to the SSSE3 versions. */
#undef vp8_variance_sad16x16x3
#define vp8_variance_sad16x16x3 vp8_sad16x16x3_ssse3

#undef vp8_variance_sad16x8x3
#define vp8_variance_sad16x8x3 vp8_sad16x8x3_ssse3

#undef vp8_variance_subpixvar16x8
#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_ssse3

#undef vp8_variance_subpixvar16x16
#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_ssse3

#endif
#endif

#if HAVE_SSE4_1
/* SSE4.1 implementations of the 8-candidate SAD kernels. */
extern prototype_sad_multi_same_address_1(vp8_sad16x16x8_sse4);
extern prototype_sad_multi_same_address_1(vp8_sad16x8x8_sse4);
extern prototype_sad_multi_same_address_1(vp8_sad8x16x8_sse4);
extern prototype_sad_multi_same_address_1(vp8_sad8x8x8_sse4);
extern prototype_sad_multi_same_address_1(vp8_sad4x4x8_sse4);

#if !CONFIG_RUNTIME_CPU_DETECT
/* Static dispatch: rebind the generic names to the SSE4.1 versions. */
#undef vp8_variance_sad16x16x8
#define vp8_variance_sad16x16x8 vp8_sad16x16x8_sse4

#undef vp8_variance_sad16x8x8
#define vp8_variance_sad16x8x8 vp8_sad16x8x8_sse4

#undef vp8_variance_sad8x16x8
#define vp8_variance_sad8x16x8 vp8_sad8x16x8_sse4

#undef vp8_variance_sad8x8x8
#define vp8_variance_sad8x8x8 vp8_sad8x8x8_sse4

#undef vp8_variance_sad4x4x8
#define vp8_variance_sad4x4x8 vp8_sad4x4x8_sse4

#endif
#endif

#endif /* VARIANCE_X86_H */