9ac2f66320
This commit re-designs the quantization process for transform coefficient blocks of size 4x4 to 16x16. It improves compression performance at speed 7 by 3.85%. An SSSE3 version of the new quantization process is included. The average runtime of 8x8 block quantization is reduced from 285 cycles to 255 cycles, i.e., over 10% faster.

Change-Id: I61278aa02efc70599b962d3314671db5b0446a50
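For orientation, below is a minimal scalar sketch of the round-multiply-shift style of quantization the commit message refers to. The function name, the DC/AC indexing convention, the natural-order coefficient loop (the real encoder walks a scan order), and the omission of the regular quantizer's zero-bin check are assumptions made for illustration; this is not the actual libvpx implementation or its SSSE3 counterpart.

#include <stdint.h>

/* Hypothetical sketch of a fast-path quantizer: take the absolute value,
 * add the rounding term, multiply by a Q16 fixed-point quantizer, shift
 * down by 16, and restore the sign.  Index 0 of each small table is assumed
 * to hold the DC value and index 1 the AC value. */
static void quantize_fp_sketch(const int16_t *coeff, int n_coeffs,
                               const int16_t round[2],   /* [0] DC, [1] AC */
                               const int16_t quant[2],   /* Q16 fixed point */
                               const int16_t dequant[2], /* [0] DC, [1] AC */
                               int16_t *qcoeff, int16_t *dqcoeff,
                               uint16_t *eob) {
  int i, last_nonzero = -1;
  for (i = 0; i < n_coeffs; ++i) {
    const int c = coeff[i];
    const int sign = c >> 31;            /* 0 if non-negative, -1 if negative */
    const int abs_c = (c ^ sign) - sign; /* |c| */
    const int band = (i != 0);           /* 0 for the DC term, 1 for AC */
    const int tmp = ((abs_c + round[band]) * quant[band]) >> 16;
    qcoeff[i] = (int16_t)((tmp ^ sign) - sign);
    dqcoeff[i] = (int16_t)(qcoeff[i] * dequant[band]);
    if (tmp) last_nonzero = i;
  }
  *eob = (uint16_t)(last_nonzero + 1);   /* count up to the last nonzero */
}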
/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VP9_ENCODER_VP9_QUANTIZE_H_
#define VP9_ENCODER_VP9_QUANTIZE_H_

#include "./vpx_config.h"
#include "vp9/encoder/vp9_block.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {
  DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, y_zbin[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, y_round[QINDEX_RANGE][8]);

  // TODO(jingning): in progress of re-working the quantization. will decide
  // if we want to deprecate the current use of y_quant.
  DECLARE_ALIGNED(16, int16_t, y_quant_fp[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, uv_quant_fp[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, y_round_fp[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, uv_round_fp[QINDEX_RANGE][8]);

  DECLARE_ALIGNED(16, int16_t, uv_quant[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, uv_quant_shift[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, uv_zbin[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);

#if CONFIG_ALPHA
  DECLARE_ALIGNED(16, int16_t, a_quant[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, a_quant_shift[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, a_zbin[QINDEX_RANGE][8]);
  DECLARE_ALIGNED(16, int16_t, a_round[QINDEX_RANGE][8]);
#endif
} QUANTS;

void vp9_quantize_dc(const int16_t *coeff_ptr, int skip_block,
                     const int16_t *round_ptr, const int16_t quant_ptr,
                     int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
                     const int16_t dequant_ptr, uint16_t *eob_ptr);
void vp9_quantize_dc_32x32(const int16_t *coeff_ptr, int skip_block,
                           const int16_t *round_ptr, const int16_t quant_ptr,
                           int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
                           const int16_t dequant_ptr, uint16_t *eob_ptr);
void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
                                const int16_t *scan, const int16_t *iscan);

struct VP9_COMP;
struct VP9Common;

void vp9_frame_init_quantizer(struct VP9_COMP *cpi);

void vp9_update_zbin_extra(struct VP9_COMP *cpi, MACROBLOCK *x);

void vp9_init_plane_quantizers(struct VP9_COMP *cpi, MACROBLOCK *x);

void vp9_init_quantizer(struct VP9_COMP *cpi);

void vp9_set_quantizer(struct VP9Common *cm, int q);

int vp9_quantizer_to_qindex(int quantizer);

int vp9_qindex_to_quantizer(int qindex);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // VP9_ENCODER_VP9_QUANTIZE_H_
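For reference, a hedged sketch of how the 8-entry rows in the QUANTS tables above might be consumed. The element-0-for-DC / elements-1..7-for-AC layout and the accessor names are assumptions made for illustration and are not stated in this header; the 16-byte alignment and 8-entry padding suggest the rows are sized so a single aligned 128-bit load (e.g., by the SSSE3 path mentioned in the commit message) can cover a whole row.

#include "vp9/encoder/vp9_quantize.h"

/* Hypothetical accessors: assume each row stores the DC quantizer in
 * element 0 and the AC quantizer replicated across elements 1..7. */
static int16_t y_dc_quant(const QUANTS *q, int qindex) {
  return q->y_quant[qindex][0];  /* assumed DC slot */
}
static int16_t y_ac_quant(const QUANTS *q, int qindex) {
  return q->y_quant[qindex][1];  /* assumed AC slot, replicated in 1..7 */
}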