Merge remote branch 'origin/master' into experimental
Change-Id: I1a58ce4643377bae4cc6bf9c89320251f724ca66
@@ -1147,7 +1147,7 @@ static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
 }
 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
 {
-    int Error4x4, Error16x16, error_uv;
+    int Error4x4, Error16x16;
     int rate4x4, rate16x16, rateuv;
     int dist4x4, dist16x16, distuv;
     int rate = 0;
@@ -1160,7 +1160,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
 #if !(CONFIG_REALTIME_ONLY)
     if (cpi->sf.RD && cpi->compressor_speed != 2)
     {
-        error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
+        vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
         rate += rateuv;

         Error16x16 = vp8_rd_pick_intra16x16mby_mode(cpi, x, &rate16x16, &rate16x16_tokenonly, &dist16x16);
@@ -1231,7 +1231,6 @@ int vp8cx_encode_inter_macroblock
 )
 {
     MACROBLOCKD *const xd = &x->e_mbd;
-    int inter_error;
     int intra_error = 0;
     int rate;
     int distortion;
@@ -1258,7 +1257,7 @@ int vp8cx_encode_inter_macroblock
              * do not recalculate */
             cpi->zbin_mode_boost_enabled = 0;
         }
-        inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
+        vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);

         /* switch back to the regular quantizer for the encode */
         if (cpi->sf.improved_quant)
@@ -1272,10 +1271,9 @@ int vp8cx_encode_inter_macroblock
     }
     else
 #endif
-        inter_error = vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
+        vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);


-    cpi->prediction_error += inter_error;
+    cpi->prediction_error += distortion;
     cpi->intra_error += intra_error;

 #if 0
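Note: the vp8cx_encode_intra_macro_block / vp8cx_encode_inter_macroblock hunks above stop using the return value of the mode-selection calls and accumulate the distortion reported through the out-parameter instead. A minimal sketch of that calling pattern, not libvpx code; pick_mode and the numbers below are illustrative stand-ins:

/* Sketch only: shows why the inter_error/error_uv locals could be dropped
 * once the caller reads the 'distortion' out-parameter directly. */
#include <stdio.h>

static void pick_mode(int *rate, int *distortion)
{
    /* stand-in for vp8_pick_inter_mode(): report the chosen mode's cost */
    *rate = 120;
    *distortion = 3500;
}

int main(void)
{
    int rate, distortion;
    long prediction_error = 0;

    pick_mode(&rate, &distortion);
    prediction_error += distortion;   /* was: prediction_error += inter_error */

    printf("rate %d, accumulated prediction error %ld\n", rate, prediction_error);
    return 0;
}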
@@ -263,7 +263,7 @@ int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb, int
     return error;
 }

-int vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)
+void vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)
 {

     MACROBLOCKD *x = &mb->e_mbd;
@@ -408,11 +408,10 @@ int vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)


     mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
-    return best_error;

 }

-int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
+void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
 {
     BLOCK *b = &x->block[0];
     BLOCKD *d = &x->e_mbd.block[0];
@@ -504,7 +503,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec

     cpi->mbs_tested_so_far++;          // Count of the number of MBs tested so far this frame

-    *returnintra = best_intra_rd;
+    *returnintra = INT_MAX;
     x->skip = 0;

     ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);
@@ -649,7 +648,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
             if (this_rd < best_intra_rd)
             {
                 best_intra_rd = this_rd;
-                *returnintra = best_intra_rd ;
+                *returnintra = distortion2;
             }
         }

@@ -673,9 +672,8 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
             if (this_rd < best_intra_rd)
             {
                 best_intra_rd = this_rd;
-                *returnintra = best_intra_rd ;
+                *returnintra = distortion2;
             }
-
             break;

         case NEWMV:
@@ -934,8 +932,6 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
     }

     x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
-
-    return best_rd;
 }


@@ -955,6 +951,4 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
     }

     x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
-
-    return best_rd;
 }
@@ -16,6 +16,6 @@

 #define RD_ESTIMATE(RM,DM,R,D) ( ((128+(R)*(RM)) >> 8) + (DM)*(D) )
 extern int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *, MACROBLOCK *mb, int *Rate, int *Distortion);
-extern int vp8_pick_intra_mbuv_mode(MACROBLOCK *mb);
-extern int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra);
+extern void vp8_pick_intra_mbuv_mode(MACROBLOCK *mb);
+extern void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra);
 #endif
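Note: RD_ESTIMATE in the header hunk above folds a (rate, distortion) pair into one cost: the rate is scaled by the rate multiplier in 1/256 fixed point (the +128 term rounds), then the distortion times the distortion multiplier is added. A standalone example using a copy of the macro; the multiplier and rate/distortion values are made-up numbers, not encoder defaults:

/* Self-contained illustration of the rate-distortion cost macro shown above. */
#include <stdio.h>

#define RD_ESTIMATE(RM,DM,R,D) ( ((128+(R)*(RM)) >> 8) + (DM)*(D) )

int main(void)
{
    int rate_mult = 300;   /* rate multiplier, 1/256 fixed point (assumed value) */
    int dist_mult = 1;     /* distortion multiplier (assumed value) */
    int rate      = 150;   /* bits for a candidate mode */
    int dist      = 4200;  /* sum of squared error for that mode */

    /* cost = round(rate * rate_mult / 256) + dist * dist_mult */
    printf("rd cost = %d\n", RD_ESTIMATE(rate_mult, dist_mult, rate, dist));
    return 0;
}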
@@ -864,7 +864,7 @@ static int vp8_rd_inter_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *distort
     return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }

-int vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly, int *distortion)
+void vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly, int *distortion)
 {
     MB_PREDICTION_MODE mode;
     MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
@@ -907,7 +907,6 @@ int vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *ra
     *distortion = d;

     x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
-    return best_rd;
 }
 #endif

@@ -1256,7 +1255,12 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
             // Should we do a full search (best quality only)
             if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
             {
-                thissme = cpi->full_search_sad(x, c, e, bsi->mvp,
+                MV full_mvp;
+
+                full_mvp.row = bsi->mvp->row >>3;
+                full_mvp.col = bsi->mvp->col >>3;
+
+                thissme = cpi->full_search_sad(x, c, e, &full_mvp,
                                                sadpb / 4, 16, v_fn_ptr, x->mvcost, bsi->ref_mv);

                 if (thissme < bestsme)
@@ -1787,7 +1791,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
 }

 #if !(CONFIG_REALTIME_ONLY)
-int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
+void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
 {
     BLOCK *b = &x->block[0];
     BLOCKD *d = &x->e_mbd.block[0];
@@ -1806,7 +1810,8 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
     int mdcounts[4];
     int rate;
     int distortion;
-    int best_rd = INT_MAX; // 1 << 30;
+    int best_rd = INT_MAX;
+    int best_intra_rd = INT_MAX;
     int ref_frame_cost[MAX_REF_FRAMES];
     int rate2, distortion2;
     int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
@@ -2423,9 +2428,12 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
         //all_rates[mode_index] = rate2;
         //all_dist[mode_index] = distortion2;

-        if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) && (this_rd < *returnintra))
+        // Keep record of best intra distortion
+        if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
+            (this_rd < best_intra_rd) )
         {
-            *returnintra = this_rd ;
+            best_intra_rd = this_rd;
+            *returnintra = distortion2 ;
         }

         // Did this mode help.. i.i is it the new best mode
@@ -2541,8 +2549,6 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
     }

     x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
-
-    return best_rd;
 }


@@ -2565,7 +2571,5 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
     }

     x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
-
-    return best_rd;
 }
 #endif
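Note: the vp8_rd_pick_inter_mode and vp8_pick_inter_mode hunks above change what *returnintra carries. The loop now keeps the best intra rate-distortion cost in a local best_intra_rd and reports that mode's distortion (distortion2) through the pointer, instead of writing the cost itself. An illustrative helper showing just that bookkeeping; the function name and parameters are hypothetical, not encoder API:

/* Illustrative helper (not the encoder's actual loop). */
void record_best_intra(int is_intra_mode, int this_rd, int distortion2,
                       int *best_intra_rd, int *returnintra)
{
    if (is_intra_mode && this_rd < *best_intra_rd)
    {
        *best_intra_rd = this_rd;      /* best rate-distortion cost so far */
        *returnintra   = distortion2;  /* caller now receives the distortion */
    }
}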
@@ -11,11 +11,11 @@

 #ifndef __INC_RDOPT_H
 #define __INC_RDOPT_H
-void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue);
-int vp8_rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *rate, int *rate_to, int *distortion, int best_rd);
-int vp8_rd_pick_intra16x16mby_mode(VP8_COMP *cpi, MACROBLOCK *x, int *returnrate, int *rate_to, int *returndistortion);
-int vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_to, int *distortion);
-extern int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra);
+extern void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue);
+extern int vp8_rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *rate, int *rate_to, int *distortion, int best_rd);
+extern int vp8_rd_pick_intra16x16mby_mode(VP8_COMP *cpi, MACROBLOCK *x, int *returnrate, int *rate_to, int *returndistortion);
+extern void vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_to, int *distortion);
+extern void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra);

 extern void vp8_mv_pred
 (
@@ -9,38 +9,62 @@


 %include "vpx_ports/x86_abi_support.asm"
+%include "asm_enc_offsets.asm"


-;int vp8_fast_quantize_b_impl_ssse3(short *coeff_ptr
-;                           short *qcoeff_ptr,short *dequant_ptr,
-;                           short *round_ptr,
-;                           short *quant_ptr, short *dqcoeff_ptr);
+;void vp8_fast_quantize_b_ssse3 | arg
+;  (BLOCK  *b,                  |  0
+;   BLOCKD *d)                  |  1
 ;
-global sym(vp8_fast_quantize_b_impl_ssse3)
-sym(vp8_fast_quantize_b_impl_ssse3):
+global sym(vp8_fast_quantize_b_ssse3)
+sym(vp8_fast_quantize_b_ssse3):
     push        rbp
     mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
     GET_GOT     rbx
-    push        rsi
+
+%if ABI_IS_32BIT
     push        rdi
+    push        rsi
+%else
+  %ifidn __OUTPUT_FORMAT__,x64
+    push        rdi
+    push        rsi
+  %endif
+%endif
     ; end prolog

-    mov         rdx, arg(0)                 ;coeff_ptr
-    mov         rdi, arg(3)                 ;round_ptr
-    mov         rsi, arg(4)                 ;quant_ptr
+%if ABI_IS_32BIT
+    mov         rdi, arg(0)                 ; BLOCK *b
+    mov         rsi, arg(1)                 ; BLOCKD *d
+%else
+  %ifidn __OUTPUT_FORMAT__,x64
+    mov         rdi, rcx                    ; BLOCK *b
+    mov         rsi, rdx                    ; BLOCKD *d
+  %else
+    ;mov        rdi, rdi                    ; BLOCK *b
+    ;mov        rsi, rsi                    ; BLOCKD *d
+  %endif
+%endif

-    movdqa      xmm0, [rdx]
-    movdqa      xmm4, [rdx + 16]
+    mov         rax, [rdi + vp8_block_coeff]
+    mov         rcx, [rdi + vp8_block_round]
+    mov         rdx, [rdi + vp8_block_quant_fast]

-    movdqa      xmm2, [rdi]                 ;round lo
-    movdqa      xmm3, [rdi + 16]            ;round hi
+    ; coeff
+    movdqa      xmm0, [rax]
+    movdqa      xmm4, [rax + 16]
+
+    ; round
+    movdqa      xmm2, [rcx]
+    movdqa      xmm3, [rcx + 16]

     movdqa      xmm1, xmm0
     movdqa      xmm5, xmm4

-    psraw       xmm0, 15                    ;sign of z (aka sz)
-    psraw       xmm4, 15                    ;sign of z (aka sz)
+    ; sz = z >> 15
+    psraw       xmm0, 15
+    psraw       xmm4, 15

     pabsw       xmm1, xmm1
     pabsw       xmm5, xmm5
@@ -48,23 +72,24 @@ sym(vp8_fast_quantize_b_impl_ssse3):
     paddw       xmm1, xmm2
     paddw       xmm5, xmm3

-    pmulhw      xmm1, [rsi]
-    pmulhw      xmm5, [rsi + 16]
+    ; quant_fast
+    pmulhw      xmm1, [rdx]
+    pmulhw      xmm5, [rdx + 16]

-    mov         rdi, arg(1)                 ;qcoeff_ptr
-    mov         rcx, arg(2)                 ;dequant_ptr
-    mov         rsi, arg(5)                 ;dqcoeff_ptr
+    mov         rax, [rsi + vp8_blockd_qcoeff]
+    mov         rdi, [rsi + vp8_blockd_dequant]
+    mov         rcx, [rsi + vp8_blockd_dqcoeff]

     pxor        xmm1, xmm0
     pxor        xmm5, xmm4
     psubw       xmm1, xmm0
     psubw       xmm5, xmm4

-    movdqa      [rdi], xmm1
-    movdqa      [rdi + 16], xmm5
+    movdqa      [rax], xmm1
+    movdqa      [rax + 16], xmm5

-    movdqa      xmm2, [rcx]
-    movdqa      xmm3, [rcx + 16]
+    movdqa      xmm2, [rdi]
+    movdqa      xmm3, [rdi + 16]

     pxor        xmm4, xmm4
     pmullw      xmm2, xmm1
@@ -73,38 +98,37 @@ sym(vp8_fast_quantize_b_impl_ssse3):
     pcmpeqw     xmm1, xmm4                  ;non zero mask
     pcmpeqw     xmm5, xmm4                  ;non zero mask
     packsswb    xmm1, xmm5
-    pshufb      xmm1, [ GLOBAL(zz_shuf)]
+    pshufb      xmm1, [GLOBAL(zz_shuf)]

     pmovmskb    edx, xmm1
-;   xor         ecx, ecx
-;   mov         eax, -1
-;find_eob_loop:
-;   shr         edx, 1
-;   jc          fq_skip
-;   mov         eax, ecx
-;fq_skip:
-;   inc         ecx
-;   cmp         ecx, 16
-;   jne         find_eob_loop
+
     xor         rdi, rdi
     mov         eax, -1
     xor         dx, ax                      ;flip the bits for bsr
     bsr         eax, edx

-    movdqa      [rsi], xmm2                 ;store dqcoeff
-    movdqa      [rsi + 16], xmm3            ;store dqcoeff
+    movdqa      [rcx], xmm2                 ;store dqcoeff
+    movdqa      [rcx + 16], xmm3            ;store dqcoeff

     sub         edi, edx                    ;check for all zeros in bit mask
     sar         edi, 31                     ;0 or -1
     add         eax, 1
     and         eax, edi                    ;if the bit mask was all zero,
                                             ;then eob = 0
+    mov         [rsi + vp8_blockd_eob], eax
+
     ; begin epilog
-    pop         rdi
+%if ABI_IS_32BIT
     pop         rsi
+    pop         rdi
+%else
+  %ifidn __OUTPUT_FORMAT__,x64
+    pop         rsi
+    pop         rdi
+  %endif
+%endif

     RESTORE_GOT
-    UNSHADOW_ARGS
     pop         rbp
     ret

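Note: the assembly above is the SSSE3 fast quantizer reworked to take (BLOCK *b, BLOCKD *d) and to read coeff/round/quant_fast and write qcoeff/dequant/dqcoeff/eob through the asm_enc_offsets constants instead of six pointer arguments. A scalar C sketch of the computation, for reference only; it is not the libvpx C implementation, the structs are simplified stand-ins for BLOCK/BLOCKD, and the real code determines eob in zig-zag scan order rather than raster order:

/* Scalar sketch of the fast quantize step (illustrative, simplified). */
#include <stdlib.h>

typedef struct { short *coeff, *round, *quant_fast; } block_sketch;
typedef struct { short *qcoeff, *dequant, *dqcoeff; int eob; } blockd_sketch;

void fast_quantize_b_sketch(block_sketch *b, blockd_sketch *d)
{
    int i, eob = 0;

    for (i = 0; i < 16; i++)
    {
        int z  = b->coeff[i];
        int sz = z < 0 ? -1 : 0;                        /* sign of z */
        int x  = ((abs(z) + b->round[i]) * b->quant_fast[i]) >> 16;

        x = (x ^ sz) - sz;                              /* restore the sign */
        d->qcoeff[i]  = (short)x;
        d->dqcoeff[i] = (short)(x * d->dequant[i]);

        if (x)
            eob = i + 1;                                /* last nonzero coefficient */
    }

    d->eob = eob;
}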
@@ -19,7 +19,7 @@
  */
 #if HAVE_MMX

-#endif
+#endif /* HAVE_MMX */


 #if HAVE_SSE2
@@ -34,9 +34,21 @@ extern prototype_quantize_block(vp8_fast_quantize_b_sse2);
 #undef vp8_quantize_fastquantb
 #define vp8_quantize_fastquantb vp8_fast_quantize_b_sse2

-#endif
+#endif /* !CONFIG_RUNTIME_CPU_DETECT */

-#endif
+#endif /* HAVE_SSE2 */


-#endif
+#if HAVE_SSSE3
+extern prototype_quantize_block(vp8_fast_quantize_b_ssse3);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+
+#undef vp8_quantize_fastquantb
+#define vp8_quantize_fastquantb vp8_fast_quantize_b_ssse3
+
+#endif /* !CONFIG_RUNTIME_CPU_DETECT */
+
+#endif /* HAVE_SSSE3 */
+
+#endif /* QUANTIZE_X86_H */
@@ -112,21 +112,6 @@ static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
 #endif

 #if HAVE_SSSE3
-int vp8_fast_quantize_b_impl_ssse3(short *coeff_ptr,
-                                   short *qcoeff_ptr, short *dequant_ptr,
-                                   short *round_ptr,
-                                   short *quant_ptr, short *dqcoeff_ptr);
-static void fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d)
-{
-    d->eob = vp8_fast_quantize_b_impl_ssse3(
-                 b->coeff,
-                 d->qcoeff,
-                 d->dequant,
-                 b->round,
-                 b->quant_fast,
-                 d->dqcoeff
-             );
-}
 #if CONFIG_PSNR
 #if ARCH_X86_64
 typedef void ssimpf
@@ -307,7 +292,7 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.variance.subpixvar16x8  = vp8_sub_pixel_variance16x8_ssse3;
         cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;

-        cpi->rtcd.quantize.fastquantb = fast_quantize_b_ssse3;
+        cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_ssse3;

 #if CONFIG_PSNR
 #if ARCH_X86_64
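Note: the last two hunks wire the new entry point in twice: with CONFIG_RUNTIME_CPU_DETECT the encoder init writes vp8_fast_quantize_b_ssse3 into the rtcd.quantize.fastquantb function pointer, and without it the vp8_quantize_fastquantb macro is remapped at compile time. A sketch of that dispatch pattern; the table type, field, and the ssse3_available flag below are hypothetical stand-ins, not the real RTCD structures:

/* Sketch only: runtime selection of a function pointer, as done by the
 * encoder init hunk above. */
typedef void (*fast_quantize_fn)(void *b, void *d);

struct quantize_table { fast_quantize_fn fastquantb; };

void init_fast_quantize(struct quantize_table *tbl,
                        fast_quantize_fn c_version,
                        fast_quantize_fn ssse3_version,
                        int ssse3_available)
{
    /* start with the portable version, then upgrade if the CPU allows it */
    tbl->fastquantb = c_version;

    if (ssse3_available)
        tbl->fastquantb = ssse3_version;
}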