commit 66d79aa2e1

    Merge commit '9abc80f1ed673141326341e26a05c3e1f78576d0'

    * commit '9abc80f1ed673141326341e26a05c3e1f78576d0':
      libavcodec: Make use of av_clip functions

    Conflicts:
        libavcodec/takdec.c

    Merged-by: Michael Niedermayer <michaelni@gmx.at>
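Every hunk below follows one pattern: where the clip bounds are a power of two, the generic av_clip(x, min, max) call is replaced by the dedicated helper for that range (av_clip_uintp2, av_clip_intp2, av_clip_int8, av_clip_uint16). A minimal sketch of the intended semantics follows; the helper names clip, clip_uintp2 and clip_intp2 are local stand-ins for illustration, not the FFmpeg code, whose real definitions live in libavutil/common.h and may be branchless or arch-optimized, but which clamp to the same ranges.

/* Reference sketch (assumption: mirrors the libavutil/common.h semantics). */
static inline int clip(int a, int amin, int amax)       /* av_clip        */
{
    return a < amin ? amin : (a > amax ? amax : a);
}

static inline unsigned clip_uintp2(int a, int p)        /* av_clip_uintp2 */
{
    return clip(a, 0, (1 << p) - 1);                    /* [0, 2^p - 1]   */
}

static inline int clip_intp2(int a, int p)              /* av_clip_intp2  */
{
    return clip(a, -(1 << p), (1 << p) - 1);            /* [-2^p, 2^p - 1] */
}

/* Hence, for example:
 *   av_clip(x, 0, 8191)       == av_clip_uintp2(x, 13)
 *   av_clip(x, -16384, 16383) == av_clip_intp2(x, 14)
 *   av_clip(x, -128, 127)     == av_clip_int8(x)
 *   av_clip(x, 0, 65535)      == av_clip_uint16(x)
 * Non-power-of-two ranges such as av_clip(x, -12288, 12288) stay as av_clip. */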
@@ -161,7 +161,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
                 di = t - CLIPPED_ESCAPE;
                 curbits += 21;
             } else {
-                int c = av_clip(quant(t, Q), 0, 8191);
+                int c = av_clip_uintp2(quant(t, Q), 13);
                 di = t - c*cbrtf(c)*IQ;
                 curbits += av_log2(c)*2 - 4 + 1;
             }
@@ -191,7 +191,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
         if (BT_ESC) {
             for (j = 0; j < 2; j++) {
                 if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) {
-                    int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191);
+                    int coef = av_clip_uintp2(quant(fabsf(in[i+j]), Q), 13);
                     int len = av_log2(coef);

                     put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2);
@@ -125,7 +125,7 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
         band_end = FFMIN(band_end, end);

         for (; bin < band_end; bin++) {
-            int address = av_clip((psd[bin] - m) >> 5, 0, 63);
+            int address = av_clip_uintp2((psd[bin] - m) >> 5, 6);
             bap[bin] = bap_tab[address];
         }
     } while (end > band_end);
@@ -91,9 +91,9 @@ static inline int get_bs(cavs_vector *mvP, cavs_vector *mvQ, int b)
 }

 #define SET_PARAMS \
-    alpha = alpha_tab[av_clip(qp_avg + h->alpha_offset, 0, 63)]; \
-    beta  = beta_tab[av_clip(qp_avg + h->beta_offset, 0, 63)]; \
-    tc    = tc_tab[av_clip(qp_avg + h->alpha_offset, 0, 63)];
+    alpha = alpha_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)]; \
+    beta  = beta_tab[av_clip_uintp2(qp_avg + h->beta_offset, 6)]; \
+    tc    = tc_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)];

 /**
  * in-loop deblocking filter for a single macroblock
@@ -110,13 +110,13 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
         ilow = get_bits(&gb, 6 - skip);
         skip_bits(&gb, skip);

-        rlow = av_clip((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
-                       + c->band[0].s_predictor, -16384, 16383);
+        rlow = av_clip_intp2((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
+                             + c->band[0].s_predictor, 14);

         ff_g722_update_low_predictor(&c->band[0], ilow >> (2 - skip));

         dhigh = c->band[1].scale_factor * ff_g722_high_inv_quant[ihigh] >> 10;
-        rhigh = av_clip(dhigh + c->band[1].s_predictor, -16384, 16383);
+        rhigh = av_clip_intp2(dhigh + c->band[1].s_predictor, 14);

         ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh);

@@ -226,9 +226,9 @@ static void g722_encode_trellis(G722Context *c, int trellis,
             if (k < 0)
                 continue;

-            decoded = av_clip((cur_node->state.scale_factor *
+            decoded = av_clip_intp2((cur_node->state.scale_factor *
                               ff_g722_low_inv_quant6[k] >> 10)
-                              + cur_node->state.s_predictor, -16384, 16383);
+                              + cur_node->state.s_predictor, 14);
             dec_diff = xlow - decoded;

 #define STORE_NODE(index, UPDATE, VALUE)\
@@ -285,8 +285,7 @@ static void g722_encode_trellis(G722Context *c, int trellis,

             dhigh = cur_node->state.scale_factor *
                     ff_g722_high_inv_quant[ihigh] >> 10;
-            decoded = av_clip(dhigh + cur_node->state.s_predictor,
-                              -16384, 16383);
+            decoded = av_clip_intp2(dhigh + cur_node->state.s_predictor, 14);
             dec_diff = xhigh - decoded;

             STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
@@ -219,7 +219,7 @@ static int16_t g726_decode(G726Context* c, int I)
             c->b[i] = 0;
     } else {
         /* This is a bit crazy, but it really is +255 not +256 */
-        fa1 = av_clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
+        fa1 = av_clip_intp2((-c->a[0]*c->pk[0]*pk0)>>5, 8);

         c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
         c->a[1] = av_clip(c->a[1], -12288, 12288);
@@ -37,13 +37,13 @@
 static int get_scale_factor(H264Context *const h, int poc, int poc1, int i)
 {
     int poc0 = h->ref_list[0][i].poc;
-    int td = av_clip(poc1 - poc0, -128, 127);
+    int td = av_clip_int8(poc1 - poc0);
     if (td == 0 || h->ref_list[0][i].long_ref) {
         return 256;
     } else {
-        int tb = av_clip(poc - poc0, -128, 127);
+        int tb = av_clip_int8(poc - poc0);
         int tx = (16384 + (FFABS(td) >> 1)) / td;
-        return av_clip((tb * tx + 32) >> 6, -1024, 1023);
+        return av_clip_intp2((tb * tx + 32) >> 6, 10);
     }
 }

@@ -916,9 +916,9 @@ static void implicit_weight_table(H264Context *h, int field)
             int w = 32;
             if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
                 int poc1 = h->ref_list[1][ref1].poc;
-                int td = av_clip(poc1 - poc0, -128, 127);
+                int td = av_clip_int8(poc1 - poc0);
                 if (td) {
-                    int tb = av_clip(cur_poc - poc0, -128, 127);
+                    int tb = av_clip_int8(cur_poc - poc0);
                     int tx = (16384 + (FFABS(td) >> 1)) / td;
                     int dist_scale_factor = (tb * tx + 32) >> 8;
                     if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
@@ -232,13 +232,13 @@ static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
             p = mp_get_yuv_from_rgb(mp, x - 1, y);
         } else {
             p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
-            p.y = av_clip(p.y, 0, 31);
+            p.y = av_clip_uintp2(p.y, 5);
             if ((x & 3) == 0) {
                 if ((y & 3) == 0) {
                     p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
-                    p.v = av_clip(p.v, -32, 31);
+                    p.v = av_clip_intp2(p.v, 5);
                     p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
-                    p.u = av_clip(p.u, -32, 31);
+                    p.u = av_clip_intp2(p.u, 5);
                     mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
                 } else {
                     p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
@@ -264,12 +264,12 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
             p = mp_get_yuv_from_rgb(mp, 0, y);
         } else {
             p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
-            p.y = av_clip(p.y, 0, 31);
+            p.y = av_clip_uintp2(p.y, 5);
             if ((y & 3) == 0) {
                 p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
-                p.v = av_clip(p.v, -32, 31);
+                p.v = av_clip_intp2(p.v, 5);
                 p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
-                p.u = av_clip(p.u, -32, 31);
+                p.u = av_clip_intp2(p.u, 5);
             }
             mp->vpt[y] = p;
             mp_set_rgb_from_yuv(mp, 0, y, &p);
@@ -1909,7 +1909,7 @@ static void celt_decode_bands(CeltContext *s, OpusRangeCoder *rc)
         s->remaining2 = totalbits - consumed - 1;
         if (i <= s->codedbands - 1) {
             int curr_balance = s->remaining / FFMIN(3, s->codedbands-i);
-            b = av_clip(FFMIN(s->remaining2 + 1, s->pulses[i] + curr_balance), 0, 16383);
+            b = av_clip_uintp2(FFMIN(s->remaining2 + 1, s->pulses[i] + curr_balance), 14);
         } else
             b = 0;

@@ -1077,7 +1077,7 @@ static inline void silk_decode_lpc(SilkContext *s, SilkFrame *frame,
         weight = y + ((213 * fpart * y) >> 16);

         value = cur * 128 + (lsf_res[i] * 16384) / weight;
-        nlsf[i] = av_clip(value, 0, 32767);
+        nlsf[i] = av_clip_uintp2(value, 15);
     }

     /* stabilize the NLSF coefficients */
@@ -1288,8 +1288,8 @@ static void silk_decode_frame(SilkContext *s, OpusRangeCoder *rc,
     } else {
         /* gain is coded relative */
         int delta_gain = opus_rc_getsymbol(rc, silk_model_gain_delta);
-        log_gain = av_clip(FFMAX((delta_gain<<1) - 16,
-                                 frame->log_gain + delta_gain - 4), 0, 63);
+        log_gain = av_clip_uintp2(FFMAX((delta_gain<<1) - 16,
+                                        frame->log_gain + delta_gain - 4), 6);
     }

     frame->log_gain = log_gain;
@@ -476,7 +476,7 @@ static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                  s->residues[i + j + 1] * s->filter[j + 1] +
                  s->residues[i + j ] * s->filter[j ];
         }
-        v = (av_clip(v >> filter_quant, -8192, 8191) << dshift) - *decoded;
+        v = (av_clip_intp2(v >> filter_quant, 13) << dshift) - *decoded;
         *decoded++ = v;
         s->residues[filter_order + i] = v >> dshift;
     }
@@ -652,7 +652,7 @@ static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
                  s->residues[i ] * s->filter[0];
         }

-        v = (av_clip(v >> 10, -8192, 8191) << dshift) - *p1;
+        v = (av_clip_intp2(v >> 10, 13) << dshift) - *p1;
         *p1++ = v;
     }

@@ -188,7 +188,7 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
         yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
         yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
     }
-    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
+    alpha = av_clip_uint16(sd->coefs[1][6]);

     for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
         int width = v->output_width>>!!plane;