Compare commits
219 Commits
SHA1:
36982b3616, 5b5e61a0bf, 7f1fb8d2a3, 75a11e950f, e6fa08f14e, 400b23beab, cff9f07d39, de4606a5b7, 93a0dd8358, 12801f969b,
35b15a0da8, fa73f547a0, c09b4dde37, db4903f4e4, 7a0e5a63d0, 3038e2041e, 8b64036038, f2d56c2eeb, 46c1e5de58, e6dfaf7bb8,
c9df500190, d12bf6fc9e, 8a525e4d18, 7450a0215a, ba10ea845f, 0b9d464348, c279e37e90, cc88dacc1a, 988910a277, d6a55ab016,
36487066ee, fe8243d7a9, 6365b43295, 2c8ce46250, a0f6c93f52, ca8c814970, 8076d32f30, 0f3381ad5b, 9822e3aa52, 0b923a2b72,
d792be5681, 443f1463c0, be209bdabb, 24025cc0b9, 5920d00d74, 79fb7bc667, 141d4ed6c0, 5acd1c6561, a2d4d9f4fb, 3c55bf1201,
dc5283dffc, c28e1c12ad, c5f9c272e9, 0f81057c12, 592ba67815, 15c2e8027f, be2dd2559f, 2051adbfa0, 2bc1e4fcb9, 0582b8e3ea,
6744eee1e5, 14bba214fa, 1c8e2561b4, 5c413648c1, 3efe6becc7, dc8371b2b1, 0815d9174c, 332555f660, c5ec190859, b561618014,
e0daa15a96, 911c250aef, 965302c9f3, 0c19855539, d0267ecf76, 2281ac9ffd, 12941dbe2c, 9e575e54a0, 9a76b7375e, d7de11260b,
31bc3fb563, 9aaaeba45c, e46cf805b1, d4f3abca6a, e5f4e24942, b1ad5a21da, 90575bd7dd, f695bd6016, 8c0bbe5156, 6d1b91324c,
a1b127515b, d9ffa2aca1, 7124fa5d36, da0c457663, 7a7229b52d, 25a1a5b1b3, 6704522ca9, fdb7080781, bed5847563, e9ac06160f,
02b7239462, 8efae4cbbf, 50032a75d6, eed53a38c9, 501e60dcf5, d36c706b86, fcb8bbf264, 38c5e8fec5, 1301942248, e2c7b37fd2,
7f90fe1b4b, 2cf6afffe5, 50e6e494c9, 0f54c97f58, a1f678f7ca, 94905d2af6, b04fbd2cd2, f7b045db09, de1591b167, c7b73724c7,
1846f3b5b1, 2fb4be9a99, e1608014c5, 8c0c0e9eb3, 997e7692d8, 944b6a801e, ddd9483a10, 9c13d232a4, c4926cba15, 321bbb6f49,
81476cf693, 3c69368e6b, fcf09ebff5, d6c73986cc, aefa2bf70a, ece27b09d6, 479856a3b2, fc0d962919, 0452ebfd4b, 9e9e6bbe7b,
3e4eea6c32, cc0fec8393, fa67ad85ac, 0adc452146, 7df0e309fd, a4b329d622, eefd6bbee9, ce39a84a7d, 514f3e7c02, 4dfea3e9f0,
f9ee7d13e8, ec27262c4d, d34e9e61dd, c38d3e1a39, 5872580e65, 4713234518, 5836110018, 3fab87edc9, b1f9ff45d4, 96acb0a4eb,
df93682e64, 22285aba13, 097ad61100, c785a7058a, 6736de0ce6, fe8508b948, 0d40fbaef0, a4846943a3, bf2534a5e2, 1ca4e70b6c,
25a2802239, 581a830829, 43e5fda45c, a638e10ba0, d5207e2af8, 9ea94c44b1, aaa6a66677, 7240cc3f8b, 7fe4c8cb76, 746f1594d7,
0e4bb0530f, 994c0efcc7, cf5e119d4a, 1ee1e9e43f, 15e9aee544, e8050f313e, be424d86a8, a08cb950b2, 46f8bbfc6d, 562c6a7bf1,
e711ccee4d, d6372e80fe, 29d91e9161, 583f57f04a, f8f6c14f54, 9e24f2a1f0, e788c6e9cb, 2e681cf50f, 9ddd3abe78, 86bd0244ec,
15de658c04, 19d3f7d8ac, c21b858b27, 0b9bb581fd, 105601c151, 3a4949aa50, ec554ee747, bf3998d71e, 87208b8fc4, 265a628f16,
a854d00acd, d076d0febd, a56eaa024f, fdc6f6507c, 976d173606, 989431c02f, f9bdc93723, e687d77d15, abfafb6c81
Changelog (44)

@@ -3,6 +3,50 @@ releases are sorted from youngest to oldest.

version next:

version 0.10.6:

- many bug fixes that were found with Coverity

- The following CVE fixes were backported:
  CVE-2012-2796, CVE-2012-2775, CVE-2012-2772, CVE-2012-2776,
  CVE-2012-2779, CVE-2012-2787, CVE-2012-2794, CVE-2012-2800,
  CVE-2012-2802, CVE-2012-2801, CVE-2012-2786, CVE-2012-2798,
  CVE-2012-2793, CVE-2012-2789, CVE-2012-2788, CVE-2012-2790,
  CVE-2012-2777, CVE-2012-2784

- hundreds of other bug fixes, some possibly security relevant;
  see the git log for details.

version 0.10.5:

- Several bugs and crashes have been fixed, as well as build problems
  with recent mingw64

version 0.10.4:

- Several bugs and crashes have been fixed
  Note, CVE-2012-0851 and CVE-2011-3937 had been fixed in previous releases

version 0.10.3:

- Security fixes in the 4xm demuxer, avi demuxer, cook decoder,
  mm demuxer, mpegvideo decoder, vqavideo decoder (CVE-2012-0947) and
  xmv demuxer.

- Several bugs and crashes have been fixed in the following codecs: AAC,
  APE, H.263, H.264, Indeo 4, Mimic, MJPEG, Motion Pixels Video, RAW,
  TTA, VC1, VQA, WMA Voice, vqavideo.

- Several bugs and crashes have been fixed in the following formats:
  ASF, ID3v2, MOV, xWMA

- This release additionally updates the following codecs to the
  bytestream2 API, and they therefore benefit from additional overflow
  checks: truemotion2, utvideo, vqavideo

version 0.10.1:

- Several security fixes and many bugfixes affecting many formats and
  codecs; the list below is not complete.
Doxyfile (2)

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.10.2
PROJECT_NUMBER = 0.10.6

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

configure (vendored) (2)

@@ -1168,6 +1168,7 @@ HAVE_LIST="
dlfcn_h
dlopen
dos_paths
dxva_h
ebp_available
ebx_available
exp2
@@ -3047,6 +3048,7 @@ check_func_headers windows.h MapViewOfFile
check_func_headers windows.h VirtualAlloc

check_header dlfcn.h
check_header dxva.h
check_header dxva2api.h -D_WIN32_WINNT=0x0600
check_header libcrystalhd/libcrystalhd_if.h
check_header malloc.h

@@ -407,6 +407,10 @@ prefix is ``ffmpeg2pass''. The complete file name will be
@file{PREFIX-N.log}, where N is a number specific to the output
stream

Note that this option is overwritten by a local option of the same name
when using @code{-vcodec libx264}. That option maps to the x264 option stats
which has a different syntax.

@item -vlang @var{code}
Set the ISO 639 language code (3 letters) of the current video stream.

ffmpeg.c (2)

@@ -505,7 +505,7 @@ static int alloc_buffer(AVCodecContext *s, InputStream *ist, FrameBuffer **pbuf)
const int v_shift = i==0 ? 0 : v_chroma_shift;
if (s->flags & CODEC_FLAG_EMU_EDGE)
buf->data[i] = buf->base[i];
else
else if (buf->base[i])
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);

@@ -1790,6 +1790,10 @@ int main(int argc, char **argv)

if (!print_format)
print_format = av_strdup("default");
if (!print_format) {
ret = AVERROR(ENOMEM);
goto end;
}
w_name = av_strtok(print_format, "=", &buf);
w_args = buf;

@@ -3457,6 +3457,9 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
{
AVStream *fst;

if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
return NULL;

fst = av_mallocz(sizeof(AVStream));
if (!fst)
return NULL;
@@ -3802,6 +3805,9 @@ static void add_codec(FFStream *stream, AVCodecContext *av)
{
AVStream *st;

if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
return NULL;

/* compute default parameters */
switch(av->codec_type) {
case AVMEDIA_TYPE_AUDIO:

@@ -915,7 +915,7 @@ static av_cold int decode_end(AVCodecContext *avctx){
av_freep(&f->cfrm[i].data);
f->cfrm[i].allocated_size= 0;
}
free_vlc(&f->pre_vlc);
ff_free_vlc(&f->pre_vlc);
if(f->current_picture.data[0])
avctx->release_buffer(avctx, &f->current_picture);
if(f->last_picture.data[0])

@@ -47,7 +47,7 @@ typedef struct EightSvxContext {
/* buffer used to store the whole audio decoded/interleaved chunk,
 * which is sent with the first packet */
uint8_t *samples;
size_t samples_size;
int64_t samples_size;
int samples_idx;
} EightSvxContext;

@@ -275,6 +275,10 @@ int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps
err:
ps->start = 0;
skip_bits_long(gb_host, bits_left);
memset(ps->iid_par, 0, sizeof(ps->iid_par));
memset(ps->icc_par, 0, sizeof(ps->icc_par));
memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
memset(ps->opd_par, 0, sizeof(ps->opd_par));
return bits_left;
}

@@ -542,7 +542,7 @@ static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
k = sbr->n_master;
} while (sb != sbr->kx[1] + sbr->m[1]);

if (sbr->patch_num_subbands[sbr->num_patches-1] < 3 && sbr->num_patches > 1)
if (sbr->num_patches > 1 && sbr->patch_num_subbands[sbr->num_patches-1] < 3)
sbr->num_patches--;

return 0;

@@ -1408,6 +1408,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;

/* get output buffer */
avctx->channels = s->out_channels;
s->frame.nb_samples = s->num_blocks * 256;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");

@@ -651,6 +651,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
for (k = 1; k < sub_blocks; k++)
s[k] = s[k - 1] + decode_rice(gb, 0);
}
for (k = 1; k < sub_blocks; k++)
if (s[k] > 32) {
av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
return AVERROR_INVALIDDATA;
}

if (get_bits1(gb))
*bd->shift_lsbs = get_bits(gb, 4) + 1;
@@ -663,6 +668,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
2, sconf->max_order + 1));
*bd->opt_order = get_bits(gb, opt_order_length);
if (*bd->opt_order > sconf->max_order) {
*bd->opt_order = sconf->max_order;
av_log(avctx, AV_LOG_ERROR, "Predictor order too large!\n");
return AVERROR_INVALIDDATA;
}
} else {
*bd->opt_order = sconf->max_order;
}
@@ -695,6 +705,10 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
int rice_param = parcor_rice_table[sconf->coef_table][k][1];
int offset = parcor_rice_table[sconf->coef_table][k][0];
quant_cof[k] = decode_rice(gb, rice_param) + offset;
if (quant_cof[k] < -64 || quant_cof[k] > 63) {
av_log(avctx, AV_LOG_ERROR, "quant_cof %d is out of range\n", quant_cof[k]);
return AVERROR_INVALIDDATA;
}
}

// read coefficients 20 to 126
@@ -727,7 +741,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
bd->ltp_gain[0] = decode_rice(gb, 1) << 3;
bd->ltp_gain[1] = decode_rice(gb, 2) << 3;

r = get_unary(gb, 0, 4);
r = get_unary(gb, 0, 3);
c = get_bits(gb, 2);
bd->ltp_gain[2] = ltp_gain_values[r][c];

@@ -756,7 +770,6 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
int delta[8];
unsigned int k [8];
unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
unsigned int i = start;

// read most significant bits
unsigned int high;
@@ -767,29 +780,30 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)

current_res = bd->raw_samples + start;

for (sb = 0; sb < sub_blocks; sb++, i = 0) {
for (sb = 0; sb < sub_blocks; sb++) {
unsigned int sb_len = sb_length - (sb ? 0 : start);

k [sb] = s[sb] > b ? s[sb] - b : 0;
delta[sb] = 5 - s[sb] + k[sb];

ff_bgmc_decode(gb, sb_length, current_res,
ff_bgmc_decode(gb, sb_len, current_res,
delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);

current_res += sb_length;
current_res += sb_len;
}

ff_bgmc_decode_end(gb);

// read least significant bits and tails
i = start;
current_res = bd->raw_samples + start;

for (sb = 0; sb < sub_blocks; sb++, i = 0) {
for (sb = 0; sb < sub_blocks; sb++, start = 0) {
unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
unsigned int cur_k = k[sb];
unsigned int cur_s = s[sb];

for (; i < sb_length; i++) {
for (; start < sb_length; start++) {
int32_t res = *current_res;

if (res == cur_tail_code) {
@@ -1165,14 +1179,14 @@ static int read_channel_data(ALSDecContext *ctx, ALSChannelData *cd, int c)

if (current->master_channel != c) {
current->time_diff_flag = get_bits1(gb);
current->weighting[0] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
current->weighting[1] = mcc_weightings[av_clip(decode_rice(gb, 2) + 14, 0, 32)];
current->weighting[2] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
current->weighting[0] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
current->weighting[1] = mcc_weightings[av_clip(decode_rice(gb, 2) + 14, 0, 31)];
current->weighting[2] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];

if (current->time_diff_flag) {
current->weighting[3] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
current->weighting[4] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
current->weighting[5] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
current->weighting[3] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
current->weighting[4] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
current->weighting[5] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];

current->time_diff_sign = get_bits1(gb);
current->time_diff_index = get_bits(gb, ctx->ltp_lag_length - 3) + 3;

@@ -404,9 +404,12 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice)

if (tmpk <= 16)
x = range_decode_bits(ctx, tmpk);
else {
else if (tmpk <= 32) {
x = range_decode_bits(ctx, 16);
x |= (range_decode_bits(ctx, tmpk - 16) << 16);
} else {
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
return AVERROR_INVALIDDATA;
}
x += overflow << tmpk;
} else {

@@ -162,6 +162,7 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
AvsContext *const avs = avctx->priv_data;
avctx->pix_fmt = PIX_FMT_PAL8;
avcodec_get_frame_defaults(&avs->picture);
avcodec_set_dimensions(avctx, 318, 198);
return 0;
}

@@ -91,9 +91,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
frame_len_bits = 11;
}

if (avctx->channels > MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "too many channels: %d\n", avctx->channels);
return -1;
if (avctx->channels < 1 || avctx->channels > MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", avctx->channels);
return AVERROR_INVALIDDATA;
}

s->version_b = avctx->extradata && avctx->extradata[3] == 'b';

@@ -253,9 +253,9 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
(byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.

'use_static' should be set to 1 for tables, which should be freed
with av_free_static(), 0 if free_vlc() will be used.
with av_free_static(), 0 if ff_free_vlc() will be used.
*/
int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
const void *bits, int bits_wrap, int bits_size,
const void *codes, int codes_wrap, int codes_size,
const void *symbols, int symbols_wrap, int symbols_size,
@@ -318,7 +318,7 @@ int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
}

void free_vlc(VLC *vlc)
void ff_free_vlc(VLC *vlc)
{
av_freep(&vlc->table);
}

@@ -117,7 +117,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,

depth = bytestream_get_le16(&buf);

if(ihsize == 40 || ihsize == 64 || ihsize == 56)
if (ihsize >= 40)
comp = bytestream_get_le32(&buf);
else
comp = BMP_RGB;
@@ -132,8 +132,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
rgb[0] = bytestream_get_le32(&buf);
rgb[1] = bytestream_get_le32(&buf);
rgb[2] = bytestream_get_le32(&buf);
if (ihsize >= 108)
alpha = bytestream_get_le32(&buf);
alpha = bytestream_get_le32(&buf);
}

avctx->width = width;
@@ -231,9 +230,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
if(comp == BMP_RLE4 || comp == BMP_RLE8)
memset(p->data[0], 0, avctx->height * p->linesize[0]);

if(depth == 4 || depth == 8)
memset(p->data[1], 0, 1024);

if(height > 0){
ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
linesize = -p->linesize[0];
@@ -244,6 +240,9 @@ static int bmp_decode_frame(AVCodecContext *avctx,

if(avctx->pix_fmt == PIX_FMT_PAL8){
int colors = 1 << depth;

memset(p->data[1], 0, 1024);

if(ihsize >= 36){
int t;
buf = buf0 + 46;

@@ -21,6 +21,7 @@

#include "avcodec.h"
#include "bytestream.h"
#include "libavutil/avassert.h"

enum BMVFlags{
BMV_NOP = 0,
@@ -52,7 +53,7 @@ typedef struct BMVDecContext {

static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame, int frame_off)
{
int val, saved_val = 0;
unsigned val, saved_val = 0;
int tmplen = src_len;
const uint8_t *src, *source_end = source + src_len;
uint8_t *frame_end = frame + SCREEN_WIDE * SCREEN_HIGH;
@@ -98,6 +99,8 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
}
if (!(val & 0xC)) {
for (;;) {
if(shift>22)
return -1;
if (!read_two_nibbles) {
if (src < source || src >= source_end)
return -1;
@@ -131,6 +134,7 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
}
advance_mode = val & 1;
len = (val >> 1) - 1;
av_assert0(len>0);
mode += 1 + advance_mode;
if (mode >= 4)
mode -= 3;
@@ -139,7 +143,7 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
switch (mode) {
case 1:
if (forward) {
if (dst - frame + SCREEN_WIDE < frame_off ||
if (dst - frame + SCREEN_WIDE < -frame_off ||
frame_end - dst < frame_off + len)
return -1;
for (i = 0; i < len; i++)
@@ -147,7 +151,7 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
dst += len;
} else {
dst -= len;
if (dst - frame + SCREEN_WIDE < frame_off ||
if (dst - frame + SCREEN_WIDE < -frame_off ||
frame_end - dst < frame_off + len)
return -1;
for (i = len - 1; i >= 0; i--)
@@ -264,6 +268,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;

if (avctx->width != SCREEN_WIDE || avctx->height != SCREEN_HIGH) {
av_log(avctx, AV_LOG_ERROR, "Invalid dimension %dx%d\n", avctx->width, avctx->height);
return AVERROR_INVALIDDATA;
}

c->pic.reference = 1;
if (avctx->get_buffer(avctx, &c->pic) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");

@@ -1,6 +1,7 @@
/*
 * Bytestream functions
 * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
 * Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com>
 *
 * This file is part of FFmpeg.
 *
@@ -23,6 +24,7 @@
#define AVCODEC_BYTESTREAM_H

#include <string.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

@@ -30,35 +32,57 @@ typedef struct {
const uint8_t *buffer, *buffer_end, *buffer_start;
} GetByteContext;

#define DEF_T(type, name, bytes, read, write) \
static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
(*b) += bytes;\
return read(*b - bytes);\
}\
static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
write(*b, value);\
(*b) += bytes;\
}\
static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g)\
{\
return bytestream_get_ ## name(&g->buffer);\
}\
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
{\
if (g->buffer_end - g->buffer < bytes)\
return 0;\
return bytestream2_get_ ## name ## u(g);\
}\
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
{\
if (g->buffer_end - g->buffer < bytes)\
return 0;\
return read(g->buffer);\
typedef struct {
uint8_t *buffer, *buffer_end, *buffer_start;
int eof;
} PutByteContext;

#define DEF_T(type, name, bytes, read, write) \
static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \
{ \
(*b) += bytes; \
return read(*b - bytes); \
} \
static av_always_inline void bytestream_put_ ## name(uint8_t **b, \
const type value) \
{ \
write(*b, value); \
(*b) += bytes; \
} \
static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \
const type value) \
{ \
bytestream_put_ ## name(&p->buffer, value); \
} \
static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \
const type value) \
{ \
if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \
write(p->buffer, value); \
p->buffer += bytes; \
} else \
p->eof = 1; \
} \
static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \
{ \
return bytestream_get_ ## name(&g->buffer); \
} \
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
{ \
if (g->buffer_end - g->buffer < bytes) \
return 0; \
return bytestream2_get_ ## name ## u(g); \
} \
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \
{ \
if (g->buffer_end - g->buffer < bytes) \
return 0; \
return read(g->buffer); \
}

#define DEF(name, bytes, read, write) \
#define DEF(name, bytes, read, write) \
DEF_T(unsigned int, name, bytes, read, write)
#define DEF64(name, bytes, read, write) \
#define DEF64(name, bytes, read, write) \
DEF_T(uint64_t, name, bytes, read, write)

DEF64(le64, 8, AV_RL64, AV_WL64)
@@ -112,11 +136,22 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
#endif

static av_always_inline void bytestream2_init(GetByteContext *g,
const uint8_t *buf, int buf_size)
const uint8_t *buf,
int buf_size)
{
g->buffer = buf;
g->buffer = buf;
g->buffer_start = buf;
g->buffer_end = buf + buf_size;
g->buffer_end = buf + buf_size;
}

static av_always_inline void bytestream2_init_writer(PutByteContext *p,
uint8_t *buf,
int buf_size)
{
p->buffer = buf;
p->buffer_start = buf;
p->buffer_end = buf + buf_size;
p->eof = 0;
}

static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
@@ -124,32 +159,61 @@ static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *
return g->buffer_end - g->buffer;
}

static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p)
{
return p->buffer_end - p->buffer;
}

static av_always_inline void bytestream2_skip(GetByteContext *g,
unsigned int size)
{
g->buffer += FFMIN(g->buffer_end - g->buffer, size);
}

static av_always_inline void bytestream2_skipu(GetByteContext *g,
unsigned int size)
{
g->buffer += size;
}

static av_always_inline void bytestream2_skip_p(PutByteContext *p,
unsigned int size)
{
int size2;
if (p->eof)
return;
size2 = FFMIN(p->buffer_end - p->buffer, size);
if (size2 != size)
p->eof = 1;
p->buffer += size2;
}

static av_always_inline int bytestream2_tell(GetByteContext *g)
{
return (int)(g->buffer - g->buffer_start);
}

static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
{
return (int)(p->buffer - p->buffer_start);
}

static av_always_inline int bytestream2_seek(GetByteContext *g,
int offset,
int whence)
{
switch (whence) {
case SEEK_CUR:
offset = av_clip(offset, -(g->buffer - g->buffer_start),
g->buffer_end - g->buffer);
g->buffer += offset;
break;
case SEEK_END:
offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
g->buffer = g->buffer_end + offset;
break;
case SEEK_SET:
offset = av_clip(offset, 0, g->buffer_end - g->buffer_start);
g->buffer = g->buffer_start + offset;
break;
default:
@@ -158,6 +222,37 @@ static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
return bytestream2_tell(g);
}

static av_always_inline int bytestream2_seek_p(PutByteContext *p,
int offset,
int whence)
{
p->eof = 0;
switch (whence) {
case SEEK_CUR:
if (p->buffer_end - p->buffer < offset)
p->eof = 1;
offset = av_clip(offset, -(p->buffer - p->buffer_start),
p->buffer_end - p->buffer);
p->buffer += offset;
break;
case SEEK_END:
if (offset > 0)
p->eof = 1;
offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0);
p->buffer = p->buffer_end + offset;
break;
case SEEK_SET:
if (p->buffer_end - p->buffer_start < offset)
p->eof = 1;
offset = av_clip(offset, 0, p->buffer_end - p->buffer_start);
p->buffer = p->buffer_start + offset;
break;
default:
return AVERROR(EINVAL);
}
return bytestream2_tell_p(p);
}

static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
uint8_t *dst,
unsigned int size)
@@ -168,14 +263,78 @@ static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
return size2;
}

static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g,
uint8_t *dst,
unsigned int size)
{
memcpy(dst, g->buffer, size);
g->buffer += size;
return size;
}

static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
const uint8_t *src,
unsigned int size)
{
int size2;
if (p->eof)
return 0;
size2 = FFMIN(p->buffer_end - p->buffer, size);
if (size2 != size)
p->eof = 1;
memcpy(p->buffer, src, size2);
p->buffer += size2;
return size2;
}

static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p,
const uint8_t *src,
unsigned int size)
{
memcpy(p->buffer, src, size);
p->buffer += size;
return size;
}

static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
const uint8_t c,
unsigned int size)
{
int size2;
if (p->eof)
return;
size2 = FFMIN(p->buffer_end - p->buffer, size);
if (size2 != size)
p->eof = 1;
memset(p->buffer, c, size2);
p->buffer += size2;
}

static av_always_inline void bytestream2_set_bufferu(PutByteContext *p,
const uint8_t c,
unsigned int size)
{
memset(p->buffer, c, size);
p->buffer += size;
}

static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
{
return p->eof;
}

static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
uint8_t *dst,
unsigned int size)
{
memcpy(dst, *b, size);
(*b) += size;
return size;
}

static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
static av_always_inline void bytestream_put_buffer(uint8_t **b,
const uint8_t *src,
unsigned int size)
{
memcpy(*b, src, size);
(*b) += size;
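The hunk above adds a bounded PutByteContext writer next to the existing GetByteContext reader. As a minimal usage sketch, assuming only the functions declared in this header (the tag byte, padding length, and function name are made up for illustration):

```c
#include "bytestream.h"   /* libavcodec-internal header shown in the hunk above */

/* Sketch: write a small record into a fixed-size buffer. The checked
 * bytestream2_put_*() and bytestream2_put_buffer() calls stop at the end
 * of the buffer and set the eof flag instead of overrunning it. */
static int write_record(uint8_t *dst, int dst_size,
                        const uint8_t *payload, int payload_size)
{
    PutByteContext pb;

    bytestream2_init_writer(&pb, dst, dst_size);
    bytestream2_put_byte(&pb, 0x42);                  /* hypothetical tag   */
    bytestream2_put_buffer(&pb, payload, payload_size);
    bytestream2_set_buffer(&pb, 0, 4);                /* zero padding       */

    if (bytestream2_get_eof(&pb))                     /* output truncated?  */
        return AVERROR(ENOSPC);
    return bytestream2_tell_p(&pb);                   /* bytes written      */
}
```

The design point of the eof flag is visible in the sketch: a caller can issue a whole sequence of writes and check for truncation once at the end, instead of testing every call.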
@@ -609,12 +609,21 @@ static int decode_pic(AVSContext *h) {
static int decode_seq_header(AVSContext *h) {
MpegEncContext *s = &h->s;
int frame_rate_code;
int width, height;

h->profile = get_bits(&s->gb,8);
h->level = get_bits(&s->gb,8);
skip_bits1(&s->gb); //progressive sequence
s->width = get_bits(&s->gb,14);
s->height = get_bits(&s->gb,14);

width = get_bits(&s->gb, 14);
height = get_bits(&s->gb, 14);
if ((s->width || s->height) && (s->width != width || s->height != height)) {
av_log_missing_feature(s, "Width/height changing in CAVS is", 0);
return AVERROR_PATCHWELCOME;
}
s->width = width;
s->height = height;

skip_bits(&s->gb,2); //chroma format
skip_bits(&s->gb,3); //sample_precision
h->aspect_ratio = get_bits(&s->gb,4);

@@ -280,6 +280,10 @@ static int cdg_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
return AVERROR(EINVAL);
}
if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
return AVERROR(EINVAL);
}

ret = avctx->reget_buffer(avctx, &cc->frame);
if (ret) {

@@ -133,9 +133,8 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
out2 -= val * old_out2;
out3 -= val * old_out3;

old_out3 = out[-5];

for (i = 5; i <= filter_length; i += 2) {
old_out3 = out[-i];
val = filter_coeffs[i-1];

out0 -= val * old_out3;
@@ -154,7 +153,6 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,

FFSWAP(float, old_out0, old_out2);
old_out1 = old_out3;
old_out3 = out[-i-2];
}

tmp0 = out0;

@@ -321,11 +321,11 @@ static av_cold int cook_decode_close(AVCodecContext *avctx)

/* Free the VLC tables. */
for (i = 0; i < 13; i++)
free_vlc(&q->envelope_quant_index[i]);
ff_free_vlc(&q->envelope_quant_index[i]);
for (i = 0; i < 7; i++)
free_vlc(&q->sqvh[i]);
ff_free_vlc(&q->sqvh[i]);
for (i = 0; i < q->num_subpackets; i++)
free_vlc(&q->subpacket[i].ccpl);
ff_free_vlc(&q->subpacket[i].ccpl);

av_log(avctx, AV_LOG_DEBUG, "Memory deallocated.\n");

@@ -1235,6 +1235,11 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
q->subpacket[s].gains2.now = q->subpacket[s].gain_3;
q->subpacket[s].gains2.previous = q->subpacket[s].gain_4;

if (q->num_subpackets + q->subpacket[s].num_channels > q->nb_channels) {
av_log(avctx, AV_LOG_ERROR, "Too many subpackets %d for channels %d\n", q->num_subpackets, q->nb_channels);
return AVERROR_INVALIDDATA;
}

q->num_subpackets++;
s++;
if (s > MAX_SUBPACKETS) {

libavcodec/dfa.c (219)

@@ -21,8 +21,9 @@
 */

#include "avcodec.h"
#include "libavutil/intreadwrite.h"
#include "bytestream.h"

#include "libavutil/imgutils.h"
#include "libavutil/lzo.h" // for av_memcpy_backptr

typedef struct DfaContext {
@@ -35,9 +36,13 @@ typedef struct DfaContext {
static av_cold int dfa_decode_init(AVCodecContext *avctx)
{
DfaContext *s = avctx->priv_data;
int ret;

avctx->pix_fmt = PIX_FMT_PAL8;

if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
return ret;

s->frame_buf = av_mallocz(avctx->width * avctx->height + AV_LZO_OUTPUT_PADDING);
if (!s->frame_buf)
return AVERROR(ENOMEM);
@@ -45,19 +50,16 @@ static av_cold int dfa_decode_init(AVCodecContext *avctx)
return 0;
}

static int decode_copy(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_copy(GetByteContext *gb, uint8_t *frame, int width, int height)
{
const int size = width * height;

if (src_end - src < size)
return -1;
bytestream_get_buffer(&src, frame, size);
if (bytestream2_get_buffer(gb, frame, size) != size)
return AVERROR_INVALIDDATA;
return 0;
}

static int decode_tsw1(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_tsw1(GetByteContext *gb, uint8_t *frame, int width, int height)
{
const uint8_t *frame_start = frame;
const uint8_t *frame_end = frame + width * height;
@@ -65,31 +67,31 @@ static int decode_tsw1(uint8_t *frame, int width, int height,
int v, count, segments;
unsigned offset;

segments = bytestream_get_le32(&src);
offset = bytestream_get_le32(&src);
segments = bytestream2_get_le32(gb);
offset = bytestream2_get_le32(gb);
if (frame_end - frame <= offset)
return -1;
return AVERROR_INVALIDDATA;
frame += offset;
while (segments--) {
if (bytestream2_get_bytes_left(gb) < 2)
return AVERROR_INVALIDDATA;
if (mask == 0x10000) {
if (src >= src_end)
return -1;
bitbuf = bytestream_get_le16(&src);
bitbuf = bytestream2_get_le16u(gb);
mask = 1;
}
if (src_end - src < 2 || frame_end - frame < 2)
return -1;
if (frame_end - frame < 2)
return AVERROR_INVALIDDATA;
if (bitbuf & mask) {
v = bytestream_get_le16(&src);
v = bytestream2_get_le16(gb);
offset = (v & 0x1FFF) << 1;
count = ((v >> 13) + 2) << 1;
if (frame - frame_start < offset || frame_end - frame < count)
return -1;
return AVERROR_INVALIDDATA;
av_memcpy_backptr(frame, offset, count);
frame += count;
} else {
*frame++ = *src++;
*frame++ = *src++;
*frame++ = bytestream2_get_byte(gb);
*frame++ = bytestream2_get_byte(gb);
}
mask <<= 1;
}
@@ -97,39 +99,38 @@ static int decode_tsw1(uint8_t *frame, int width, int height,
return 0;
}

static int decode_dsw1(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_dsw1(GetByteContext *gb, uint8_t *frame, int width, int height)
{
const uint8_t *frame_start = frame;
const uint8_t *frame_end = frame + width * height;
int mask = 0x10000, bitbuf = 0;
int v, offset, count, segments;

segments = bytestream_get_le16(&src);
segments = bytestream2_get_le16(gb);
while (segments--) {
if (bytestream2_get_bytes_left(gb) < 2)
return AVERROR_INVALIDDATA;
if (mask == 0x10000) {
if (src >= src_end)
return -1;
bitbuf = bytestream_get_le16(&src);
bitbuf = bytestream2_get_le16u(gb);
mask = 1;
}
if (src_end - src < 2 || frame_end - frame < 2)
return -1;
if (frame_end - frame < 2)
return AVERROR_INVALIDDATA;
if (bitbuf & mask) {
v = bytestream_get_le16(&src);
v = bytestream2_get_le16(gb);
offset = (v & 0x1FFF) << 1;
count = ((v >> 13) + 2) << 1;
if (frame - frame_start < offset || frame_end - frame < count)
return -1;
return AVERROR_INVALIDDATA;
// can't use av_memcpy_backptr() since it can overwrite following pixels
for (v = 0; v < count; v++)
frame[v] = frame[v - offset];
frame += count;
} else if (bitbuf & (mask << 1)) {
frame += bytestream_get_le16(&src);
frame += bytestream2_get_le16(gb);
} else {
*frame++ = *src++;
*frame++ = *src++;
*frame++ = bytestream2_get_byte(gb);
*frame++ = bytestream2_get_byte(gb);
}
mask <<= 2;
}
@@ -137,30 +138,28 @@ static int decode_dsw1(uint8_t *frame, int width, int height,
return 0;
}

static int decode_dds1(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_dds1(GetByteContext *gb, uint8_t *frame, int width, int height)
{
const uint8_t *frame_start = frame;
const uint8_t *frame_end = frame + width * height;
int mask = 0x10000, bitbuf = 0;
int i, v, offset, count, segments;

segments = bytestream_get_le16(&src);
segments = bytestream2_get_le16(gb);
while (segments--) {
if (bytestream2_get_bytes_left(gb) < 2)
return AVERROR_INVALIDDATA;
if (mask == 0x10000) {
if (src >= src_end)
return -1;
bitbuf = bytestream_get_le16(&src);
bitbuf = bytestream2_get_le16u(gb);
mask = 1;
}
if (src_end - src < 2 || frame_end - frame < 2)
return -1;

if (bitbuf & mask) {
v = bytestream_get_le16(&src);
v = bytestream2_get_le16(gb);
offset = (v & 0x1FFF) << 2;
count = ((v >> 13) + 2) << 1;
if (frame - frame_start < offset || frame_end - frame < count*2 + width)
return -1;
return AVERROR_INVALIDDATA;
for (i = 0; i < count; i++) {
frame[0] = frame[1] =
frame[width] = frame[width + 1] = frame[-offset];
@@ -168,13 +167,18 @@ static int decode_dds1(uint8_t *frame, int width, int height,
frame += 2;
}
} else if (bitbuf & (mask << 1)) {
frame += bytestream_get_le16(&src) * 2;
v = bytestream2_get_le16(gb)*2;
if (frame - frame_end < v)
return AVERROR_INVALIDDATA;
frame += v;
} else {
if (frame_end - frame < width + 3)
return AVERROR_INVALIDDATA;
frame[0] = frame[1] =
frame[width] = frame[width + 1] = *src++;
frame[width] = frame[width + 1] = bytestream2_get_byte(gb);
frame += 2;
frame[0] = frame[1] =
frame[width] = frame[width + 1] = *src++;
frame[width] = frame[width + 1] = bytestream2_get_byte(gb);
frame += 2;
}
mask <<= 2;
@@ -183,40 +187,40 @@ static int decode_dds1(uint8_t *frame, int width, int height,
return 0;
}

static int decode_bdlt(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_bdlt(GetByteContext *gb, uint8_t *frame, int width, int height)
{
uint8_t *line_ptr;
int count, lines, segments;

count = bytestream_get_le16(&src);
count = bytestream2_get_le16(gb);
if (count >= height)
return -1;
return AVERROR_INVALIDDATA;
frame += width * count;
lines = bytestream_get_le16(&src);
if (count + lines > height || src >= src_end)
return -1;
lines = bytestream2_get_le16(gb);
if (count + lines > height)
return AVERROR_INVALIDDATA;

while (lines--) {
if (bytestream2_get_bytes_left(gb) < 1)
return AVERROR_INVALIDDATA;
line_ptr = frame;
frame += width;
segments = *src++;
segments = bytestream2_get_byteu(gb);
while (segments--) {
if (src_end - src < 3)
return -1;
if (frame - line_ptr <= *src)
return -1;
line_ptr += *src++;
count = (int8_t)*src++;
if (frame - line_ptr <= bytestream2_peek_byte(gb))
return AVERROR_INVALIDDATA;
line_ptr += bytestream2_get_byte(gb);
count = (int8_t)bytestream2_get_byte(gb);
if (count >= 0) {
if (frame - line_ptr < count || src_end - src < count)
return -1;
bytestream_get_buffer(&src, line_ptr, count);
if (frame - line_ptr < count)
return AVERROR_INVALIDDATA;
if (bytestream2_get_buffer(gb, line_ptr, count) != count)
return AVERROR_INVALIDDATA;
} else {
count = -count;
if (frame - line_ptr < count || src >= src_end)
return -1;
memset(line_ptr, *src++, count);
if (frame - line_ptr < count)
return AVERROR_INVALIDDATA;
memset(line_ptr, bytestream2_get_byte(gb), count);
}
line_ptr += count;
}
@@ -225,49 +229,53 @@ static int decode_bdlt(uint8_t *frame, int width, int height,
return 0;
}

static int decode_wdlt(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_wdlt(GetByteContext *gb, uint8_t *frame, int width, int height)
{
const uint8_t *frame_end = frame + width * height;
uint8_t *line_ptr;
int count, i, v, lines, segments;
int y = 0;

lines = bytestream_get_le16(&src);
if (lines > height || src >= src_end)
return -1;
lines = bytestream2_get_le16(gb);
if (lines > height)
return AVERROR_INVALIDDATA;

while (lines--) {
segments = bytestream_get_le16(&src);
if (bytestream2_get_bytes_left(gb) < 2)
return AVERROR_INVALIDDATA;
segments = bytestream2_get_le16u(gb);
while ((segments & 0xC000) == 0xC000) {
unsigned skip_lines = -(int16_t)segments;
unsigned delta = -((int16_t)segments * width);
if (frame_end - frame <= delta)
return -1;
if (frame_end - frame <= delta || y + lines + skip_lines > height)
return AVERROR_INVALIDDATA;
frame += delta;
segments = bytestream_get_le16(&src);
y += skip_lines;
segments = bytestream2_get_le16(gb);
}
if (segments & 0x8000) {
frame[width - 1] = segments & 0xFF;
segments = bytestream_get_le16(&src);
segments = bytestream2_get_le16(gb);
}
line_ptr = frame;
frame += width;
y++;
while (segments--) {
if (src_end - src < 2)
return -1;
if (frame - line_ptr <= *src)
return -1;
line_ptr += *src++;
count = (int8_t)*src++;
if (frame - line_ptr <= bytestream2_peek_byte(gb))
return AVERROR_INVALIDDATA;
line_ptr += bytestream2_get_byte(gb);
count = (int8_t)bytestream2_get_byte(gb);
if (count >= 0) {
if (frame - line_ptr < count*2 || src_end - src < count*2)
return -1;
bytestream_get_buffer(&src, line_ptr, count*2);
if (frame - line_ptr < count * 2)
return AVERROR_INVALIDDATA;
if (bytestream2_get_buffer(gb, line_ptr, count * 2) != count * 2)
return AVERROR_INVALIDDATA;
line_ptr += count * 2;
} else {
count = -count;
if (frame - line_ptr < count*2 || src_end - src < 2)
return -1;
v = bytestream_get_le16(&src);
if (frame - line_ptr < count * 2)
return AVERROR_INVALIDDATA;
v = bytestream2_get_le16(gb);
for (i = 0; i < count; i++)
bytestream_put_le16(&line_ptr, v);
}
@@ -277,22 +285,19 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
return 0;
}

static int decode_unk6(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_unk6(GetByteContext *gb, uint8_t *frame, int width, int height)
{
return -1;
return AVERROR_PATCHWELCOME;
}

static int decode_blck(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
static int decode_blck(GetByteContext *gb, uint8_t *frame, int width, int height)
{
memset(frame, 0, width * height);
return 0;
}

typedef int (*chunk_decoder)(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end);
typedef int (*chunk_decoder)(GetByteContext *gb, uint8_t *frame, int width, int height);

static const chunk_decoder decoder[8] = {
decode_copy, decode_tsw1, decode_bdlt, decode_wdlt,
@@ -308,9 +313,8 @@ static int dfa_decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
DfaContext *s = avctx->priv_data;
GetByteContext gb;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
const uint8_t *tmp_buf;
uint32_t chunk_type, chunk_size;
uint8_t *dst;
int ret;
@@ -324,30 +328,25 @@ static int dfa_decode_frame(AVCodecContext *avctx,
return ret;
}

while (buf < buf_end) {
chunk_size = AV_RL32(buf + 4);
chunk_type = AV_RL32(buf + 8);
buf += 12;
if (buf_end - buf < chunk_size) {
av_log(avctx, AV_LOG_ERROR, "Chunk size is too big (%d bytes)\n", chunk_size);
return -1;
}
bytestream2_init(&gb, avpkt->data, avpkt->size);
while (bytestream2_get_bytes_left(&gb) > 0) {
bytestream2_skip(&gb, 4);
chunk_size = bytestream2_get_le32(&gb);
chunk_type = bytestream2_get_le32(&gb);
if (!chunk_type)
break;
if (chunk_type == 1) {
pal_elems = FFMIN(chunk_size / 3, 256);
tmp_buf = buf;
for (i = 0; i < pal_elems; i++) {
s->pal[i] = bytestream_get_be24(&tmp_buf) << 2;
s->pal[i] = bytestream2_get_be24(&gb) << 2;
s->pal[i] |= 0xFF << 24 | (s->pal[i] >> 6) & 0x30303;
}
s->pic.palette_has_changed = 1;
} else if (chunk_type <= 9) {
if (decoder[chunk_type - 2](s->frame_buf, avctx->width, avctx->height,
buf, buf + chunk_size)) {
if (decoder[chunk_type - 2](&gb, s->frame_buf, avctx->width, avctx->height)) {
av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n",
chunk_name[chunk_type - 2]);
return -1;
return AVERROR_INVALIDDATA;
}
} else {
av_log(avctx, AV_LOG_WARNING, "Ignoring unknown chunk type %d\n",
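For reference, the read-side pattern these dfa.c chunk decoders are converted to is a bounds-checked GetByteContext walk. A sketch under the same API, using only calls that appear in the diff (the chunk layout and helper name are illustrative, not taken from the file):

```c
#include "bytestream.h"   /* libavcodec-internal header */

/* Sketch: iterate over length-prefixed chunks with checked reads.
 * bytestream2_get_le32() returns 0 instead of reading past the end,
 * and bytestream2_get_bytes_left() lets the loop stop cleanly. */
static int walk_chunks(const uint8_t *data, int size)
{
    GetByteContext gb;

    bytestream2_init(&gb, data, size);
    while (bytestream2_get_bytes_left(&gb) > 0) {
        uint32_t chunk_size, chunk_type;

        bytestream2_skip(&gb, 4);                /* per-chunk prefix */
        chunk_size = bytestream2_get_le32(&gb);
        chunk_type = bytestream2_get_le32(&gb);
        if (!chunk_type)
            break;
        if (bytestream2_get_bytes_left(&gb) < chunk_size)
            return AVERROR_INVALIDDATA;          /* truncated input  */
        bytestream2_skip(&gb, chunk_size);       /* body handled/skipped here */
    }
    return 0;
}
```

Compared with the raw-pointer version being removed, the context object carries the buffer bounds, so the per-decoder `src_end - src` arithmetic disappears.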
@@ -1038,7 +1038,7 @@ int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth)
|
||||
if (cid->width == avctx->width && cid->height == avctx->height &&
|
||||
cid->interlaced == !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT) &&
|
||||
cid->bit_depth == bit_depth) {
|
||||
for (j = 0; j < sizeof(cid->bit_rates); j++) {
|
||||
for (j = 0; j < FF_ARRAY_ELEMS(cid->bit_rates); j++) {
|
||||
if (cid->bit_rates[j] == mbs)
|
||||
return cid->cid;
|
||||
}
|
||||
|
@@ -84,9 +84,9 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, int cid)
|
||||
}
|
||||
ctx->cid_table = &ff_dnxhd_cid_table[index];
|
||||
|
||||
free_vlc(&ctx->ac_vlc);
|
||||
free_vlc(&ctx->dc_vlc);
|
||||
free_vlc(&ctx->run_vlc);
|
||||
ff_free_vlc(&ctx->ac_vlc);
|
||||
ff_free_vlc(&ctx->dc_vlc);
|
||||
ff_free_vlc(&ctx->run_vlc);
|
||||
|
||||
init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257,
|
||||
ctx->cid_table->ac_bits, 1, 1,
|
||||
@@ -416,9 +416,9 @@ static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
|
||||
|
||||
if (ctx->picture.data[0])
|
||||
ff_thread_release_buffer(avctx, &ctx->picture);
|
||||
free_vlc(&ctx->ac_vlc);
|
||||
free_vlc(&ctx->dc_vlc);
|
||||
free_vlc(&ctx->run_vlc);
|
||||
ff_free_vlc(&ctx->ac_vlc);
|
||||
ff_free_vlc(&ctx->dc_vlc);
|
||||
ff_free_vlc(&ctx->run_vlc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -312,7 +312,7 @@ static av_cold int dvvideo_init(AVCodecContext *avctx)
|
||||
dv_rl_vlc[i].level = level;
|
||||
dv_rl_vlc[i].run = run;
|
||||
}
|
||||
free_vlc(&dv_vlc);
|
||||
ff_free_vlc(&dv_vlc);
|
||||
|
||||
dv_vlc_map_tableinit();
|
||||
}
|
||||
|
@@ -25,7 +25,14 @@
|
||||
|
||||
#define _WIN32_WINNT 0x0600
|
||||
#define COBJMACROS
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include "dxva2.h"
|
||||
#if HAVE_DXVA_H
|
||||
#include <dxva.h>
|
||||
#endif
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "mpegvideo.h"
|
||||
|
||||
|
@@ -62,7 +62,7 @@ static int tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
|
||||
int n;
|
||||
s->dsp.clear_blocks(block[0]);
|
||||
for (n=0; n<6; n++)
|
||||
if(ff_mpeg1_decode_block_intra(s, block[n], n)<0)
|
||||
if (ff_mpeg1_decode_block_intra(s, block[n], n) < 0)
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
@@ -137,7 +137,7 @@ static int tqi_decode_frame(AVCodecContext *avctx,
|
||||
for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++)
|
||||
for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++)
|
||||
{
|
||||
if(tqi_decode_mb(s, t->block) < 0)
|
||||
if (tqi_decode_mb(s, t->block) < 0)
|
||||
break;
|
||||
tqi_idct_put(t, t->block);
|
||||
}
|
||||
|
@@ -48,7 +48,7 @@ typedef struct Escape124Context {
|
||||
CodeBook codebooks[3];
|
||||
} Escape124Context;
|
||||
|
||||
static int can_safely_read(GetBitContext* gb, int bits) {
|
||||
static int can_safely_read(GetBitContext* gb, uint64_t bits) {
|
||||
return get_bits_left(gb) >= bits;
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ static CodeBook unpack_codebook(GetBitContext* gb, unsigned depth,
|
||||
unsigned i, j;
|
||||
CodeBook cb = { 0 };
|
||||
|
||||
if (!can_safely_read(gb, size * 34))
|
||||
if (!can_safely_read(gb, size * 34L))
|
||||
return cb;
|
||||
|
||||
if (size >= INT_MAX / sizeof(MacroBlock))
|
||||
|
@@ -110,11 +110,11 @@ av_cold void ff_ccitt_unpack_init(void)
|
||||
ccitt_vlc[1].table = code_table2;
|
||||
ccitt_vlc[1].table_allocated = 648;
|
||||
for(i = 0; i < 2; i++){
|
||||
init_vlc_sparse(&ccitt_vlc[i], 9, CCITT_SYMS,
|
||||
ccitt_codes_lens[i], 1, 1,
|
||||
ccitt_codes_bits[i], 1, 1,
|
||||
ccitt_syms, 2, 2,
|
||||
INIT_VLC_USE_NEW_STATIC);
|
||||
ff_init_vlc_sparse(&ccitt_vlc[i], 9, CCITT_SYMS,
|
||||
ccitt_codes_lens[i], 1, 1,
|
||||
ccitt_codes_bits[i], 1, 1,
|
||||
ccitt_syms, 2, 2,
|
||||
INIT_VLC_USE_NEW_STATIC);
|
||||
}
|
||||
INIT_VLC_STATIC(&ccitt_group3_2d_vlc, 9, 11,
|
||||
ccitt_group3_2d_lens, 1, 1,
|
||||
@@ -228,7 +228,7 @@ static int decode_group3_2d_line(AVCodecContext *avctx, GetBitContext *gb,
|
||||
mode = !mode;
|
||||
}
|
||||
//sync line pointers
|
||||
while(run_off <= offs){
|
||||
while(offs < width && run_off <= offs){
|
||||
run_off += *ref++;
|
||||
run_off += *ref++;
|
||||
}
|
||||
|
@@ -255,7 +255,7 @@ static void find_best_state(uint8_t best_state[256][256], const uint8_t one_stat
|
||||
occ[j]=1.0;
|
||||
for(k=0; k<256; k++){
|
||||
double newocc[256]={0};
|
||||
for(m=0; m<256; m++){
|
||||
for(m=1; m<256; m++){
|
||||
if(occ[m]){
|
||||
len -=occ[m]*( p *l2tab[ m]
|
||||
+ (1-p)*l2tab[256-m]);
|
||||
@@ -993,7 +993,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
}
|
||||
}
|
||||
gob_count= strtol(p, &next, 0);
|
||||
if(next==p || gob_count <0){
|
||||
if(next==p || gob_count <=0){
|
||||
av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
|
||||
return -1;
|
||||
}
|
||||
|

@@ -122,10 +122,11 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx)
}

static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
int size, int unp_size)
static int flashsv2_prime(FlashSVContext *s, uint8_t *src,
int size, int unp_size)
{
z_stream zs;
int zret; // Zlib return code

zs.zalloc = NULL;
zs.zfree = NULL;

@@ -137,7 +138,8 @@ static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
s->zstream.avail_out = s->block_size * 3;
inflate(&s->zstream, Z_SYNC_FLUSH);

deflateInit(&zs, 0);
if (deflateInit(&zs, 0) != Z_OK)
return -1;
zs.next_in = s->tmpblock;
zs.avail_in = s->block_size * 3 - s->zstream.avail_out;
zs.next_out = s->deflate_block;

@@ -145,13 +147,18 @@ static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
deflate(&zs, Z_SYNC_FLUSH);
deflateEnd(&zs);

inflateReset(&s->zstream);
if ((zret = inflateReset(&s->zstream)) != Z_OK) {
av_log(s->avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
return AVERROR_UNKNOWN;
}

s->zstream.next_in = s->deflate_block;
s->zstream.avail_in = s->deflate_block_size - zs.avail_out;
s->zstream.next_out = s->tmpblock;
s->zstream.avail_out = s->block_size * 3;
inflate(&s->zstream, Z_SYNC_FLUSH);

return 0;
}

static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,

@@ -164,11 +171,14 @@ static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
int k;
int ret = inflateReset(&s->zstream);
if (ret != Z_OK) {
//return -1;
av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", ret);
return AVERROR_UNKNOWN;
}
if (s->zlibprime_curr || s->zlibprime_prev) {
flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size,
ret = flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size,
s->blocks[blk_idx].unp_size);
if (ret < 0)
return ret;
}
s->zstream.next_in = avpkt->data + get_bits_count(gb) / 8;
s->zstream.avail_in = block_size;

@@ -113,13 +113,13 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
if(j) dst[i] += dst[i - stride];
else if(Uoff) dst[i] += 0x80;
if (get_bits_left(&gb) < 0) {
free_vlc(&vlc);
ff_free_vlc(&vlc);
return AVERROR_INVALIDDATA;
}
}
dst += stride;
}
free_vlc(&vlc);
ff_free_vlc(&vlc);
return 0;
}

@@ -174,7 +174,7 @@ static void g722_encode_trellis(G722Context *c, int trellis,
for (i = 0; i < 2; i++) {
nodes[i] = c->nodep_buf[i];
nodes_next[i] = c->nodep_buf[i] + frontier;
memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf));
memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf[i]));
nodes[i][0] = c->node_buf[i] + frontier;
nodes[i][0]->ssd = 0;
nodes[i][0]->path = 0;

@@ -373,19 +373,19 @@ static inline void align_get_bits(GetBitContext *s)
bits, bits_wrap, bits_size, \
codes, codes_wrap, codes_size, \
flags) \
init_vlc_sparse(vlc, nb_bits, nb_codes, \
bits, bits_wrap, bits_size, \
codes, codes_wrap, codes_size, \
NULL, 0, 0, flags)
ff_init_vlc_sparse(vlc, nb_bits, nb_codes, \
bits, bits_wrap, bits_size, \
codes, codes_wrap, codes_size, \
NULL, 0, 0, flags)

int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
const void *bits, int bits_wrap, int bits_size,
const void *codes, int codes_wrap, int codes_size,
const void *symbols, int symbols_wrap, int symbols_size,
int flags);
#define INIT_VLC_LE 2
#define INIT_VLC_USE_NEW_STATIC 4
void free_vlc(VLC *vlc);
void ff_free_vlc(VLC *vlc);

#define INIT_VLC_STATIC(vlc, bits, a,b,c,d,e,f,g, static_size) do { \
static VLC_TYPE table[static_size][2]; \

@@ -66,7 +66,7 @@ static av_cold void h261_decode_init_vlc(H261Context *h){
INIT_VLC_STATIC(&h261_cbp_vlc, H261_CBP_VLC_BITS, 63,
&h261_cbp_tab[0][1], 2, 1,
&h261_cbp_tab[0][0], 2, 1, 512);
init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
ff_init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
INIT_VLC_RL(h261_rl_tcoeff, 552);
}
}

@@ -286,6 +286,10 @@ static int h261_decode_mb(H261Context *h){

// Read mtype
h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
if (h->mtype < 0) {
av_log(s->avctx, AV_LOG_ERROR, "illegal mtype %d\n", h->mtype);
return SLICE_ERROR;
}
h->mtype = h261_mtype_map[h->mtype];

// Read mquant

@@ -240,7 +240,7 @@ void ff_h261_encode_init(MpegEncContext *s){

if (!done) {
done = 1;
init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
ff_init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
}

s->min_qcoeff= -127;

@@ -98,7 +98,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
}
}

int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
int x, y, wrap, a, c, pred_dc;
int16_t *dc_val;

@@ -226,7 +226,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
}
}

void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
{
int x, y, wrap, a, c, pred_dc, scale, i;
int16_t *dc_val, *ac_val, *ac_val1;

@@ -313,8 +313,8 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
}

int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py)
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py)
{
int wrap;
int16_t *A, *B, *C, (*mot_val)[2];

@@ -38,16 +38,16 @@
extern const AVRational ff_h263_pixel_aspect[16];
extern const uint8_t ff_h263_cbpy_tab[16][2];

extern const uint8_t cbpc_b_tab[4][2];
extern const uint8_t ff_cbpc_b_tab[4][2];

extern const uint8_t mvtab[33][2];
extern const uint8_t ff_mvtab[33][2];

extern const uint8_t ff_h263_intra_MCBPC_code[9];
extern const uint8_t ff_h263_intra_MCBPC_bits[9];

extern const uint8_t ff_h263_inter_MCBPC_code[28];
extern const uint8_t ff_h263_inter_MCBPC_bits[28];
extern const uint8_t h263_mbtype_b_tab[15][2];
extern const uint8_t ff_h263_mbtype_b_tab[15][2];

extern VLC ff_h263_intra_MCBPC_vlc;
extern VLC ff_h263_inter_MCBPC_vlc;

@@ -55,41 +55,41 @@ extern VLC ff_h263_cbpy_vlc;

extern RLTable ff_h263_rl_inter;

extern RLTable rl_intra_aic;
extern RLTable ff_rl_intra_aic;

extern const uint16_t h263_format[8][2];
extern const uint8_t modified_quant_tab[2][32];
extern const uint16_t ff_h263_format[8][2];
extern const uint8_t ff_modified_quant_tab[2][32];
extern const uint16_t ff_mba_max[6];
extern const uint8_t ff_mba_length[7];

extern uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];

int h263_decode_motion(MpegEncContext * s, int pred, int f_code);
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code);
av_const int ff_h263_aspect_to_info(AVRational aspect);
int ff_h263_decode_init(AVCodecContext *avctx);
int ff_h263_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt);
int ff_h263_decode_end(AVCodecContext *avctx);
void h263_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void h263_encode_picture_header(MpegEncContext *s, int picture_number);
void h263_encode_gob_header(MpegEncContext * s, int mb_line);
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py);
void h263_encode_init(MpegEncContext *s);
void h263_decode_init_vlc(MpegEncContext *s);
int h263_decode_picture_header(MpegEncContext *s);
void ff_h263_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y);
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number);
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line);
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py);
void ff_h263_encode_init(MpegEncContext *s);
void ff_h263_decode_init_vlc(MpegEncContext *s);
int ff_h263_decode_picture_header(MpegEncContext *s);
int ff_h263_decode_gob_header(MpegEncContext *s);
void ff_h263_update_motion_val(MpegEncContext * s);
void ff_h263_loop_filter(MpegEncContext * s);
int ff_h263_decode_mba(MpegEncContext *s);
void ff_h263_encode_mba(MpegEncContext *s);
void ff_init_qscale_tab(MpegEncContext *s);
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);

/**

@@ -119,7 +119,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
int l, bit_size, code;

if (val == 0) {
return mvtab[0][1];
return ff_mvtab[0][1];
} else {
bit_size = f_code - 1;
/* modulo encoding */

@@ -128,7 +128,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
val--;
code = (val >> bit_size) + 1;

return mvtab[code][1] + 1 + bit_size;
return ff_mvtab[code][1] + 1 + bit_size;
}
}

@@ -57,7 +57,7 @@ const uint8_t ff_h263_inter_MCBPC_bits[28] = {
11, 13, 13, 13,/* inter4Q*/
};

const uint8_t h263_mbtype_b_tab[15][2] = {
const uint8_t ff_h263_mbtype_b_tab[15][2] = {
{1, 1},
{3, 3},
{1, 5},

@@ -75,7 +75,7 @@ const uint8_t h263_mbtype_b_tab[15][2] = {
{1, 8},
};

const uint8_t cbpc_b_tab[4][2] = {
const uint8_t ff_cbpc_b_tab[4][2] = {
{0, 1},
{2, 2},
{7, 3},

@@ -88,7 +88,7 @@ const uint8_t ff_h263_cbpy_tab[16][2] =
{2,5}, {3,6}, {5,4}, {10,4}, {4,4}, {8,4}, {6,4}, {3,2}
};

const uint8_t mvtab[33][2] =
const uint8_t ff_mvtab[33][2] =
{
{1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
{11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10},

@@ -98,7 +98,7 @@ const uint8_t mvtab[33][2] =
};

/* third non intra table */
const uint16_t inter_vlc[103][2] = {
const uint16_t ff_inter_vlc[103][2] = {
{ 0x2, 2 },{ 0xf, 4 },{ 0x15, 6 },{ 0x17, 7 },
{ 0x1f, 8 },{ 0x25, 9 },{ 0x24, 9 },{ 0x21, 10 },
{ 0x20, 10 },{ 0x7, 11 },{ 0x6, 11 },{ 0x20, 11 },

@@ -127,7 +127,7 @@ const uint16_t inter_vlc[103][2] = {
{ 0x5e, 12 },{ 0x5f, 12 },{ 0x3, 7 },
};

const int8_t inter_level[102] = {
const int8_t ff_inter_level[102] = {
1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 1, 2, 3, 4,
5, 6, 1, 2, 3, 4, 1, 2,

@@ -143,7 +143,7 @@ const int8_t inter_level[102] = {
1, 1, 1, 1, 1, 1,
};

const int8_t inter_run[102] = {
const int8_t ff_inter_run[102] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3,

@@ -162,9 +162,9 @@ const int8_t inter_run[102] = {
RLTable ff_h263_rl_inter = {
102,
58,
inter_vlc,
inter_run,
inter_level,
ff_inter_vlc,
ff_inter_run,
ff_inter_level,
};

static const uint16_t intra_vlc_aic[103][2] = {

@@ -228,7 +228,7 @@ static const int8_t intra_level_aic[102] = {
1, 1, 1, 1, 1, 1,
};

RLTable rl_intra_aic = {
RLTable ff_rl_intra_aic = {
102,
58,
intra_vlc_aic,

@@ -236,7 +236,7 @@ RLTable rl_intra_aic = {
intra_level_aic,
};

const uint16_t h263_format[8][2] = {
const uint16_t ff_h263_format[8][2] = {
{ 0, 0 },
{ 128, 96 },
{ 176, 144 },

@@ -250,7 +250,7 @@ const uint8_t ff_aic_dc_scale_table[32]={
0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62
};

const uint8_t modified_quant_tab[2][32]={
const uint8_t ff_modified_quant_tab[2][32]={
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
{
0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28

@@ -115,7 +115,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
if (MPV_common_init(s) < 0)
return -1;

h263_decode_init_vlc(s);
ff_h263_decode_init_vlc(s);

return 0;
}

@@ -435,7 +435,7 @@ retry:
} else if (CONFIG_FLV_DECODER && s->h263_flv) {
ret = ff_flv_decode_picture_header(s);
} else {
ret = h263_decode_picture_header(s);
ret = ff_h263_decode_picture_header(s);
}

if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);

@@ -444,6 +444,13 @@ retry:
if (ret < 0){
av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
return -1;
} else if ((s->width != avctx->coded_width ||
s->height != avctx->coded_height ||
(s->width + 15) >> 4 != s->mb_width ||
(s->height + 15) >> 4 != s->mb_height) &&
(HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))) {
av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
}

avctx->has_b_frames= !s->low_delay;

@@ -571,7 +578,6 @@ retry:
if (s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (av_get_cpu_flags() & AV_CPU_FLAG_MMX)) {
avctx->idct_algo= FF_IDCT_XVIDMMX;
ff_dct_common_init(s);
s->picture_number=0;
}
#endif
@@ -2506,8 +2506,8 @@ static int field_end(H264Context *h, int in_setup){
|
||||
s->mb_y= 0;
|
||||
|
||||
if (!in_setup && !s->dropable)
|
||||
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, (16*s->mb_height >> FIELD_PICTURE) - 1,
|
||||
s->picture_structure==PICT_BOTTOM_FIELD);
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
|
||||
s->picture_structure == PICT_BOTTOM_FIELD);
|
||||
|
||||
if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
|
||||
ff_vdpau_h264_set_reference_frames(s);
|
||||
@@ -2624,9 +2624,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
int num_ref_idx_active_override_flag;
|
||||
unsigned int slice_type, tmp, i, j;
|
||||
int default_ref_list_done = 0;
|
||||
int last_pic_structure;
|
||||
|
||||
s->dropable= h->nal_ref_idc == 0;
|
||||
int last_pic_structure, last_pic_dropable;
|
||||
|
||||
/* FIXME: 2tap qpel isn't implemented for high bit depth. */
|
||||
if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc && !h->pixel_shift){
|
||||
@@ -2645,8 +2643,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
|
||||
h0->current_slice = 0;
|
||||
if (!s0->first_field)
|
||||
s->current_picture_ptr= NULL;
|
||||
if (!s0->first_field) {
|
||||
if (s->current_picture_ptr && !s->dropable &&
|
||||
s->current_picture_ptr->owner2 == s) {
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
|
||||
s->picture_structure == PICT_BOTTOM_FIELD);
|
||||
}
|
||||
s->current_picture_ptr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
slice_type= get_ue_golomb_31(&s->gb);
|
||||
@@ -2706,9 +2710,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
|| s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|
||||
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|
||||
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
|
||||
if(h != h0 || (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
if(h != h0 || (HAVE_THREADS && h->s.avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
|
||||
return -1; // width / height changed during parallelized decoding
|
||||
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
|
||||
}
|
||||
free_tables(h, 0);
|
||||
flush_dpb(s->avctx);
|
||||
@@ -2864,6 +2868,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->mb_mbaff = 0;
|
||||
h->mb_aff_frame = 0;
|
||||
last_pic_structure = s0->picture_structure;
|
||||
last_pic_dropable = s->dropable;
|
||||
s->dropable = h->nal_ref_idc == 0;
|
||||
if(h->sps.frame_mbs_only_flag){
|
||||
s->picture_structure= PICT_FRAME;
|
||||
}else{
|
||||
@@ -2880,10 +2886,22 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME;
|
||||
|
||||
if(h0->current_slice == 0){
|
||||
// Shorten frame num gaps so we don't have to allocate reference frames just to throw them away
|
||||
if(h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
|
||||
int unwrap_prev_frame_num = h->prev_frame_num, max_frame_num = 1<<h->sps.log2_max_frame_num;
|
||||
if (h0->current_slice != 0) {
|
||||
if (last_pic_structure != s->picture_structure ||
|
||||
last_pic_dropable != s->dropable) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"Changing field mode (%d -> %d) between slices is not allowed\n",
|
||||
last_pic_structure, s->picture_structure);
|
||||
s->picture_structure = last_pic_structure;
|
||||
s->dropable = last_pic_dropable;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else {
|
||||
/* Shorten frame num gaps so we don't have to allocate reference
|
||||
* frames just to throw them away */
|
||||
if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
|
||||
int unwrap_prev_frame_num = h->prev_frame_num;
|
||||
int max_frame_num = 1 << h->sps.log2_max_frame_num;
|
||||
|
||||
if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num;
|
||||
|
||||
@@ -2896,8 +2914,74 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
}
|
||||
|
||||
while(h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 &&
|
||||
h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){
|
||||
/* See if we have a decoded first field looking for a pair...
|
||||
* Here, we're using that to see if we should mark previously
|
||||
* decode frames as "finished".
|
||||
* We have to do that before the "dummy" in-between frame allocation,
|
||||
* since that can modify s->current_picture_ptr. */
|
||||
if (s0->first_field) {
|
||||
assert(s0->current_picture_ptr);
|
||||
assert(s0->current_picture_ptr->f.data[0]);
|
||||
assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF);
|
||||
|
||||
/* Mark old field/frame as completed */
|
||||
if (!last_pic_dropable && s0->current_picture_ptr->owner2 == s0) {
|
||||
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_BOTTOM_FIELD);
|
||||
}
|
||||
|
||||
/* figure out if we have a complementary field pair */
|
||||
if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
|
||||
/* Previous field is unmatched. Don't display it, but let it
|
||||
* remain for reference if marked as such. */
|
||||
if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
|
||||
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_TOP_FIELD);
|
||||
}
|
||||
} else {
|
||||
if (s0->current_picture_ptr->frame_num != h->frame_num) {
|
||||
/* This and previous field were reference, but had
|
||||
* different frame_nums. Consider this field first in
|
||||
* pair. Throw away previous field except for reference
|
||||
* purposes. */
|
||||
if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
|
||||
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_TOP_FIELD);
|
||||
}
|
||||
} else {
|
||||
/* Second field in complementary pair */
|
||||
if (!((last_pic_structure == PICT_TOP_FIELD &&
|
||||
s->picture_structure == PICT_BOTTOM_FIELD) ||
|
||||
(last_pic_structure == PICT_BOTTOM_FIELD &&
|
||||
s->picture_structure == PICT_TOP_FIELD))) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Invalid field mode combination %d/%d\n",
|
||||
last_pic_structure, s->picture_structure);
|
||||
s->picture_structure = last_pic_structure;
|
||||
s->dropable = last_pic_dropable;
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else if (last_pic_dropable != s->dropable) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Cannot combine reference and non-reference fields in the same frame\n");
|
||||
av_log_ask_for_sample(s->avctx, NULL);
|
||||
s->picture_structure = last_pic_structure;
|
||||
s->dropable = last_pic_dropable;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* Take ownership of this buffer. Note that if another thread owned
|
||||
* the first field of this buffer, we're not operating on that pointer,
|
||||
* so the original thread is still responsible for reporting progress
|
||||
* on that first field (or if that was us, we just did that above).
|
||||
* By taking ownership, we assign responsibility to ourselves to
|
||||
* report progress on the second field. */
|
||||
s0->current_picture_ptr->owner2 = s0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 &&
|
||||
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
|
||||
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
|
||||
av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
|
||||
if (ff_h264_frame_start(h) < 0)
|
||||
@@ -2928,7 +3012,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
}
|
||||
|
||||
/* See if we have a decoded first field looking for a pair... */
|
||||
/* See if we have a decoded first field looking for a pair...
|
||||
* We're using that to see whether to continue decoding in that
|
||||
* frame, or to allocate a new one. */
|
||||
if (s0->first_field) {
|
||||
assert(s0->current_picture_ptr);
|
||||
assert(s0->current_picture_ptr->f.data[0]);
|
||||
@@ -2945,13 +3031,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
|
||||
} else {
|
||||
if (s0->current_picture_ptr->frame_num != h->frame_num) {
|
||||
/*
|
||||
* This and previous field had
|
||||
* different frame_nums. Consider this field first in
|
||||
* pair. Throw away previous field except for reference
|
||||
* purposes.
|
||||
*/
|
||||
s0->first_field = 1;
|
||||
/* This and the previous field had different frame_nums.
|
||||
* Consider this field first in pair. Throw away previous
|
||||
* one except for reference purposes. */
|
||||
s0->first_field = 1;
|
||||
s0->current_picture_ptr = NULL;
|
||||
|
||||
} else {
|
||||
@@ -3811,7 +3894,11 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
break;
|
||||
}
|
||||
|
||||
if(buf_index+3 >= buf_size) break;
|
||||
|
||||
if (buf_index + 3 >= buf_size) {
|
||||
buf_index = buf_size;
|
||||
break;
|
||||
}
|
||||
|
||||
buf_index+=3;
|
||||
if(buf_index >= next_avc) continue;
|
||||
@@ -3820,8 +3907,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
hx = h->thread_context[context_count];
|
||||
|
||||
ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index);
|
||||
if (ptr==NULL || dst_length < 0){
|
||||
return -1;
|
||||
if (ptr == NULL || dst_length < 0) {
|
||||
buf_index = -1;
|
||||
goto end;
|
||||
}
|
||||
i= buf_index + consumed;
|
||||
if((s->workaround_bugs & FF_BUG_AUTODETECT) && i+3<next_avc &&
|
||||
@@ -3873,7 +3961,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
case NAL_IDR_SLICE:
|
||||
if (h->nal_unit_type != NAL_IDR_SLICE) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Invalid mix of idr and non-idr slices\n");
|
||||
return -1;
|
||||
buf_index = -1;
|
||||
goto end;
|
||||
}
|
||||
idr(h); // FIXME ensure we don't lose some frames if there is reordering
|
||||
case NAL_SLICE:
|
||||
@@ -4017,6 +4106,15 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
if(context_count)
|
||||
execute_decode_slices(h, context_count);
|
||||
|
||||
end:
|
||||
/* clean up */
|
||||
if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s &&
|
||||
!s->dropable) {
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
|
||||
s->picture_structure == PICT_BOTTOM_FIELD);
|
||||
}
|
||||
|
||||
return buf_index;
|
||||
}
|
||||
|
||||
|
@@ -351,9 +351,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
if (sps->chroma_format_idc > 3U) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc %d is illegal\n", sps->chroma_format_idc);
|
||||
goto fail;
|
||||
}
|
||||
if(sps->chroma_format_idc == 3)
|
||||
} else if(sps->chroma_format_idc == 3) {
|
||||
sps->residual_color_transform_flag = get_bits1(&s->gb);
|
||||
}
|
||||
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
|
||||
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
|
||||
if (sps->bit_depth_luma > 12U || sps->bit_depth_chroma > 12U) {
|
||||
@@ -515,6 +515,9 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
|
||||
if(pps_id >= MAX_PPS_COUNT) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id);
|
||||
return -1;
|
||||
} else if (h->sps.bit_depth_luma > 10) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
pps= av_mallocz(sizeof(PPS));
|
||||
|
@@ -655,6 +655,8 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
|
||||
|
||||
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (s->picture_structure != PICT_FRAME) && s->current_picture_ptr->f.pict_type == AV_PICTURE_TYPE_I){
|
||||
s->current_picture_ptr->sync |= 1;
|
||||
if(!h->s.avctx->has_b_frames)
|
||||
h->sync = 2;
|
||||
}
|
||||
|
||||
return (h->s.avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;
|
||||
|
@@ -61,7 +61,7 @@ static int build_huff_tree(VLC *vlc, Node *nodes, int head, int flags)
|
||||
int pos = 0;
|
||||
|
||||
get_tree_codes(bits, lens, xlat, nodes, head, 0, 0, &pos, no_zero_count);
|
||||
return init_vlc_sparse(vlc, 9, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
|
||||
return ff_init_vlc_sparse(vlc, 9, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
|
||||
}
|
||||
|
||||
|
||||
|
@@ -324,8 +324,8 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
i++;
|
||||
}
|
||||
}
|
||||
free_vlc(&s->vlc[3+p]);
|
||||
init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
|
||||
ff_free_vlc(&s->vlc[3+p]);
|
||||
ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
|
||||
}
|
||||
}else{
|
||||
uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
|
||||
@@ -365,7 +365,7 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
}
|
||||
}
|
||||
}
|
||||
free_vlc(&s->vlc[3]);
|
||||
ff_free_vlc(&s->vlc[3]);
|
||||
init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
|
||||
}
|
||||
}
|
||||
@@ -382,7 +382,7 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
|
||||
if(generate_bits_table(s->bits[i], s->len[i])<0){
|
||||
return -1;
|
||||
}
|
||||
free_vlc(&s->vlc[i]);
|
||||
ff_free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
|
||||
}
|
||||
|
||||
@@ -414,7 +414,7 @@ static int read_old_huffman_tables(HYuvContext *s){
|
||||
memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
|
||||
|
||||
for(i=0; i<3; i++){
|
||||
free_vlc(&s->vlc[i]);
|
||||
ff_free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
|
||||
}
|
||||
|
||||
@@ -1255,7 +1255,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
|
||||
av_freep(&s->bitstream_buffer);
|
||||
|
||||
for(i=0; i<6; i++){
|
||||
free_vlc(&s->vlc[i]);
|
||||
ff_free_vlc(&s->vlc[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -191,7 +191,13 @@ static int extract_header(AVCodecContext *const avctx,
|
||||
const uint8_t *buf;
|
||||
unsigned buf_size;
|
||||
IffContext *s = avctx->priv_data;
|
||||
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
int palette_size;
|
||||
|
||||
if (avctx->extradata_size < 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
|
||||
if (avpkt) {
|
||||
int image_size;
|
||||
@@ -207,8 +213,6 @@ static int extract_header(AVCodecContext *const avctx,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else {
|
||||
if (avctx->extradata_size < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
buf = avctx->extradata;
|
||||
buf_size = bytestream_get_be16(&buf);
|
||||
if (buf_size <= 1 || palette_size < 0) {
|
||||
@@ -312,7 +316,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
int err;
|
||||
|
||||
if (avctx->bits_per_coded_sample <= 8) {
|
||||
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
int palette_size;
|
||||
|
||||
if (avctx->extradata_size >= 2)
|
||||
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
else
|
||||
palette_size = 0;
|
||||
avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
|
||||
(avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
|
||||
} else if (avctx->bits_per_coded_sample <= 32) {
|
||||
@@ -473,7 +482,7 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
|
||||
} else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return res;
|
||||
} else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
|
||||
} else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt == PIX_FMT_PAL8) {
|
||||
if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
|
||||
return res;
|
||||
}
|
||||
|
@@ -419,6 +419,9 @@ static int decode_cell_data(Cell *cell, uint8_t *block, uint8_t *ref_block,
|
||||
blk_row_offset = (row_offset << (2 + v_zoom)) - (cell->width << 2);
|
||||
line_offset = v_zoom ? row_offset : 0;
|
||||
|
||||
if (cell->height & v_zoom || cell->width & h_zoom)
|
||||
return IV3_BAD_DATA;
|
||||
|
||||
for (y = 0; y < cell->height; is_first_row = 0, y += 1 + v_zoom) {
|
||||
for (x = 0; x < cell->width; x += 1 + h_zoom) {
|
||||
ref = ref_block;
|
||||
@@ -898,6 +901,14 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
|
||||
av_dlog(avctx, "Frame dimensions changed!\n");
|
||||
|
||||
if (width < 16 || width > 640 ||
|
||||
height < 16 || height > 480 ||
|
||||
width & 3 || height & 3) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid picture dimensions: %d x %d!\n", width, height);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ctx->width = width;
|
||||
ctx->height = height;
|
||||
|
||||
|
@@ -35,9 +35,6 @@
|
||||
#include "ivi_common.h"
|
||||
#include "indeo4data.h"
|
||||
|
||||
#define IVI4_STREAM_ANALYSER 0
|
||||
#define IVI4_DEBUG_CHECKSUM 0
|
||||
|
||||
/**
|
||||
* Indeo 4 frame types.
|
||||
*/
|
||||
@@ -54,46 +51,6 @@ enum {
|
||||
#define IVI4_PIC_SIZE_ESC 7
|
||||
|
||||
|
||||
typedef struct {
|
||||
GetBitContext gb;
|
||||
AVFrame frame;
|
||||
RVMapDesc rvmap_tabs[9]; ///< local corrected copy of the static rvmap tables
|
||||
|
||||
uint32_t frame_num;
|
||||
int frame_type;
|
||||
int prev_frame_type; ///< frame type of the previous frame
|
||||
uint32_t data_size; ///< size of the frame data in bytes from picture header
|
||||
int is_scalable;
|
||||
int transp_status; ///< transparency mode status: 1 - enabled
|
||||
|
||||
IVIPicConfig pic_conf;
|
||||
IVIPlaneDesc planes[3]; ///< color planes
|
||||
|
||||
int buf_switch; ///< used to switch between three buffers
|
||||
int dst_buf; ///< buffer index for the currently decoded frame
|
||||
int ref_buf; ///< inter frame reference buffer index
|
||||
|
||||
IVIHuffTab mb_vlc; ///< current macroblock table descriptor
|
||||
IVIHuffTab blk_vlc; ///< current block table descriptor
|
||||
|
||||
uint16_t checksum; ///< frame checksum
|
||||
|
||||
uint8_t rvmap_sel;
|
||||
uint8_t in_imf;
|
||||
uint8_t in_q; ///< flag for explicitly stored quantiser delta
|
||||
uint8_t pic_glob_quant;
|
||||
uint8_t unknown1;
|
||||
|
||||
#if IVI4_STREAM_ANALYSER
|
||||
uint8_t has_b_frames;
|
||||
uint8_t has_transp;
|
||||
uint8_t uses_tiling;
|
||||
uint8_t uses_haar;
|
||||
uint8_t uses_fullpel;
|
||||
#endif
|
||||
} IVI4DecContext;
|
||||
|
||||
|
||||
static const struct {
|
||||
InvTransformPtr *inv_trans;
|
||||
DCTransformPtr *dc_trans;
|
||||
@@ -158,7 +115,7 @@ static inline int scale_tile_size(int def_size, int size_factor)
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_pic_hdr(IVI4DecContext *ctx, AVCodecContext *avctx)
|
||||
static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||
{
|
||||
int pic_size_indx, i, p;
|
||||
IVIPicConfig pic_conf;
|
||||
@@ -322,7 +279,7 @@ static int decode_pic_hdr(IVI4DecContext *ctx, AVCodecContext *avctx)
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
AVCodecContext *avctx)
|
||||
{
|
||||
int plane, band_num, indx, transform_id, scan_indx;
|
||||
@@ -372,7 +329,8 @@ static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
|
||||
if (!get_bits1(&ctx->gb) || ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
transform_id = get_bits(&ctx->gb, 5);
|
||||
if (!transforms[transform_id].inv_trans) {
|
||||
if (transform_id >= FF_ARRAY_ELEMS(transforms) ||
|
||||
!transforms[transform_id].inv_trans) {
|
||||
av_log_ask_for_sample(avctx, "Unimplemented transform: %d!\n", transform_id);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
@@ -457,7 +415,7 @@ static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_mb_info(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_mb_info(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
IVITile *tile, AVCodecContext *avctx)
|
||||
{
|
||||
int x, y, mv_x, mv_y, mv_delta, offs, mb_offset, blks_per_mb,
|
||||
@@ -476,6 +434,11 @@ static int decode_mb_info(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
|
||||
mv_x = mv_y = 0;
|
||||
|
||||
if (((tile->width + band->mb_size-1)/band->mb_size) * ((tile->height + band->mb_size-1)/band->mb_size) != tile->num_MBs) {
|
||||
av_log(avctx, AV_LOG_ERROR, "num_MBs mismatch %d %d %d %d\n", tile->width, tile->height, band->mb_size, tile->num_MBs);
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (y = tile->ypos; y < tile->ypos + tile->height; y += band->mb_size) {
|
||||
mb_offset = offs;
|
||||
|
||||
@@ -572,126 +535,12 @@ static int decode_mb_info(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Decode an Indeo 4 band.
|
||||
*
|
||||
* @param[in,out] ctx pointer to the decoder context
|
||||
* @param[in,out] band pointer to the band descriptor
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_band(IVI4DecContext *ctx, int plane_num,
|
||||
IVIBandDesc *band, AVCodecContext *avctx)
|
||||
{
|
||||
int result, i, t, pos, idx1, idx2;
|
||||
IVITile *tile;
|
||||
|
||||
band->buf = band->bufs[ctx->dst_buf];
|
||||
band->ref_buf = band->bufs[ctx->ref_buf];
|
||||
|
||||
result = decode_band_hdr(ctx, band, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error decoding band header\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
if (band->is_empty) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty band encountered!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel];
|
||||
|
||||
/* apply corrections to the selected rvmap table if present */
|
||||
for (i = 0; i < band->num_corr; i++) {
|
||||
idx1 = band->corr[i * 2];
|
||||
idx2 = band->corr[i * 2 + 1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
pos = get_bits_count(&ctx->gb);
|
||||
|
||||
for (t = 0; t < band->num_tiles; t++) {
|
||||
tile = &band->tiles[t];
|
||||
|
||||
tile->is_empty = get_bits1(&ctx->gb);
|
||||
if (tile->is_empty) {
|
||||
ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
(ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
|
||||
av_dlog(avctx, "Empty tile encountered!\n");
|
||||
} else {
|
||||
tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
|
||||
if (!tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Tile data size is zero!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
result = decode_mb_info(ctx, band, tile, avctx);
|
||||
if (result < 0)
|
||||
break;
|
||||
|
||||
result = ff_ivi_decode_blocks(&ctx->gb, band, tile);
|
||||
if (result < 0 || ((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
pos += tile->data_size << 3; // skip to next tile
|
||||
}
|
||||
}
|
||||
|
||||
/* restore the selected rvmap table by applying its corrections in reverse order */
|
||||
for (i = band->num_corr - 1; i >= 0; i--) {
|
||||
idx1 = band->corr[i * 2];
|
||||
idx2 = band->corr[i * 2 + 1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
#if defined(DEBUG) && IVI4_DEBUG_CHECKSUM
|
||||
if (band->checksum_present) {
|
||||
uint16_t chksum = ivi_calc_band_checksum(band);
|
||||
if (chksum != band->checksum) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Band checksum mismatch! Plane %d, band %d, received: %x, calculated: %x\n",
|
||||
band->plane, band->band_num, band->checksum, chksum);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
align_get_bits(&ctx->gb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
IVI4DecContext *ctx = avctx->priv_data;
|
||||
|
||||
ff_ivi_init_static_vlc();
|
||||
|
||||
/* copy rvmap tables in our context so we can apply changes to them */
|
||||
memcpy(ctx->rvmap_tabs, ff_ivi_rvmap_tabs, sizeof(ff_ivi_rvmap_tabs));
|
||||
|
||||
/* Force allocation of the internal buffers */
|
||||
/* during picture header decoding. */
|
||||
ctx->pic_conf.pic_width = 0;
|
||||
ctx->pic_conf.pic_height = 0;
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_YUV410P;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Rearrange decoding and reference buffers.
|
||||
*
|
||||
* @param[in,out] ctx pointer to the decoder context
|
||||
*/
|
||||
static void switch_buffers(IVI4DecContext *ctx)
|
||||
static void switch_buffers(IVI45DecContext *ctx)
|
||||
{
|
||||
switch (ctx->prev_frame_type) {
|
||||
case FRAMETYPE_INTRA:
|
||||
@@ -720,95 +569,33 @@ static void switch_buffers(IVI4DecContext *ctx)
|
||||
}
|
||||
|
||||
|
||||
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
AVPacket *avpkt)
|
||||
static int is_nonnull_frame(IVI45DecContext *ctx)
|
||||
{
|
||||
IVI4DecContext *ctx = avctx->priv_data;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
int result, p, b;
|
||||
|
||||
init_get_bits(&ctx->gb, buf, buf_size * 8);
|
||||
|
||||
result = decode_pic_hdr(ctx, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error decoding picture header\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
switch_buffers(ctx);
|
||||
|
||||
if (ctx->frame_type < FRAMETYPE_NULL_FIRST) {
|
||||
for (p = 0; p < 3; p++) {
|
||||
for (b = 0; b < ctx->planes[p].num_bands; b++) {
|
||||
result = decode_band(ctx, p, &ctx->planes[p].bands[b], avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Error decoding band: %d, plane: %d\n", b, p);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If the bidirectional mode is enabled, next I and the following P frame will */
|
||||
/* be sent together. Unfortunately the approach below seems to be the only way */
|
||||
/* to handle the B-frames mode. That's exactly the same Intel decoders do. */
|
||||
if (ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
while (get_bits(&ctx->gb, 8)); // skip version string
|
||||
skip_bits_long(&ctx->gb, 64); // skip padding, TODO: implement correct 8-bytes alignment
|
||||
if (get_bits_left(&ctx->gb) > 18 && show_bits(&ctx->gb, 18) == 0x3FFF8)
|
||||
av_log(avctx, AV_LOG_ERROR, "Buffer contains IP frames!\n");
|
||||
}
|
||||
|
||||
if (ctx->frame.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
|
||||
ctx->frame.reference = 0;
|
||||
if ((result = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
if (ctx->is_scalable) {
|
||||
ff_ivi_recompose_haar(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
|
||||
} else {
|
||||
ff_ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
|
||||
}
|
||||
|
||||
ff_ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
|
||||
ff_ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = ctx->frame;
|
||||
|
||||
return buf_size;
|
||||
return ctx->frame_type < FRAMETYPE_NULL_FIRST;
|
||||
}
|
||||
|
||||
|
||||
static av_cold int decode_close(AVCodecContext *avctx)
|
||||
static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
IVI4DecContext *ctx = avctx->priv_data;
|
||||
IVI45DecContext *ctx = avctx->priv_data;
|
||||
|
||||
ff_ivi_free_buffers(&ctx->planes[0]);
|
||||
ff_ivi_init_static_vlc();
|
||||
|
||||
if (ctx->frame.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
/* copy rvmap tables in our context so we can apply changes to them */
|
||||
memcpy(ctx->rvmap_tabs, ff_ivi_rvmap_tabs, sizeof(ff_ivi_rvmap_tabs));
|
||||
|
||||
#if IVI4_STREAM_ANALYSER
|
||||
if (ctx->is_scalable)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses scalability mode!\n");
|
||||
if (ctx->uses_tiling)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses local decoding!\n");
|
||||
if (ctx->has_b_frames)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video contains B-frames!\n");
|
||||
if (ctx->has_transp)
|
||||
av_log(avctx, AV_LOG_ERROR, "Transparency mode is enabled!\n");
|
||||
if (ctx->uses_haar)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses Haar transform!\n");
|
||||
if (ctx->uses_fullpel)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses fullpel motion vectors!\n");
|
||||
#endif
|
||||
/* Force allocation of the internal buffers */
|
||||
/* during picture header decoding. */
|
||||
ctx->pic_conf.pic_width = 0;
|
||||
ctx->pic_conf.pic_height = 0;
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_YUV410P;
|
||||
|
||||
ctx->decode_pic_hdr = decode_pic_hdr;
|
||||
ctx->decode_band_hdr = decode_band_hdr;
|
||||
ctx->decode_mb_info = decode_mb_info;
|
||||
ctx->switch_buffers = switch_buffers;
|
||||
ctx->is_nonnull_frame = is_nonnull_frame;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -818,9 +605,9 @@ AVCodec ff_indeo4_decoder = {
|
||||
.name = "indeo4",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_INDEO4,
|
||||
.priv_data_size = sizeof(IVI4DecContext),
|
||||
.priv_data_size = sizeof(IVI45DecContext),
|
||||
.init = decode_init,
|
||||
.close = decode_close,
|
||||
.decode = decode_frame,
|
||||
.close = ff_ivi_decode_close,
|
||||
.decode = ff_ivi_decode_frame,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Intel Indeo Video Interactive 4"),
|
||||
};
|
||||
|
@@ -48,37 +48,6 @@ enum {
|
||||
|
||||
#define IVI5_PIC_SIZE_ESC 15
|
||||
|
||||
#define IVI5_IS_PROTECTED 0x20
|
||||
|
||||
typedef struct {
|
||||
GetBitContext gb;
|
||||
AVFrame frame;
|
||||
RVMapDesc rvmap_tabs[9]; ///< local corrected copy of the static rvmap tables
|
||||
IVIPlaneDesc planes[3]; ///< color planes
|
||||
const uint8_t *frame_data; ///< input frame data pointer
|
||||
int buf_switch; ///< used to switch between three buffers
|
||||
int inter_scal; ///< signals a sequence of scalable inter frames
|
||||
int dst_buf; ///< buffer index for the currently decoded frame
|
||||
int ref_buf; ///< inter frame reference buffer index
|
||||
int ref2_buf; ///< temporal storage for switching buffers
|
||||
uint32_t frame_size; ///< frame size in bytes
|
||||
int frame_type;
|
||||
int prev_frame_type; ///< frame type of the previous frame
|
||||
int frame_num;
|
||||
uint32_t pic_hdr_size; ///< picture header size in bytes
|
||||
uint8_t frame_flags;
|
||||
uint16_t checksum; ///< frame checksum
|
||||
|
||||
IVIHuffTab mb_vlc; ///< vlc table for decoding macroblock data
|
||||
|
||||
uint16_t gop_hdr_size;
|
||||
uint8_t gop_flags;
|
||||
int is_scalable;
|
||||
uint32_t lock_word;
|
||||
IVIPicConfig pic_conf;
|
||||
} IVI5DecContext;
|
||||
|
||||
|
||||
/**
|
||||
* Decode Indeo5 GOP (Group of pictures) header.
|
||||
* This header is present in key frames only.
|
||||
@@ -88,7 +57,7 @@ typedef struct {
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_gop_header(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
static int decode_gop_header(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||
{
|
||||
int result, i, p, tile_size, pic_size_indx, mb_size, blk_size, is_scalable;
|
||||
int quant_mat, blk_size_changed = 0;
|
||||
@@ -220,6 +189,10 @@ static int decode_gop_header(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
if (band->blk_size == 8) {
|
||||
if(quant_mat >= 5){
|
||||
av_log(avctx, AV_LOG_ERROR, "quant_mat %d too large!\n", quant_mat);
|
||||
return -1;
|
||||
}
|
||||
band->intra_base = &ivi5_base_quant_8x8_intra[quant_mat][0];
|
||||
band->inter_base = &ivi5_base_quant_8x8_inter[quant_mat][0];
|
||||
band->intra_scale = &ivi5_scale_quant_8x8_intra[quant_mat][0];
|
||||
@@ -319,7 +292,7 @@ static inline void skip_hdr_extension(GetBitContext *gb)
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||
{
|
||||
if (get_bits(&ctx->gb, 5) != 0x1F) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid picture start code!\n");
|
||||
@@ -336,8 +309,12 @@ static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
ctx->frame_num = get_bits(&ctx->gb, 8);
|
||||
|
||||
if (ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
if (decode_gop_header(ctx, avctx))
|
||||
return -1;
|
||||
ctx->gop_invalid = 1;
|
||||
if (decode_gop_header(ctx, avctx)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid GOP header, skipping frames.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
ctx->gop_invalid = 0;
|
||||
}
|
||||
|
||||
if (ctx->frame_type != FRAMETYPE_NULL) {
|
||||
@@ -372,7 +349,7 @@ static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_band_hdr(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
AVCodecContext *avctx)
|
||||
{
|
||||
int i;
|
||||
@@ -442,7 +419,7 @@ static int decode_band_hdr(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_mb_info(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
IVITile *tile, AVCodecContext *avctx)
|
||||
{
|
||||
int x, y, mv_x, mv_y, mv_delta, offs, mb_offset,
|
||||
@@ -458,6 +435,12 @@ static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches parameters %d\n",
|
||||
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* scale factor for motion vectors */
|
||||
mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
|
||||
mv_x = mv_y = 0;
|
||||
@@ -561,102 +544,12 @@ static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Decode an Indeo5 band.
|
||||
*
|
||||
* @param[in,out] ctx ptr to the decoder context
|
||||
* @param[in,out] band ptr to the band descriptor
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_band(IVI5DecContext *ctx, int plane_num,
|
||||
IVIBandDesc *band, AVCodecContext *avctx)
|
||||
{
|
||||
int result, i, t, idx1, idx2, pos;
|
||||
IVITile *tile;
|
||||
|
||||
band->buf = band->bufs[ctx->dst_buf];
|
||||
band->ref_buf = band->bufs[ctx->ref_buf];
|
||||
band->data_ptr = ctx->frame_data + (get_bits_count(&ctx->gb) >> 3);
|
||||
|
||||
result = decode_band_hdr(ctx, band, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error while decoding band header: %d\n",
|
||||
result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (band->is_empty) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty band encountered!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel];
|
||||
|
||||
/* apply corrections to the selected rvmap table if present */
|
||||
for (i = 0; i < band->num_corr; i++) {
|
||||
idx1 = band->corr[i*2];
|
||||
idx2 = band->corr[i*2+1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
pos = get_bits_count(&ctx->gb);
|
||||
|
||||
for (t = 0; t < band->num_tiles; t++) {
|
||||
tile = &band->tiles[t];
|
||||
|
||||
tile->is_empty = get_bits1(&ctx->gb);
|
||||
if (tile->is_empty) {
|
||||
ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
(ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
|
||||
} else {
|
||||
tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
|
||||
|
||||
result = decode_mb_info(ctx, band, tile, avctx);
|
||||
if (result < 0)
|
||||
break;
|
||||
|
||||
result = ff_ivi_decode_blocks(&ctx->gb, band, tile);
|
||||
if (result < 0 || (get_bits_count(&ctx->gb) - pos) >> 3 != tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
|
||||
break;
|
||||
}
|
||||
pos += tile->data_size << 3; // skip to next tile
|
||||
}
|
||||
}
|
||||
|
||||
/* restore the selected rvmap table by applying its corrections in reverse order */
|
||||
for (i = band->num_corr-1; i >= 0; i--) {
|
||||
idx1 = band->corr[i*2];
|
||||
idx2 = band->corr[i*2+1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
if (band->checksum_present) {
|
||||
uint16_t chksum = ivi_calc_band_checksum(band);
|
||||
if (chksum != band->checksum) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Band checksum mismatch! Plane %d, band %d, received: %x, calculated: %x\n",
|
||||
band->plane, band->band_num, band->checksum, chksum);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
align_get_bits(&ctx->gb);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Switch buffers.
|
||||
*
|
||||
* @param[in,out] ctx ptr to the decoder context
|
||||
*/
|
||||
static void switch_buffers(IVI5DecContext *ctx)
|
||||
static void switch_buffers(IVI45DecContext *ctx)
|
||||
{
|
||||
switch (ctx->prev_frame_type) {
|
||||
case FRAMETYPE_INTRA:
|
||||
@@ -694,12 +587,18 @@ static void switch_buffers(IVI5DecContext *ctx)
|
||||
}
|
||||
|
||||
|
||||
static int is_nonnull_frame(IVI45DecContext *ctx)
|
||||
{
|
||||
    return ctx->frame_type != FRAMETYPE_NULL;
}


/**
 * Initialize Indeo5 decoder.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    IVI5DecContext *ctx = avctx->priv_data;
    IVI45DecContext *ctx = avctx->priv_data;
    int result;

    ff_ivi_init_static_vlc();
@@ -729,109 +628,25 @@ static av_cold int decode_init(AVCodecContext *avctx)
    ctx->buf_switch = 0;
    ctx->inter_scal = 0;

    ctx->decode_pic_hdr = decode_pic_hdr;
    ctx->decode_band_hdr = decode_band_hdr;
    ctx->decode_mb_info = decode_mb_info;
    ctx->switch_buffers = switch_buffers;
    ctx->is_nonnull_frame = is_nonnull_frame;

    avctx->pix_fmt = PIX_FMT_YUV410P;

    return 0;
}


/**
 * main decoder function
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    IVI5DecContext *ctx = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int result, p, b;

    init_get_bits(&ctx->gb, buf, buf_size * 8);
    ctx->frame_data = buf;
    ctx->frame_size = buf_size;

    result = decode_pic_hdr(ctx, avctx);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Error while decoding picture header: %d\n", result);
        return -1;
    }

    if (ctx->gop_flags & IVI5_IS_PROTECTED) {
        av_log(avctx, AV_LOG_ERROR, "Password-protected clip!\n");
        return -1;
    }

    switch_buffers(ctx);

    //{ START_TIMER;

    if (ctx->frame_type != FRAMETYPE_NULL) {
        for (p = 0; p < 3; p++) {
            for (b = 0; b < ctx->planes[p].num_bands; b++) {
                result = decode_band(ctx, p, &ctx->planes[p].bands[b], avctx);
                if (result) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Error while decoding band: %d, plane: %d\n", b, p);
                    return -1;
                }
            }
        }
    }

    //STOP_TIMER("decode_planes"); }

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    ctx->frame.reference = 0;
    if (avctx->get_buffer(avctx, &ctx->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if (ctx->is_scalable) {
        ff_ivi_recompose53 (&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
    } else {
        ff_ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
    }

    ff_ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
    ff_ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = ctx->frame;

    return buf_size;
}


/**
 * Close Indeo5 decoder and clean up its context.
 */
static av_cold int decode_close(AVCodecContext *avctx)
{
    IVI5DecContext *ctx = avctx->priv_data;

    ff_ivi_free_buffers(&ctx->planes[0]);

    if (ctx->mb_vlc.cust_tab.table)
        free_vlc(&ctx->mb_vlc.cust_tab);

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    return 0;
}


AVCodec ff_indeo5_decoder = {
    .name           = "indeo5",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_INDEO5,
    .priv_data_size = sizeof(IVI5DecContext),
    .priv_data_size = sizeof(IVI45DecContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .close          = ff_ivi_decode_close,
    .decode         = ff_ivi_decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("Intel Indeo Video Interactive 5"),
};

@@ -65,8 +65,8 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
    s->pb_frame = get_bits1(&s->gb);

    if (format < 6) {
        s->width = h263_format[format][0];
        s->height = h263_format[format][1];
        s->width = ff_h263_format[format][0];
        s->height = ff_h263_format[format][1];
        s->avctx->sample_aspect_ratio.num = 12;
        s->avctx->sample_aspect_ratio.den = 11;
    } else {

@@ -77,7 +77,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
    }
    if(get_bits(&s->gb, 2))
        av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
    s->loop_filter = get_bits1(&s->gb);
    s->loop_filter = get_bits1(&s->gb) * !s->avctx->lowres;
    if(get_bits1(&s->gb))
        av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
    if(get_bits1(&s->gb))

@@ -103,7 +103,7 @@ static VLC cbpc_b_vlc;
/* init vlcs */

/* XXX: find a better solution to handle static init */
void h263_decode_init_vlc(MpegEncContext *s)
void ff_h263_decode_init_vlc(MpegEncContext *s)
{
    static int done = 0;

@@ -120,18 +120,18 @@ void h263_decode_init_vlc(MpegEncContext *s)
                 &ff_h263_cbpy_tab[0][1], 2, 1,
                 &ff_h263_cbpy_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
                 &mvtab[0][1], 2, 1,
                 &mvtab[0][0], 2, 1, 538);
        init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
        init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
                 &ff_mvtab[0][1], 2, 1,
                 &ff_mvtab[0][0], 2, 1, 538);
        ff_init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
        ff_init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
        INIT_VLC_RL(ff_h263_rl_inter, 554);
        INIT_VLC_RL(rl_intra_aic, 554);
        INIT_VLC_RL(ff_rl_intra_aic, 554);
        INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
                 &h263_mbtype_b_tab[0][1], 2, 1,
                 &h263_mbtype_b_tab[0][0], 2, 1, 80);
                 &ff_h263_mbtype_b_tab[0][1], 2, 1,
                 &ff_h263_mbtype_b_tab[0][0], 2, 1, 80);
        INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
                 &cbpc_b_tab[0][1], 2, 1,
                 &cbpc_b_tab[0][0], 2, 1, 8);
                 &ff_cbpc_b_tab[0][1], 2, 1,
                 &ff_cbpc_b_tab[0][0], 2, 1, 8);
    }
}

@@ -271,7 +271,7 @@ int ff_h263_resync(MpegEncContext *s){
    return -1;
}

int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
{
    int code, val, sign, shift;
    code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);

@@ -381,16 +381,16 @@ static void preview_obmc(MpegEncContext *s){
        if ((cbpc & 16) == 0) {
            s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
            /* 16x16 motion prediction */
            mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
            mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
            if (s->umvplus)
                mx = h263p_decode_umotion(s, pred_x);
            else
                mx = h263_decode_motion(s, pred_x, 1);
                mx = ff_h263_decode_motion(s, pred_x, 1);

            if (s->umvplus)
                my = h263p_decode_umotion(s, pred_y);
            else
                my = h263_decode_motion(s, pred_y, 1);
                my = ff_h263_decode_motion(s, pred_y, 1);

            mot_val[0 ]= mot_val[2 ]=
            mot_val[0+stride]= mot_val[2+stride]= mx;

@@ -399,16 +399,16 @@ static void preview_obmc(MpegEncContext *s){
        } else {
            s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
            for(i=0;i<4;i++) {
                mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                if (s->umvplus)
                    mx = h263p_decode_umotion(s, pred_x);
                else
                    mx = h263_decode_motion(s, pred_x, 1);
                    mx = ff_h263_decode_motion(s, pred_x, 1);

                if (s->umvplus)
                    my = h263p_decode_umotion(s, pred_y);
                else
                    my = h263_decode_motion(s, pred_y, 1);
                    my = ff_h263_decode_motion(s, pred_y, 1);
                if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
                    skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
                mot_val[0] = mx;

@@ -432,7 +432,7 @@ static void h263_decode_dquant(MpegEncContext *s){

    if(s->modified_quant){
        if(get_bits1(&s->gb))
            s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
            s->qscale= ff_modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
        else
            s->qscale= get_bits(&s->gb, 5);
    }else

@@ -450,7 +450,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,

    scan_table = s->intra_scantable.permutated;
    if (s->h263_aic && s->mb_intra) {
        rl = &rl_intra_aic;
        rl = &ff_rl_intra_aic;
        i = 0;
        if (s->ac_pred) {
            if (s->h263_aic_dir)

@@ -539,7 +539,7 @@ retry:
    if (i >= 64){
        if(s->alt_inter_vlc && rl == &ff_h263_rl_inter && !s->mb_intra){
            //Looks like a hack but no, it's the way it is supposed to work ...
            rl = &rl_intra_aic;
            rl = &ff_rl_intra_aic;
            i = 0;
            s->gb= gb;
            s->dsp.clear_block(block);

@@ -556,7 +556,7 @@ retry:
    }
not_coded:
    if (s->mb_intra && s->h263_aic) {
        h263_pred_acdc(s, block, n);
        ff_h263_pred_acdc(s, block, n);
        i = 63;
    }
    s->block_last_index[n] = i;

@@ -655,11 +655,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
            s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
            /* 16x16 motion prediction */
            s->mv_type = MV_TYPE_16X16;
            h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
            ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
            if (s->umvplus)
                mx = h263p_decode_umotion(s, pred_x);
            else
                mx = h263_decode_motion(s, pred_x, 1);
                mx = ff_h263_decode_motion(s, pred_x, 1);

            if (mx >= 0xffff)
                return -1;

@@ -667,7 +667,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
            if (s->umvplus)
                my = h263p_decode_umotion(s, pred_y);
            else
                my = h263_decode_motion(s, pred_y, 1);
                my = ff_h263_decode_motion(s, pred_y, 1);

            if (my >= 0xffff)
                return -1;

@@ -680,18 +680,18 @@ int ff_h263_decode_mb(MpegEncContext *s,
            s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
            s->mv_type = MV_TYPE_8X8;
            for(i=0;i<4;i++) {
                mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                if (s->umvplus)
                    mx = h263p_decode_umotion(s, pred_x);
                else
                    mx = h263_decode_motion(s, pred_x, 1);
                    mx = ff_h263_decode_motion(s, pred_x, 1);
                if (mx >= 0xffff)
                    return -1;

                if (s->umvplus)
                    my = h263p_decode_umotion(s, pred_y);
                else
                    my = h263_decode_motion(s, pred_y, 1);
                    my = ff_h263_decode_motion(s, pred_y, 1);
                if (my >= 0xffff)
                    return -1;
                s->mv[0][i][0] = mx;

@@ -763,11 +763,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
        //FIXME UMV

        if(USES_LIST(mb_type, 0)){
            int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
            int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &mx, &my);
            s->mv_dir = MV_DIR_FORWARD;

            mx = h263_decode_motion(s, mx, 1);
            my = h263_decode_motion(s, my, 1);
            mx = ff_h263_decode_motion(s, mx, 1);
            my = ff_h263_decode_motion(s, my, 1);

            s->mv[0][0][0] = mx;
            s->mv[0][0][1] = my;

@@ -776,11 +776,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
        }

        if(USES_LIST(mb_type, 1)){
            int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
            int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &mx, &my);
            s->mv_dir |= MV_DIR_BACKWARD;

            mx = h263_decode_motion(s, mx, 1);
            my = h263_decode_motion(s, my, 1);
            mx = ff_h263_decode_motion(s, mx, 1);
            my = ff_h263_decode_motion(s, my, 1);

            s->mv[1][0][0] = mx;
            s->mv[1][0][1] = my;

@@ -831,8 +831,8 @@ intra:
    }

    while(pb_mv_count--){
        h263_decode_motion(s, 0, 1);
        h263_decode_motion(s, 0, 1);
        ff_h263_decode_motion(s, 0, 1);
        ff_h263_decode_motion(s, 0, 1);
    }

    /* decode each block */

@@ -866,7 +866,7 @@ end:
}

/* most is hardcoded. should extend to handle all h263 streams */
int h263_decode_picture_header(MpegEncContext *s)
int ff_h263_decode_picture_header(MpegEncContext *s)
{
    int format, width, height, i;
    uint32_t startcode;

@@ -918,8 +918,8 @@ int h263_decode_picture_header(MpegEncContext *s)
    if (format != 7 && format != 6) {
        s->h263_plus = 0;
        /* H.263v1 */
        width = h263_format[format][0];
        height = h263_format[format][1];
        width = ff_h263_format[format][0];
        height = ff_h263_format[format][1];
        if (!width)
            return -1;

@@ -963,6 +963,8 @@ int h263_decode_picture_header(MpegEncContext *s)
        s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */
        s->loop_filter= get_bits1(&s->gb);
        s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter;
        if(s->avctx->lowres)
            s->loop_filter = 0;

        s->h263_slice_structured= get_bits1(&s->gb);
        if (get_bits1(&s->gb) != 0) {

@@ -1026,8 +1028,8 @@ int h263_decode_picture_header(MpegEncContext *s)
            s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
        }
    } else {
        width = h263_format[format][0];
        height = h263_format[format][1];
        width = ff_h263_format[format][0];
        height = ff_h263_format[format][1];
        s->avctx->sample_aspect_ratio= (AVRational){12,11};
    }
    if ((width == 0) || (height == 0))

@@ -102,7 +102,7 @@ av_const int ff_h263_aspect_to_info(AVRational aspect){
    return FF_ASPECT_EXTENDED;
}

void h263_encode_picture_header(MpegEncContext * s, int picture_number)
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
{
    int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
    int best_clock_code=1;

@@ -141,7 +141,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
    put_bits(&s->pb, 1, 0); /* camera off */
    put_bits(&s->pb, 1, 0); /* freeze picture release off */

    format = ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height);
    format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
    if (!s->h263_plus) {
        /* H.263v1 */
        put_bits(&s->pb, 3, format);

@@ -247,7 +247,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
/**
 * Encode a group of blocks header.
 */
void h263_encode_gob_header(MpegEncContext * s, int mb_line)
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
{
    put_bits(&s->pb, 17, 1); /* GBSC */

@@ -333,7 +333,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
    } else {
        i = 0;
        if (s->h263_aic && s->mb_intra)
            rl = &rl_intra_aic;
            rl = &ff_rl_intra_aic;

        if(s->alt_inter_vlc && !s->mb_intra){
            int aic_vlc_bits=0;

@@ -353,14 +353,14 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
                if(level<0) level= -level;

                code = get_rl_index(rl, last, run, level);
                aic_code = get_rl_index(&rl_intra_aic, last, run, level);
                aic_code = get_rl_index(&ff_rl_intra_aic, last, run, level);
                inter_vlc_bits += rl->table_vlc[code][1]+1;
                aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1;
                aic_vlc_bits += ff_rl_intra_aic.table_vlc[aic_code][1]+1;

                if (code == rl->n) {
                    inter_vlc_bits += 1+6+8-1;
                }
                if (aic_code == rl_intra_aic.n) {
                if (aic_code == ff_rl_intra_aic.n) {
                    aic_vlc_bits += 1+6+8-1;
                    wrong_pos += run + 1;
                }else

@@ -370,7 +370,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
            }
            i = 0;
            if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63)
                rl = &rl_intra_aic;
                rl = &ff_rl_intra_aic;
        }
    }

@@ -454,9 +454,9 @@ static void h263p_encode_umotion(MpegEncContext * s, int val)
    }
}

void h263_encode_mb(MpegEncContext * s,
                    DCTELEM block[6][64],
                    int motion_x, int motion_y)
void ff_h263_encode_mb(MpegEncContext * s,
                       DCTELEM block[6][64],
                       int motion_x, int motion_y)
{
    int cbpc, cbpy, i, cbp, pred_x, pred_y;
    int16_t pred_dc;

@@ -500,7 +500,7 @@ void h263_encode_mb(MpegEncContext * s,
        }

        /* motion vectors: 16x16 mode */
        h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

        if (!s->umvplus) {
            ff_h263_encode_motion_vector(s, motion_x - pred_x,

@@ -527,7 +527,7 @@ void h263_encode_mb(MpegEncContext * s,

        for(i=0; i<4; i++){
            /* motion vectors: 8x8 mode*/
            h263_pred_motion(s, i, 0, &pred_x, &pred_y);
            ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

            motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
            motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];

@@ -561,7 +561,7 @@ void h263_encode_mb(MpegEncContext * s,
            if(i<4) scale= s->y_dc_scale;
            else scale= s->c_dc_scale;

            pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
            pred_dc = ff_h263_pred_dc(s, i, &dc_ptr[i]);
            level -= pred_dc;
            /* Quant */
            if (level >= 0)

@@ -662,7 +662,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
    if (val == 0) {
        /* zero vector */
        code = 0;
        put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
        put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
    } else {
        bit_size = f_code - 1;
        range = 1 << bit_size;

@@ -676,7 +676,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
        code = (val >> bit_size) + 1;
        bits = val & (range - 1);

        put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
        put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
        if (bit_size > 0) {
            put_bits(&s->pb, bit_size, bits);
        }

@@ -692,7 +692,7 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
        for(mv=-MAX_MV; mv<=MAX_MV; mv++){
            int len;

            if(mv==0) len= mvtab[0][1];
            if(mv==0) len= ff_mvtab[0][1];
            else{
                int val, bit_size, code;

@@ -704,9 +704,9 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
                val--;
                code = (val >> bit_size) + 1;
                if(code<33){
                    len= mvtab[code][1] + 1 + bit_size;
                    len= ff_mvtab[code][1] + 1 + bit_size;
                }else{
                    len= mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
                    len= ff_mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
                }
            }

@@ -768,17 +768,17 @@ static void init_uni_h263_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_t
    }
}

void h263_encode_init(MpegEncContext *s)
void ff_h263_encode_init(MpegEncContext *s)
{
    static int done = 0;

    if (!done) {
        done = 1;

        init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
        init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
        ff_init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
        ff_init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);

        init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
        init_uni_h263_rl_tab(&ff_rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
        init_uni_h263_rl_tab(&ff_h263_rl_inter , NULL, uni_h263_inter_rl_len);

        init_mv_penalty_and_fcode(s);

@@ -123,6 +123,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
        if (huff_tab->tab_sel == 7) {
            /* custom huffman table (explicitly encoded) */
            new_huff.num_rows = get_bits(gb, 4);
            if (!new_huff.num_rows) {
                av_log(avctx, AV_LOG_ERROR, "Empty custom Huffman table!\n");
                return AVERROR_INVALIDDATA;
            }

            for (i = 0; i < new_huff.num_rows; i++)
                new_huff.xbits[i] = get_bits(gb, 4);

@@ -132,13 +136,14 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
                ff_ivi_huff_desc_copy(&huff_tab->cust_desc, &new_huff);

                if (huff_tab->cust_tab.table)
                    free_vlc(&huff_tab->cust_tab);
                    ff_free_vlc(&huff_tab->cust_tab);
                result = ff_ivi_create_huff_from_desc(&huff_tab->cust_desc,
                        &huff_tab->cust_tab, 0);
                if (result) {
                    huff_tab->cust_desc.num_rows = 0; // reset faulty description
                    av_log(avctx, AV_LOG_ERROR,
                           "Error while initializing custom vlc table!\n");
                    return -1;
                    return result;
                }
            }
            huff_tab->tab = &huff_tab->cust_tab;

@@ -207,14 +212,15 @@ int av_cold ff_ivi_init_planes(IVIPlaneDesc *planes, const IVIPicConfig *cfg)
            band->width = b_width;
            band->height = b_height;
            band->pitch = width_aligned;
            band->bufs[0] = av_malloc(buf_size);
            band->bufs[1] = av_malloc(buf_size);
            band->aheight = height_aligned;
            band->bufs[0] = av_mallocz(buf_size);
            band->bufs[1] = av_mallocz(buf_size);
            if (!band->bufs[0] || !band->bufs[1])
                return AVERROR(ENOMEM);

            /* allocate the 3rd band buffer for scalability mode */
            if (cfg->luma_bands > 1) {
                band->bufs[2] = av_malloc(buf_size);
                band->bufs[2] = av_mallocz(buf_size);
                if (!band->bufs[2])
                    return AVERROR(ENOMEM);
            }

@@ -237,7 +243,7 @@ void av_cold ff_ivi_free_buffers(IVIPlaneDesc *planes)
            av_freep(&planes[p].bands[b].bufs[2]);

            if (planes[p].bands[b].blk_vlc.cust_tab.table)
                free_vlc(&planes[p].bands[b].blk_vlc.cust_tab);
                ff_free_vlc(&planes[p].bands[b].blk_vlc.cust_tab);
            for (t = 0; t < planes[p].bands[b].num_tiles; t++)
                av_freep(&planes[p].bands[b].tiles[t].mbs);
            av_freep(&planes[p].bands[b].tiles);

@@ -284,6 +290,7 @@ int av_cold ff_ivi_init_tiles(IVIPlaneDesc *planes, int tile_width, int tile_hei
                for (x = 0; x < band->width; x += t_width) {
                    tile->xpos = x;
                    tile->ypos = y;
                    tile->mb_size = band->mb_size;
                    tile->width = FFMIN(band->width - x, t_width);
                    tile->height = FFMIN(band->height - y, t_height);
                    tile->is_empty = tile->data_size = 0;

@@ -379,6 +386,21 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
                mv_x >>= 1;
                mv_y >>= 1; /* convert halfpel vectors into fullpel ones */
            }
            if (mb->type) {
                int dmv_x, dmv_y, cx, cy;

                dmv_x = mb->mv_x >> band->is_halfpel;
                dmv_y = mb->mv_y >> band->is_halfpel;
                cx = mb->mv_x & band->is_halfpel;
                cy = mb->mv_y & band->is_halfpel;

                if ( mb->xpos + dmv_x < 0
                    || mb->xpos + dmv_x + band->mb_size + cx > band->pitch
                    || mb->ypos + dmv_y < 0
                    || mb->ypos + dmv_y + band->mb_size + cy > band->aheight) {
                    return AVERROR_INVALIDDATA;
                }
            }
        }

        for (blk = 0; blk < num_blocks; blk++) {

@@ -471,8 +493,17 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
    return 0;
}

void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
                               IVITile *tile, int32_t mv_scale)
/**
 * Handle empty tiles by performing data copying and motion
 * compensation respectively.
 *
 * @param[in] avctx ptr to the AVCodecContext
 * @param[in] band pointer to the band descriptor
 * @param[in] tile pointer to the tile descriptor
 * @param[in] mv_scale scaling factor for motion vectors
 */
static int ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
                                  IVITile *tile, int32_t mv_scale)
{
    int x, y, need_mc, mbn, blk, num_blocks, mv_x, mv_y, mc_type;
    int offs, mb_offset, row_offset;

@@ -482,6 +513,13 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
    void (*mc_no_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch,
                             int mc_type);

    if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
        av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches "
               "parameters %d in ivi_process_empty_tile()\n",
               tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
        return AVERROR_INVALIDDATA;
    }

    offs = tile->ypos * band->pitch + tile->xpos;
    mb = tile->mbs;
    ref_mb = tile->ref_mbs;

@@ -562,6 +600,8 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
            dst += band->pitch;
        }
    }

    return 0;
}


@@ -624,6 +664,226 @@ void ff_ivi_output_plane(IVIPlaneDesc *plane, uint8_t *dst, int dst_pitch)
    }
}

/**
 * Decode an Indeo 4 or 5 band.
 *
 * @param[in,out] ctx ptr to the decoder context
 * @param[in,out] band ptr to the band descriptor
 * @param[in] avctx ptr to the AVCodecContext
 * @return result code: 0 = OK, -1 = error
 */
static int decode_band(IVI45DecContext *ctx, int plane_num,
                       IVIBandDesc *band, AVCodecContext *avctx)
{
    int result, i, t, idx1, idx2, pos;
    IVITile *tile;

    band->buf = band->bufs[ctx->dst_buf];
    band->ref_buf = band->bufs[ctx->ref_buf];
    band->data_ptr = ctx->frame_data + (get_bits_count(&ctx->gb) >> 3);

    result = ctx->decode_band_hdr(ctx, band, avctx);
    if (result) {
        av_log(avctx, AV_LOG_ERROR, "Error while decoding band header: %d\n",
               result);
        return result;
    }

    if (band->is_empty) {
        av_log(avctx, AV_LOG_ERROR, "Empty band encountered!\n");
        return AVERROR_INVALIDDATA;
    }

    band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel];

    /* apply corrections to the selected rvmap table if present */
    for (i = 0; i < band->num_corr; i++) {
        idx1 = band->corr[i * 2];
        idx2 = band->corr[i * 2 + 1];
        FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
        FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
    }

    pos = get_bits_count(&ctx->gb);

    for (t = 0; t < band->num_tiles; t++) {
        tile = &band->tiles[t];

        if (tile->mb_size != band->mb_size) {
            av_log(avctx, AV_LOG_ERROR, "MB sizes mismatch: %d vs. %d\n",
                   band->mb_size, tile->mb_size);
            return AVERROR_INVALIDDATA;
        }
        tile->is_empty = get_bits1(&ctx->gb);
        if (tile->is_empty) {
            result = ivi_process_empty_tile(avctx, band, tile,
                                      (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
            if (result < 0)
                break;
            av_dlog(avctx, "Empty tile encountered!\n");
        } else {
            tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
            if (!tile->data_size) {
                av_log(avctx, AV_LOG_ERROR, "Tile data size is zero!\n");
                return AVERROR_INVALIDDATA;
            }

            result = ctx->decode_mb_info(ctx, band, tile, avctx);
            if (result < 0)
                break;

            result = ff_ivi_decode_blocks(&ctx->gb, band, tile);
            if (result < 0 || ((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
                av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
                break;
            }

            pos += tile->data_size << 3; // skip to next tile
        }
    }

    /* restore the selected rvmap table by applying its corrections in reverse order */
    for (i = band->num_corr-1; i >= 0; i--) {
        idx1 = band->corr[i*2];
        idx2 = band->corr[i*2+1];
        FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
        FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
    }

#ifdef DEBUG
    if (band->checksum_present) {
        uint16_t chksum = ivi_calc_band_checksum(band);
        if (chksum != band->checksum) {
            av_log(avctx, AV_LOG_ERROR,
                   "Band checksum mismatch! Plane %d, band %d, received: %x, calculated: %x\n",
                   band->plane, band->band_num, band->checksum, chksum);
        }
    }
#endif

    align_get_bits(&ctx->gb);

    return result;
}

int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    IVI45DecContext *ctx = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int result, p, b;

    init_get_bits(&ctx->gb, buf, buf_size * 8);
    ctx->frame_data = buf;
    ctx->frame_size = buf_size;

    result = ctx->decode_pic_hdr(ctx, avctx);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Error while decoding picture header: %d\n", result);
        return -1;
    }
    if (ctx->gop_invalid)
        return AVERROR_INVALIDDATA;

    if (ctx->gop_flags & IVI5_IS_PROTECTED) {
        av_log(avctx, AV_LOG_ERROR, "Password-protected clip!\n");
        return -1;
    }

    ctx->switch_buffers(ctx);

    //{ START_TIMER;

    if (ctx->is_nonnull_frame(ctx)) {
        for (p = 0; p < 3; p++) {
            for (b = 0; b < ctx->planes[p].num_bands; b++) {
                result = decode_band(ctx, p, &ctx->planes[p].bands[b], avctx);
                if (result) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Error while decoding band: %d, plane: %d\n", b, p);
                    return -1;
                }
            }
        }
    }

    //STOP_TIMER("decode_planes"); }

    /* If the bidirectional mode is enabled, next I and the following P frame will */
    /* be sent together. Unfortunately the approach below seems to be the only way */
    /* to handle the B-frames mode. That's exactly the same Intel decoders do.     */
    if (avctx->codec_id == CODEC_ID_INDEO4 && ctx->frame_type == 0/*FRAMETYPE_INTRA*/) {
        while (get_bits(&ctx->gb, 8)); // skip version string
        skip_bits_long(&ctx->gb, 64); // skip padding, TODO: implement correct 8-bytes alignment
        if (get_bits_left(&ctx->gb) > 18 && show_bits(&ctx->gb, 18) == 0x3FFF8)
            av_log(avctx, AV_LOG_ERROR, "Buffer contains IP frames!\n");
    }

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    ctx->frame.reference = 0;
    avcodec_set_dimensions(avctx, ctx->planes[0].width, ctx->planes[0].height);
    if ((result = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return result;
    }

    if (ctx->is_scalable) {
        if (avctx->codec_id == CODEC_ID_INDEO4)
            ff_ivi_recompose_haar(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
        else
            ff_ivi_recompose53 (&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
    } else {
        ff_ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
    }

    ff_ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
    ff_ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = ctx->frame;

    return buf_size;
}

/**
 * Close Indeo5 decoder and clean up its context.
 */
av_cold int ff_ivi_decode_close(AVCodecContext *avctx)
{
    IVI45DecContext *ctx = avctx->priv_data;

    ff_ivi_free_buffers(&ctx->planes[0]);

    if (ctx->mb_vlc.cust_tab.table)
        ff_free_vlc(&ctx->mb_vlc.cust_tab);

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

#if IVI4_STREAM_ANALYSER
    if (avctx->codec_id == CODEC_ID_INDEO4) {
        if (ctx->is_scalable)
            av_log(avctx, AV_LOG_ERROR, "This video uses scalability mode!\n");
        if (ctx->uses_tiling)
            av_log(avctx, AV_LOG_ERROR, "This video uses local decoding!\n");
        if (ctx->has_b_frames)
            av_log(avctx, AV_LOG_ERROR, "This video contains B-frames!\n");
        if (ctx->has_transp)
            av_log(avctx, AV_LOG_ERROR, "Transparency mode is enabled!\n");
        if (ctx->uses_haar)
            av_log(avctx, AV_LOG_ERROR, "This video uses Haar transform!\n");
        if (ctx->uses_fullpel)
            av_log(avctx, AV_LOG_ERROR, "This video uses fullpel motion vectors!\n");
    }
#endif

    return 0;
}


/**
 * These are 2x8 predefined Huffman codebooks for coding macroblock/block

@@ -34,6 +34,8 @@
#include <stdint.h>

#define IVI_VLC_BITS 13 ///< max number of bits of the ivi's huffman codes
#define IVI4_STREAM_ANALYSER 0
#define IVI5_IS_PROTECTED 0x20

/**
 * huffman codebook descriptor

@@ -116,6 +118,7 @@ typedef struct {
    int ypos;
    int width;
    int height;
    int mb_size;
    int is_empty; ///< = 1 if this tile doesn't contain any data
    int data_size; ///< size of the data in bytes
    int num_MBs; ///< number of macroblocks in this tile

@@ -132,6 +135,7 @@ typedef struct {
    int band_num; ///< band number
    int width;
    int height;
    int aheight; ///< aligned band height
    const uint8_t *data_ptr; ///< ptr to the first byte of the band data
    int data_size; ///< size of the band data
    int16_t *buf; ///< pointer to the output buffer for this band

@@ -192,6 +196,62 @@ typedef struct {
    uint8_t chroma_bands;
} IVIPicConfig;

typedef struct IVI45DecContext {
    GetBitContext gb;
    AVFrame frame;
    RVMapDesc rvmap_tabs[9]; ///< local corrected copy of the static rvmap tables

    uint32_t frame_num;
    int frame_type;
    int prev_frame_type; ///< frame type of the previous frame
    uint32_t data_size; ///< size of the frame data in bytes from picture header
    int is_scalable;
    int transp_status; ///< transparency mode status: 1 - enabled
    const uint8_t *frame_data; ///< input frame data pointer
    int inter_scal; ///< signals a sequence of scalable inter frames
    uint32_t frame_size; ///< frame size in bytes
    uint32_t pic_hdr_size; ///< picture header size in bytes
    uint8_t frame_flags;
    uint16_t checksum; ///< frame checksum

    IVIPicConfig pic_conf;
    IVIPlaneDesc planes[3]; ///< color planes

    int buf_switch; ///< used to switch between three buffers
    int dst_buf; ///< buffer index for the currently decoded frame
    int ref_buf; ///< inter frame reference buffer index
    int ref2_buf; ///< temporal storage for switching buffers

    IVIHuffTab mb_vlc; ///< current macroblock table descriptor
    IVIHuffTab blk_vlc; ///< current block table descriptor

    uint8_t rvmap_sel;
    uint8_t in_imf;
    uint8_t in_q; ///< flag for explicitly stored quantiser delta
    uint8_t pic_glob_quant;
    uint8_t unknown1;

    uint16_t gop_hdr_size;
    uint8_t gop_flags;
    uint32_t lock_word;

#if IVI4_STREAM_ANALYSER
    uint8_t has_b_frames;
    uint8_t has_transp;
    uint8_t uses_tiling;
    uint8_t uses_haar;
    uint8_t uses_fullpel;
#endif

    int (*decode_pic_hdr)  (struct IVI45DecContext *ctx, AVCodecContext *avctx);
    int (*decode_band_hdr) (struct IVI45DecContext *ctx, IVIBandDesc *band, AVCodecContext *avctx);
    int (*decode_mb_info)  (struct IVI45DecContext *ctx, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx);
    void (*switch_buffers) (struct IVI45DecContext *ctx);
    int (*is_nonnull_frame)(struct IVI45DecContext *ctx);

    int gop_invalid;
} IVI45DecContext;

/** compare some properties of two pictures */
static inline int ivi_pic_config_cmp(IVIPicConfig *str1, IVIPicConfig *str2)
{

@@ -315,18 +375,6 @@ int ff_ivi_dec_tile_data_size(GetBitContext *gb);
 */
int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile);

/**
 * Handle empty tiles by performing data copying and motion
 * compensation respectively.
 *
 * @param[in] avctx ptr to the AVCodecContext
 * @param[in] band pointer to the band descriptor
 * @param[in] tile pointer to the tile descriptor
 * @param[in] mv_scale scaling factor for motion vectors
 */
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
                               IVITile *tile, int32_t mv_scale);

/**
 * Convert and output the current plane.
 * This conversion is done by adding back the bias value of 128

@@ -348,4 +396,8 @@ uint16_t ivi_calc_band_checksum (IVIBandDesc *band);
 */
int ivi_check_band (IVIBandDesc *band, const uint8_t *ref, int pitch);

int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt);
av_cold int ff_ivi_decode_close(AVCodecContext *avctx);

#endif /* AVCODEC_IVI_COMMON_H */

@@ -40,7 +40,7 @@ typedef struct JLSState{
    int A[367], B[367], C[365], N[367];
    int limit, reset, bpp, qbpp, maxval, range;
    int near, twonear;
    int run_index[3];
    int run_index[4];
}JLSState;

extern const uint8_t ff_log2_run[32];

@@ -290,7 +290,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
//    av_log(s->avctx, AV_LOG_DEBUG, "JPEG params: ILV=%i Pt=%i BPP=%i, scan = %i\n", ilv, point_transform, s->bits, s->cur_scan);
    if(ilv == 0) { /* separate planes */
        stride = (s->nb_components > 1) ? 3 : 1;
        off = av_clip(s->cur_scan - 1, 0, stride);
        off = av_clip(s->cur_scan - 1, 0, stride - 1);
        width = s->width * stride;
        cur += off;
        for(i = 0; i < s->height; i++) {

@@ -143,6 +143,10 @@ static int decode_frame(AVCodecContext *avctx,
    buf += 5;

    if (video_size) {
        if(video_size < 0) {
            av_log(avctx, AV_LOG_ERROR, "video size %d invalid\n", video_size);
            return AVERROR_INVALIDDATA;
        }
        if (avctx->reget_buffer(avctx, &s->frame) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;

@@ -247,24 +247,26 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf,
{
    int L, TL;

    /* Left pixel is actually prev_row[width] */
    L = buf[width - stride - 1];
    if (!line) {
        /* Left prediction only for first line */
        L = l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1,
                                            width - 1, buf[0]);
        return;
    } else if (line == 1) {
        /* Second line, left predict first pixel, the rest of the line is median predicted
         * NOTE: In the case of RGB this pixel is top predicted */
        TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
    } else {
        /* Top left is 2 rows back, last pixel */
        TL = buf[width - (2 * stride) - 1];
    }
        /* Left pixel is actually prev_row[width] */
        L = buf[width - stride - 1];

    add_lag_median_prediction(buf, buf - stride, buf,
                              width, &L, &TL);
        if (line == 1) {
            /* Second line, left predict first pixel, the rest of the line is median predicted
             * NOTE: In the case of RGB this pixel is top predicted */
            TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
        } else {
            /* Top left is 2 rows back, last pixel */
            TL = buf[width - (2 * stride) - 1];
        }

        add_lag_median_prediction(buf, buf - stride, buf,
                                  width, &L, &TL);
    }
}

static int lag_decode_line(LagarithContext *l, lag_rac *rac,

@@ -310,13 +312,13 @@ handle_zeros:
}

static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, int width,
                                    int esc_count)
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *start = src;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);

@@ -324,6 +326,11 @@ static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        if (end - dst < count) {
            av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
            return AVERROR_INVALIDDATA;
        }

        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;

@@ -333,6 +340,8 @@ output_zeros:
        i = 0;
        while (!zero_run && dst + i < end) {
            i++;
            if (src + i >= src_end)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }

@@ -348,9 +357,10 @@ output_zeros:
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return start - src;
    return src_start - src;
}


@@ -366,6 +376,7 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
    int esc_count = src[0];
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;

    rac.avctx = l->avctx;
    l->zeros = 0;

@@ -396,10 +407,16 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
    esc_count -= 4;
    if (esc_count > 0) {
        /* Zero run coding only, no range coding. */
        for (i = 0; i < height; i++)
            src += lag_decode_zero_run_line(l, dst + (i * stride), src,
                                            width, esc_count);
        for (i = 0; i < height; i++) {
            int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                               src_end, width, esc_count);
            if (res < 0)
                return res;
            src += res;
        }
    } else {
        if (src_size < width * height)
            return AVERROR_INVALIDDATA; // buffer not big enough
        /* Plane is stored uncompressed */
        for (i = 0; i < height; i++) {
            memcpy(dst + (i * stride), src, width);

@@ -500,11 +517,19 @@ static int lag_decode_frame(AVCodecContext *avctx,
        }
        for (i = 0; i < 4; i++)
            srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size ||
            offs[3] >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < 4; i++)
            lag_decode_arith_plane(l, srcs[i],
                                   avctx->width, avctx->height,
                                   -l->rgb_stride, buf + offs[i],
                                   buf_size);
                                   buf_size - offs[i]);
        dst = p->data[0];
        for (i = 0; i < 4; i++)
            srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;

@@ -576,15 +601,23 @@ static int lag_decode_frame(AVCodecContext *avctx,
            return -1;
        }

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size);
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height / 2, p->linesize[2],
                               buf + offset_gu, buf_size);
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height / 2, p->linesize[1],
                               buf + offset_bv, buf_size);
                               buf + offset_bv, buf_size - offset_bv);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,

@@ -32,15 +32,16 @@

void lag_rac_init(lag_rac *l, GetBitContext *gb, int length)
{
    int i, j;
    int i, j, left;

    /* According to reference decoder "1st byte is garbage",
     * however, it gets skipped by the call to align_get_bits()
     */
    align_get_bits(gb);
    left = get_bits_left(gb) >> 3;
    l->bytestream_start =
    l->bytestream = gb->buffer + get_bits_count(gb) / 8;
    l->bytestream_end = l->bytestream_start + length;
    l->bytestream_end = l->bytestream_start + FFMIN(length, left);

    l->range = 0x80;
    l->low = *l->bytestream >> 1;

@@ -29,6 +29,7 @@
#include "libavutil/opt.h"
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "vorbis.h"
#include "libavutil/mathematics.h"

@@ -59,6 +60,12 @@ static const AVOption options[] = {
    { "iblock", "Sets the impulse block bias", offsetof(OggVorbisContext, iblock), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -15, 0, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
    { NULL }
};

static const AVCodecDefault defaults[] = {
    { "b", "0" },
    { NULL },
};

static const AVClass class = { "libvorbis", av_default_item_name, options, LIBAVUTIL_VERSION_INT };

static const char * error(int oggerr, int *averr)

@@ -75,33 +82,29 @@ static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avcco
{
    OggVorbisContext *context = avccontext->priv_data;
    double cfreq;
    int r;

    if (avccontext->flags & CODEC_FLAG_QSCALE) {
        /* variable bitrate */
        float quality = avccontext->global_quality / (float)FF_QP2LAMBDA;
        r = vorbis_encode_setup_vbr(vi, avccontext->channels,
    if (avccontext->flags & CODEC_FLAG_QSCALE || !avccontext->bit_rate) {
        /* variable bitrate
         * NOTE: we use the oggenc range of -1 to 10 for global_quality for
         *       user convenience, but libvorbis uses -0.1 to 1.0.
         */
        float q = avccontext->global_quality / (float)FF_QP2LAMBDA;
        /* default to 3 if the user did not set quality or bitrate */
        if (!(avccontext->flags & CODEC_FLAG_QSCALE))
            q = 3.0;
        if (vorbis_encode_setup_vbr(vi, avccontext->channels,
                                    avccontext->sample_rate,
                                    quality / 10.0);
        if (r) {
            av_log(avccontext, AV_LOG_ERROR,
                   "Unable to set quality to %g: %s\n", quality, error(r, &r));
            return r;
        }
                                    q / 10.0))
            return -1;
    } else {
        int minrate = avccontext->rc_min_rate > 0 ? avccontext->rc_min_rate : -1;
        int maxrate = avccontext->rc_min_rate > 0 ? avccontext->rc_max_rate : -1;
        int maxrate = avccontext->rc_max_rate > 0 ? avccontext->rc_max_rate : -1;

        /* constant bitrate */
        r = vorbis_encode_setup_managed(vi, avccontext->channels,
                                        avccontext->sample_rate, minrate,
                                        avccontext->bit_rate, maxrate);
        if (r) {
            av_log(avccontext, AV_LOG_ERROR,
                   "Unable to set CBR to %d: %s\n", avccontext->bit_rate,
                   error(r, &r));
            return r;
        }
        if (vorbis_encode_setup_managed(vi, avccontext->channels,
                                        avccontext->sample_rate, maxrate,
                                        avccontext->bit_rate, minrate))
            return -1;

        /* variable bitrate by estimate, disable slow rate management */
        if (minrate == -1 && maxrate == -1)

@@ -314,4 +317,5 @@ AVCodec ff_libvorbis_encoder = {
    .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
    .long_name = NULL_IF_CONFIG_SMALL("libvorbis Vorbis"),
    .priv_class = &class,
    .defaults = defaults,
};

@@ -483,6 +483,7 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                av_log(avctx, AV_LOG_ERROR,
                       "Data buffer alloc (%zu bytes) failed\n",
                       cx_frame->sz);
                av_free(cx_frame);
                return AVERROR(ENOMEM);
            }
            memcpy(cx_frame->buf, pkt->data.frame.buf, pkt->data.frame.sz);

@@ -101,9 +101,14 @@ void ff_lzw_decode_tail(LZWState *p)
    struct LZWState *s = (struct LZWState *)p;

    if(s->mode == FF_LZW_GIF) {
        while(s->pbuf < s->ebuf && s->bs>0){
            s->pbuf += s->bs;
            s->bs = *s->pbuf++;
        while (s->bs > 0) {
            if (s->pbuf + s->bs >= s->ebuf) {
                s->pbuf = s->ebuf;
                break;
            } else {
                s->pbuf += s->bs;
                s->bs = *s->pbuf++;
            }
        }
    }else
        s->pbuf= s->ebuf;

@@ -259,8 +259,8 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
                    int index = (ctx->cur_index+backref)&15;
                    uint8_t *p = ctx->flipped_ptrs[index].data[0];

                    ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
                    if(p) {
                    if (index != ctx->cur_index && p) {
                        ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
                        p += src -
                            ctx->flipped_ptrs[ctx->prev_index].data[plane];
                        ctx->dsp.put_pixels_tab[1][0](dst, p, stride, 8);

@@ -310,6 +310,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
    int width, height;
    int quality, num_coeffs;
    int swap_buf_size = buf_size - MIMIC_HEADER_SIZE;
    int res;

    if(buf_size < MIMIC_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "insufficient data\n");

@@ -376,10 +377,10 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
                   swap_buf_size>>2);
    init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3);

    if(!decode(ctx, quality, num_coeffs, !is_pframe)) {
        if (avctx->active_thread_type&FF_THREAD_FRAME)
            ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
        else {
    res = decode(ctx, quality, num_coeffs, !is_pframe);
    ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
    if (!res) {
        if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
            ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);
            return -1;
        }

@@ -411,7 +412,7 @@ static av_cold int mimic_decode_end(AVCodecContext *avctx)
    for(i = 0; i < 16; i++)
        if(ctx->buf_ptrs[i].data[0])
            ff_thread_release_buffer(avctx, &ctx->buf_ptrs[i]);
    free_vlc(&ctx->vlc);
    ff_free_vlc(&ctx->vlc);

    return 0;
}

@@ -63,8 +63,8 @@ static int build_vlc(VLC *vlc, const uint8_t *bits_table,
    if (is_ac)
        huff_sym[0] = 16 * 256;

    return init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
                           huff_code, 2, 2, huff_sym, 2, 2, use_static);
    return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
                              huff_code, 2, 2, huff_sym, 2, 2, use_static);
}

static void build_basic_mjpeg_vlc(MJpegDecodeContext *s)

@@ -197,7 +197,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
        len -= n;

        /* build VLC and flush previous vlc if present */
        free_vlc(&s->vlcs[class][index]);
        ff_free_vlc(&s->vlcs[class][index]);
        av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
               class, index, code_max + 1);
        if (build_vlc(&s->vlcs[class][index], bits_table, val_table,

@@ -205,7 +205,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
            return -1;

        if (class > 0) {
            free_vlc(&s->vlcs[2][index]);
            ff_free_vlc(&s->vlcs[2][index]);
            if (build_vlc(&s->vlcs[2][index], bits_table, val_table,
                          code_max + 1, 0, 0) < 0)
                return -1;

@@ -316,9 +316,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
        s->first_picture = 0;
    }

    if (s->interlaced && (s->bottom_field == !s->interlace_polarity))
        return 0;

    if (!(s->interlaced && (s->bottom_field == !s->interlace_polarity))) {
    /* XXX: not complete test ! */
    pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) |
                 (s->h_count[1] << 20) | (s->v_count[1] << 16) |

@@ -431,6 +429,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)

    if (len != (8 + (3 * nb_components)))
        av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len);
    }

    /* totally blank picture as progressive JPEG will only add details to it */
    if (s->progressive) {

@@ -1774,7 +1773,7 @@ av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 4; j++)
            free_vlc(&s->vlcs[i][j]);
            ff_free_vlc(&s->vlcs[i][j]);
    }
    for (i = 0; i < MAX_COMPONENTS; i++) {
        av_freep(&s->blocks[i]);

@@ -300,7 +300,7 @@ int ff_init_me(MpegEncContext *s){
    int cache_size= FFMIN(ME_MAP_SIZE>>ME_MAP_SHIFT, 1<<ME_MAP_SHIFT);
    int dia_size= FFMAX(FFABS(s->avctx->dia_size)&255, FFABS(s->avctx->pre_dia_size)&255);

    if(FFMIN(s->avctx->dia_size, s->avctx->pre_dia_size) < -ME_MAP_SIZE){
    if(FFMIN(s->avctx->dia_size, s->avctx->pre_dia_size) < -FFMIN(ME_MAP_SIZE, MAX_SAB_SIZE)){
        av_log(s->avctx, AV_LOG_ERROR, "ME_MAP size is too small for SAB diamond\n");
        return -1;
    }

@@ -55,6 +55,11 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
    int w4 = (avctx->width + 3) & ~3;
    int h4 = (avctx->height + 3) & ~3;

    if(avctx->extradata_size < 2){
        av_log(avctx, AV_LOG_ERROR, "extradata too small\n");
        return AVERROR_INVALIDDATA;
    }

    motionpixels_tableinit();
    mp->avctx = avctx;
    dsputil_init(&mp->dsp, avctx);

@@ -191,10 +196,13 @@ static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
            p = mp_get_yuv_from_rgb(mp, x - 1, y);
        } else {
            p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
            p.y = av_clip(p.y, 0, 31);
            if ((x & 3) == 0) {
                if ((y & 3) == 0) {
                    p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
                    p.v = av_clip(p.v, -32, 31);
                    p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
                    p.u = av_clip(p.u, -32, 31);
                    mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
                } else {
                    p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;

@@ -218,9 +226,12 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
            p = mp_get_yuv_from_rgb(mp, 0, y);
        } else {
            p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
            p.y = av_clip(p.y, 0, 31);
            if ((y & 3) == 0) {
                p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
                p.v = av_clip(p.v, -32, 31);
                p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
                p.u = av_clip(p.u, -32, 31);
            }
            mp->vpt[y] = p;
            mp_set_rgb_from_yuv(mp, 0, y, &p);

@@ -287,7 +298,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
    if (init_vlc(&mp->vlc, mp->max_codes_bits, mp->codes_count, &mp->codes[0].size, sizeof(HuffCode), 1, &mp->codes[0].code, sizeof(HuffCode), 4, 0))
        goto end;
    mp_decode_frame_helper(mp, &gb);
    free_vlc(&mp->vlc);
    ff_free_vlc(&mp->vlc);

end:
    *data_size = sizeof(AVFrame);

@@ -138,7 +138,8 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
    c->frames = 1 << (get_bits(&gb, 3) * 2);

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
    avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
    avctx->channels = channels;

    if(vlc_initialized) return 0;
    av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");

@@ -184,13 +185,13 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)

    q3_vlc[0].table = q3_0_table;
    q3_vlc[0].table_allocated = 512;
    init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE,
    ff_init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE,
                    mpc8_q3_bits, 1, 1,
                    mpc8_q3_codes, 1, 1,
                    mpc8_q3_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
    q3_vlc[1].table = q3_1_table;
    q3_vlc[1].table_allocated = 516;
    init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE,
    ff_init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE,
                    mpc8_q4_bits, 1, 1,
                    mpc8_q4_codes, 1, 1,
                    mpc8_q4_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);

@@ -696,8 +696,8 @@ av_cold void ff_mpeg12_init_vlcs(void)
        INIT_VLC_STATIC(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11,
                        &table_mb_btype[0][1], 2, 1,
                        &table_mb_btype[0][0], 2, 1, 64);
        init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
        ff_init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        ff_init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);

        INIT_2D_VLC_RL(ff_rl_mpeg1, 680);
        INIT_2D_VLC_RL(ff_rl_mpeg2, 674);

@@ -729,8 +729,8 @@ void ff_mpeg1_encode_init(MpegEncContext *s)
        int i;

        done=1;
        init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
        ff_init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        ff_init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);

        for(i=0; i<64; i++)
        {

@@ -654,13 +654,13 @@ try_again:
|
||||
if ((cbpc & 16) == 0) {
|
||||
/* 16x16 motion prediction */
|
||||
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if(!s->mcsel){
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
@@ -678,12 +678,12 @@ try_again:
|
||||
int i;
|
||||
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
for(i=0;i<4;i++) {
|
||||
int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
mot_val[0] = mx;
|
||||
@@ -1248,14 +1248,14 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->field_select[0][0]= get_bits1(&s->gb);
s->field_select[0][1]= get_bits1(&s->gb);
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
for(i=0; i<2; i++){
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y/2, s->f_code);
my = ff_h263_decode_motion(s, pred_y/2, s->f_code);
if (my >= 0xffff)
return -1;

@@ -1266,13 +1266,13 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;

@@ -1283,12 +1283,12 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
s->mv[0][i][0] = mx;
@@ -1384,8 +1384,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(USES_LIST(mb_type, 0)){
s->mv_dir = MV_DIR_FORWARD;
mx = h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
my = h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->mv[0][0][0] = mx;
s->last_mv[0][1][1]= s->last_mv[0][0][1]= s->mv[0][0][1] = my;
}

@@ -1393,8 +1393,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(USES_LIST(mb_type, 1)){
s->mv_dir |= MV_DIR_BACKWARD;
mx = h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
my = h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
s->last_mv[1][1][0]= s->last_mv[1][0][0]= s->mv[1][0][0] = mx;
s->last_mv[1][1][1]= s->last_mv[1][0][1]= s->mv[1][0][1] = my;
}

@@ -1405,8 +1405,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;
for(i=0; i<2; i++){
mx = h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
my = h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
mx = ff_h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
my = ff_h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0] = mx;
s->last_mv[0][i][1]= (s->mv[0][i][1] = my)*2;
}

@@ -1416,8 +1416,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mv_dir |= MV_DIR_BACKWARD;
for(i=0; i<2; i++){
mx = h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
my = h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
mx = ff_h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
my = ff_h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
s->last_mv[1][i][0]= s->mv[1][i][0] = mx;
s->last_mv[1][i][1]= (s->mv[1][i][1] = my)*2;
}

@@ -1429,8 +1429,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(IS_SKIP(mb_type))
mx=my=0;
else{
mx = h263_decode_motion(s, 0, 1);
my = h263_decode_motion(s, 0, 1);
mx = ff_h263_decode_motion(s, 0, 1);
my = ff_h263_decode_motion(s, 0, 1);
}
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;

@@ -2237,9 +2237,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (!done) {
done = 1;
init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
init_rl(&rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
init_rl(&rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
ff_init_rl(&rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
ff_init_rl(&rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
INIT_VLC_RL(ff_mpeg4_rl_intra, 554);
INIT_VLC_RL(rvlc_rl_inter, 1072);
INIT_VLC_RL(rvlc_rl_intra, 1072);
@@ -705,7 +705,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x16 mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, s->f_code);

@@ -729,7 +729,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x8 interlaced mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
pred_y /=2;
put_bits(&s->pb, 1, s->field_select[0][0]);

@@ -757,7 +757,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);

@@ -1240,7 +1240,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
init_uni_dc_tab();
init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
@@ -53,6 +53,7 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
int inc= FFMIN(buf_size - i, s->frame_size);
i += inc;
s->frame_size -= inc;
state = 0;
if(!s->frame_size){
next= i;

@@ -210,7 +210,7 @@ static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
else
g->long_end = 4; /* 8000 Hz */
g->short_start = 2 + (s->sample_rate_index != 8);
g->short_start = 3;
} else {
g->long_end = 0;
g->short_start = 0;
@@ -422,12 +422,12 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
// edge emu needs blocksize + filter length - 1
// (= 17x17 for halfpel / 21x21 for h264)
FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
(s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
(s->width + 95) * 2 * 21 * 4, fail); // (width + edge + align)*interlaced*MBsize*tolerance
// FIXME should be linesize instead of s->width * 2
// but that is not known before get_buffer()
FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
(s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
(s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
s->me.temp = s->me.scratchpad;
s->rd_scratchpad = s->me.scratchpad;
s->b_scratchpad = s->me.scratchpad;

@@ -956,8 +956,8 @@ void MPV_common_end(MpegEncContext *s)
avcodec_default_free_buffers(s->avctx);
}
void init_rl(RLTable *rl,
uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
void ff_init_rl(RLTable *rl,
uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
uint8_t index_run[MAX_RUN + 1];

@@ -1008,7 +1008,7 @@ void init_rl(RLTable *rl,
}
}
void init_vlc_rl(RLTable *rl)
void ff_init_vlc_rl(RLTable *rl)
{
int i, q;

@@ -1394,8 +1394,7 @@ void MPV_frame_end(MpegEncContext *s)
s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
s->mb_height - 1, 0);
ff_thread_report_progress((AVFrame *) s->current_picture_ptr, INT_MAX, 0);
}
}

@@ -1430,7 +1429,7 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
y = (x * f) >> 16;
fr = (x * f) & 0xFFFF;
buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
buf[(y + 1) * stride + x] += (color * fr ) >> 16;
if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
}
} else {
if (sy > ey) {

@@ -1447,7 +1446,7 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
x = (y*f) >> 16;
fr = (y*f) & 0xFFFF;
buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
buf[y * stride + x + 1] += (color * fr ) >> 16;
if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
}
}
}
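The two draw_line hunks above change the debug motion-vector plotter so that the second, fraction-weighted pixel is only written when the fractional part is non-zero. The underlying idea is simple Wu-style anti-aliasing: the 16.16 fixed-point slope yields an integer coordinate plus a fraction fr, and the colour is split between the two neighbouring pixels in proportion to fr. A small self-contained sketch of that weighting step (hypothetical buffer and function names, not the decoder's own code):

    #include <stdint.h>

    /* Split "color" between two vertically adjacent pixels according to the
     * 16-bit fractional part fr (0..0xFFFF) of the ideal y position.
     * Skipping the second write when fr == 0 avoids touching the row below
     * for exactly-on-grid points, mirroring the if(fr) guard in the hunk. */
    static void plot_aa(uint8_t *buf, int stride, int x, int y,
                        unsigned fr, int color)
    {
        buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;  /* main pixel */
        if (fr)
            buf[(y + 1) * stride + x] += (color * fr) >> 16;    /* spill-over */
    }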
@@ -1598,7 +1597,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
int mb_x;
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
const int mb_index = mb_x + mb_y * s->mb_stride;
if ((s->avctx->debug_mv) && pict->motion_val) {
if ((s->avctx->debug_mv) && pict->motion_val[0]) {
int type;
for (type = 0; type < 3; type++) {
int direction = 0;

@@ -1677,7 +1676,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
}
}
}
if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
if ((s->avctx->debug & FF_DEBUG_VIS_QP)) {
uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
0x0101010101010101ULL;
int y;

@@ -1691,7 +1690,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
}
}
if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
pict->motion_val) {
pict->motion_val[0]) {
int mb_type = pict->mb_type[mb_index];
uint64_t u,v;
int y;
@@ -725,7 +725,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
0, 0, 0,
ref_picture, pix_op, qpix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
}else if(!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) && s->mspel && s->codec_id == CODEC_ID_WMV2){
} else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
s->mspel && s->codec_id == CODEC_ID_WMV2) {
ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
@@ -712,7 +712,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
case CODEC_ID_H263:
if (!CONFIG_H263_ENCODER)
return -1;
if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format),
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
s->width, s->height) == 8) {
av_log(avctx, AV_LOG_ERROR,
"The specified picture size of %dx%d is not valid for "

@@ -848,7 +848,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
ff_h261_encode_init(s);
if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
h263_encode_init(s);
ff_h263_encode_init(s);
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
ff_msmpeg4_encode_init(s);
if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)

@@ -2086,7 +2086,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
case CODEC_ID_RV10:
case CODEC_ID_RV20:
if (CONFIG_H263_ENCODER)
h263_encode_mb(s, s->block, motion_x, motion_y);
ff_h263_encode_mb(s, s->block, motion_x, motion_y);
break;
case CODEC_ID_MJPEG:
case CODEC_ID_AMV:

@@ -2522,7 +2522,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
case CODEC_ID_H263:
case CODEC_ID_H263P:
if (CONFIG_H263_ENCODER)
h263_encode_gob_header(s, mb_y);
ff_h263_encode_gob_header(s, mb_y);
break;
}

@@ -3298,7 +3298,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
ff_flv_encode_picture_header(s, picture_number);
else if (CONFIG_H263_ENCODER)
h263_encode_picture_header(s, picture_number);
ff_h263_encode_picture_header(s, picture_number);
break;
case FMT_MPEG1:
if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
@@ -262,7 +262,7 @@ av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
init_mv_table(&mv_tables[0]);
init_mv_table(&mv_tables[1]);
for(i=0;i<NB_RL_TABLES;i++)
init_rl(&rl_table[i], static_rl_table_store[i]);
ff_init_rl(&rl_table[i], static_rl_table_store[i]);
for(i=0; i<NB_RL_TABLES; i++){
int level;

@@ -507,7 +507,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
if (val == 0) {
/* zero vector */
code = 0;
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
} else {
bit_size = s->f_code - 1;
range = 1 << bit_size;

@@ -526,7 +526,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
code = (val >> bit_size) + 1;
bits = val & (range - 1);
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
if (bit_size > 0) {
put_bits(&s->pb, bit_size, bits);
}

@@ -575,7 +575,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
s->misc_bits += get_bits_diff(s);
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
msmpeg4v2_encode_motion(s, motion_x - pred_x);
msmpeg4v2_encode_motion(s, motion_y - pred_y);
}else{

@@ -586,7 +586,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
s->misc_bits += get_bits_diff(s);
/* motion vector */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_msmpeg4_encode_motion(s, motion_x - pred_x,
motion_y - pred_y);
}

@@ -1134,7 +1134,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
cbp|= cbpy<<2;
if(s->msmpeg4_version==1 || (cbp&3) != 3) cbp^= 0x3C;
h263_pred_motion(s, 0, 0, &mx, &my);
ff_h263_pred_motion(s, 0, 0, &mx, &my);
mx= msmpeg4v2_decode_motion(s, mx, 1);
my= msmpeg4v2_decode_motion(s, my, 1);

@@ -1220,7 +1220,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
s->rl_table_index = decode012(&s->gb);
s->rl_chroma_table_index = s->rl_table_index;
}
h263_pred_motion(s, 0, 0, &mx, &my);
ff_h263_pred_motion(s, 0, 0, &mx, &my);
if (ff_msmpeg4_decode_motion(s, &mx, &my) < 0)
return -1;
s->mv_dir = MV_DIR_FORWARD;

@@ -1271,7 +1271,7 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
done = 1;
for(i=0;i<NB_RL_TABLES;i++) {
init_rl(&rl_table[i], static_rl_table_store[i]);
ff_init_rl(&rl_table[i], static_rl_table_store[i]);
}
INIT_VLC_RL(rl_table[0], 642);
INIT_VLC_RL(rl_table[1], 1104);

@@ -1316,8 +1316,8 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
&v2_mb_type[0][1], 2, 1,
&v2_mb_type[0][0], 2, 1, 128);
INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33,
&mvtab[0][1], 2, 1,
&mvtab[0][0], 2, 1, 538);
&ff_mvtab[0][1], 2, 1,
&ff_mvtab[0][0], 2, 1, 538);
INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128,
&wmv2_inter_table[0][0][1], 8, 4,

@@ -592,9 +592,9 @@ static const int8_t table4_run[168] = {
29, 30, 31, 32, 33, 34, 35, 36,
};
extern const uint16_t inter_vlc[103][2];
extern const int8_t inter_level[102];
extern const int8_t inter_run[102];
extern const uint16_t ff_inter_vlc[103][2];
extern const int8_t ff_inter_level[102];
extern const int8_t ff_inter_run[102];
extern const uint16_t ff_mpeg4_intra_vlc[103][2];
extern const int8_t ff_mpeg4_intra_level[102];

@@ -647,9 +647,9 @@ RLTable rl_table[NB_RL_TABLES] = {
{
102,
58,
inter_vlc,
inter_run,
inter_level,
ff_inter_vlc,
ff_inter_run,
ff_inter_level,
},
};
@@ -28,6 +28,9 @@ static int noise(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const ch
int amount= args ? atoi(args) : (*state % 10001+1);
int i;
if(amount <= 0)
return AVERROR(EINVAL);
*poutbuf= av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
memcpy(*poutbuf, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -191,13 +191,15 @@ retry:
}
if (c->codec_frameheader) {
int w, h, q, res;
if (buf[0] != 'V' || buf_size < 12) {
av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame (wrong codec_tag?)\n");
if (buf_size < RTJPEG_HEADER_SIZE || buf[4] != RTJPEG_HEADER_SIZE ||
buf[5] != RTJPEG_FILE_VERSION) {
av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n");
return AVERROR_INVALIDDATA;
}
w = AV_RL16(&buf[6]);
h = AV_RL16(&buf[8]);
q = buf[10];
res = codec_reinit(avctx, w, h, q);
if (res < 0)
return res;

@@ -207,8 +209,8 @@ retry:
size_change = 1;
goto retry;
}
buf = &buf[12];
buf_size -= 12;
buf = &buf[RTJPEG_HEADER_SIZE];
buf_size -= RTJPEG_HEADER_SIZE;
}
if ((size_change || keyframe) && c->pic.data[0])
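The nuv hunks above (together with the RTJPEG_HEADER_SIZE/RTJPEG_FILE_VERSION defines further down) replace magic numbers with named constants and tighten the frame-header check before the embedded width/height/quality fields are trusted. A minimal, self-contained sketch of that kind of validation (hypothetical struct and helper names, little-endian reads written out by hand):

    #include <stddef.h>
    #include <stdint.h>

    #define EX_RTJPEG_HEADER_SIZE  12
    #define EX_RTJPEG_FILE_VERSION 0

    /* Read a 16-bit little-endian value, like AV_RL16(). */
    static uint16_t rl16(const uint8_t *p) { return (uint16_t)(p[0] | (p[1] << 8)); }

    /* Validate the 12-byte frame header before using its fields.
     * Returns 0 on success, -1 on a malformed header. */
    static int parse_frame_header(const uint8_t *buf, size_t buf_size,
                                  int *w, int *h, int *q)
    {
        if (buf_size < EX_RTJPEG_HEADER_SIZE ||
            buf[4] != EX_RTJPEG_HEADER_SIZE ||
            buf[5] != EX_RTJPEG_FILE_VERSION)
            return -1;            /* reject before touching w/h/q */
        *w = rl16(&buf[6]);
        *h = rl16(&buf[8]);
        *q = buf[10];
        return 0;
    }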
@@ -223,15 +223,13 @@ static const AVOption options[]={
{"parse_only", NULL, OFFSET(parse_only), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
#endif
{"mpeg_quant", "use MPEG quantizers instead of H.263", OFFSET(mpeg_quant), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"stats_out", NULL, OFFSET(stats_out), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX},
{"stats_in", NULL, OFFSET(stats_in), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX},
{"qsquish", "how to keep quantizer between qmin and qmax (0 = clip, 1 = use differentiable function)", OFFSET(rc_qsquish), AV_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 99, V|E},
{"rc_qmod_amp", "experimental quantizer modulation", OFFSET(rc_qmod_amp), AV_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, -FLT_MAX, FLT_MAX, V|E},
{"rc_qmod_freq", "experimental quantizer modulation", OFFSET(rc_qmod_freq), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"rc_override_count", NULL, OFFSET(rc_override_count), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
{"rc_eq", "set rate control equation", OFFSET(rc_eq), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, V|E},
{"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"maxrate", "set max bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
{"minrate", "set min bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
{"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|E},
{"rc_buf_aggressivity", "currently useless", OFFSET(rc_buffer_aggressivity), AV_OPT_TYPE_FLOAT, {.dbl = 1.0 }, -FLT_MAX, FLT_MAX, V|E},
{"i_qfactor", "qp factor between P and I frames", OFFSET(i_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = -0.8 }, -FLT_MAX, FLT_MAX, V|E},
@@ -479,13 +479,14 @@ static int decode_frame(AVCodecContext *avctx,
} else if (s->bit_depth == 16 &&
s->color_type == PNG_COLOR_TYPE_RGB) {
avctx->pix_fmt = PIX_FMT_RGB48BE;
} else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
} else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
s->color_type == PNG_COLOR_TYPE_PALETTE) {
avctx->pix_fmt = PIX_FMT_PAL8;
} else if (s->bit_depth == 1) {
avctx->pix_fmt = PIX_FMT_MONOBLACK;
} else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
avctx->pix_fmt = PIX_FMT_GRAY8A;
avctx->pix_fmt = PIX_FMT_Y400A;
} else {
av_log(avctx, AV_LOG_ERROR, "unsupported bit depth %d "
"and color type %d\n",

@@ -55,7 +55,7 @@ static void png_get_interlaced_row(uint8_t *dst, int row_size,
uint8_t *d;
const uint8_t *s;
mask = ff_png_pass_mask[pass];
mask = (int[]){0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff}[pass];
switch(bits_per_pixel) {
case 1:
memset(dst, 0, row_size);
@@ -79,6 +79,7 @@ typedef struct ThreadContext {
pthread_cond_t current_job_cond;
pthread_mutex_t current_job_lock;
int current_job;
unsigned int current_execute;
int done;
} ThreadContext;

@@ -203,6 +204,7 @@ static void* attribute_align_arg worker(void *v)
AVCodecContext *avctx = v;
ThreadContext *c = avctx->thread_opaque;
int our_job = c->job_count;
int last_execute = 0;
int thread_count = avctx->thread_count;
int self_id;

@@ -213,7 +215,9 @@ static void* attribute_align_arg worker(void *v)
if (c->current_job == thread_count + c->job_count)
pthread_cond_signal(&c->last_job_cond);
pthread_cond_wait(&c->current_job_cond, &c->current_job_lock);
while (last_execute == c->current_execute && !c->done)
pthread_cond_wait(&c->current_job_cond, &c->current_job_lock);
last_execute = c->current_execute;
our_job = self_id;
if (c->done) {

@@ -233,7 +237,8 @@ static void* attribute_align_arg worker(void *v)
static av_always_inline void avcodec_thread_park_workers(ThreadContext *c, int thread_count)
{
pthread_cond_wait(&c->last_job_cond, &c->current_job_lock);
while (c->current_job != thread_count + c->job_count)
pthread_cond_wait(&c->last_job_cond, &c->current_job_lock);
pthread_mutex_unlock(&c->current_job_lock);
}

@@ -282,6 +287,7 @@ static int avcodec_thread_execute(AVCodecContext *avctx, action_func* func, void
c->rets = &dummy_ret;
c->rets_count = 1;
}
c->current_execute++;
pthread_cond_broadcast(&c->current_job_cond);
avcodec_thread_park_workers(c, avctx->thread_count);

@@ -390,7 +396,7 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
pthread_mutex_lock(&p->progress_mutex);
for (i = 0; i < MAX_BUFFERS; i++)
if (p->progress_used[i]) {
if (p->progress_used[i] && (p->got_frame || p->result<0 || avctx->codec_id != CODEC_ID_H264)) {
p->progress[i][0] = INT_MAX;
p->progress[i][1] = INT_MAX;
}
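The pthread.c hunks above wrap each pthread_cond_wait() in a while loop over an explicit predicate (a per-batch generation counter, or the expected current_job count), because condition variables can wake spuriously and a broadcast can arrive before a worker reaches the wait. A small self-contained sketch of that generation-counter pattern (hypothetical names, not the libavcodec thread pool itself):

    #include <pthread.h>

    typedef struct Worker {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        unsigned        generation;   /* bumped once per submitted batch */
        int             done;
    } Worker;

    /* Producer side: publish a new batch, then wake all workers. */
    static void submit(Worker *w)
    {
        pthread_mutex_lock(&w->lock);
        w->generation++;
        pthread_cond_broadcast(&w->cond);
        pthread_mutex_unlock(&w->lock);
    }

    /* Worker side: block until a new generation appears or shutdown.
     * Returns 1 if there is work, 0 on shutdown. */
    static int wait_for_work(Worker *w, unsigned *last_generation)
    {
        int have_work;
        pthread_mutex_lock(&w->lock);
        /* The loop makes spurious wakeups and early signals harmless:
         * we only proceed once the predicate has actually changed. */
        while (*last_generation == w->generation && !w->done)
            pthread_cond_wait(&w->cond, &w->lock);
        *last_generation = w->generation;
        have_work = !w->done;
        pthread_mutex_unlock(&w->lock);
        return have_work;
    }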
@@ -884,9 +884,13 @@ static void synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int l
break;
case 30:
if (BITS_LEFT(length,gb) >= 4)
samples[0] = type30_dequant[qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1)];
else
if (BITS_LEFT(length,gb) >= 4) {
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1);
if (index < FF_ARRAY_ELEMS(type30_dequant)) {
samples[0] = type30_dequant[index];
} else
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
} else
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
run = 1;

@@ -900,8 +904,12 @@ static void synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int l
type34_predictor = samples[0];
type34_first = 0;
} else {
samples[0] = type34_delta[qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1)] / type34_div + type34_predictor;
type34_predictor = samples[0];
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1);
if (index < FF_ARRAY_ELEMS(type34_delta)) {
samples[0] = type34_delta[index] / type34_div + type34_predictor;
type34_predictor = samples[0];
} else
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
}
} else {
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
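Both qdm2 hunks apply the same hardening: the symbol returned by the VLC reader is treated as untrusted and is range-checked against the dequantization table before being used as an index, with dithering noise as the fallback. A minimal sketch of the pattern in plain C (hypothetical table and fallback value; only the bounds-check idea is taken from the hunks):

    #include <stddef.h>

    #define ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))  /* like FF_ARRAY_ELEMS */

    static const float dequant_tab[8] = {
        0.0f, 0.5f, 1.0f, 2.0f, 4.0f, 8.0f, 16.0f, 32.0f  /* illustrative values */
    };

    /* Map an untrusted symbol to a dequantized sample; out-of-range symbols
     * (possible with damaged bitstreams) fall back to a caller-supplied value
     * instead of reading past the end of the table. */
    static float dequant_checked(unsigned symbol, float fallback)
    {
        if (symbol < ARRAY_ELEMS(dequant_tab))
            return dequant_tab[symbol];
        return fallback;
    }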
@@ -139,6 +139,7 @@ static int raw_decode(AVCodecContext *avctx,
int buf_size = avpkt->size;
int linesize_align = 4;
RawVideoContext *context = avctx->priv_data;
int res;
AVFrame * frame = (AVFrame *) data;
AVPicture * picture = (AVPicture *) data;

@@ -185,7 +186,9 @@ static int raw_decode(AVCodecContext *avctx,
avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
buf += buf_size - context->length;
avpicture_fill(picture, buf, avctx->pix_fmt, avctx->width, avctx->height);
if ((res = avpicture_fill(picture, buf, avctx->pix_fmt,
avctx->width, avctx->height)) < 0)
return res;
if((avctx->pix_fmt==PIX_FMT_PAL8 && buf_size < context->length) ||
(avctx->pix_fmt!=PIX_FMT_PAL8 &&
(av_pix_fmt_descriptors[avctx->pix_fmt].flags & PIX_FMT_PAL))){
@@ -53,8 +53,8 @@ typedef struct RLTable {
* @param static_store static uint8_t array[2][2*MAX_RUN + MAX_LEVEL + 3] which will hold
* the level and run tables, if this is NULL av_malloc() will be used
*/
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3]);
void init_vlc_rl(RLTable *rl);
void ff_init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3]);
void ff_init_vlc_rl(RLTable *rl);

#define INIT_VLC_RL(rl, static_size)\
{\

@@ -68,7 +68,7 @@ void init_vlc_rl(RLTable *rl);
for(q=0; q<32; q++)\
rl.rl_vlc[q]= rl_vlc_table[q];\
\
init_vlc_rl(&rl);\
ff_init_vlc_rl(&rl);\
}\
}
@@ -25,6 +25,9 @@
#include <stdint.h>
#include "dsputil.h"

#define RTJPEG_FILE_VERSION 0
#define RTJPEG_HEADER_SIZE 12

typedef struct {
int w, h;
DSPContext *dsp;

@@ -487,7 +487,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
if (MPV_common_init(s) < 0)
return -1;
h263_decode_init_vlc(s);
ff_h263_decode_init_vlc(s);
/* init rv vlc */
if (!done) {
@@ -131,10 +131,10 @@ static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t
vlc->table = &table_data[table_offs[num]];
vlc->table_allocated = table_offs[num + 1] - table_offs[num];
init_vlc_sparse(vlc, FFMIN(maxbits, 9), realsize,
bits2, 1, 1,
cw, 2, 2,
syms, 2, 2, INIT_VLC_USE_NEW_STATIC);
ff_init_vlc_sparse(vlc, FFMIN(maxbits, 9), realsize,
bits2, 1, 1,
cw, 2, 2,
syms, 2, 2, INIT_VLC_USE_NEW_STATIC);
}

/**
@@ -1411,7 +1411,7 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
{
MpegEncContext *s = &r->s;
GetBitContext *gb = &s->gb;
int mb_pos;
int mb_pos, slice_type;
int res;
init_get_bits(&r->s.gb, buf, buf_size*8);

@@ -1421,64 +1421,14 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
return -1;
}
if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
if (s->width != r->si.width || s->height != r->si.height) {
int err;
av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
r->si.width, r->si.height);
MPV_common_end(s);
s->width = r->si.width;
s->height = r->si.height;
avcodec_set_dimensions(s->avctx, s->width, s->height);
if ((err = MPV_common_init(s)) < 0)
return err;
if ((err = rv34_decoder_realloc(r)) < 0)
return err;
}
s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
if (!r->tmp_b_block_base) {
int i;
r->tmp_b_block_base = av_malloc(s->linesize * 48);
for (i = 0; i < 2; i++)
r->tmp_b_block_y[i] = r->tmp_b_block_base + i * 16 * s->linesize;
for (i = 0; i < 4; i++)
r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
+ (i >> 1) * 8 * s->uvlinesize + (i & 1) * 16;
}
r->cur_pts = r->si.pts;
if(s->pict_type != AV_PICTURE_TYPE_B){
r->last_pts = r->next_pts;
r->next_pts = r->cur_pts;
}else{
int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
if(!refdist){
r->weight1 = r->weight2 = 8192;
}else{
r->weight1 = (dist0 << 14) / refdist;
r->weight2 = (dist1 << 14) / refdist;
}
}
s->mb_x = s->mb_y = 0;
ff_thread_finish_setup(s->avctx);
} else {
int slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
if (slice_type != s->pict_type) {
av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
return AVERROR_INVALIDDATA;
}
if (s->width != r->si.width || s->height != r->si.height) {
av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
return AVERROR_INVALIDDATA;
}
slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
if (slice_type != s->pict_type) {
av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
return AVERROR_INVALIDDATA;
}
if (s->width != r->si.width || s->height != r->si.height) {
av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
return AVERROR_INVALIDDATA;
}
r->si.end = end;
@@ -1628,10 +1578,6 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
memset(&r->si, 0, sizeof(r->si));
/* necessary since it is it the condition checked for in decode_slice
* to call MPV_frame_start. cmp. comment at the end of decode_frame */
s->current_picture_ptr = NULL;
return 0;
}

@@ -1641,8 +1587,33 @@ static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
}

static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
{
RV34DecContext *r = avctx->priv_data;
MpegEncContext *s = &r->s;
int got_picture = 0;
ff_er_frame_end(s);
MPV_frame_end(s);
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict = s->current_picture_ptr->f;
got_picture = 1;
} else if (s->last_picture_ptr != NULL) {
*pict = s->last_picture_ptr->f;
got_picture = 1;
}
if (got_picture)
ff_print_debug_info(s, pict);
return got_picture;
}

int ff_rv34_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
void *data, int *got_picture_ptr,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;

@@ -1660,10 +1631,10 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
if (buf_size == 0) {
/* special case for last picture */
if (s->low_delay==0 && s->next_picture_ptr) {
*pict = *(AVFrame*)s->next_picture_ptr;
*pict = s->next_picture_ptr->f;
s->next_picture_ptr = NULL;
*data_size = sizeof(AVFrame);
*got_picture_ptr = 1;
}
return 0;
}
@@ -1680,20 +1651,95 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
if(get_slice_offset(avctx, slices_hdr, 0) < 0 ||
get_slice_offset(avctx, slices_hdr, 0) > buf_size){
av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
return -1;
return AVERROR_INVALIDDATA;
}
init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), (buf_size-get_slice_offset(avctx, slices_hdr, 0))*8);
if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
return -1;
return AVERROR_INVALIDDATA;
}
if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) &&
si.type == AV_PICTURE_TYPE_B) {
av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
"reference data.\n");
return AVERROR_INVALIDDATA;
}
if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) && si.type == AV_PICTURE_TYPE_B)
return -1;
if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
return avpkt->size;

/* first slice */
if (si.start == 0) {
if (s->mb_num_left > 0) {
av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.",
s->mb_num_left);
ff_er_frame_end(s);
MPV_frame_end(s);
}

if (s->width != si.width || s->height != si.height) {
int err;

if (HAVE_THREADS &&
(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
av_log_missing_feature(s->avctx, "Width/height changing with "
"frame threading is", 0);
return AVERROR_PATCHWELCOME;
}

av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
si.width, si.height);
MPV_common_end(s);
s->width = si.width;
s->height = si.height;
avcodec_set_dimensions(s->avctx, s->width, s->height);
if ((err = MPV_common_init(s)) < 0)
return err;
if ((err = rv34_decoder_realloc(r)) < 0)
return err;
}
s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
if (MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
if (!r->tmp_b_block_base) {
int i;

r->tmp_b_block_base = av_malloc(s->linesize * 48);
for (i = 0; i < 2; i++)
r->tmp_b_block_y[i] = r->tmp_b_block_base
+ i * 16 * s->linesize;
for (i = 0; i < 4; i++)
r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
+ (i >> 1) * 8 * s->uvlinesize
+ (i & 1) * 16;
}
r->cur_pts = si.pts;
if (s->pict_type != AV_PICTURE_TYPE_B) {
r->last_pts = r->next_pts;
r->next_pts = r->cur_pts;
} else {
int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);

if (!refdist) {
r->weight1 = r->weight2 = 8192;
} else {
r->weight1 = (dist0 << 14) / refdist;
r->weight2 = (dist1 << 14) / refdist;
}
}
s->mb_x = s->mb_y = 0;
ff_thread_finish_setup(s->avctx);
} else if (HAVE_THREADS &&
(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
"multithreading mode (start MB is %d).\n", si.start);
return AVERROR_INVALIDDATA;
}

for(i = 0; i < slice_count; i++){
int offset = get_slice_offset(avctx, slices_hdr, i);
int size;

@@ -1708,6 +1754,8 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
}

r->si.end = s->mb_width * s->mb_height;
s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;

if(i+1 < slice_count){
if (get_slice_offset(avctx, slices_hdr, i+1) < 0 ||
get_slice_offset(avctx, slices_hdr, i+1) > buf_size) {
@@ -1728,32 +1776,28 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
break;
}
last = rv34_decode_slice(r, r->si.end, buf + offset, size);
s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
if(last)
break;
}

if(last && s->current_picture_ptr){
if(r->loop_filter)
r->loop_filter(r, s->mb_height - 1);
ff_er_frame_end(s);
MPV_frame_end(s);
if (s->current_picture_ptr) {
if (last) {
if(r->loop_filter)
r->loop_filter(r, s->mb_height - 1);

if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
*got_picture_ptr = finish_frame(avctx, pict);
} else if (HAVE_THREADS &&
(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
/* always mark the current frame as finished, frame-mt supports
* only complete frames */
ff_er_frame_end(s);
MPV_frame_end(s);
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);

if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict = *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict = *(AVFrame*)s->last_picture_ptr;
return AVERROR_INVALIDDATA;
}

if(s->last_picture_ptr || s->low_delay){
*data_size = sizeof(AVFrame);
ff_print_debug_info(s, pict);
}
s->current_picture_ptr = NULL; //so we can detect if frame_end wasnt called (find some nicer solution...)
}

return avpkt->size;
}
@@ -101,7 +101,7 @@ static const uint8_t rv34_quant_to_vlc_set[2][31] = {
/**
* table for obtaining the quantizer difference
* @todo Use with modified_quant_tab from h263data.h.
* @todo Use with ff_modified_quant_tab from h263data.h.
*/
static const uint8_t rv34_dquant_tab[2][32]={
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31

@@ -80,18 +80,18 @@ static av_cold void rv40_init_tables(void)
for(i = 0; i < NUM_PTYPE_VLCS; i++){
ptype_vlc[i].table = &ptype_table[i << PTYPE_VLC_BITS];
ptype_vlc[i].table_allocated = 1 << PTYPE_VLC_BITS;
init_vlc_sparse(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
ptype_vlc_bits[i], 1, 1,
ptype_vlc_codes[i], 1, 1,
ptype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
ff_init_vlc_sparse(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
ptype_vlc_bits[i], 1, 1,
ptype_vlc_codes[i], 1, 1,
ptype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
}
for(i = 0; i < NUM_BTYPE_VLCS; i++){
btype_vlc[i].table = &btype_table[i << BTYPE_VLC_BITS];
btype_vlc[i].table_allocated = 1 << BTYPE_VLC_BITS;
init_vlc_sparse(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
btype_vlc_bits[i], 1, 1,
btype_vlc_codes[i], 1, 1,
btype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
ff_init_vlc_sparse(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
btype_vlc_bits[i], 1, 1,
btype_vlc_codes[i], 1, 1,
btype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC);
}
}
@@ -486,11 +486,13 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx)
case 29: ctx->mode = MODE_6k5; break;
case 37: ctx->mode = MODE_5k0; break;
default:
av_log(avctx, AV_LOG_ERROR, "Invalid block_align: %d\n", avctx->block_align);
if (avctx->bit_rate > 12200) ctx->mode = MODE_16k;
else if (avctx->bit_rate > 7500 ) ctx->mode = MODE_8k5;
else if (avctx->bit_rate > 5750 ) ctx->mode = MODE_6k5;
else ctx->mode = MODE_5k0;
av_log(avctx, AV_LOG_WARNING,
"Invalid block_align: %d. Mode %s guessed based on bitrate: %d\n",
avctx->block_align, modes[ctx->mode].mode_name, avctx->bit_rate);
}
av_log(avctx, AV_LOG_DEBUG, "Mode: %s\n", modes[ctx->mode].mode_name);
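The sipr hunk above downgrades an unrecognized block_align from a hard error to a warning and guesses the coding mode from the declared bitrate instead. The guess is a simple threshold ladder over bit_rate; a self-contained sketch of that mapping (hypothetical enum names, thresholds taken from the hunk):

    /* SIPR coding modes as referenced in the hunk above (names only). */
    enum ExSiprMode { EX_MODE_16k, EX_MODE_8k5, EX_MODE_6k5, EX_MODE_5k0 };

    /* Fall back to a mode guessed from the bitrate when block_align does not
     * identify one directly; the thresholds mirror the ones in the hunk. */
    static enum ExSiprMode guess_mode_from_bitrate(int bit_rate)
    {
        if (bit_rate > 12200) return EX_MODE_16k;
        if (bit_rate >  7500) return EX_MODE_8k5;
        if (bit_rate >  5750) return EX_MODE_6k5;
        return EX_MODE_5k0;
    }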
@@ -272,9 +272,9 @@ static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int
*recodes = huff.values;
if(vlc[0].table)
free_vlc(&vlc[0]);
ff_free_vlc(&vlc[0]);
if(vlc[1].table)
free_vlc(&vlc[1]);
ff_free_vlc(&vlc[1]);
av_free(tmp1.bits);
av_free(tmp1.lengths);
av_free(tmp1.values);

@@ -668,7 +668,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
}
if(bits) { //decode 16-bit data
for(i = stereo; i >= 0; i--)
pred[i] = av_bswap16(get_bits(&gb, 16));
pred[i] = sign_extend(av_bswap16(get_bits(&gb, 16)), 16);
for(i = 0; i <= stereo; i++)
*samples++ = pred[i];
for(; i < unp_size / 2; i++) {
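The smacker audio hunk treats the byte-swapped 16-bit seed as a signed value by sign-extending it, so negative initial predictors are not mangled when stored in a wider integer. A tiny portable sketch of that step, equivalent in spirit to the sign_extend() helper used in the hunk but written out so it stands alone (specialised to 16 bits):

    /* Interpret the low 16 bits of v as a two's-complement value. */
    static int sign_extend16(unsigned v)
    {
        v &= 0xFFFF;                                      /* keep 16 bits */
        return (v & 0x8000) ? (int)v - 0x10000 : (int)v;  /* e.g. 0xFFFE -> -2 */
    }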
@@ -730,7 +730,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
for(i = 0; i < 4; i++) {
if(vlc[i].table)
free_vlc(&vlc[i]);
ff_free_vlc(&vlc[i]);
av_free(h[i].bits);
av_free(h[i].lengths);
av_free(h[i].values);
@@ -385,7 +385,7 @@ mca( 8, 8,8)
av_cold int ff_snow_common_init(AVCodecContext *avctx){
SnowContext *s = avctx->priv_data;
int width, height;
int i, j;
int i, j, ret;
s->avctx= avctx;
s->max_ref_frames=1; //just make sure its not an invalid value in case of no initial keyframe

@@ -438,17 +438,22 @@ av_cold int ff_snow_common_init(AVCodecContext *avctx){
width= s->avctx->width;
height= s->avctx->height;
s->spatial_idwt_buffer= av_mallocz(width*height*sizeof(IDWTELEM));
s->spatial_dwt_buffer= av_mallocz(width*height*sizeof(DWTELEM)); //FIXME this does not belong here
FF_ALLOCZ_OR_GOTO(avctx, s->spatial_idwt_buffer, width * height * sizeof(IDWTELEM), fail);
FF_ALLOCZ_OR_GOTO(avctx, s->spatial_dwt_buffer, width * height * sizeof(DWTELEM), fail); //FIXME this does not belong here
for(i=0; i<MAX_REF_FRAMES; i++)
for(j=0; j<MAX_REF_FRAMES; j++)
scale_mv_ref[i][j] = 256*(i+1)/(j+1);
s->avctx->get_buffer(s->avctx, &s->mconly_picture);
s->scratchbuf = av_malloc(s->mconly_picture.linesize[0]*7*MB_SIZE);
if ((ret = s->avctx->get_buffer(s->avctx, &s->mconly_picture)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
FF_ALLOC_OR_GOTO(avctx, s->scratchbuf, s->mconly_picture.linesize[0]*7*MB_SIZE, fail);
return 0;
fail:
return AVERROR(ENOMEM);
}

int ff_snow_common_init_after_header(AVCodecContext *avctx) {
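The snow hunk converts unchecked av_mallocz() calls into allocate-or-goto-fail macros and starts checking the get_buffer() return value, so every allocation failure funnels into one error path. A minimal, dependency-free sketch of that init pattern in plain C (hypothetical context struct and macro, with cleanup of whatever was already allocated in the fail path):

    #include <stdlib.h>
    #include <string.h>

    typedef struct ExampleCtx {
        float *idwt_buf;
        float *dwt_buf;
        unsigned char *scratch;
    } ExampleCtx;

    /* Allocate zeroed memory or jump to the shared failure label. */
    #define ALLOCZ_OR_GOTO(ptr, size, label) do {       \
        (ptr) = calloc(1, (size));                      \
        if (!(ptr))                                     \
            goto label;                                 \
    } while (0)

    static int example_init(ExampleCtx *c, int width, int height)
    {
        memset(c, 0, sizeof(*c));
        ALLOCZ_OR_GOTO(c->idwt_buf, (size_t)width * height * sizeof(*c->idwt_buf), fail);
        ALLOCZ_OR_GOTO(c->dwt_buf,  (size_t)width * height * sizeof(*c->dwt_buf),  fail);
        ALLOCZ_OR_GOTO(c->scratch,  (size_t)width * 7,                             fail);
        return 0;
    fail:
        /* single cleanup path: release whatever was already allocated */
        free(c->idwt_buf);
        free(c->dwt_buf);
        free(c->scratch);
        memset(c, 0, sizeof(*c));
        return -1;
    }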