Compare commits
123 Commits
n1.2.8...release/1.
Commits (SHA1):
17788711ac 30c644afeb 0cdeacf41b 928473a187 43919b6922 bd8d8f57e8
581bfd9c6f 1218e9d73d 2515907a45 7e7772c13a ac8ef33f9a 4de40be507
64e50b2f2a 5b4a79ee02 d9618b964b 763c753348 4ef5605fc9 3508aa1427
42d9a7010f fd6a9fcd42 20be3ea442 70ff376e83 49dd89f902 bdc4abd694
5d7320be52 7881d3b2a5 5f2902bdc7 ef3687998f ba5e66cf65 0e79fd1ec9
9939d98b2c abfa416181 237cf31a57 051b47dc4e 71bdcd66e1 a86cc0cc1b
b00a3bf332 08fe58ef32 8a3a3e3154 5410d6d26f 529488afa3 e81236d1a5
54a9f64d42 b6351f9978 07f634f948 931f4313b2 fd4c80177b 2dd1d0c60b
fa8d407c1f 8a21620c26 c60caa5769 96e2a4ba74 a9c0f905aa 42b7d224bc
45bb57e009 ec0f77fbff 3aa99bed5d ece0c9c4b0 0da0d7754e c03491d96b
237958f8e8 8e17d28863 084102cd47 0d5e626197 6560e61826 71b3f64935
69a92ae397 d723f92dbf 11e5106c56 a65573c45d 6c28673cb3 5284d17562
01384e3dde 2d39d8ffc1 5d33142fd6 7e8b4506e0 96dac432f7 b92ccfefc3
6e45a99a7c d59804bafb 35cf24eee9 341cf9ed81 c36f5df34a 23b3fe00bb
42669252e8 25a9823eb4 a8fd50bb4e 9282c96071 aa9d705871 7ddf252c7e
1abb0e5639 e5cf4d16c6 e0465d6fcf 65889b62b3 7fc97160c2 c4f0f3c52d
4865948d2e 891376d261 24a5cd720d 75f5fe165c 665f70209a 12c8e4021c
f56095c4d7 76601e4ab8 e90d620cb9 0ffa44340f 0bd9b78fb6 997bf49b1c
e9d85860d0 2144ce08c1 e28c27e25f 9b9048a3dd 70dc7bb893 b9b97900c1
d49b57fe0f 0f1882b9b2 0fc229450f 6f02de4ee8 248a2fca90 1482253790
0f578ea705 48443598ed 30ffb80dca
@@ -14,7 +14,6 @@ and related discussions.
Project Leader
==============
Michael Niedermayer
final design decisions

@@ -65,7 +65,7 @@ struct SwsContext *sws_opts;
AVDictionary *swr_opts;
AVDictionary *format_opts, *codec_opts, *resample_opts;
const int this_year = 2014;
const int this_year = 2015;
static FILE *report_file;
configure (vendored)

@@ -3750,6 +3750,7 @@ EOF
fi
check_ldflags -Wl,--as-needed
check_ldflags -Wl,-z,noexecstack
if check_func dlopen; then
ldl=
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 1.2.8
PROJECT_NUMBER = 1.2.12
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
ffmpeg.c

@@ -2092,7 +2092,7 @@ static int transcode_init(void)
AVCodecContext *codec;
OutputStream *ost;
InputStream *ist;
char error[1024];
char error[1024] = {0};
int want_sdp = 1;
/* init framerate emulation */
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int size;
union {
uint64_t u64;
uint8_t u8[8];
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
} tmp;
tmp.u64 = av_be2ne64(state);

@@ -147,7 +147,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int err;
union {
uint64_t u64;
uint8_t u8[8];
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
} tmp = { av_be2ne64(state) };
AC3HeaderInfo hdr;
GetBitContext gbc;

@@ -260,7 +260,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
energy_cpl = energy[blk][CPL_CH][bnd];
energy_ch = energy[blk][ch][bnd];
blk1 = blk+1;
while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
if (s->blocks[blk1].cpl_in_use) {
energy_cpl += energy[blk1][CPL_CH][bnd];
energy_ch += energy[blk1][ch][bnd];
@@ -134,7 +134,7 @@ static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip,
const uint8_t *eod = (data + size);
uint32_t flag, mask;
uint8_t *cb0, *cb1, *cb2, *cb3;
unsigned int x, y;
int x, y;
char *ip0, *ip1, *ip2, *ip3;
flag = 0;

@@ -171,6 +171,10 @@ static inline int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_c
{
int ret = 1;
while (!dirac_get_arith_bit(c, follow_ctx)) {
if (ret >= 0x40000000) {
av_log(NULL, AV_LOG_ERROR, "dirac_get_arith_uint overflow\n");
return -1;
}
ret <<= 1;
ret += dirac_get_arith_bit(c, data_ctx);
follow_ctx = ff_dirac_next_ctx[follow_ctx];
@@ -598,10 +598,10 @@ static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b
top = 0;
for (cb_y = 0; cb_y < cb_height; cb_y++) {
bottom = (b->height * (cb_y+1)) / cb_height;
bottom = (b->height * (cb_y+1LL)) / cb_height;
left = 0;
for (cb_x = 0; cb_x < cb_width; cb_x++) {
right = (b->width * (cb_x+1)) / cb_width;
right = (b->width * (cb_x+1LL)) / cb_width;
codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
left = right;
}

@@ -990,8 +990,8 @@ static int dirac_unpack_idwt_params(DiracContext *s)
/* Codeblock parameters (core syntax only) */
if (get_bits1(gb)) {
for (i = 0; i <= s->wavelet_depth; i++) {
CHECKEDREAD(s->codeblock[i].width , tmp < 1, "codeblock width invalid\n")
CHECKEDREAD(s->codeblock[i].height, tmp < 1, "codeblock height invalid\n")
CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
}
CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
@@ -39,6 +39,7 @@ typedef struct DNXHDContext {
GetBitContext gb;
int64_t cid; ///< compression id
unsigned int width, height;
enum AVPixelFormat pix_fmt;
unsigned int mb_width, mb_height;
uint32_t mb_scan_index[68]; /* max for 1080p */
int cur_field; ///< current interlaced field

@@ -135,7 +136,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
av_dlog(ctx->avctx, "width %d, height %d\n", ctx->width, ctx->height);
if (buf[0x21] & 0x40) {
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
ctx->pix_fmt = AV_PIX_FMT_YUV422P10;
ctx->avctx->bits_per_raw_sample = 10;
if (ctx->bit_depth != 10) {
ff_dsputil_init(&ctx->dsp, ctx->avctx);

@@ -143,7 +144,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
ctx->decode_dct_block = dnxhd_decode_dct_block_10;
}
} else {
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
ctx->pix_fmt = AV_PIX_FMT_YUV422P;
ctx->avctx->bits_per_raw_sample = 8;
if (ctx->bit_depth != 8) {
ff_dsputil_init(&ctx->dsp, ctx->avctx);

@@ -381,9 +382,15 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
avctx->width, avctx->height, ctx->width, ctx->height);
first_field = 1;
}
if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
first_field = 1;
}
if (av_image_check_size(ctx->width, ctx->height, 0, avctx))
return -1;
avctx->pix_fmt = ctx->pix_fmt;
avcodec_set_dimensions(avctx, ctx->width, ctx->height);
if (first_field) {
@@ -99,6 +99,12 @@ static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
int x, y, len, color;
uint8_t *d;
if (start >= buf_size)
return -1;
if (w <= 0 || h <= 0)
return -1;
bit_len = (buf_size - start) * 8;
init_get_bits(&gb, buf + start, bit_len);

@@ -340,10 +346,12 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect));
sub_header->num_rects = 1;
sub_header->rects[0]->pict.data[0] = bitmap;
decode_rle(bitmap, w * 2, w, (h + 1) / 2,
buf, offset1, buf_size, is_8bit);
decode_rle(bitmap + w, w * 2, w, h / 2,
buf, offset2, buf_size, is_8bit);
if (decode_rle(bitmap, w * 2, w, (h + 1) / 2,
buf, offset1, buf_size, is_8bit) < 0)
goto fail;
if (decode_rle(bitmap + w, w * 2, w, h / 2,
buf, offset2, buf_size, is_8bit) < 0)
goto fail;
sub_header->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
if (is_8bit) {
if (yuv_palette == 0)
@@ -308,6 +308,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_PAL8;
if (avctx->width%4 || avctx->height%4) {
av_log(avctx, AV_LOG_ERROR, "dimensions are not a multiple of 4");
return AVERROR_INVALIDDATA;
}
avcodec_get_frame_defaults(&c->pic);
avcodec_get_frame_defaults(&c->prev);
@@ -655,7 +655,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
handle_error:
*poutbuf = NULL;
*poutbuf_size = 0;
return read_end - buf;
return buf_size ? read_end - buf : 0;
}
static int flac_parse_init(AVCodecParserContext *c)

@@ -463,10 +463,10 @@ static int decode_frame(FLACContext *s)
ret = allocate_buffers(s);
if (ret < 0)
return ret;
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
s->got_streaminfo = 1;
dump_headers(s->avctx, (FLACStreaminfo *)s);
}
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
// dump_headers(s->avctx, (FLACStreaminfo *)s);
@@ -253,26 +253,21 @@ static int gif_read_image(GifState *s)
case 1:
y1 += 8;
ptr += linesize * 8;
if (y1 >= height) {
y1 = pass ? 2 : 4;
ptr = ptr1 + linesize * y1;
pass++;
}
break;
case 2:
y1 += 4;
ptr += linesize * 4;
if (y1 >= height) {
y1 = 1;
ptr = ptr1 + linesize;
pass++;
}
break;
case 3:
y1 += 2;
ptr += linesize * 2;
break;
}
while (y1 >= height) {
y1 = 4 >> pass;
ptr = ptr1 + linesize * y1;
pass++;
}
} else {
ptr += linesize;
}
@@ -417,18 +417,18 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
if ((h->left_samples_available & 0x8080) != 0x8080) {
mode = left[mode];
if (is_chroma && (h->left_samples_available & 0x8080)) {
// mad cow disease mode, aka MBAFF + constrained_intra_pred
mode = ALZHEIMER_DC_L0T_PRED8x8 +
(!(h->left_samples_available & 0x8000)) +
2 * (mode == DC_128_PRED8x8);
}
if (mode < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"left block unavailable for requested intra mode at %d %d\n",
h->mb_x, h->mb_y);
return -1;
}
if (is_chroma && (h->left_samples_available & 0x8080)) {
// mad cow disease mode, aka MBAFF + constrained_intra_pred
mode = ALZHEIMER_DC_L0T_PRED8x8 +
(!(h->left_samples_available & 0x8000)) +
2 * (mode == DC_128_PRED8x8);
}
}
return mode;

@@ -1043,6 +1043,7 @@ static void free_tables(H264Context *h, int free_rbsp)
av_freep(&h->visualization_buffer[i]);
if (free_rbsp) {
memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++)
free_picture(h, &h->DPB[i]);
av_freep(&h->DPB);

@@ -1600,6 +1601,17 @@ static int decode_update_thread_context(AVCodecContext *dst,
memset(&h->me, 0, sizeof(h->me));
h->avctx = dst;
h->DPB = NULL;
h->intra4x4_pred_mode= NULL;
h->non_zero_count = NULL;
h->slice_table_base = NULL;
h->slice_table = NULL;
h->cbp_table = NULL;
h->chroma_pred_mode_table = NULL;
memset(h->mvd_table, 0, sizeof(h->mvd_table));
h->direct_table = NULL;
h->list_counts = NULL;
h->mb2b_xy = NULL;
h->mb2br_xy = NULL;
if (h1->context_initialized) {
h->context_initialized = 0;
@@ -2420,6 +2432,16 @@ static int pred_weight_table(H264Context *h)
h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
if (h->sps.chroma_format_idc)
h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
if (h->luma_log2_weight_denom > 7U) {
av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", h->luma_log2_weight_denom);
h->luma_log2_weight_denom = 0;
}
if (h->chroma_log2_weight_denom > 7U) {
av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", h->chroma_log2_weight_denom);
h->chroma_log2_weight_denom = 0;
}
luma_def = 1 << h->luma_log2_weight_denom;
chroma_def = 1 << h->chroma_log2_weight_denom;

@@ -3125,6 +3147,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
int last_pic_structure, last_pic_droppable;
int must_reinit;
int needs_reinit = 0;
int first_slice = h == h0 && !h0->current_slice;
int frame_num, picture_structure, droppable;
PPS *pps;
h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;

@@ -3178,17 +3203,26 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
pps_id);
return -1;
}
h->pps = *h0->pps_buffers[pps_id];
if (!h0->sps_buffers[h->pps.sps_id]) {
pps = h0->pps_buffers[pps_id];
if (!h0->sps_buffers[pps->sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n",
h->pps.sps_id);
return -1;
}
if (first_slice)
h->pps = *h0->pps_buffers[pps_id];
if (h->pps.sps_id != h->current_sps_id ||
h0->sps_buffers[h->pps.sps_id]->new) {
if (pps->sps_id != h->current_sps_id ||
h0->sps_buffers[pps->sps_id]->new) {
if (!first_slice) {
av_log(h->avctx, AV_LOG_ERROR,
"SPS changed in the middle of the frame\n");
return AVERROR_INVALIDDATA;
}
h0->sps_buffers[h->pps.sps_id]->new = 0;
h->current_sps_id = h->pps.sps_id;
@@ -3220,13 +3254,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|| 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
|| h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
|| h->mb_width != h->sps.mb_width
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
));
if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0)))
must_reinit = 1;
if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
must_reinit = 1;
h->mb_width = h->sps.mb_width;
h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
h->mb_num = h->mb_width * h->mb_height;

@@ -3300,37 +3336,40 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
init_dequant_tables(h);
}
h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
if (!first_slice) {
if (h0->frame_num != frame_num) {
av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
h0->frame_num, frame_num);
return AVERROR_INVALIDDATA;
}
}
h->mb_mbaff = 0;
h->mb_aff_frame = 0;
last_pic_structure = h0->picture_structure;
last_pic_droppable = h0->droppable;
h->droppable = h->nal_ref_idc == 0;
droppable = h->nal_ref_idc == 0;
if (h->sps.frame_mbs_only_flag) {
h->picture_structure = PICT_FRAME;
picture_structure = PICT_FRAME;
} else {
if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
return -1;
}
if (get_bits1(&h->gb)) { // field_pic_flag
h->picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag
picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag
} else {
h->picture_structure = PICT_FRAME;
picture_structure = PICT_FRAME;
h->mb_aff_frame = h->sps.mb_aff;
}
}
h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
if (h0->current_slice != 0) {
if (last_pic_structure != h->picture_structure ||
last_pic_droppable != h->droppable) {
if (h0->current_slice) {
if (last_pic_structure != picture_structure ||
last_pic_droppable != droppable) {
av_log(h->avctx, AV_LOG_ERROR,
"Changing field mode (%d -> %d) between slices is not allowed\n",
last_pic_structure, h->picture_structure);
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_INVALIDDATA;
} else if (!h0->cur_pic_ptr) {
av_log(h->avctx, AV_LOG_ERROR,

@@ -3338,7 +3377,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h0->current_slice + 1);
return AVERROR_INVALIDDATA;
}
} else {
}
h->picture_structure = picture_structure;
h->droppable = droppable;
h->frame_num = frame_num;
h->mb_field_decoding_flag = picture_structure != PICT_FRAME;
if (h0->current_slice == 0) {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
if (h->frame_num != h->prev_frame_num) {
@@ -255,6 +255,7 @@ typedef struct MMCO {
* H264Context
*/
typedef struct H264Context {
AVClass *av_class;
AVCodecContext *avctx;
VideoDSPContext vdsp;
H264DSPContext h264dsp;

@@ -969,6 +970,33 @@ static av_always_inline int get_dct8x8_allowed(H264Context *h)
0x0001000100010001ULL));
}
static inline int find_start_code(const uint8_t *buf, int buf_size,
int buf_index, int next_avc)
{
uint32_t state = -1;
buf_index = avpriv_mpv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;
return FFMIN(buf_index, buf_size);
}
static inline int get_avc_nalsize(H264Context *h, const uint8_t *buf,
int buf_size, int *buf_index)
{
int i, nalsize = 0;
if (*buf_index >= buf_size - h->nal_length_size)
return -1;
for (i = 0; i < h->nal_length_size; i++)
nalsize = ((unsigned)nalsize << 8) | buf[(*buf_index)++];
if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
av_log(h->avctx, AV_LOG_ERROR,
"AVC: nal size %d\n", nalsize);
return -1;
}
return nalsize;
}
void ff_h264_draw_horiz_band(H264Context *h, int y, int height);
#endif /* AVCODEC_H264_H */
@@ -1278,7 +1278,7 @@ void ff_h264_init_cabac_states(H264Context *h) {
}
static int decode_cabac_field_decoding_flag(H264Context *h) {
const long mbb_xy = h->mb_xy - 2L*h->mb_stride;
const int mbb_xy = h->mb_xy - 2*h->mb_stride;
unsigned long ctx = 0;

@@ -150,7 +150,7 @@ pps:
buf += ctx->length_size;
unit_type = *buf & 0x1f;
if (buf + nal_size > buf_end || nal_size < 0)
if (nal_size > buf_end - buf || nal_size < 0)
goto fail;
/* prepend only to the first type 5 NAL unit of an IDR picture */
@@ -139,10 +139,10 @@ found:
*/
static inline int parse_nal_units(AVCodecParserContext *s,
AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
const uint8_t * const buf, int buf_size)
{
H264Context *h = s->priv_data;
const uint8_t *buf_end = buf + buf_size;
int buf_index, next_avc;
unsigned int pps_id;
unsigned int slice_type;
int state = -1;

@@ -162,26 +162,26 @@ static inline int parse_nal_units(AVCodecParserContext *s,
if (!buf_size)
return 0;
buf_index = 0;
next_avc = h->is_avc ? 0 : buf_size;
for(;;) {
int src_length, dst_length, consumed, nalsize = 0;
if (h->is_avc) {
int i;
if (h->nal_length_size >= buf_end - buf) break;
nalsize = 0;
for (i = 0; i < h->nal_length_size; i++)
nalsize = (nalsize << 8) | *buf++;
if (nalsize <= 0 || nalsize > buf_end - buf) {
av_log(h->avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
if (buf_index >= next_avc) {
nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
if (nalsize < 0)
break;
}
src_length = nalsize;
next_avc = buf_index + nalsize;
} else {
buf = avpriv_mpv_find_start_code(buf, buf_end, &state);
if(buf >= buf_end)
break;
--buf;
src_length = buf_end - buf;
buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
if (buf_index >= buf_size)
break;
if (buf_index >= next_avc)
continue;
}
src_length = next_avc - buf_index;
state = buf[buf_index];
switch (state & 0x1f) {
case NAL_SLICE:
case NAL_IDR_SLICE:

@@ -190,10 +190,12 @@ static inline int parse_nal_units(AVCodecParserContext *s,
src_length = 20;
break;
}
ptr= ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length);
ptr= ff_h264_decode_nal(h, buf + buf_index, &dst_length, &consumed, src_length);
if (ptr==NULL || dst_length < 0)
break;
buf_index += consumed;
init_get_bits(&h->gb, ptr, 8*dst_length);
switch(h->nal_unit_type) {
case NAL_SPS:

@@ -277,7 +279,6 @@ static inline int parse_nal_units(AVCodecParserContext *s,
return 0; /* no need to evaluate the rest */
}
buf += h->is_avc ? nalsize : consumed;
}
if (q264)
return 0;
@@ -379,7 +379,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
}
sps->bit_depth_luma = get_ue_golomb(&h->gb) + 8;
sps->bit_depth_chroma = get_ue_golomb(&h->gb) + 8;
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U || sps->bit_depth_luma != sps->bit_depth_chroma) {
if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14 ||
sps->bit_depth_luma != sps->bit_depth_chroma) {
av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
sps->bit_depth_luma, sps->bit_depth_chroma);
goto fail;
@@ -95,7 +95,7 @@ typedef struct Indeo3DecodeContext {
int16_t width, height;
uint32_t frame_num; ///< current frame number (zero-based)
uint32_t data_size; ///< size of the frame data in bytes
int data_size; ///< size of the frame data in bytes
uint16_t frame_flags; ///< frame properties
uint8_t cb_offset; ///< needed for selecting VQ tables
uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary

@@ -897,7 +897,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
{
const uint8_t *buf_ptr = buf, *bs_hdr;
uint32_t frame_num, word2, check_sum, data_size;
uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
int y_offset, u_offset, v_offset;
uint32_t starts[3], ends[3];
uint16_t height, width;
int i, j;

@@ -977,7 +978,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
ctx->y_data_size = ends[0] - starts[0];
ctx->v_data_size = ends[1] - starts[1];
ctx->u_data_size = ends[2] - starts[2];
if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
return AVERROR_INVALIDDATA;
@@ -207,6 +207,11 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *
x += stride;
}
if (x >= w) {
av_log(NULL, AV_LOG_ERROR, "run overflow\n");
return;
}
/* decode run termination value */
Rb = R(last, x);
RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;

@@ -98,8 +98,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
return ret;
}
WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
(const WebRtc_UWord16*) buf, &s->decoder, 1);
WebRtcIlbcfix_DecodeImpl((int16_t *) frame->data[0], (const uint16_t *) buf, &s->decoder, 1);
*got_frame_ptr = 1;

@@ -185,7 +184,7 @@ static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
return ret;
WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);
WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);
avpkt->size = s->encoder.no_of_bytes;
*got_packet_ptr = 1;
@@ -211,7 +211,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
{
int len, nb_components, i, width, height, pix_fmt_id;
int len, nb_components, i, width, height, bits, pix_fmt_id;
int h_count[MAX_COMPONENTS];
int v_count[MAX_COMPONENTS];

@@ -220,14 +220,14 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
s->bits = get_bits(&s->gb, 8);
bits = get_bits(&s->gb, 8);
if (s->pegasus_rct)
s->bits = 9;
if (s->bits == 9 && !s->pegasus_rct)
bits = 9;
if (bits == 9 && !s->pegasus_rct)
s->rct = 1; // FIXME ugly
if (s->bits != 8 && !s->lossless) {
if (bits != 8 && !s->lossless) {
av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n");
return -1;
}

@@ -258,7 +258,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
return AVERROR_INVALIDDATA;
}
}
if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
if (s->ls && !(bits <= 8 || nb_components == 1)) {
av_log_missing_feature(s->avctx,
"For JPEG-LS anything except <= 8 bits/component"
" or 16-bit gray", 0);

@@ -301,12 +301,14 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
/* if different size, realloc/alloc picture */
if ( width != s->width || height != s->height
|| bits != s->bits
|| memcmp(s->h_count, h_count, sizeof(h_count))
|| memcmp(s->v_count, v_count, sizeof(v_count))) {
av_freep(&s->qscale_table);
s->width = width;
s->height = height;
s->bits = bits;
memcpy(s->h_count, h_count, sizeof(h_count));
memcpy(s->v_count, v_count, sizeof(v_count));
s->interlaced = 0;

@@ -435,9 +437,12 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
}
if (s->ls) {
s->upscale_h = s->upscale_v = 0;
if (s->nb_components > 1)
if (s->nb_components == 3) {
s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
else if (s->bits <= 8)
} else if (s->nb_components != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
return AVERROR_PATCHWELCOME;
} else if (s->bits <= 8)
s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
else
s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
@@ -1078,13 +1083,18 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
if (s->interlaced && s->bottom_field)
block_offset += linesize[c] >> 1;
ptr = data[c] + block_offset;
if ( 8*(h * mb_x + x) < s->width
&& 8*(v * mb_y + y) < s->height) {
ptr = data[c] + block_offset;
} else
ptr = NULL;
if (!s->progressive) {
if (copy_mb)
mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
linesize[c], s->avctx->lowres);
if (copy_mb) {
if (ptr)
mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
linesize[c], s->avctx->lowres);
else {
} else {
s->dsp.clear_block(s->block);
if (decode_block(s, s->block, i,
s->dc_index[i], s->ac_index[i],

@@ -1093,7 +1103,9 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
"error y=%d x=%d\n", mb_y, mb_x);
return AVERROR_INVALIDDATA;
}
s->dsp.idct_put(ptr, linesize[c], s->block);
if (ptr) {
s->dsp.idct_put(ptr, linesize[c], s->block);
}
}
} else {
int block_idx = s->block_stride[c] * (v * mb_y + y) +

@@ -1435,6 +1447,8 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
}
if (id == AV_RB32("LJIF")) {
int rgb = s->rgb;
int pegasus_rct = s->pegasus_rct;
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(s->avctx, AV_LOG_INFO,
"Pegasus lossless jpeg header found\n");

@@ -1444,17 +1458,27 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
skip_bits(&s->gb, 16); /* unknown always 0? */
switch (get_bits(&s->gb, 8)) {
case 1:
s->rgb = 1;
s->pegasus_rct = 0;
rgb = 1;
pegasus_rct = 0;
break;
case 2:
s->rgb = 1;
s->pegasus_rct = 1;
rgb = 1;
pegasus_rct = 1;
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace\n");
}
len -= 9;
if (s->got_picture)
if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
goto out;
}
s->rgb = rgb;
s->pegasus_rct = pegasus_rct;
goto out;
}

@@ -1617,6 +1641,10 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
put_bits(&pb, 8, x);
if (x == 0xFF) {
x = src[b++];
if (x & 0x80) {
av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
x &= 0x7f;
}
put_bits(&pb, 7, x);
bit_count--;
}
@@ -109,7 +109,7 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
if (color) {
memset(s->frame.data[0] + y*s->frame.linesize[0] + x, color, run_length);
if (half_vert)
if (half_vert && y + half_vert < s->avctx->height)
memset(s->frame.data[0] + (y+1)*s->frame.linesize[0] + x, color, run_length);
}
x+= run_length;

@@ -189,7 +189,13 @@ static av_always_inline int cmp_inline(MpegEncContext *s, const int x, const int
int uvdxy; /* no, it might not be used uninitialized */
if(dxy){
if(qpel){
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
if (h << size == 16) {
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
} else if (size == 0 && h == 8) {
c->qpel_put[1][dxy](c->temp , ref[0] + x + y*stride , stride);
c->qpel_put[1][dxy](c->temp + 8, ref[0] + x + y*stride + 8, stride);
} else
av_assert2(0);
if(chroma){
int cx= hx/2;
int cy= hy/2;
@@ -1667,9 +1667,11 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
uint32_t header;
int ret;
int skipped = 0;
while(buf_size && !*buf){
buf++;
buf_size--;
skipped++;
}
if (buf_size < HEADER_SIZE)

@@ -1724,7 +1726,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
return ret;
}
s->frame_size = 0;
return buf_size;
return buf_size + skipped;
}
static void mp_flush(MPADecodeContext *ctx)
@@ -1086,6 +1086,9 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
int i, err = 0;
if (!s->context_initialized)
return AVERROR(EINVAL);
if (s->slice_context_count > 1) {
for (i = 0; i < s->slice_context_count; i++) {
free_duplicate_context(s->thread_context[i]);

@@ -1114,8 +1117,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
s->mb_height = (s->height + 15) / 16;
if ((s->width || s->height) &&
av_image_check_size(s->width, s->height, 0, s->avctx))
return AVERROR_INVALIDDATA;
(err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
goto fail;
if ((err = init_context_frame(s)))
goto fail;

@@ -1131,7 +1134,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
}
for (i = 0; i < nb_slices; i++) {
if (init_duplicate_context(s->thread_context[i]) < 0)
if ((err = init_duplicate_context(s->thread_context[i])) < 0)
goto fail;
s->thread_context[i]->start_mb_y =
(s->mb_height * (i) + nb_slices / 2) / nb_slices;
@@ -412,18 +412,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
switch(avctx->codec_id) {
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
break;
case AV_CODEC_ID_MPEG4:
case AV_CODEC_ID_MSMPEG4V1:
case AV_CODEC_ID_MSMPEG4V2:
case AV_CODEC_ID_MSMPEG4V3:
if (avctx->rc_max_rate >= 15000000) {
avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
} else if(avctx->rc_max_rate >= 2000000) {
avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
} else if(avctx->rc_max_rate >= 384000) {
avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
} else
avctx->rc_buffer_size = 40;
avctx->rc_buffer_size *= 16384;
@@ -102,8 +102,8 @@ static const AVOption options[]={
{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
{"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
{"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
@@ -563,6 +563,12 @@ static int decode_frame(AVCodecContext *avctx,
case MKTAG('I', 'H', 'D', 'R'):
if (length != 13)
goto fail;
if (s->state & PNG_IDAT) {
av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
goto fail;
}
s->width = bytestream2_get_be32(&s->gb);
s->height = bytestream2_get_be32(&s->gb);
if (av_image_check_size(s->width, s->height, 0, avctx)) {

@@ -631,7 +637,7 @@ static int decode_frame(AVCodecContext *avctx,
} else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
s->color_type == PNG_COLOR_TYPE_PALETTE) {
avctx->pix_fmt = AV_PIX_FMT_PAL8;
} else if (s->bit_depth == 1) {
} else if (s->bit_depth == 1 && s->bits_per_pixel == 1) {
avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
} else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {

@@ -842,9 +848,10 @@ static int decode_frame(AVCodecContext *avctx,
int i, j;
uint8_t *pd = s->current_picture->data[0];
uint8_t *pd_last = s->last_picture->data[0];
int ls = FFMIN(av_image_get_linesize(s->current_picture->format, s->width, 0), s->width * s->bpp);
for (j = 0; j < s->height; j++) {
for (i = 0; i < s->width * s->bpp; i++) {
for (i = 0; i < ls; i++) {
pd[i] += pd_last[i];
}
pd += s->image_linesize;
@@ -163,7 +163,7 @@ static void qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
/* check motion vector */
if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
(height - me_y - me_h < 0) || (height - me_y > orig_height) ||
(height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
(filled + me_w > width) || (height - me_h < 0))
av_log(NULL, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
me_x, me_y, me_w, me_h, filled, height);

@@ -34,7 +34,6 @@
#include "celp_filters.h"
#include "ra144.h"
static av_cold int ra144_encode_close(AVCodecContext *avctx)
{
RA144Context *ractx = avctx->priv_data;
@@ -69,7 +69,7 @@ typedef struct SmcContext {
row_ptr += stride * 4; \
} \
total_blocks--; \
if (total_blocks < 0) \
if (total_blocks < 0 + !!n_blocks) \
{ \
av_log(s->avctx, AV_LOG_INFO, "warning: block counter just went negative (this should not happen)\n"); \
return; \
@@ -652,7 +652,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
if(v){
v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
if ((uint16_t)v != v) {
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
v = 1;
}
xc->x=x;
(xc++)->coeff= v;
}

@@ -662,6 +665,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
else run= INT_MAX;
v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
if ((uint16_t)v != v) {
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
v = 1;
}
xc->x=x;
(xc++)->coeff= v;
@@ -497,7 +497,7 @@ static int svq1_decode_delta_block(AVCodecContext *avctx, DSPContext *dsp,
return result;
}
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t out[257])
{
uint8_t seed;
int i;

@@ -509,6 +509,7 @@ static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
out[i] = get_bits(bitbuf, 8) ^ seed;
seed = string_table[out[i] ^ seed];
}
out[i] = 0;
}
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)

@@ -551,12 +552,12 @@ static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
}
if ((s->frame_code ^ 0x10) >= 0x50) {
uint8_t msg[256];
uint8_t msg[257];
svq1_parse_string(bitbuf, msg);
av_log(avctx, AV_LOG_INFO,
"embedded message: \"%s\"\n", (char *)msg);
"embedded message: \"%s\"\n", ((char *)msg) + 1);
}
skip_bits(bitbuf, 2);
@@ -716,13 +716,13 @@ static int tiff_decode_tag(TiffContext *s)
s->height = value;
break;
case TIFF_BPP:
s->bppcount = count;
if (count > 4) {
if (count > 4U) {
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, %d components)\n",
s->bpp, count);
value, count);
return AVERROR_INVALIDDATA;
}
s->bppcount = count;
if (count == 1)
s->bpp = value;
else {

@@ -743,6 +743,13 @@ static int tiff_decode_tag(TiffContext *s)
s->bpp = -1;
}
}
if (s->bpp > 64U) {
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, %d components)\n",
s->bpp, count);
s->bpp = 0;
return AVERROR_INVALIDDATA;
}
break;
case TIFF_SAMPLES_PER_PIXEL:
if (count != 1) {
@@ -193,6 +193,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
int i;
int w_align = 1;
int h_align = 1;
AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
if (desc) {
w_align = 1 << desc->log2_chroma_w;
h_align = 1 << desc->log2_chroma_h;
}
switch (s->pix_fmt) {
case AV_PIX_FMT_YUV420P:

@@ -244,6 +250,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
case AV_PIX_FMT_GBRP12BE:
case AV_PIX_FMT_GBRP14LE:
case AV_PIX_FMT_GBRP14BE:
case AV_PIX_FMT_GBRP16LE:
case AV_PIX_FMT_GBRP16BE:
w_align = 16; //FIXME assume 16 pixel per macroblock
h_align = 16 * 2; // interlaced needs 2 macroblocks height
break;

@@ -272,6 +280,10 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
w_align = 4;
h_align = 4;
}
if (s->codec_id == AV_CODEC_ID_JV) {
w_align = 8;
h_align = 8;
}
break;
case AV_PIX_FMT_BGR24:
if ((s->codec_id == AV_CODEC_ID_MSZH) ||

@@ -287,8 +299,6 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
}
break;
default:
w_align = 1;
h_align = 1;
break;
}

@@ -2866,6 +2876,11 @@ int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf)
ret = av_bprint_finalize(buf, &str);
if (ret < 0)
return ret;
if (!av_bprint_is_complete(buf)) {
av_free(str);
return AVERROR(ENOMEM);
}
avctx->extradata = str;
/* Note: the string is NUL terminated (so extradata can be read as a
* string), but the ending character is not accounted in the size (in
@@ -212,6 +212,8 @@ static void restore_median(uint8_t *src, int step, int stride,
slice_height = ((((slice + 1) * height) / slices) & cmask) -
slice_start;
if (!slice_height)
continue;
bsrc = src + slice_start * stride;
// first line - left neighbour prediction

@@ -222,7 +224,7 @@ static void restore_median(uint8_t *src, int step, int stride,
A = bsrc[i];
}
bsrc += stride;
if (slice_height == 1)
if (slice_height <= 1)
continue;
// second line - first element has top prediction, the rest uses median
C = bsrc[-stride];

@@ -267,6 +269,8 @@ static void restore_median_il(uint8_t *src, int step, int stride,
slice_height = ((((slice + 1) * height) / slices) & cmask) -
slice_start;
slice_height >>= 1;
if (!slice_height)
continue;
bsrc = src + slice_start * stride;

@@ -282,7 +286,7 @@ static void restore_median_il(uint8_t *src, int step, int stride,
A = bsrc[stride + i];
}
bsrc += stride2;
if (slice_height == 1)
if (slice_height <= 1)
continue;
// second line - first element has top prediction, the rest uses median
C = bsrc[-stride2];
@@ -343,7 +343,7 @@ static void vmd_decode(VmdVideoContext *s)
if (*pb++ == 0xFF)
len = rle_unpack(pb, pb_end - pb, len, &dp[ofs], frame_width - ofs);
else {
if (pb_end - pb < len)
if (ofs + len > frame_width || pb_end - pb < len)
return;
memcpy(&dp[ofs], pb, len);
}

@@ -409,6 +409,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
offset &= ~3;
if (offset > s->sfb_offsets[i][band - 1])
s->sfb_offsets[i][band++] = offset;
if (offset >= subframe_len)
break;
}
s->sfb_offsets[i][band - 1] = subframe_len;
s->num_sfb[i] = band - 1;
@@ -61,6 +61,9 @@ cglobal scalarproduct_int16, 3,3,3, v1, v2, order
%endif
paddd m2, m0
movd eax, m2
%if mmsize == 8
emms
%endif
RET
; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)

@@ -135,8 +135,11 @@ WEIGHT_FUNC_HALF_MM 8, 8
add off_regd, 1
or off_regd, 1
add r4, 1
cmp r6d, 128
je .nonnormal
cmp r5, 128
jne .normal
.nonnormal
sar r5, 1
sar r6, 1
sar off_regd, 1
@@ -3,7 +3,7 @@
* Copyright (c) 2002 Steve O'Hara-Smith
* based on
* Linux video grab interface
* Copyright (c) 2000,2001 Gerard Lantau
* Copyright (c) 2000, 2001 Fabrice Bellard
* and
* simple_grab.c Copyright (c) 1999 Roger Hardiman
*

@@ -27,8 +27,8 @@ SECTION .text
%if lut_bits != 8
sar %1q, 8-lut_bits
%endif
movsx %1d, word [%3q+%1q*2]
add %1d, %2d
movsx %1q, word [%3q+%1q*2]
add %1q, %2q
%endmacro
%macro LOAD 3 ; dstreg, x, bitdepth
@@ -51,8 +51,10 @@ static int ape_tag_read_field(AVFormatContext *s)
av_log(s, AV_LOG_WARNING, "Invalid APE tag key '%s'.\n", key);
return -1;
}
if (size >= UINT_MAX)
return -1;
if (size > INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
av_log(s, AV_LOG_ERROR, "APE tag size too large.\n");
return AVERROR_INVALIDDATA;
}
if (flags & APE_TAG_FLAG_IS_BINARY) {
uint8_t filename[1024];
enum AVCodecID id;

@@ -1309,7 +1309,8 @@ static int avi_read_idx1(AVFormatContext *s, int size)
ast = st->priv_data;
if (first_packet && first_packet_pos) {
data_offset = first_packet_pos - pos;
if (avi->movi_list + 4 != pos || pos + 500 > first_packet_pos)
data_offset = first_packet_pos - pos;
first_packet = 0;
}
pos += data_offset;
@@ -216,6 +216,9 @@ int64_t avio_seek(AVIOContext *s, int64_t offset, int whence)
return offset1;
offset += offset1;
}
if (offset < 0)
return AVERROR(EINVAL);
offset1 = offset - pos;
if (!s->must_flush && (!s->direct || !s->seek) &&
offset1 >= 0 && offset1 <= (s->buf_end - s->buffer)) {

@@ -127,6 +127,8 @@ static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt)
height = AV_RB16(&cdxl->header[16]);
palette_size = AV_RB16(&cdxl->header[20]);
audio_size = AV_RB16(&cdxl->header[22]);
if (FFALIGN(width, 16) * (uint64_t)height * cdxl->header[19] > INT_MAX)
return AVERROR_INVALIDDATA;
image_size = FFALIGN(width, 16) * height * cdxl->header[19] / 8;
video_size = palette_size + image_size;
@@ -333,7 +333,7 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream, AVStream *vst
FLVContext *flv = s->priv_data;
AVIOContext *ioc;
AMFDataType amf_type;
char str_val[256];
char str_val[1024];
double num_val;
num_val = 0;

@@ -305,9 +305,10 @@ static int hls_write_trailer(struct AVFormatContext *s)
av_write_trailer(oc);
avio_closep(&oc->pb);
avformat_free_context(oc);
av_free(hls->basename);
append_entry(hls, hls->duration);
avformat_free_context(oc);
hls->avf = NULL;
hls_window(s, 1);
free_entries(hls);
@@ -162,6 +162,7 @@ typedef struct MOVContext {
int use_absolute_path;
int ignore_editlist;
int64_t next_root_atom; ///< offset of the next root atom
int atom_depth;
} MOVContext;
int ff_mp4_read_descr_len(AVIOContext *pb);

@@ -33,13 +33,15 @@ static int mpeg4video_probe(AVProbeData *probe_packet)
for(i=0; i<probe_packet->buf_size; i++){
temp_buffer = (temp_buffer<<8) + probe_packet->buf[i];
if ((temp_buffer & 0xffffff00) != 0x100)
if (temp_buffer & 0xfffffe00)
continue;
if (temp_buffer < 2)
continue;
if (temp_buffer == VOP_START_CODE) VOP++;
else if (temp_buffer == VISUAL_OBJECT_START_CODE) VISO++;
else if (temp_buffer < 0x120) VO++;
else if (temp_buffer < 0x130) VOL++;
else if (temp_buffer >= 0x100 && temp_buffer < 0x120) VO++;
else if (temp_buffer >= 0x120 && temp_buffer < 0x130) VOL++;
else if ( !(0x1AF < temp_buffer && temp_buffer < 0x1B7)
&& !(0x1B9 < temp_buffer && temp_buffer < 0x1C4)) res++;
}
@@ -991,7 +991,7 @@ static void ebml_free(EbmlSyntax *syntax, void *data)
char *ptr = list->elem;
for (j=0; j<list->nb_elem; j++, ptr+=syntax[i].list_elem_size)
ebml_free(syntax[i].def.n, ptr);
av_free(list->elem);
av_freep(&list->elem);
} else
ebml_free(syntax[i].def.n, data_off);
default: break;

@@ -1362,13 +1362,17 @@ static void matroska_execute_seekhead(MatroskaDemuxContext *matroska)
EbmlList *seekhead_list = &matroska->seekhead;
int64_t before_pos = avio_tell(matroska->ctx->pb);
int i;
int nb_elem;
// we should not do any seeking in the streaming case
if (!matroska->ctx->pb->seekable ||
(matroska->ctx->flags & AVFMT_FLAG_IGNIDX))
return;
for (i = 0; i < seekhead_list->nb_elem; i++) {
// do not read entries that are added while parsing seekhead entries
nb_elem = seekhead_list->nb_elem;
for (i = 0; i < nb_elem; i++) {
MatroskaSeekhead *seekhead = seekhead_list->elem;
if (seekhead[i].pos <= before_pos)
continue;

@@ -1785,7 +1789,8 @@ static int matroska_read_header(AVFormatContext *s)
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
1000000000, track->default_duration, 30000);
#if FF_API_R_FRAME_RATE
if (st->avg_frame_rate.num < st->avg_frame_rate.den * 1000L)
if ( st->avg_frame_rate.num < st->avg_frame_rate.den * 1000LL
&& st->avg_frame_rate.num > st->avg_frame_rate.den * 5LL)
st->r_frame_rate = st->avg_frame_rate;
#endif
}

@@ -1886,7 +1891,7 @@ static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
{
if (matroska->num_packets > 0) {
memcpy(pkt, matroska->packets[0], sizeof(AVPacket));
av_free(matroska->packets[0]);
av_freep(&matroska->packets[0]);
if (matroska->num_packets > 1) {
void *newpackets;
memmove(&matroska->packets[0], &matroska->packets[1],

@@ -1916,7 +1921,7 @@ static void matroska_clear_queue(MatroskaDemuxContext *matroska)
int n;
for (n = 0; n < matroska->num_packets; n++) {
av_free_packet(matroska->packets[n]);
av_free(matroska->packets[n]);
av_freep(&matroska->packets[n]);
}
av_freep(&matroska->packets);
matroska->num_packets = 0;

@@ -2509,7 +2514,7 @@ static int matroska_read_close(AVFormatContext *s)
for (n=0; n < matroska->tracks.nb_elem; n++)
if (tracks[n].type == MATROSKA_TRACK_TYPE_AUDIO)
av_free(tracks[n].audio.buf);
av_freep(&tracks[n].audio.buf);
ebml_free(matroska_cluster, &matroska->current_cluster);
ebml_free(matroska_segment, matroska);
@@ -281,7 +281,11 @@ static int mov_read_covr(MOVContext *c, AVIOContext *pb, int type, int len)
 static int mov_metadata_raw(MOVContext *c, AVIOContext *pb,
                             unsigned len, const char *key)
 {
-    char *value = av_malloc(len + 1);
+    char *value;
+    // Check for overflow.
+    if (len >= INT_MAX)
+        return AVERROR(EINVAL);
+    value = av_malloc(len + 1);
     if (!value)
         return AVERROR(ENOMEM);
     avio_read(pb, value, len);
@@ -385,7 +389,7 @@ static int mov_read_udta_string(MOVContext *c, AVIOContext *pb, MOVAtom atom)

     if (!key)
         return 0;
-    if (atom.size < 0)
+    if (atom.size < 0 || str_size >= INT_MAX/2)
         return AVERROR_INVALIDDATA;

     str_size = FFMIN3(sizeof(str)-1, str_size, atom.size);
@@ -1162,6 +1166,10 @@ static int mov_read_stco(MOVContext *c, AVIOContext *pb, MOVAtom atom)
     if (entries >= UINT_MAX/sizeof(int64_t))
         return AVERROR_INVALIDDATA;

+    if (sc->chunk_offsets)
+        av_log(c->fc, AV_LOG_WARNING, "Duplicate STCO atom\n");
+    av_free(sc->chunk_offsets);
+    sc->chunk_count = 0;
     sc->chunk_offsets = av_malloc(entries * sizeof(int64_t));
     if (!sc->chunk_offsets)
         return AVERROR(ENOMEM);
@@ -1607,6 +1615,10 @@ static int mov_read_stsc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
         return 0;
     if (entries >= UINT_MAX / sizeof(*sc->stsc_data))
         return AVERROR_INVALIDDATA;
+    if (sc->stsc_data)
+        av_log(c->fc, AV_LOG_WARNING, "Duplicate STSC atom\n");
+    av_free(sc->stsc_data);
+    sc->stsc_count = 0;
     sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data));
     if (!sc->stsc_data)
         return AVERROR(ENOMEM);
@@ -2788,6 +2800,12 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
     MOVAtom a;
     int i;

+    if (c->atom_depth > 10) {
+        av_log(c->fc, AV_LOG_ERROR, "Atoms too deeply nested\n");
+        return AVERROR_INVALIDDATA;
+    }
+    c->atom_depth ++;
+
     if (atom.size < 0)
         atom.size = INT64_MAX;
     while (total_size + 8 <= atom.size && !url_feof(pb)) {
@@ -2804,11 +2822,12 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
             {
                 av_log(c->fc, AV_LOG_ERROR, "Broken file, trak/mdat not at top-level\n");
                 avio_skip(pb, -8);
+                c->atom_depth --;
                 return 0;
             }
         }
         total_size += 8;
-        if (a.size == 1) { /* 64 bit extended size */
+        if (a.size == 1 && total_size + 8 <= atom.size) { /* 64 bit extended size */
             a.size = avio_rb64(pb) - 8;
             total_size += 8;
         }
@@ -2840,13 +2859,16 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
             int64_t start_pos = avio_tell(pb);
             int64_t left;
             int err = parse(c, pb, a);
-            if (err < 0)
+            if (err < 0) {
+                c->atom_depth --;
                 return err;
+            }
             if (c->found_moov && c->found_mdat &&
                 ((!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX) ||
                  start_pos + a.size == avio_size(pb))) {
                 if (!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX)
                     c->next_root_atom = start_pos + a.size;
+                c->atom_depth --;
                 return 0;
             }
             left = a.size - avio_tell(pb) + start_pos;
@@ -2864,6 +2886,7 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
     if (total_size < atom.size && atom.size < 0x7ffff)
         avio_skip(pb, atom.size - total_size);

+    c->atom_depth --;
     return 0;
 }

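A minimal standalone sketch of the two allocation guards added above: rejecting lengths where "len + 1" could wrap, and counts where "entries * sizeof(...)" could wrap, before the allocation is attempted. Plain malloc stands in for av_malloc and the names are invented:

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

/* reject sizes where "len + 1" could overflow before allocating */
static char *alloc_string(unsigned len)
{
    if (len >= INT_MAX)
        return NULL;
    return malloc(len + 1);
}

/* reject counts where "entries * sizeof(int64_t)" could overflow */
static int64_t *alloc_offsets(unsigned entries)
{
    if (entries >= UINT_MAX / sizeof(int64_t))
        return NULL;
    return malloc(entries * sizeof(int64_t));
}

int main(void)
{
    free(alloc_string(16));
    free(alloc_offsets(8));
    return 0;
}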
@@ -1894,7 +1894,8 @@ static int mov_write_mvhd_tag(AVIOContext *pb, MOVMuxContext *mov)
     }

     version = max_track_len < UINT32_MAX ? 0 : 1;
-    (version == 1) ? avio_wb32(pb, 120) : avio_wb32(pb, 108); /* size */
+    avio_wb32(pb, version == 1 ? 120 : 108); /* size */
+
     ffio_wfourcc(pb, "mvhd");
     avio_w8(pb, version);
     avio_wb24(pb, 0); /* flags */

@@ -57,7 +57,7 @@ typedef struct {

 static inline int64_t bs_get_v(const uint8_t **bs)
 {
-    int64_t v = 0;
+    uint64_t v = 0;
     int br = 0;
     int c;

@@ -91,7 +91,7 @@ static int mpc8_probe(AVProbeData *p)
         size = bs_get_v(&bs);
         if (size < 2)
             return 0;
-        if (bs + size - 2 >= bs_end)
+        if (size >= bs_end - bs + 2)
             return AVPROBE_SCORE_MAX / 4 - 1; //seems to be valid MPC but no header yet
         if (header_found) {
             if (size < 11 || size > 28)
@@ -108,7 +108,7 @@ static int mpc8_probe(AVProbeData *p)

 static inline int64_t gb_get_v(GetBitContext *gb)
 {
-    int64_t v = 0;
+    uint64_t v = 0;
     int bits = 0;
     while(get_bits1(gb) && bits < 64-7){
         v <<= 7;
@@ -216,6 +216,10 @@ static int mpc8_read_header(AVFormatContext *s)
     while(!url_feof(pb)){
         pos = avio_tell(pb);
         mpc8_get_chunk_header(pb, &tag, &size);
+        if (size < 0) {
+            av_log(s, AV_LOG_ERROR, "Invalid chunk length\n");
+            return AVERROR_INVALIDDATA;
+        }
         if(tag == TAG_STREAMHDR)
             break;
         mpc8_handle_chunk(s, tag, pos, size);

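The probe fix above rewrites "bs + size - 2 >= bs_end" as "size >= bs_end - bs + 2", so the bound is checked on byte counts instead of on a pointer sum that can overflow. A standalone sketch of the same idea, with invented names:

#include <stdint.h>

/* Return 1 if reading "want" bytes starting at p would run past end.
 * Comparing against the remaining byte count avoids forming the
 * possibly-overflowing pointer value p + want.  Assumes p <= end. */
static int out_of_bounds(const uint8_t *p, const uint8_t *end, uint64_t want)
{
    return want > (uint64_t)(end - p);
}

int main(void)
{
    uint8_t buf[8];
    /* asking for 9 bytes from an 8-byte buffer is rejected */
    return out_of_bounds(buf, buf + sizeof(buf), 9) ? 0 : 1;
}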
@@ -91,7 +91,7 @@ static int mpegps_probe(AVProbeData *p)
         score= AVPROBE_SCORE_MAX/4;

     if(sys>invalid && sys*9 <= pspack*10)
-        return (audio > 12 || vid > 3 || pspack > 2) ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; // +1 for .mpg
+        return (audio > 12 || vid > 3 || pspack > 2) ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4+1; // +1 for .mpg
     if(pspack > invalid && (priv1+vid+audio)*10 >= pspack*9)
         return pspack > 2 ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; // +1 for .mpg
     if((!!vid ^ !!audio) && (audio > 4 || vid > 1) && !sys && !pspack && p->buf_size>2048 && vid + audio > invalid) /* PES stream */

@@ -1695,7 +1695,7 @@ static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
                 break;
             desc_len = get8(&p, desc_list_end);
             desc_end = p + desc_len;
-            if (desc_end > desc_list_end)
+            if (desc_len < 0 || desc_end > desc_list_end)
                 break;

             av_dlog(ts->stream, "tag: 0x%02x len=%d\n",

@@ -256,7 +256,7 @@ static int ogg_buffer_data(AVFormatContext *s, AVStream *st,
         if (i == total_segments)
             page->granule = granule;

-        if (!header) {
+        {
             AVStream *st = s->streams[page->stream_index];

             int64_t start = av_rescale_q(page->start_granule, st->time_base,
@@ -264,10 +264,13 @@ static int ogg_buffer_data(AVFormatContext *s, AVStream *st,
             int64_t next = av_rescale_q(page->granule, st->time_base,
                                         AV_TIME_BASE_Q);

-            if (page->segments_count == 255 ||
-                (ogg->pref_size > 0 && page->size >= ogg->pref_size) ||
-                (ogg->pref_duration > 0 && next - start >= ogg->pref_duration)) {
+            if (page->segments_count == 255) {
                 ogg_buffer_page(s, oggstream);
+            } else if (!header) {
+                if ((ogg->pref_size > 0 && page->size >= ogg->pref_size) ||
+                    (ogg->pref_duration > 0 && next - start >= ogg->pref_duration)) {
+                    ogg_buffer_page(s, oggstream);
+                }
             }
         }
     }

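The restructured condition above always flushes once a page reaches 255 segments (the most an Ogg page header can describe) and only applies the size/duration preferences to non-header pages. A compact sketch of that decision logic, with invented names and only the size preference shown:

#include <stdint.h>
#include <stdio.h>

static int should_flush_page(int segments, int is_header,
                             int64_t page_size, int64_t pref_size)
{
    if (segments == 255)   /* hard format limit, always flush */
        return 1;
    if (is_header)         /* do not apply the preferences to header pages */
        return 0;
    return pref_size > 0 && page_size >= pref_size;
}

int main(void)
{
    /* a data page under the segment limit, 60 kB with a 48 kB preference */
    printf("flush: %d\n", should_flush_page(200, 0, 60000, 48000));
    return 0;
}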
@@ -309,6 +309,9 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
     int64_t codec_pos;
     int ret;

+    if (codec_data_size < 0)
+        return AVERROR_INVALIDDATA;
+
     avpriv_set_pts_info(st, 64, 1, 1000);
     codec_pos = avio_tell(pb);
     v = avio_rb32(pb);
@@ -386,7 +389,11 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
 skip:
     /* skip codec info */
     size = avio_tell(pb) - codec_pos;
-    avio_skip(pb, codec_data_size - size);
+    if (codec_data_size >= size) {
+        avio_skip(pb, codec_data_size - size);
+    } else {
+        av_log(s, AV_LOG_WARNING, "codec_data_size %u < size %d\n", codec_data_size, size);
+    }

     return 0;
 }

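A standalone sketch of the skip logic above: only skip forward by the unread remainder of a declared block, and warn instead of seeking backwards when the parser has already consumed more than was declared. The reader type and names are invented for the example:

#include <stdio.h>
#include <stdint.h>

typedef struct { const uint8_t *buf; int64_t pos, size; } Reader;

static void skip_rest(Reader *r, int64_t block_start, int64_t declared_size)
{
    int64_t consumed = r->pos - block_start;

    if (declared_size >= consumed)
        r->pos += declared_size - consumed;   /* stands in for avio_skip() */
    else
        fprintf(stderr, "declared %lld < consumed %lld\n",
                (long long)declared_size, (long long)consumed);
}

int main(void)
{
    Reader r = { NULL, 10, 100 };
    skip_rest(&r, 4, 8);                      /* consumed 6 of 8 -> skip 2 */
    printf("pos=%lld\n", (long long)r.pos);
    return 0;
}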
@@ -82,6 +82,7 @@ static int h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
         avio_close_dyn_buf(data->buf, &p);
         av_free(p);
         data->buf = NULL;
+        data->endbyte_bits = 0;
     }

     if (len < 4) {

@@ -717,12 +717,6 @@ fail:
     if (pkt->stream_index == seg->reference_stream_index)
         seg->frame_count++;

-    if (ret < 0) {
-        if (seg->list)
-            avio_close(seg->list_pb);
-        avformat_free_context(oc);
-    }
-
     return ret;
 }

@@ -309,7 +309,7 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
             uint8_t *tmpbuf;

             size = avio_rl32(s->pb) - 4;
-            if(size + 4L > frame_size)
+            if(size + 4LL > frame_size)
                 return AVERROR_INVALIDDATA;
             frame_size -= size;
             frame_size -= 4;

@@ -283,6 +283,7 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
             const int bmp_fmt = avio_r8(pb);
             const int width = avio_rl16(pb);
             const int height = avio_rl16(pb);
+            int pix_fmt;

             len -= 2+1+2+2;

@@ -347,17 +348,21 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 avpriv_set_pts_info(vst, 64, 256, swf->frame_rate);
                 st = vst;
             }
-            st->codec->width = width;
-            st->codec->height = height;

             if ((res = av_new_packet(pkt, out_len - colormapsize * colormapbpp)) < 0)
                 goto bitmap_end;
+            if (!st->codec->width && !st->codec->height) {
+                st->codec->width = width;
+                st->codec->height = height;
+            } else {
+                ff_add_param_change(pkt, 0, 0, 0, width, height);
+            }
             pkt->pos = pos;
             pkt->stream_index = st->index;

             switch (bmp_fmt) {
             case 3:
-                st->codec->pix_fmt = AV_PIX_FMT_PAL8;
+                pix_fmt = AV_PIX_FMT_PAL8;
                 for (i = 0; i < colormapsize; i++)
                     if (alpha_bmp) colormap[i] = buf[3]<<24 | AV_RB24(buf + 4*i);
                     else           colormap[i] = 0xffU <<24 | AV_RB24(buf + 3*i);
@@ -369,14 +374,18 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 memcpy(pal, colormap, AVPALETTE_SIZE);
                 break;
             case 4:
-                st->codec->pix_fmt = AV_PIX_FMT_RGB555;
+                pix_fmt = AV_PIX_FMT_RGB555;
                 break;
             case 5:
-                st->codec->pix_fmt = alpha_bmp ? AV_PIX_FMT_ARGB : AV_PIX_FMT_0RGB;
+                pix_fmt = alpha_bmp ? AV_PIX_FMT_ARGB : AV_PIX_FMT_0RGB;
                 break;
             default:
                 av_assert0(0);
             }
+            if (st->codec->pix_fmt != AV_PIX_FMT_NONE && st->codec->pix_fmt != pix_fmt) {
+                av_log(s, AV_LOG_ERROR, "pixel format change unsupported\n");
+            }else
+                st->codec->pix_fmt = pix_fmt;

             if (linesize * height > pkt->size) {
                 res = AVERROR_INVALIDDATA;

@@ -180,6 +180,8 @@ static int thp_read_packet(AVFormatContext *s,
             pkt->stream_index = thp->video_stream_index;
     } else {
         ret = av_get_packet(pb, pkt, thp->audiosize);
+        if (ret < 0)
+            return ret;
         if (ret != thp->audiosize) {
             av_free_packet(pkt);
             return AVERROR(EIO);

@@ -96,8 +96,10 @@ static int tta_read_header(AVFormatContext *s)

     for (i = 0; i < c->totalframes; i++) {
         uint32_t size = avio_rl32(s->pb);
-        av_add_index_entry(st, framepos, i * c->frame_size, size, 0,
-                           AVINDEX_KEYFRAME);
+        int r;
+        if ((r = av_add_index_entry(st, framepos, i * c->frame_size, size, 0,
+                                    AVINDEX_KEYFRAME)) < 0)
+            return r;
         framepos += size;
     }
     avio_skip(s->pb, 4); // seektable crc
@@ -135,6 +137,11 @@ static int tta_read_packet(AVFormatContext *s, AVPacket *pkt)
     if (c->currentframe >= c->totalframes)
         return AVERROR_EOF;

+    if (st->nb_index_entries < c->totalframes) {
+        av_log(s, AV_LOG_ERROR, "Index entry disappeared\n");
+        return AVERROR_INVALIDDATA;
+    }
+
     size = st->index_entries[c->currentframe].size;

     ret = av_get_packet(s->pb, pkt, size);

@@ -2675,8 +2675,8 @@ static int get_std_framerate(int i){
  * And there are "variable" fps files this needs to detect as well.
  */
 static int tb_unreliable(AVCodecContext *c){
-    if(   c->time_base.den >= 101L*c->time_base.num
-       || c->time_base.den <    5L*c->time_base.num
+    if(   c->time_base.den >= 101LL*c->time_base.num
+       || c->time_base.den <    5LL*c->time_base.num
 /*       || c->codec_tag == AV_RL32("DIVX")
        || c->codec_tag == AV_RL32("XVID")*/
        || c->codec_tag == AV_RL32("mp4v")

@@ -74,8 +74,8 @@ static int read_number(const AVOption *o, void *dst, double *num, int *den, int6
 {
     switch (o->type) {
     case AV_OPT_TYPE_FLAGS:     *intnum = *(unsigned int*)dst;return 0;
-    case AV_OPT_TYPE_PIXEL_FMT:
-    case AV_OPT_TYPE_SAMPLE_FMT:
+    case AV_OPT_TYPE_PIXEL_FMT: *intnum = *(enum AVPixelFormat *)dst;return 0;
+    case AV_OPT_TYPE_SAMPLE_FMT:*intnum = *(enum AVSampleFormat*)dst;return 0;
     case AV_OPT_TYPE_INT:       *intnum = *(int *)dst;return 0;
     case AV_OPT_TYPE_INT64:     *intnum = *(int64_t *)dst;return 0;
     case AV_OPT_TYPE_FLOAT:     *num    = *(float *)dst;return 0;
@@ -97,9 +97,9 @@ static int write_number(void *obj, const AVOption *o, void *dst, double num, int
     }

     switch (o->type) {
+    case AV_OPT_TYPE_PIXEL_FMT: *(enum AVPixelFormat *)dst = llrint(num/den) * intnum; break;
+    case AV_OPT_TYPE_SAMPLE_FMT:*(enum AVSampleFormat*)dst = llrint(num/den) * intnum; break;
     case AV_OPT_TYPE_FLAGS:
-    case AV_OPT_TYPE_PIXEL_FMT:
-    case AV_OPT_TYPE_SAMPLE_FMT:
     case AV_OPT_TYPE_INT:       *(int *)dst= llrint(num/den)*intnum; break;
     case AV_OPT_TYPE_INT64:     *(int64_t *)dst= llrint(num/den)*intnum; break;
     case AV_OPT_TYPE_FLOAT:     *(float *)dst= num*intnum/den; break;
@@ -149,6 +149,8 @@ static int set_string_binary(void *obj, const AVOption *o, const char *val, uint
     len /= 2;

     ptr = bin = av_malloc(len);
+    if (!ptr)
+        return AVERROR(ENOMEM);
     while (*val) {
         int a = hexchar2int(*val++);
         int b = hexchar2int(*val++);

@@ -44,7 +44,7 @@
         "cpuid                 \n\t"                                    \
         "xchg   %%"REG_b", %%"REG_S                                     \
         : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx)                \
-        : "0" (index))
+        : "0" (index), "2"(0))

 #define xgetbv(index, eax, edx)                                         \
     __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))

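The extra "2"(0) constraint above pins ECX to 0 before CPUID runs; several CPUID leaves are sub-leaf indexed through ECX, so leaving it uninitialized returns unpredictable data. A standalone x86-only sketch under the assumption of a GCC/Clang toolchain that ships <cpuid.h> (the __cpuid_count macro sets both the leaf and the sub-leaf explicitly):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned eax, ebx, ecx, edx;

    __cpuid_count(7, 0, eax, ebx, ecx, edx);   /* leaf 7, sub-leaf 0 */
    printf("AVX2 supported: %s\n", (ebx & (1u << 5)) ? "yes" : "no");
    return 0;
}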
@@ -975,7 +975,7 @@ void pp_postprocess(const uint8_t * src[3], const int srcStride[3],

     if(pict_type & PP_PICT_TYPE_QP2){
         int i;
-        const int count= mbHeight * absQPStride;
+        const int count= FFMAX(mbHeight * absQPStride, mbWidth);
         for(i=0; i<(count>>2); i++){
             ((uint32_t*)c->stdQPTable)[i] = (((const uint32_t*)QP_store)[i]>>1) & 0x7F7F7F7F;
         }
@@ -1000,7 +1000,7 @@ void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
         if((pict_type&7)!=3){
             if (QPStride >= 0){
                 int i;
-                const int count= mbHeight * QPStride;
+                const int count= FFMAX(mbHeight * QPStride, mbWidth);
                 for(i=0; i<(count>>2); i++){
                     ((uint32_t*)c->nonBQPTable)[i] = ((const uint32_t*)QP_store)[i] & 0x3F3F3F3F;
                 }

@@ -84,13 +84,13 @@ int swri_dither_init(SwrContext *s, enum AVSampleFormat out_fmt, enum AVSampleFo
     in_fmt = av_get_packed_sample_fmt( in_fmt);

     if(in_fmt == AV_SAMPLE_FMT_FLT || in_fmt == AV_SAMPLE_FMT_DBL){
-        if(out_fmt == AV_SAMPLE_FMT_S32) scale = 1.0/(1L<<31);
-        if(out_fmt == AV_SAMPLE_FMT_S16) scale = 1.0/(1L<<15);
-        if(out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1.0/(1L<< 7);
+        if(out_fmt == AV_SAMPLE_FMT_S32) scale = 1.0/(1LL<<31);
+        if(out_fmt == AV_SAMPLE_FMT_S16) scale = 1.0/(1LL<<15);
+        if(out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1.0/(1LL<< 7);
     }
-    if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_S16) scale = 1L<<16;
-    if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1L<<24;
-    if(in_fmt == AV_SAMPLE_FMT_S16 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1L<<8;
+    if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_S16) scale = 1<<16;
+    if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1<<24;
+    if(in_fmt == AV_SAMPLE_FMT_S16 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1<<8;

     scale *= s->dither.scale;

|
||||
AudioData *src, int src_size, int *consumed){
|
||||
size_t idone, odone;
|
||||
soxr_error_t error = soxr_set_error((soxr_t)c, soxr_set_num_channels((soxr_t)c, src->ch_count));
|
||||
error = soxr_process((soxr_t)c, src->ch, (size_t)src_size,
|
||||
&idone, dst->ch, (size_t)dst_size, &odone);
|
||||
if (!error)
|
||||
error = soxr_process((soxr_t)c, src->ch, (size_t)src_size,
|
||||
&idone, dst->ch, (size_t)dst_size, &odone);
|
||||
else
|
||||
idone = 0;
|
||||
|
||||
*consumed = (int)idone;
|
||||
return error? -1 : odone;
|
||||
}
|
||||
|
@@ -751,6 +751,8 @@ int swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_coun
|
||||
in_count = 0;
|
||||
if(ret>0) {
|
||||
s->drop_output -= ret;
|
||||
if (!s->drop_output && !out_arg)
|
||||
return 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@@ -37,7 +37,7 @@

 #define STR(s) AV_TOSTRING(s) // AV_STRINGIFY is too long

-#define YUVRGB_TABLE_HEADROOM 128
+#define YUVRGB_TABLE_HEADROOM 256

 #define FAST_BGR2YV12 // use 7-bit instead of 15-bit coefficients

@@ -571,14 +571,15 @@ static int initFilter(int16_t **outFilter, int32_t **filterPos,
         }

         if ((*filterPos)[i] + filterSize > srcW) {
-            int shift = (*filterPos)[i] + filterSize - srcW;
+            int shift = (*filterPos)[i] + FFMIN(filterSize - srcW, 0);
+
             // move filter coefficients right to compensate for filterPos
             for (j = filterSize - 2; j >= 0; j--) {
                 int right = FFMIN(j + shift, filterSize - 1);
                 filter[i * filterSize + right] += filter[i * filterSize + j];
                 filter[i * filterSize + j] = 0;
             }
-            (*filterPos)[i]= srcW - filterSize;
+            (*filterPos)[i]-= shift;
         }
     }

@@ -1616,6 +1616,16 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
 {
     int y;
     const x86_reg chromWidth= width>>1;
+
+    if (height > 2) {
+        rgb24toyv12_c(src, ydst, udst, vdst, width, 2, lumStride, chromStride, srcStride);
+        src += 2*srcStride;
+        ydst += 2*lumStride;
+        udst += chromStride;
+        vdst += chromStride;
+        height -= 2;
+    }
+
     for (y=0; y<height-2; y+=2) {
         int i;
         for (i=0; i<2; i++) {
@@ -1864,6 +1874,7 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
     for (h=0; h < height; h++) {
         int w;

+        if (width >= 16)
 #if COMPILE_TEMPLATE_SSE2
         __asm__(
             "xor %%"REG_a", %%"REG_a"   \n\t"

@@ -782,9 +782,13 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],

     av_free(c->yuvTable);

+#define ALLOC_YUV_TABLE(x)          \
+        c->yuvTable = av_malloc(x); \
+        if (!c->yuvTable)           \
+            return AVERROR(ENOMEM);
     switch (bpp) {
     case 1:
-        c->yuvTable = av_malloc(1024);
+        ALLOC_YUV_TABLE(1024);
         y_table = c->yuvTable;
         yb = -(384 << 16) - oy;
         for (i = 0; i < 1024 - 110; i++) {
@@ -799,7 +803,7 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
         rbase = isRgb ? 3 : 0;
         gbase = 1;
         bbase = isRgb ? 0 : 3;
-        c->yuvTable = av_malloc(1024 * 3);
+        ALLOC_YUV_TABLE(1024 * 3);
         y_table = c->yuvTable;
         yb = -(384 << 16) - oy;
         for (i = 0; i < 1024 - 110; i++) {
@@ -818,7 +822,7 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
         rbase = isRgb ? 5 : 0;
         gbase = isRgb ? 2 : 3;
         bbase = isRgb ? 0 : 6;
-        c->yuvTable = av_malloc(1024 * 3);
+        ALLOC_YUV_TABLE(1024 * 3);
         y_table = c->yuvTable;
         yb = -(384 << 16) - oy;
         for (i = 0; i < 1024 - 38; i++) {
@@ -837,7 +841,7 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
         rbase = isRgb ? 8 : 0;
         gbase = 4;
         bbase = isRgb ? 0 : 8;
-        c->yuvTable = av_malloc(1024 * 3 * 2);
+        ALLOC_YUV_TABLE(1024 * 3 * 2);
         y_table16 = c->yuvTable;
         yb = -(384 << 16) - oy;
         for (i = 0; i < 1024; i++) {
@@ -860,7 +864,7 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
         rbase = isRgb ? bpp - 5 : 0;
         gbase = 5;
         bbase = isRgb ? 0 : (bpp - 5);
-        c->yuvTable = av_malloc(1024 * 3 * 2);
+        ALLOC_YUV_TABLE(1024 * 3 * 2);
         y_table16 = c->yuvTable;
         yb = -(384 << 16) - oy;
         for (i = 0; i < 1024; i++) {
@@ -880,7 +884,7 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
         break;
     case 24:
     case 48:
-        c->yuvTable = av_malloc(1024);
+        ALLOC_YUV_TABLE(1024);
         y_table = c->yuvTable;
         yb = -(384 << 16) - oy;
         for (i = 0; i < 1024; i++) {
@@ -901,7 +905,7 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
         needAlpha = CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat);
         if (!needAlpha)
             abase = (base + 24) & 31;
-        c->yuvTable = av_malloc(1024 * 3 * 4);
+        ALLOC_YUV_TABLE(1024 * 3 * 4);
         y_table32 = c->yuvTable;
         yb = -(384 << 16) - oy;
         for (i = 0; i < 1024; i++) {

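A standalone sketch of the ALLOC_YUV_TABLE() idea above: hide the "allocate, test, bail out with ENOMEM" boilerplate behind a macro so every branch of a large switch stays a single line. The context struct, sizes and names are invented for the example:

#include <stdlib.h>
#include <errno.h>

typedef struct { void *table; } Ctx;

#define ALLOC_TABLE(ctx, size)            \
    do {                                  \
        (ctx)->table = malloc(size);      \
        if (!(ctx)->table)                \
            return -ENOMEM;               \
    } while (0)

static int init_tables(Ctx *c, int bpp)
{
    switch (bpp) {
    case 8:  ALLOC_TABLE(c, 1024);     break;
    case 16: ALLOC_TABLE(c, 1024 * 2); break;
    default: return -EINVAL;
    }
    return 0;
}

int main(void)
{
    Ctx c = { 0 };
    int ret = init_tables(&c, 8);
    free(c.table);
    return ret;
}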
@@ -1 +1 @@
-pp3 39af1a30d0ea0e906df264773adfcaa6
+pp3 c8277ef31ab01bad51356841c9634522