Compare commits
25 Commits
- c60caa5769
- 96e2a4ba74
- a9c0f905aa
- 42b7d224bc
- 45bb57e009
- ec0f77fbff
- 3aa99bed5d
- ece0c9c4b0
- 0da0d7754e
- c03491d96b
- 237958f8e8
- 8e17d28863
- 084102cd47
- 0d5e626197
- 6560e61826
- 71b3f64935
- 69a92ae397
- d723f92dbf
- 11e5106c56
- a65573c45d
- 6c28673cb3
- 5284d17562
- 01384e3dde
- 2d39d8ffc1
- 5d33142fd6
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER = 1.2.10
+PROJECT_NUMBER = 1.2.11
 
 # With the PROJECT_LOGO tag one can specify an logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
@@ -463,10 +463,10 @@ static int decode_frame(FLACContext *s)
         ret = allocate_buffers(s);
         if (ret < 0)
             return ret;
-        ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
         s->got_streaminfo = 1;
         dump_headers(s->avctx, (FLACStreaminfo *)s);
     }
+    ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
 
 //    dump_headers(s->avctx, (FLACStreaminfo *)s);
 
@@ -1043,6 +1043,7 @@ static void free_tables(H264Context *h, int free_rbsp)
         av_freep(&h->visualization_buffer[i]);
 
     if (free_rbsp) {
+        memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
         for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++)
             free_picture(h, &h->DPB[i]);
         av_freep(&h->DPB);
@@ -2431,6 +2432,16 @@ static int pred_weight_table(H264Context *h)
     h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
     if (h->sps.chroma_format_idc)
         h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
 
+    if (h->luma_log2_weight_denom > 7U) {
+        av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", h->luma_log2_weight_denom);
+        h->luma_log2_weight_denom = 0;
+    }
+    if (h->chroma_log2_weight_denom > 7U) {
+        av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", h->chroma_log2_weight_denom);
+        h->chroma_log2_weight_denom = 0;
+    }
+
     luma_def = 1 << h->luma_log2_weight_denom;
     chroma_def = 1 << h->chroma_log2_weight_denom;
@@ -255,6 +255,7 @@ typedef struct MMCO {
  * H264Context
  */
 typedef struct H264Context {
+    AVClass *av_class;
     AVCodecContext *avctx;
     VideoDSPContext vdsp;
     H264DSPContext h264dsp;
@@ -95,7 +95,7 @@ typedef struct Indeo3DecodeContext {
 
     int16_t width, height;
     uint32_t frame_num; ///< current frame number (zero-based)
-    uint32_t data_size; ///< size of the frame data in bytes
+    int data_size; ///< size of the frame data in bytes
     uint16_t frame_flags; ///< frame properties
     uint8_t cb_offset; ///< needed for selecting VQ tables
     uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary
@@ -897,7 +897,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
 {
     const uint8_t *buf_ptr = buf, *bs_hdr;
     uint32_t frame_num, word2, check_sum, data_size;
-    uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
+    int y_offset, u_offset, v_offset;
+    uint32_t starts[3], ends[3];
     uint16_t height, width;
     int i, j;
 
@@ -977,7 +978,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
     ctx->y_data_size = ends[0] - starts[0];
     ctx->v_data_size = ends[1] - starts[1];
     ctx->u_data_size = ends[2] - starts[2];
-    if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
+    if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
+        FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
         FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
         av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
         return AVERROR_INVALIDDATA;
@@ -1437,6 +1437,8 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
     }
 
     if (id == AV_RB32("LJIF")) {
+        int rgb = s->rgb;
+        int pegasus_rct = s->pegasus_rct;
         if (s->avctx->debug & FF_DEBUG_PICT_INFO)
             av_log(s->avctx, AV_LOG_INFO,
                    "Pegasus lossless jpeg header found\n");
@@ -1446,17 +1448,27 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
         skip_bits(&s->gb, 16); /* unknown always 0? */
         switch (get_bits(&s->gb, 8)) {
         case 1:
-            s->rgb = 1;
-            s->pegasus_rct = 0;
+            rgb = 1;
+            pegasus_rct = 0;
             break;
         case 2:
-            s->rgb = 1;
-            s->pegasus_rct = 1;
+            rgb = 1;
+            pegasus_rct = 1;
             break;
         default:
             av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace\n");
         }
 
         len -= 9;
+        if (s->got_picture)
+            if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
+                av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
+                goto out;
+            }
+
+        s->rgb = rgb;
+        s->pegasus_rct = pegasus_rct;
+
         goto out;
     }
@@ -189,7 +189,13 @@ static av_always_inline int cmp_inline(MpegEncContext *s, const int x, const int
     int uvdxy;  /* no, it might not be used uninitialized */
     if(dxy){
         if(qpel){
-            c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
+            if (h << size == 16) {
+                c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
+            } else if (size == 0 && h == 8) {
+                c->qpel_put[1][dxy](c->temp    , ref[0] + x + y*stride    , stride);
+                c->qpel_put[1][dxy](c->temp + 8, ref[0] + x + y*stride + 8, stride);
+            } else
+                av_assert2(0);
             if(chroma){
                 int cx= hx/2;
                 int cy= hy/2;
@@ -102,8 +102,8 @@ static const AVOption options[]={
 {"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
 {"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
 {"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
-{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
-{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
+{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
+{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
 {"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
 {"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
 {"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
@@ -563,6 +563,12 @@ static int decode_frame(AVCodecContext *avctx,
         case MKTAG('I', 'H', 'D', 'R'):
             if (length != 13)
                 goto fail;
+
+            if (s->state & PNG_IDAT) {
+                av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
+                goto fail;
+            }
+
             s->width = bytestream2_get_be32(&s->gb);
             s->height = bytestream2_get_be32(&s->gb);
             if (av_image_check_size(s->width, s->height, 0, avctx)) {
@@ -2876,6 +2876,11 @@ int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf)
     ret = av_bprint_finalize(buf, &str);
     if (ret < 0)
         return ret;
+    if (!av_bprint_is_complete(buf)) {
+        av_free(str);
+        return AVERROR(ENOMEM);
+    }
+
     avctx->extradata = str;
     /* Note: the string is NUL terminated (so extradata can be read as a
      * string), but the ending character is not accounted in the size (in
@@ -212,6 +212,8 @@ static void restore_median(uint8_t *src, int step, int stride,
         slice_height = ((((slice + 1) * height) / slices) & cmask) -
                        slice_start;
 
+        if (!slice_height)
+            continue;
         bsrc = src + slice_start * stride;
 
         // first line - left neighbour prediction
@@ -222,7 +224,7 @@ static void restore_median(uint8_t *src, int step, int stride,
             A = bsrc[i];
         }
         bsrc += stride;
-        if (slice_height == 1)
+        if (slice_height <= 1)
             continue;
         // second line - first element has top prediction, the rest uses median
         C = bsrc[-stride];
@@ -267,6 +269,8 @@ static void restore_median_il(uint8_t *src, int step, int stride,
         slice_height = ((((slice + 1) * height) / slices) & cmask) -
                        slice_start;
         slice_height >>= 1;
+        if (!slice_height)
+            continue;
 
         bsrc = src + slice_start * stride;
 
@@ -282,7 +286,7 @@ static void restore_median_il(uint8_t *src, int step, int stride,
             A = bsrc[stride + i];
         }
         bsrc += stride2;
-        if (slice_height == 1)
+        if (slice_height <= 1)
             continue;
         // second line - first element has top prediction, the rest uses median
         C = bsrc[-stride2];
@@ -343,7 +343,7 @@ static void vmd_decode(VmdVideoContext *s)
                     if (*pb++ == 0xFF)
                         len = rle_unpack(pb, pb_end - pb, len, &dp[ofs], frame_width - ofs);
                     else {
-                        if (pb_end - pb < len)
+                        if (ofs + len > frame_width || pb_end - pb < len)
                             return;
                         memcpy(&dp[ofs], pb, len);
                     }
@@ -409,6 +409,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
             offset &= ~3;
             if (offset > s->sfb_offsets[i][band - 1])
                 s->sfb_offsets[i][band++] = offset;
+
+            if (offset >= subframe_len)
+                break;
         }
         s->sfb_offsets[i][band - 1] = subframe_len;
         s->num_sfb[i] = band - 1;
@@ -216,6 +216,9 @@ int64_t avio_seek(AVIOContext *s, int64_t offset, int whence)
             return offset1;
         offset += offset1;
     }
+    if (offset < 0)
+        return AVERROR(EINVAL);
+
     offset1 = offset - pos;
     if (!s->must_flush && (!s->direct || !s->seek) &&
         offset1 >= 0 && offset1 <= (s->buf_end - s->buffer)) {
@@ -305,9 +305,10 @@ static int hls_write_trailer(struct AVFormatContext *s)
 
     av_write_trailer(oc);
     avio_closep(&oc->pb);
-    avformat_free_context(oc);
     av_free(hls->basename);
     append_entry(hls, hls->duration);
+    avformat_free_context(oc);
+    hls->avf = NULL;
     hls_window(s, 1);
 
     free_entries(hls);
@@ -162,6 +162,7 @@ typedef struct MOVContext {
     int use_absolute_path;
     int ignore_editlist;
     int64_t next_root_atom; ///< offset of the next root atom
+    int atom_depth;
 } MOVContext;
 
 int ff_mp4_read_descr_len(AVIOContext *pb);
@@ -1362,13 +1362,17 @@ static void matroska_execute_seekhead(MatroskaDemuxContext *matroska)
     EbmlList *seekhead_list = &matroska->seekhead;
     int64_t before_pos = avio_tell(matroska->ctx->pb);
     int i;
+    int nb_elem;
 
     // we should not do any seeking in the streaming case
     if (!matroska->ctx->pb->seekable ||
         (matroska->ctx->flags & AVFMT_FLAG_IGNIDX))
         return;
 
-    for (i = 0; i < seekhead_list->nb_elem; i++) {
+    // do not read entries that are added while parsing seekhead entries
+    nb_elem = seekhead_list->nb_elem;
+
+    for (i = 0; i < nb_elem; i++) {
         MatroskaSeekhead *seekhead = seekhead_list->elem;
         if (seekhead[i].pos <= before_pos)
             continue;
@@ -2788,6 +2788,12 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
     MOVAtom a;
     int i;
 
+    if (c->atom_depth > 10) {
+        av_log(c->fc, AV_LOG_ERROR, "Atoms too deeply nested\n");
+        return AVERROR_INVALIDDATA;
+    }
+    c->atom_depth ++;
+
     if (atom.size < 0)
         atom.size = INT64_MAX;
     while (total_size + 8 <= atom.size && !url_feof(pb)) {
@@ -2804,6 +2810,7 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
             {
                 av_log(c->fc, AV_LOG_ERROR, "Broken file, trak/mdat not at top-level\n");
                 avio_skip(pb, -8);
+                c->atom_depth --;
                 return 0;
             }
         }
@@ -2840,13 +2847,16 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
             int64_t start_pos = avio_tell(pb);
             int64_t left;
             int err = parse(c, pb, a);
-            if (err < 0)
+            if (err < 0) {
+                c->atom_depth --;
                 return err;
+            }
             if (c->found_moov && c->found_mdat &&
                 ((!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX) ||
                  start_pos + a.size == avio_size(pb))) {
                 if (!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX)
                     c->next_root_atom = start_pos + a.size;
+                c->atom_depth --;
                 return 0;
             }
             left = a.size - avio_tell(pb) + start_pos;
@@ -2864,6 +2874,7 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
     if (total_size < atom.size && atom.size < 0x7ffff)
         avio_skip(pb, atom.size - total_size);
 
+    c->atom_depth --;
     return 0;
 }
 
@@ -309,6 +309,9 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
     int64_t codec_pos;
     int ret;
 
+    if (codec_data_size < 0)
+        return AVERROR_INVALIDDATA;
+
     avpriv_set_pts_info(st, 64, 1, 1000);
     codec_pos = avio_tell(pb);
     v = avio_rb32(pb);
@@ -76,8 +76,12 @@ static int process(
     AudioData *src, int src_size, int *consumed){
     size_t idone, odone;
     soxr_error_t error = soxr_set_error((soxr_t)c, soxr_set_num_channels((soxr_t)c, src->ch_count));
-    error = soxr_process((soxr_t)c, src->ch, (size_t)src_size,
-            &idone, dst->ch, (size_t)dst_size, &odone);
+    if (!error)
+        error = soxr_process((soxr_t)c, src->ch, (size_t)src_size,
+                &idone, dst->ch, (size_t)dst_size, &odone);
+    else
+        idone = 0;
+
     *consumed = (int)idone;
     return error? -1 : odone;
 }
@@ -37,7 +37,7 @@
 
 #define STR(s) AV_TOSTRING(s) // AV_STRINGIFY is too long
 
-#define YUVRGB_TABLE_HEADROOM 128
+#define YUVRGB_TABLE_HEADROOM 256
 
 #define FAST_BGR2YV12 // use 7-bit instead of 15-bit coefficients
 
@@ -1616,6 +1616,16 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
 {
     int y;
     const x86_reg chromWidth= width>>1;
+
+    if (height > 2) {
+        rgb24toyv12_c(src, ydst, udst, vdst, width, 2, lumStride, chromStride, srcStride);
+        src += 2*srcStride;
+        ydst += 2*lumStride;
+        udst += chromStride;
+        vdst += chromStride;
+        height -= 2;
+    }
+
     for (y=0; y<height-2; y+=2) {
         int i;
         for (i=0; i<2; i++) {
@@ -1864,6 +1874,7 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
     for (h=0; h < height; h++) {
         int w;
 
+        if (width >= 16)
 #if COMPILE_TEMPLATE_SSE2
         __asm__(
             "xor %%"REG_a", %%"REG_a" \n\t"