Compare commits
75 Commits
n2.4.12 ... release/2.4
fcbbe36082
3709c43887
8380f62155
a49d870aac
a944744f19
9a1433683c
ac8a265be8
106e0fff2e
53f5efcae1
4f52c0a619
fa9873cce8
76de78a9db
9e44ea7c0f
0732e7b0ea
5af5932904
a9a6e4e9c1
49ae02d36f
c88fa43a3a
7142ddcf92
5d40272ba8
f8728dc834
af384c8703
e4b2c75c2a
a6ef7205e9
2a2205b051
c0df58b0e5
990abbd1c6
38369313b9
ed44b57935
70b35708b9
66aeb5467e
937f3058fa
78f9c7dd14
250e5cb71d
60bc36193e
d8cb5887c1
5c0d8a8387
f6a503c443
dd28571530
5fe8dad467
593dea80f2
8158fb129e
7ea0e525ed
0affd64b1c
41289bc853
ffda227636
8132ed4a43
fc0f08f9fb
a2966c7d1f
859a348e44
368a1803ff
ab13ba2ae8
d5b1ea8c7a
bc4332b3fc
7b6f048506
33ad09205a
46fcc2ba55
a2667c60ec
2b2943e1ef
030fed62f4
fbfb2814b2
1317c63b4b
b0a8095f2b
f0eea9cc3a
bbe1c9839b
dcecc180a6
2306964b3a
e7b09eaefa
e32095807b
465dd4bc94
7a26ea7a7e
6fc3f6f43b
7f33fef2a2
3440a9ba4f
bfebe3defe
Changelog (75 additions)
@@ -1,6 +1,81 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 2.4.13:
- mov: Add an option to toggle dref opening
- MAINTAINERS: remove unmaintained releases
- avcodec/jpeg2000dec: More completely check cdef
- avutil/opt: check for and handle errors in av_opt_set_dict2()
- avcodec/flacenc: fix calculation of bits required in case of custom sample rate
- avformat: Document urls a bit
- avformat/concat: Check protocol prefix
- doc/demuxers: Document enable_drefs and use_absolute_path
- avcodec/mjpegdec: Check for end for both bytes in unescaping
- avformat/avformat: Replace some references to filenames by urls
- avcodec/wmaenc: Check ff_wma_init() for failure
- avcodec/mpeg12enc: Move high resolution thread check to before initializing threads
- avformat/avio: Limit url option parsing to the documented cases
- avcodec/ass_split: Fix null pointer dereference in ff_ass_style_get()
- avcodec/gif: Fix lzw buffer size
- avcodec/put_bits: Assert buf_ptr in flush_put_bits()
- avcodec/tiff: Check subsample & rps values more completely
- swscale/swscale: Add some sanity checks for srcSlice* parameters
- swscale/x86/rgb2rgb_template: Fix planar2x() for short width
- swscale/swscale_unscaled: Fix odd height inputs for bayer_to_yv12_wrapper()
- swscale/swscale_unscaled: Fix odd height inputs for bayer_to_rgb24_wrapper()
- avcodec/wavpackenc: print channel count in av_log call
- configure: bump copyright year to 2016
- avformat/hls: Even stricter URL checks
- avformat/hls: More strict url checks
- swscale/yuv2rgb: Increase YUV2RGB table headroom
- swscale/yuv2rgb: Factor YUVRGB_TABLE_LUMA_HEADROOM out
- avformat/hls: forbid all protocols except http(s) & file
- avformat/aviobuf: Fix end check in put_str16()
- avformat/asfenc: Check pts
- avcodec/mpeg4video: Check time_incr
- avcodec/wavpackenc: Check the number of channels
- avcodec/wavpackenc: Headers are per channel
- avcodec/dvdec: Fix "left shift of negative value -254"
- avcodec/mjpegdec: Fix negative shift
- avcodec/mss2: Check for repeat overflow
- avformat: Add integer fps from 31 to 60 to get_std_framerate()
- avcodec/mpegvideo_enc: Clip bits_per_raw_sample within valid range
- avcodec/motion_est: Fix mv_penalty table size
- avcodec/h264_slice: Fix integer overflow in implicit weight computation
- swscale/utils: Use normal bilinear scaler if fast cannot be used due to tiny dimensions
- avcodec/put_bits: Always check buffer end before writing
- swscale/utils: Fix intermediate format for cascaded alpha downscaling
- avcodec/h264_refs: Fix long_idx check
- avfilter/vf_mpdecimate: Add missing emms_c()
- avformat/mxfenc: Do not crash if there is no packet in the first stream
- swscale/swscale-test: Fix slice height in random reference data creation.
- dca: fix misaligned access in avpriv_dca_convert_bitstream
- brstm: fix missing closing brace
- brstm: also allocate b->table in read_packet
- brstm: make sure an ADPC chunk was read for adpcm_thp
- vorbisdec: reject rangebits 0 with non-0 partitions
- vorbisdec: reject channel mapping with less than two channels
- ffmdec: reset packet_end in case of failure
- mjpegdec: extend check for incompatible values of s->rgb and s->ls
- avformat/ipmovie: put video decoding_map_size into packet and use it in decoder
- avcodec/samidec: make sure to properly restore parsing context after a tag
- x86/float_dsp: zero extend offset from ff_scalarproduct_float_sse
- avcodec/mpeg4videodec: also for empty partitioned slices
- nuv: sanitize negative fps rate
- rawdec: only exempt BIT0 with need_copy from buffer sanity check
- mlvdec: check that index_entries exist
- nutdec: reject negative value_len in read_sm_data
- xwddec: prevent overflow of lsize * avctx->height
- nutdec: only copy the header if it exists
- exr: fix out of bounds read in get_code
- on2avc: limit number of bits to 30 in get_egolomb
- sonic: make sure num_taps * channels is not larger than frame_size
- opus_silk: fix typo causing overflow in silk_stabilize_lsf
- ffm: reject invalid codec_id and codec_type
- aaccoder: prevent crash of anmr coder
- swscale/x86/rgb2rgb_template: Fallback to mmx in interleaveBytes() if the alignment is insufficient for SSE*
- swscale/x86/rgb2rgb_template: Do not crash on misaligend stride

version 2.4.12:
- avcodec/ffv1dec: Clear quant_table_count if its invalid
- avcodec/ffv1dec: Print an error if the quant table count is invalid
MAINTAINERS

@@ -533,10 +533,6 @@ x86 Michael Niedermayer
Releases
========

2.4 Michael Niedermayer
2.2 Michael Niedermayer
1.2 Michael Niedermayer

If you want to maintain an older release, please contact us
configure (2 changed lines)
@@ -5603,7 +5603,7 @@ cat > $TMPH <<EOF
#define FFMPEG_CONFIG_H
#define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
#define FFMPEG_LICENSE "$(c_escape $license)"
#define CONFIG_THIS_YEAR 2015
#define CONFIG_THIS_YEAR 2016
#define FFMPEG_DATADIR "$(eval c_escape $datadir)"
#define AVCONV_DATADIR "$(eval c_escape $datadir)"
#define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"
doc/Doxyfile

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.4.12
PROJECT_NUMBER = 2.4.13

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
doc/demuxers.texi

@@ -339,6 +339,23 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
@end example
@end itemize

@section mov/mp4/3gp/QuickTime

QuickTime / MP4 demuxer.

This demuxer accepts the following options:
@table @option
@item enable_drefs
Enable loading of external tracks, disabled by default.
Enabling this can theoretically leak information in some use cases.

@item use_absolute_path
Allows loading of external tracks via absolute paths, disabled by default.
Enabling this poses a security risk. It should only be enabled if the source
is known to be non malicious.

@end table
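For completeness, here is a minimal C sketch of passing these two demuxer options through the option dictionary when opening a file. The option names come from the table above; the helper name `open_mov_with_drefs` and the surrounding error handling are illustrative assumptions, not part of the documented API.

```c
#include <libavformat/avformat.h>

/* Hypothetical helper: open a QuickTime/MP4 file with external (dref) tracks
 * enabled. Both options default to 0 for the security reasons documented above. */
static int open_mov_with_drefs(const char *url, AVFormatContext **fmt_ctx)
{
    AVDictionary *opts = NULL;
    int ret;

    av_register_all();                               /* still required in the 2.4-era API */
    av_dict_set(&opts, "enable_drefs", "1", 0);      /* allow loading external tracks */
    av_dict_set(&opts, "use_absolute_path", "0", 0); /* keep the safer default */

    ret = avformat_open_input(fmt_ctx, url, NULL, &opts);
    av_dict_free(&opts);                             /* drop any unconsumed options */
    return ret;
}
```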
@section mpegts

MPEG-2 transport stream demuxer.
@@ -861,7 +861,7 @@ Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
Return the maximum between @var{x} and @var{y}.

@item min(x, y)
Return the maximum between @var{x} and @var{y}.
Return the minimum between @var{x} and @var{y}.

@item mod(x, y)
Compute the remainder of division of @var{x} by @var{y}.
libavcodec/aaccoder.c

@@ -691,7 +691,7 @@ static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s,
}
while (idx) {
sce->sf_idx[bandaddr[idx]] = minq + q0;
minq = paths[idx][minq].prev;
minq = FFMAX(paths[idx][minq].prev, 0);
idx--;
}
//set the same quantizers inside window groups
libavcodec/ass_split.c

@@ -470,7 +470,7 @@ ASSStyle *ff_ass_style_get(ASSSplitContext *ctx, const char *style)
if (!style || !*style)
style = "Default";
for (i=0; i<ass->styles_count; i++)
if (!strcmp(ass->styles[i].name, style))
if (ass->styles[i].name && !strcmp(ass->styles[i].name, style))
return ass->styles + i;
return NULL;
}
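As an aside, the guard added above (checking the name pointer before handing it to strcmp()) can be expressed as a tiny helper. This is purely an illustrative sketch, not code from the patch.

```c
#include <string.h>

/* Illustrative only: non-zero when name is non-NULL and equals style. */
static int style_matches(const char *name, const char *style)
{
    return name && !strcmp(name, style);
}
```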
libavcodec/dca.c

@@ -41,8 +41,6 @@ int avpriv_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
{
uint32_t mrk;
int i, tmp;
const uint16_t *ssrc = (const uint16_t *) src;
uint16_t *sdst = (uint16_t *) dst;
PutBitContext pb;

if ((unsigned) src_size > (unsigned) max_size)
@@ -54,8 +52,11 @@ int avpriv_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
memcpy(dst, src, src_size);
return src_size;
case DCA_MARKER_RAW_LE:
for (i = 0; i < (src_size + 1) >> 1; i++)
*sdst++ = av_bswap16(*ssrc++);
for (i = 0; i < (src_size + 1) >> 1; i++) {
AV_WB16(dst, AV_RL16(src));
src += 2;
dst += 2;
}
return src_size;
case DCA_MARKER_14B_BE:
case DCA_MARKER_14B_LE:
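The root cause here is that casting a byte buffer to `uint16_t *` and dereferencing it assumes 16-bit alignment, which the input does not guarantee. A stand-alone sketch of the alignment-safe alternative (byte-wise loads and stores, which is what the AV_RL16/AV_WB16 macros do) might look like this; it is illustrative, not the FFmpeg implementation:

```c
#include <stdint.h>
#include <stddef.h>

/* Swap each little-endian 16-bit word to big-endian, reading and writing
 * individual bytes so no alignment is required on src or dst. */
static void bswap16_buf_bytewise(uint8_t *dst, const uint8_t *src, size_t nbytes)
{
    for (size_t i = 0; i + 1 < nbytes; i += 2) {
        dst[i]     = src[i + 1];
        dst[i + 1] = src[i];
    }
}
```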
libavcodec/dvdec.c

@@ -221,7 +221,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
dct_mode * 22 * 64 +
(quant + ff_dv_quant_offset[class1]) * 64];
}
dc = dc << 2;
dc = dc * 4;
/* convert to unsigned because 128 is not added in the
* standard IDCT */
dc += 1024;
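The change from `dc << 2` to `dc * 4` matters because, in C, left-shifting a negative signed value is undefined behaviour, while multiplication is well defined and produces the intended result for both signs. A trivial sketch of the safe form (the helper name is invented for illustration):

```c
/* Scale a (possibly negative) DC coefficient by 4 without relying on
 * left-shifting a negative value, which is undefined behaviour in C. */
static inline int scale_dc(int dc)
{
    return dc * 4;
}
```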
@@ -460,7 +460,7 @@ static int huf_build_dec_table(const uint64_t *hcode, int im,
|
||||
lc += 8; \
|
||||
}
|
||||
|
||||
#define get_code(po, rlc, c, lc, gb, out, oe) \
|
||||
#define get_code(po, rlc, c, lc, gb, out, oe, outb) \
|
||||
{ \
|
||||
if (po == rlc) { \
|
||||
if (lc < 8) \
|
||||
@@ -469,7 +469,7 @@ static int huf_build_dec_table(const uint64_t *hcode, int im,
|
||||
\
|
||||
cs = c >> lc; \
|
||||
\
|
||||
if (out + cs > oe) \
|
||||
if (out + cs > oe || out == outb) \
|
||||
return AVERROR_INVALIDDATA; \
|
||||
\
|
||||
s = out[-1]; \
|
||||
@@ -502,7 +502,7 @@ static int huf_decode(const uint64_t *hcode, const HufDec *hdecod,
|
||||
|
||||
if (pl.len) {
|
||||
lc -= pl.len;
|
||||
get_code(pl.lit, rlc, c, lc, gb, out, oe);
|
||||
get_code(pl.lit, rlc, c, lc, gb, out, oe, outb);
|
||||
} else {
|
||||
int j;
|
||||
|
||||
@@ -519,7 +519,7 @@ static int huf_decode(const uint64_t *hcode, const HufDec *hdecod,
|
||||
if ((hcode[pl.p[j]] >> 6) ==
|
||||
((c >> (lc - l)) & ((1LL << l) - 1))) {
|
||||
lc -= l;
|
||||
get_code(pl.p[j], rlc, c, lc, gb, out, oe);
|
||||
get_code(pl.p[j], rlc, c, lc, gb, out, oe, outb);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -540,7 +540,7 @@ static int huf_decode(const uint64_t *hcode, const HufDec *hdecod,
|
||||
|
||||
if (pl.len) {
|
||||
lc -= pl.len;
|
||||
get_code(pl.lit, rlc, c, lc, gb, out, oe);
|
||||
get_code(pl.lit, rlc, c, lc, gb, out, oe, outb);
|
||||
} else {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -916,7 +916,7 @@ static int count_frame_header(FlacEncodeContext *s)
|
||||
count += 16;
|
||||
|
||||
/* explicit sample rate */
|
||||
count += ((s->sr_code[0] == 12) + (s->sr_code[0] > 12)) * 8;
|
||||
count += ((s->sr_code[0] == 12) + (s->sr_code[0] > 12) * 2) * 8;
|
||||
|
||||
/* frame header CRC-8 */
|
||||
count += 8;
|
||||
|
@@ -43,6 +43,7 @@ typedef struct {
|
||||
const AVClass *class;
|
||||
LZWState *lzw;
|
||||
uint8_t *buf;
|
||||
int buf_size;
|
||||
AVFrame *last_frame;
|
||||
int flags;
|
||||
uint32_t palette[AVPALETTE_COUNT]; ///< local reference palette for !pal8
|
||||
@@ -168,7 +169,7 @@ static int gif_image_write_image(AVCodecContext *avctx,
|
||||
|
||||
bytestream_put_byte(bytestream, 0x08);
|
||||
|
||||
ff_lzw_encode_init(s->lzw, s->buf, 2 * width * height,
|
||||
ff_lzw_encode_init(s->lzw, s->buf, s->buf_size,
|
||||
12, FF_LZW_GIF, put_bits);
|
||||
|
||||
ptr = buf + y_start*linesize + x_start;
|
||||
@@ -224,7 +225,8 @@ static av_cold int gif_encode_init(AVCodecContext *avctx)
|
||||
avctx->coded_frame->key_frame = 1;
|
||||
|
||||
s->lzw = av_mallocz(ff_lzw_encode_state_size);
|
||||
s->buf = av_malloc(avctx->width*avctx->height*2);
|
||||
s->buf_size = avctx->width*avctx->height*2 + 1000;
|
||||
s->buf = av_malloc(s->buf_size);
|
||||
s->tmpl = av_malloc(avctx->width);
|
||||
if (!s->tmpl || !s->buf || !s->lzw)
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -283,6 +285,7 @@ static int gif_encode_close(AVCodecContext *avctx)
|
||||
|
||||
av_freep(&s->lzw);
|
||||
av_freep(&s->buf);
|
||||
s->buf_size = 0;
|
||||
av_frame_free(&s->last_frame);
|
||||
av_freep(&s->tmpl);
|
||||
return 0;
|
||||
|
@@ -283,7 +283,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
|
||||
|
||||
long_idx = pic_num_extract(h, pic_id, &pic_structure);
|
||||
|
||||
if (long_idx > 31) {
|
||||
if (long_idx > 31U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"long_term_pic_idx overflow\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
@@ -927,7 +927,7 @@ static void implicit_weight_table(H264Context *h, int field)
|
||||
cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
|
||||
}
|
||||
if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
|
||||
h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) {
|
||||
h->ref_list[0][0].poc + (int64_t)h->ref_list[1][0].poc == 2 * cur_poc) {
|
||||
h->use_weight = 0;
|
||||
h->use_weight_chroma = 0;
|
||||
return;
|
||||
@@ -948,7 +948,7 @@ static void implicit_weight_table(H264Context *h, int field)
|
||||
h->chroma_log2_weight_denom = 5;
|
||||
|
||||
for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
|
||||
int poc0 = h->ref_list[0][ref0].poc;
|
||||
int64_t poc0 = h->ref_list[0][ref0].poc;
|
||||
for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
|
||||
int w = 32;
|
||||
if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
|
||||
|
@@ -38,6 +38,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
#include "hpeldsp.h"
|
||||
@@ -949,7 +950,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s, AVFrame *frame)
|
||||
}
|
||||
}
|
||||
if (bytestream2_get_bytes_left(&s->stream_ptr) > 1) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
av_log(s->avctx, AV_LOG_DEBUG,
|
||||
"decode finished with %d bytes left over\n",
|
||||
bytestream2_get_bytes_left(&s->stream_ptr));
|
||||
}
|
||||
@@ -987,12 +988,15 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
AVFrame *frame = data;
|
||||
int ret;
|
||||
|
||||
if (buf_size < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
/* decoding map contains 4 bits of information per 8x8 block */
|
||||
s->decoding_map_size = avctx->width * avctx->height / (8 * 8 * 2);
|
||||
s->decoding_map_size = AV_RL16(avpkt->data);
|
||||
|
||||
/* compressed buffer needs to be large enough to at least hold an entire
|
||||
* decoding map */
|
||||
if (buf_size < s->decoding_map_size)
|
||||
if (buf_size < s->decoding_map_size + 2)
|
||||
return buf_size;
|
||||
|
||||
if (av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, NULL)) {
|
||||
@@ -1000,8 +1004,8 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
av_frame_unref(s->second_last_frame);
|
||||
}
|
||||
|
||||
s->decoding_map = buf;
|
||||
bytestream2_init(&s->stream_ptr, buf + s->decoding_map_size,
|
||||
s->decoding_map = buf + 2;
|
||||
bytestream2_init(&s->stream_ptr, buf + 2 + s->decoding_map_size,
|
||||
buf_size - s->decoding_map_size);
|
||||
|
||||
if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
|
||||
|
@@ -43,7 +43,7 @@
|
||||
/**
|
||||
* Table of number of bits a motion vector component needs.
|
||||
*/
|
||||
static uint8_t mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
|
||||
static uint8_t mv_penalty[MAX_FCODE+1][MAX_DMV*2+1];
|
||||
|
||||
/**
|
||||
* Minimal fcode that a motion vector component would need.
|
||||
@@ -676,7 +676,7 @@ static av_cold void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
int mv;
|
||||
|
||||
for(f_code=1; f_code<=MAX_FCODE; f_code++){
|
||||
for(mv=-MAX_MV; mv<=MAX_MV; mv++){
|
||||
for(mv=-MAX_DMV; mv<=MAX_DMV; mv++){
|
||||
int len;
|
||||
|
||||
if(mv==0) len= ff_mvtab[0][1];
|
||||
@@ -697,7 +697,7 @@ static av_cold void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
}
|
||||
}
|
||||
|
||||
mv_penalty[f_code][mv+MAX_MV]= len;
|
||||
mv_penalty[f_code][mv+MAX_DMV]= len;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1292,11 +1292,15 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
|
||||
if (tile->codsty[0].mct)
|
||||
mct_decode(s, tile);
|
||||
|
||||
if (s->cdef[0] < 0) {
|
||||
for (x = 0; x < s->ncomponents; x++)
|
||||
s->cdef[x] = x + 1;
|
||||
if ((s->ncomponents & 1) == 0)
|
||||
s->cdef[s->ncomponents-1] = 0;
|
||||
for (x = 0; x < s->ncomponents; x++) {
|
||||
if (s->cdef[x] < 0) {
|
||||
for (x = 0; x < s->ncomponents; x++) {
|
||||
s->cdef[x] = x + 1;
|
||||
}
|
||||
if ((s->ncomponents & 1) == 0)
|
||||
s->cdef[s->ncomponents-1] = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (s->precision <= 8) {
|
||||
|
@@ -584,7 +584,8 @@ unk_pixfmt:
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len);
|
||||
}
|
||||
|
||||
if (s->rgb && !s->lossless && !s->ls) {
|
||||
if ((s->rgb && !s->lossless && !s->ls) ||
|
||||
(!s->rgb && s->ls && s->nb_components > 1)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
@@ -949,7 +950,7 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
|
||||
return -1;
|
||||
|
||||
left[i] = buffer[mb_x][i] =
|
||||
mask & (pred + (dc << point_transform));
|
||||
mask & (pred + (dc * (1 << point_transform)));
|
||||
}
|
||||
|
||||
if (s->restart_interval && !--s->restart_count) {
|
||||
@@ -1914,7 +1915,7 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
|
||||
while (b < t) {
|
||||
uint8_t x = src[b++];
|
||||
put_bits(&pb, 8, x);
|
||||
if (x == 0xFF) {
|
||||
if (x == 0xFF && b < t) {
|
||||
x = src[b++];
|
||||
if (x & 0x80) {
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
|
||||
|
@@ -906,7 +906,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
|
||||
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
|
||||
c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
|
||||
c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
|
||||
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
|
||||
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_DMV;
|
||||
|
||||
get_limits(s, 16*mb_x, 16*mb_y);
|
||||
c->skip=0;
|
||||
@@ -1102,7 +1102,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
|
||||
av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
|
||||
|
||||
c->pre_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_pre_cmp);
|
||||
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
|
||||
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_DMV;
|
||||
|
||||
get_limits(s, 16*mb_x, 16*mb_y);
|
||||
c->skip=0;
|
||||
@@ -1151,7 +1151,7 @@ static int estimate_motion_b(MpegEncContext *s, int mb_x, int mb_y,
|
||||
const int shift= 1+s->quarter_sample;
|
||||
const int mot_stride = s->mb_stride;
|
||||
const int mot_xy = mb_y*mot_stride + mb_x;
|
||||
uint8_t * const mv_penalty= c->mv_penalty[f_code] + MAX_MV;
|
||||
uint8_t * const mv_penalty= c->mv_penalty[f_code] + MAX_DMV;
|
||||
int mv_scale;
|
||||
|
||||
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
|
||||
@@ -1225,8 +1225,8 @@ static inline int check_bidir_mv(MpegEncContext * s,
|
||||
//FIXME better f_code prediction (max mv & distance)
|
||||
//FIXME pointers
|
||||
MotionEstContext * const c= &s->me;
|
||||
uint8_t * const mv_penalty_f= c->mv_penalty[s->f_code] + MAX_MV; // f_code of the prev frame
|
||||
uint8_t * const mv_penalty_b= c->mv_penalty[s->b_code] + MAX_MV; // f_code of the prev frame
|
||||
uint8_t * const mv_penalty_f= c->mv_penalty[s->f_code] + MAX_DMV; // f_code of the prev frame
|
||||
uint8_t * const mv_penalty_b= c->mv_penalty[s->b_code] + MAX_DMV; // f_code of the prev frame
|
||||
int stride= c->stride;
|
||||
uint8_t *dest_y = c->scratchpad;
|
||||
uint8_t *ptr;
|
||||
@@ -1439,7 +1439,7 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
|
||||
int mx, my, xmin, xmax, ymin, ymax;
|
||||
int16_t (*mv_table)[2]= s->b_direct_mv_table;
|
||||
|
||||
c->current_mv_penalty= c->mv_penalty[1] + MAX_MV;
|
||||
c->current_mv_penalty= c->mv_penalty[1] + MAX_DMV;
|
||||
ymin= xmin=(-32)>>shift;
|
||||
ymax= xmax= 31>>shift;
|
||||
|
||||
@@ -1575,11 +1575,11 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
|
||||
if(s->flags & CODEC_FLAG_INTERLACED_ME){
|
||||
//FIXME mb type penalty
|
||||
c->skip=0;
|
||||
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
|
||||
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_DMV;
|
||||
fimin= interlaced_search(s, 0,
|
||||
s->b_field_mv_table[0], s->b_field_select_table[0],
|
||||
s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1], 0);
|
||||
c->current_mv_penalty= c->mv_penalty[s->b_code] + MAX_MV;
|
||||
c->current_mv_penalty= c->mv_penalty[s->b_code] + MAX_DMV;
|
||||
bimin= interlaced_search(s, 2,
|
||||
s->b_field_mv_table[1], s->b_field_select_table[1],
|
||||
s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1], 0);
|
||||
|
@@ -52,7 +52,7 @@ static const uint8_t svcd_scan_offset_placeholder[] = {
|
||||
0x81, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
};
|
||||
|
||||
static uint8_t mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
|
||||
static uint8_t mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
|
||||
static uint8_t fcode_tab[MAX_MV * 2 + 1];
|
||||
|
||||
static uint8_t uni_mpeg1_ac_vlc_len[64 * 64 * 2];
|
||||
@@ -144,9 +144,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
|
||||
if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && avctx->height > 2800)
|
||||
avctx->thread_count = 1;
|
||||
|
||||
if (ff_mpv_encode_init(avctx) < 0)
|
||||
return -1;
|
||||
|
||||
@@ -1050,7 +1047,7 @@ av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
|
||||
}
|
||||
|
||||
for (f_code = 1; f_code <= MAX_FCODE; f_code++)
|
||||
for (mv = -MAX_MV; mv <= MAX_MV; mv++) {
|
||||
for (mv = -MAX_DMV; mv <= MAX_DMV; mv++) {
|
||||
int len;
|
||||
|
||||
if (mv == 0) {
|
||||
@@ -1073,7 +1070,7 @@ av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
|
||||
2 + bit_size;
|
||||
}
|
||||
|
||||
mv_penalty[f_code][mv + MAX_MV] = len;
|
||||
mv_penalty[f_code][mv + MAX_DMV] = len;
|
||||
}
|
||||
|
||||
|
||||
|
@@ -137,7 +137,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s,
|
||||
void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n,
|
||||
int dir);
|
||||
void ff_set_mpeg4_time(MpegEncContext *s);
|
||||
void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
|
||||
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
|
||||
|
||||
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb);
|
||||
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s);
|
||||
|
@@ -881,7 +881,7 @@ int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
|
||||
const int part_a_end = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_END | ER_MV_END) : ER_MV_END;
|
||||
|
||||
mb_num = mpeg4_decode_partition_a(ctx);
|
||||
if (mb_num < 0) {
|
||||
if (mb_num <= 0) {
|
||||
ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
|
||||
s->mb_x, s->mb_y, part_a_error);
|
||||
return -1;
|
||||
|
@@ -1086,7 +1086,7 @@ static void mpeg4_encode_vol_header(MpegEncContext *s,
|
||||
}
|
||||
|
||||
/* write mpeg4 VOP header */
|
||||
void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
|
||||
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
|
||||
{
|
||||
int time_incr;
|
||||
int time_div, time_mod;
|
||||
@@ -1112,6 +1112,12 @@ void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
|
||||
time_mod = FFUMOD(s->time, s->avctx->time_base.den);
|
||||
time_incr = time_div - s->last_time_base;
|
||||
av_assert0(time_incr >= 0);
|
||||
|
||||
// This limits the frame duration to max 1 hour
|
||||
if (time_incr > 3600) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "time_incr %d too large\n", time_incr);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
while (time_incr--)
|
||||
put_bits(&s->pb, 1, 1);
|
||||
|
||||
@@ -1137,6 +1143,8 @@ void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
|
||||
put_bits(&s->pb, 3, s->f_code); /* fcode_for */
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B)
|
||||
put_bits(&s->pb, 3, s->b_code); /* fcode_back */
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold void init_uni_dc_tab(void)
|
||||
|
@@ -64,6 +64,7 @@ enum OutputFormat {
|
||||
|
||||
#define MAX_FCODE 7
|
||||
#define MAX_MV 4096
|
||||
#define MAX_DMV (2*MAX_MV)
|
||||
|
||||
#define MAX_THREADS 32
|
||||
#define MAX_PICTURE_COUNT 36
|
||||
@@ -196,7 +197,7 @@ typedef struct MotionEstContext{
|
||||
op_pixels_func (*hpel_avg)[4];
|
||||
qpel_mc_func (*qpel_put)[16];
|
||||
qpel_mc_func (*qpel_avg)[16];
|
||||
uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV
|
||||
uint8_t (*mv_penalty)[MAX_DMV*2+1]; ///< amount of bits needed to encode a MV
|
||||
uint8_t *current_mv_penalty;
|
||||
int (*sub_motion_search)(struct MpegEncContext * s,
|
||||
int *mx_ptr, int *my_ptr, int dmin,
|
||||
|
@@ -70,7 +70,7 @@ static int sse_mb(MpegEncContext *s);
|
||||
static void denoise_dct_c(MpegEncContext *s, int16_t *block);
|
||||
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
|
||||
|
||||
static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
|
||||
static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
|
||||
static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
|
||||
|
||||
const AVOption ff_mpv_generic_options[] = {
|
||||
@@ -316,6 +316,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
|
||||
break;
|
||||
}
|
||||
|
||||
avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
|
||||
s->bit_rate = avctx->bit_rate;
|
||||
s->width = avctx->width;
|
||||
s->height = avctx->height;
|
||||
@@ -3655,9 +3656,11 @@ static int encode_picture(MpegEncContext *s, int picture_number)
|
||||
ff_wmv2_encode_picture_header(s, picture_number);
|
||||
else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
|
||||
ff_msmpeg4_encode_picture_header(s, picture_number);
|
||||
else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
|
||||
ff_mpeg4_encode_picture_header(s, picture_number);
|
||||
else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
|
||||
else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
|
||||
ret = ff_mpeg4_encode_picture_header(s, picture_number);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
|
||||
ret = ff_rv10_encode_picture_header(s, picture_number);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@@ -209,8 +209,13 @@ static int decode_555(GetByteContext *gB, uint16_t *dst, int stride,
|
||||
last_symbol = b << 8 | bytestream2_get_byte(gB);
|
||||
else if (b > 129) {
|
||||
repeat = 0;
|
||||
while (b-- > 130)
|
||||
while (b-- > 130) {
|
||||
if (repeat >= (INT_MAX >> 8) - 1) {
|
||||
av_log(NULL, AV_LOG_ERROR, "repeat overflow\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
|
||||
}
|
||||
if (last_symbol == -2) {
|
||||
int skip = FFMIN((unsigned)repeat, dst + w - p);
|
||||
repeat -= skip;
|
||||
|
@@ -211,9 +211,16 @@ static inline int get_egolomb(GetBitContext *gb)
|
||||
{
|
||||
int v = 4;
|
||||
|
||||
while (get_bits1(gb)) v++;
|
||||
while (get_bits1(gb)) {
|
||||
v++;
|
||||
if (v > 30) {
|
||||
av_log(NULL, AV_LOG_WARNING, "Too large golomb code in get_egolomb.\n");
|
||||
v = 30;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return (1 << v) + get_bits(gb, v);
|
||||
return (1 << v) + get_bits_long(gb, v);
|
||||
}
|
||||
|
||||
static int on2avc_decode_pairs(On2AVCContext *c, GetBitContext *gb, float *dst,
|
||||
|
@@ -824,7 +824,7 @@ static inline void silk_stabilize_lsf(int16_t nlsf[16], int order, const uint16_
|
||||
|
||||
/* upper extent */
|
||||
for (i = order; i > k; i--)
|
||||
max_center -= min_delta[k];
|
||||
max_center -= min_delta[i];
|
||||
max_center -= min_delta[k] >> 1;
|
||||
|
||||
/* move apart */
|
||||
|
@@ -181,6 +181,12 @@ int ff_slice_thread_init(AVCodecContext *avctx)
|
||||
w32thread_init();
|
||||
#endif
|
||||
|
||||
// We cannot do this in the encoder init as the threads are created before
|
||||
if (av_codec_is_encoder(avctx->codec) &&
|
||||
avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
|
||||
avctx->height > 2800)
|
||||
thread_count = avctx->thread_count = 1;
|
||||
|
||||
if (!thread_count) {
|
||||
int nb_cpus = av_cpu_count();
|
||||
if (avctx->height)
|
||||
|
@@ -88,7 +88,7 @@ static inline void flush_put_bits(PutBitContext *s)
|
||||
s->bit_buf <<= s->bit_left;
|
||||
#endif
|
||||
while (s->bit_left < 32) {
|
||||
/* XXX: should test end of buffer */
|
||||
av_assert0(s->buf_ptr < s->buf_end);
|
||||
#ifdef BITSTREAM_WRITER_LE
|
||||
*s->buf_ptr++ = s->bit_buf;
|
||||
s->bit_buf >>= 8;
|
||||
@@ -146,9 +146,13 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
|
||||
#ifdef BITSTREAM_WRITER_LE
|
||||
bit_buf |= value << (32 - bit_left);
|
||||
if (n >= bit_left) {
|
||||
av_assert2(s->buf_ptr+3<s->buf_end);
|
||||
AV_WL32(s->buf_ptr, bit_buf);
|
||||
s->buf_ptr += 4;
|
||||
if (3 < s->buf_end - s->buf_ptr) {
|
||||
AV_WL32(s->buf_ptr, bit_buf);
|
||||
s->buf_ptr += 4;
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
|
||||
av_assert2(0);
|
||||
}
|
||||
bit_buf = (bit_left == 32) ? 0 : value >> bit_left;
|
||||
bit_left += 32;
|
||||
}
|
||||
@@ -160,9 +164,13 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
|
||||
} else {
|
||||
bit_buf <<= bit_left;
|
||||
bit_buf |= value >> (n - bit_left);
|
||||
av_assert2(s->buf_ptr+3<s->buf_end);
|
||||
AV_WB32(s->buf_ptr, bit_buf);
|
||||
s->buf_ptr += 4;
|
||||
if (3 < s->buf_end - s->buf_ptr) {
|
||||
AV_WB32(s->buf_ptr, bit_buf);
|
||||
s->buf_ptr += 4;
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
|
||||
av_assert2(0);
|
||||
}
|
||||
bit_left += 32 - n;
|
||||
bit_buf = value;
|
||||
}
|
||||
|
@@ -258,7 +258,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
buf += buf_size - context->frame_size;
|
||||
|
||||
len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
|
||||
if (buf_size < len && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0)) {
|
||||
if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
|
||||
av_buffer_unref(&frame->buf[0]);
|
||||
return AVERROR(EINVAL);
|
||||
|
@@ -91,6 +91,7 @@ static int sami_paragraph_to_ass(AVCodecContext *avctx, const char *src)
|
||||
break;
|
||||
if (*p == '>')
|
||||
p++;
|
||||
continue;
|
||||
}
|
||||
if (!av_isspace(*p))
|
||||
av_bprint_chars(dst, *p, 1);
|
||||
|
@@ -285,7 +285,7 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){
|
||||
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
|
||||
c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
|
||||
c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
|
||||
c->current_mv_penalty= c->mv_penalty[s->m.f_code=1] + MAX_MV;
|
||||
c->current_mv_penalty= c->mv_penalty[s->m.f_code=1] + MAX_DMV;
|
||||
|
||||
c->xmin = - x*block_w - 16+3;
|
||||
c->ymin = - y*block_w - 16+3;
|
||||
|
@@ -925,6 +925,13 @@ static av_cold int sonic_decode_init(AVCodecContext *avctx)
|
||||
s->frame_size = s->channels*s->block_align*s->downsampling;
|
||||
// avctx->frame_size = s->block_align;
|
||||
|
||||
if (s->num_taps * s->channels > s->frame_size) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"number of taps times channels (%d * %d) larger than frame size %d\n",
|
||||
s->num_taps, s->channels, s->frame_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
|
||||
s->version, s->minor_version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
|
||||
|
||||
|
@@ -1005,8 +1005,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
||||
av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
for (i = 0; i < count; i++)
|
||||
for (i = 0; i < count; i++) {
|
||||
s->subsampling[i] = ff_tget(&s->gb, type, s->le);
|
||||
if (s->subsampling[i] <= 0) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case TIFF_T4OPTIONS:
|
||||
if (s->compr == TIFF_G3)
|
||||
@@ -1254,7 +1259,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
avpkt->size - s->strippos);
|
||||
}
|
||||
|
||||
if (s->rps <= 0) {
|
||||
if (s->rps <= 0 || s->rps % s->subsampling[1]) {
|
||||
av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -567,6 +567,11 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
rangebits = get_bits(gb, 4);
|
||||
if (!rangebits && floor_setup->data.t1.partitions) {
|
||||
av_log(vc->avctx, AV_LOG_ERROR,
|
||||
"A rangebits value of 0 is not compliant with the Vorbis I specification.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
rangemax = (1 << rangebits);
|
||||
if (rangemax > vc->blocksize[1] / 2) {
|
||||
av_log(vc->avctx, AV_LOG_ERROR,
|
||||
@@ -783,6 +788,11 @@ static int vorbis_parse_setup_hdr_mappings(vorbis_context *vc)
|
||||
|
||||
if (get_bits1(gb)) {
|
||||
mapping_setup->coupling_steps = get_bits(gb, 8) + 1;
|
||||
if (vc->audio_channels < 2) {
|
||||
av_log(vc->avctx, AV_LOG_ERROR,
|
||||
"Square polar channel mapping with less than two channels is not compliant with the Vorbis I specification.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
mapping_setup->magnitude = av_mallocz(mapping_setup->coupling_steps *
|
||||
sizeof(*mapping_setup->magnitude));
|
||||
mapping_setup->angle = av_mallocz(mapping_setup->coupling_steps *
|
||||
|
@@ -128,6 +128,11 @@ static av_cold int wavpack_encode_init(AVCodecContext *avctx)
|
||||
|
||||
s->avctx = avctx;
|
||||
|
||||
if (avctx->channels > 255) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid channel count: %d\n", avctx->channels);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
if (!avctx->frame_size) {
|
||||
int block_samples;
|
||||
if (!(avctx->sample_rate & 1))
|
||||
@@ -2882,7 +2887,7 @@ static int wavpack_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
}
|
||||
|
||||
buf_size = s->block_samples * avctx->channels * 8
|
||||
+ 200 /* for headers */;
|
||||
+ 200 * avctx->channels /* for headers */;
|
||||
if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size)) < 0)
|
||||
return ret;
|
||||
buf = avpkt->data;
|
||||
|
@@ -32,6 +32,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
WMACodecContext *s = avctx->priv_data;
|
||||
int i, flags1, flags2, block_align;
|
||||
uint8_t *extradata;
|
||||
int ret;
|
||||
|
||||
s->avctx = avctx;
|
||||
|
||||
@@ -78,7 +79,8 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
if (avctx->channels == 2)
|
||||
s->ms_stereo = 1;
|
||||
|
||||
ff_wma_init(avctx, flags2);
|
||||
if ((ret = ff_wma_init(avctx, flags2)) < 0)
|
||||
return ret;
|
||||
|
||||
/* init MDCT */
|
||||
for (i = 0; i < s->nb_block_sizes; i++)
|
||||
|
@@ -141,7 +141,7 @@ static int xwd_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + avctx->height * lsize) {
|
||||
if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + (uint64_t)avctx->height * lsize) {
|
||||
av_log(avctx, AV_LOG_ERROR, "input buffer too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -120,10 +120,13 @@ static int decimate_frame(AVFilterContext *ctx,
|
||||
cur->data[plane], cur->linesize[plane],
|
||||
ref->data[plane], ref->linesize[plane],
|
||||
FF_CEIL_RSHIFT(ref->width, hsub),
|
||||
FF_CEIL_RSHIFT(ref->height, vsub)))
|
||||
FF_CEIL_RSHIFT(ref->height, vsub))) {
|
||||
emms_c();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
emms_c();
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@@ -929,6 +929,11 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
|
||||
pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts;
|
||||
av_assert0(pts != AV_NOPTS_VALUE);
|
||||
if ( pts < - PREROLL_TIME
|
||||
|| pts > (INT_MAX-3)/10000LL * ASF_INDEXED_INTERVAL - PREROLL_TIME) {
|
||||
av_log(s, AV_LOG_ERROR, "input pts %"PRId64" is invalid\n", pts);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
pts *= 10000;
|
||||
asf->duration = FFMAX(asf->duration, pts + pkt->duration * 10000);
|
||||
|
||||
|
libavformat/avformat.h

@@ -78,6 +78,18 @@
* if its AVClass is non-NULL, and the protocols layer. See the discussion on
* nesting in @ref avoptions documentation to learn how to access those.
*
* @section urls
* URL strings in libavformat are made of a scheme/protocol, a ':', and a
* scheme specific string. URLs without a scheme and ':' used for local files
* are supported but deprecated. "file:" should be used for local files.
*
* It is important that the scheme string is not taken from untrusted
* sources without checks.
*
* Note that some schemes/protocols are quite powerful, allowing access to
* both local and remote files, parts of them, concatenations of them, local
* audio and video devices and so on.
*
* @defgroup lavf_decoding Demuxing
* @{
* Demuxers read a media file and split it into chunks of data (@em packets). A
@@ -88,10 +100,10 @@
* cleanup.
*
* @section lavf_decoding_open Opening a media file
* The minimum information required to open a file is its URL or filename, which
* The minimum information required to open a file is its URL, which
* is passed to avformat_open_input(), as in the following code:
* @code
* const char *url = "in.mp3";
* const char *url = "file:in.mp3";
* AVFormatContext *s = NULL;
* int ret = avformat_open_input(&s, url, NULL, NULL);
* if (ret < 0)
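To make the documented snippet above self-contained, here is a hedged, minimal example built only from public libavformat calls of that era; the helper name `probe_file` is a placeholder and the error handling is deliberately terse.

```c
#include <libavformat/avformat.h>

static int probe_file(void)
{
    const char *url = "file:in.mp3";   /* explicit "file:" scheme, as recommended above */
    AVFormatContext *s = NULL;
    int ret;

    av_register_all();                 /* needed before demuxer lookup in this API era */

    ret = avformat_open_input(&s, url, NULL, NULL);
    if (ret < 0)
        return ret;                    /* could not open or recognize the input */

    ret = avformat_find_stream_info(s, NULL);
    if (ret >= 0)
        av_dump_format(s, 0, url, 0);  /* print what was detected */

    avformat_close_input(&s);
    return ret < 0 ? ret : 0;
}
```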
@@ -916,7 +928,7 @@ typedef struct AVStream {
|
||||
/**
|
||||
* Stream information used internally by av_find_stream_info()
|
||||
*/
|
||||
#define MAX_STD_TIMEBASES (30*12+7+6)
|
||||
#define MAX_STD_TIMEBASES (30*12+30+3+6)
|
||||
struct {
|
||||
int64_t last_dts;
|
||||
int64_t duration_gcd;
|
||||
@@ -1938,7 +1950,7 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score
|
||||
*
|
||||
* @param pb the bytestream to probe
|
||||
* @param fmt the input format is put here
|
||||
* @param filename the filename of the stream
|
||||
* @param url the url of the stream
|
||||
* @param logctx the log context
|
||||
* @param offset the offset within the bytestream to probe from
|
||||
* @param max_probe_size the maximum probe buffer size (zero for default)
|
||||
@@ -1947,14 +1959,14 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score
|
||||
* AVERROR code otherwise
|
||||
*/
|
||||
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
|
||||
const char *filename, void *logctx,
|
||||
const char *url, void *logctx,
|
||||
unsigned int offset, unsigned int max_probe_size);
|
||||
|
||||
/**
|
||||
* Like av_probe_input_buffer2() but returns 0 on success
|
||||
*/
|
||||
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
|
||||
const char *filename, void *logctx,
|
||||
const char *url, void *logctx,
|
||||
unsigned int offset, unsigned int max_probe_size);
|
||||
|
||||
/**
|
||||
@@ -1965,7 +1977,7 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
|
||||
* May be a pointer to NULL, in which case an AVFormatContext is allocated by this
|
||||
* function and written into ps.
|
||||
* Note that a user-supplied AVFormatContext will be freed on failure.
|
||||
* @param filename Name of the stream to open.
|
||||
* @param url URL of the stream to open.
|
||||
* @param fmt If non-NULL, this parameter forces a specific input format.
|
||||
* Otherwise the format is autodetected.
|
||||
* @param options A dictionary filled with AVFormatContext and demuxer-private options.
|
||||
@@ -1976,7 +1988,7 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
|
||||
*
|
||||
* @note If you want to use custom IO, preallocate the format context and set its pb field.
|
||||
*/
|
||||
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
|
||||
int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options);
|
||||
|
||||
attribute_deprecated
|
||||
int av_demuxer_open(AVFormatContext *ic);
|
||||
|
@@ -155,9 +155,16 @@ static int url_alloc_for_protocol(URLContext **puc, struct URLProtocol *up,
|
||||
char sep= *++p;
|
||||
char *key, *val;
|
||||
p++;
|
||||
|
||||
if (strcmp(up->name, "subfile"))
|
||||
ret = AVERROR(EINVAL);
|
||||
|
||||
while(ret >= 0 && (key= strchr(p, sep)) && p<key && (val = strchr(key+1, sep))){
|
||||
*val= *key= 0;
|
||||
ret= av_opt_set(uc->priv_data, p, key+1, 0);
|
||||
if (strcmp(p, "start") && strcmp(p, "end")) {
|
||||
ret = AVERROR_OPTION_NOT_FOUND;
|
||||
} else
|
||||
ret= av_opt_set(uc->priv_data, p, key+1, 0);
|
||||
if (ret == AVERROR_OPTION_NOT_FOUND)
|
||||
av_log(uc, AV_LOG_ERROR, "Key '%s' not found.\n", p);
|
||||
*val= *key= sep;
|
||||
@@ -222,7 +229,7 @@ static struct URLProtocol *url_find_protocol(const char *filename)
|
||||
size_t proto_len = strspn(filename, URL_SCHEME_CHARS);
|
||||
|
||||
if (filename[proto_len] != ':' &&
|
||||
(filename[proto_len] != ',' || !strchr(filename + proto_len + 1, ':')) ||
|
||||
(strncmp(filename, "subfile,", 8) || !strchr(filename + proto_len + 1, ':')) ||
|
||||
is_dos_path(filename))
|
||||
strcpy(proto_str, "file");
|
||||
else
|
||||
|
@@ -356,6 +356,8 @@ int avio_put_str16le(AVIOContext *s, const char *str)
|
||||
invalid:
|
||||
av_log(s, AV_LOG_ERROR, "Invaid UTF8 sequence in avio_put_str16le\n");
|
||||
err = AVERROR(EINVAL);
|
||||
if (!*(q-1))
|
||||
break;
|
||||
}
|
||||
avio_wl16(s, 0);
|
||||
if (err)
|
||||
|
@@ -260,6 +260,16 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
if (codec->codec_id == AV_CODEC_ID_ADPCM_THP) {
|
||||
uint8_t *dst;
|
||||
|
||||
if (!b->adpc) {
|
||||
av_log(s, AV_LOG_ERROR, "adpcm_thp requires ADPC chunk, but none was found.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (!b->table) {
|
||||
b->table = av_mallocz(32 * codec->channels);
|
||||
if (!b->table)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
if (av_new_packet(pkt, 8 + (32 + 4) * codec->channels + size) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
dst = pkt->data;
|
||||
|
@@ -64,7 +64,10 @@ static av_cold int concat_open(URLContext *h, const char *uri, int flags)
|
||||
struct concat_data *data = h->priv_data;
|
||||
struct concat_nodes *nodes;
|
||||
|
||||
av_strstart(uri, "concat:", &uri);
|
||||
if (!av_strstart(uri, "concat:", &uri)) {
|
||||
av_log(h, AV_LOG_ERROR, "URL %s lacks prefix\n", uri);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
for (i = 0, len = 1; uri[i]; i++)
|
||||
if (uri[i] == *AV_CAT_SEPARATOR)
|
||||
|
@@ -110,9 +110,10 @@ static int ffm_read_data(AVFormatContext *s,
|
||||
ffm->dts = avio_rb64(pb);
|
||||
frame_offset = avio_rb16(pb);
|
||||
avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
|
||||
ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
|
||||
if (ffm->packet_end < ffm->packet || frame_offset < 0)
|
||||
if (ffm->packet_size < FFM_HEADER_SIZE + fill_size || frame_offset < 0) {
|
||||
return -1;
|
||||
}
|
||||
ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
|
||||
/* if first packet or resynchronization packet, we must
|
||||
handle it specifically */
|
||||
if (ffm->first_packet || (frame_offset & 0x8000)) {
|
||||
@@ -128,8 +129,10 @@ static int ffm_read_data(AVFormatContext *s,
|
||||
return 0;
|
||||
}
|
||||
ffm->first_packet = 0;
|
||||
if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE)
|
||||
if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE) {
|
||||
ffm->packet_end = ffm->packet_ptr;
|
||||
return -1;
|
||||
}
|
||||
ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
|
||||
if (!header)
|
||||
break;
|
||||
@@ -243,6 +246,7 @@ static int ffm2_read_header(AVFormatContext *s)
|
||||
AVStream *st;
|
||||
AVIOContext *pb = s->pb;
|
||||
AVCodecContext *codec;
|
||||
const AVCodecDescriptor *codec_desc;
|
||||
int ret;
|
||||
|
||||
ffm->packet_size = avio_rb32(pb);
|
||||
@@ -289,7 +293,20 @@ static int ffm2_read_header(AVFormatContext *s)
|
||||
codec = st->codec;
|
||||
/* generic info */
|
||||
codec->codec_id = avio_rb32(pb);
|
||||
codec_desc = avcodec_descriptor_get(codec->codec_id);
|
||||
if (!codec_desc) {
|
||||
av_log(s, AV_LOG_ERROR, "Invalid codec id: %d\n", codec->codec_id);
|
||||
codec->codec_id = AV_CODEC_ID_NONE;
|
||||
goto fail;
|
||||
}
|
||||
codec->codec_type = avio_r8(pb);
|
||||
if (codec->codec_type != codec_desc->type) {
|
||||
av_log(s, AV_LOG_ERROR, "Codec type mismatch: expected %d, found %d\n",
|
||||
codec_desc->type, codec->codec_type);
|
||||
codec->codec_id = AV_CODEC_ID_NONE;
|
||||
codec->codec_type = AVMEDIA_TYPE_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
codec->bit_rate = avio_rb32(pb);
|
||||
codec->flags = avio_rb32(pb);
|
||||
codec->flags2 = avio_rb32(pb);
|
||||
@@ -389,6 +406,7 @@ static int ffm_read_header(AVFormatContext *s)
|
||||
AVStream *st;
|
||||
AVIOContext *pb = s->pb;
|
||||
AVCodecContext *codec;
|
||||
const AVCodecDescriptor *codec_desc;
|
||||
int i, nb_streams;
|
||||
uint32_t tag;
|
||||
|
||||
@@ -426,7 +444,20 @@ static int ffm_read_header(AVFormatContext *s)
|
||||
codec = st->codec;
|
||||
/* generic info */
|
||||
codec->codec_id = avio_rb32(pb);
|
||||
codec_desc = avcodec_descriptor_get(codec->codec_id);
|
||||
if (!codec_desc) {
|
||||
av_log(s, AV_LOG_ERROR, "Invalid codec id: %d\n", codec->codec_id);
|
||||
codec->codec_id = AV_CODEC_ID_NONE;
|
||||
goto fail;
|
||||
}
|
||||
codec->codec_type = avio_r8(pb); /* codec_type */
|
||||
if (codec->codec_type != codec_desc->type) {
|
||||
av_log(s, AV_LOG_ERROR, "Codec type mismatch: expected %d, found %d\n",
|
||||
codec_desc->type, codec->codec_type);
|
||||
codec->codec_id = AV_CODEC_ID_NONE;
|
||||
codec->codec_type = AVMEDIA_TYPE_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
codec->bit_rate = avio_rb32(pb);
|
||||
codec->flags = avio_rb32(pb);
|
||||
codec->flags2 = avio_rb32(pb);
|
||||
|
libavformat/hls.c

@@ -900,6 +900,24 @@ static void intercept_id3(struct playlist *pls, uint8_t *buf,
pls->is_id3_timestamped = (pls->id3_mpegts_timestamp != AV_NOPTS_VALUE);
}


static int check_url(const char *url) {
const char *proto_name = avio_find_protocol_name(url);

if (!proto_name)
return AVERROR_INVALIDDATA;

if (!av_strstart(proto_name, "http", NULL) && !av_strstart(proto_name, "file", NULL))
return AVERROR_INVALIDDATA;

if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
return 0;
else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
return AVERROR_INVALIDDATA;

return 0;
}

static int open_input(HLSContext *c, struct playlist *pls)
{
AVDictionary *opts = NULL;
@@ -927,6 +945,10 @@ static int open_input(HLSContext *c, struct playlist *pls)
seg->url, seg->url_offset, pls->index);

if (seg->key_type == KEY_NONE) {
ret = check_url(seg->url);
if (ret < 0)
goto cleanup;

ret = ffurl_open(&pls->input, seg->url, AVIO_FLAG_READ,
&pls->parent->interrupt_callback, &opts);

@@ -934,6 +956,10 @@ static int open_input(HLSContext *c, struct playlist *pls)
char iv[33], key[33], url[MAX_URL_SIZE];
if (strcmp(seg->key, pls->key_url)) {
URLContext *uc;
ret = check_url(seg->key);
if (ret < 0)
goto cleanup;

if (ffurl_open(&uc, seg->key, AVIO_FLAG_READ,
&pls->parent->interrupt_callback, &opts2) == 0) {
if (ffurl_read_complete(uc, pls->key, sizeof(pls->key))
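Applications that open user-supplied playlists can apply the same idea outside of the demuxer. The sketch below is an illustration only (the helper name is invented); it relies solely on avio_find_protocol_name(), the same function the patch itself uses.

```c
#include <libavformat/avformat.h>
#include <string.h>

/* Illustrative: accept only http(s) and local files, roughly mirroring check_url(). */
static int is_http_or_file(const char *url)
{
    const char *proto = avio_find_protocol_name(url);

    if (!proto)
        return 0;
    return !strcmp(proto, "file") || !strncmp(proto, "http", 4);
}
```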
@@ -156,7 +156,7 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
|
||||
|
||||
/* send both the decode map and the video data together */
|
||||
|
||||
if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
|
||||
if (av_new_packet(pkt, 2 + s->decode_map_chunk_size + s->video_chunk_size))
|
||||
return CHUNK_NOMEM;
|
||||
|
||||
if (s->has_palette) {
|
||||
@@ -178,7 +178,8 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
|
||||
avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
|
||||
s->decode_map_chunk_offset = 0;
|
||||
|
||||
if (avio_read(pb, pkt->data, s->decode_map_chunk_size) !=
|
||||
AV_WL16(pkt->data, s->decode_map_chunk_size);
|
||||
if (avio_read(pb, pkt->data + 2, s->decode_map_chunk_size) !=
|
||||
s->decode_map_chunk_size) {
|
||||
av_free_packet(pkt);
|
||||
return CHUNK_EOF;
|
||||
@@ -187,7 +188,7 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
|
||||
avio_seek(pb, s->video_chunk_offset, SEEK_SET);
|
||||
s->video_chunk_offset = 0;
|
||||
|
||||
if (avio_read(pb, pkt->data + s->decode_map_chunk_size,
|
||||
if (avio_read(pb, pkt->data + 2 + s->decode_map_chunk_size,
|
||||
s->video_chunk_size) != s->video_chunk_size) {
|
||||
av_free_packet(pkt);
|
||||
return CHUNK_EOF;
|
||||
|
@@ -172,6 +172,7 @@ typedef struct MOVContext {
|
||||
int bitrates_count;
|
||||
int moov_retry;
|
||||
int atom_depth;
|
||||
int enable_drefs;
|
||||
} MOVContext;
|
||||
|
||||
int ff_mp4_read_descr_len(AVIOContext *pb);
|
||||
|
@@ -363,6 +363,11 @@ static int read_header(AVFormatContext *avctx)
|
||||
if (ast)
|
||||
ast->duration = ast->nb_index_entries;
|
||||
|
||||
if ((vst && !vst->nb_index_entries) || (ast && !ast->nb_index_entries)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "no index entries found\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (vst && ast)
|
||||
avio_seek(pb, FFMIN(vst->index_entries[0].pos, ast->index_entries[0].pos), SEEK_SET);
|
||||
else if (vst)
|
||||
|
@@ -2465,13 +2465,23 @@ static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
|
||||
if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
|
||||
MOVDref *dref = &sc->drefs[sc->dref_id - 1];
|
||||
if (mov_open_dref(&sc->pb, c->fc->filename, dref, &c->fc->interrupt_callback,
|
||||
c->use_absolute_path, c->fc) < 0)
|
||||
av_log(c->fc, AV_LOG_ERROR,
|
||||
"stream %d, error opening alias: path='%s', dir='%s', "
|
||||
"filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n",
|
||||
if (c->enable_drefs) {
|
||||
if (mov_open_dref(&sc->pb, c->fc->filename, dref, &c->fc->interrupt_callback,
|
||||
c->use_absolute_path, c->fc) < 0)
|
||||
av_log(c->fc, AV_LOG_ERROR,
|
||||
"stream %d, error opening alias: path='%s', dir='%s', "
|
||||
"filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n",
|
||||
st->index, dref->path, dref->dir, dref->filename,
|
||||
dref->volume, dref->nlvl_from, dref->nlvl_to);
|
||||
} else {
|
||||
av_log(c->fc, AV_LOG_WARNING,
|
||||
"Skipped opening external track: "
|
||||
"stream %d, alias: path='%s', dir='%s', "
|
||||
"filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d."
|
||||
"Set enable_drefs to allow this.\n",
|
||||
st->index, dref->path, dref->dir, dref->filename,
|
||||
dref->volume, dref->nlvl_from, dref->nlvl_to);
|
||||
}
|
||||
} else {
|
||||
sc->pb = c->fc->pb;
|
||||
sc->pb_is_copied = 1;
|
||||
@@ -3904,6 +3914,8 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define OFFSET(x) offsetof(MOVContext, x)
|
||||
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
|
||||
static const AVOption options[] = {
|
||||
{"use_absolute_path",
|
||||
"allow using absolute path when opening alias, this is a possible security issue",
|
||||
@@ -3911,6 +3923,8 @@ static const AVOption options[] = {
|
||||
0, 1, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_DECODING_PARAM},
|
||||
{"ignore_editlist", "", offsetof(MOVContext, ignore_editlist), FF_OPT_TYPE_INT, {.i64 = 0},
|
||||
0, 1, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_DECODING_PARAM},
|
||||
{ "enable_drefs", "Enable external track support.", OFFSET(enable_drefs), AV_OPT_TYPE_INT,
|
||||
{.i64 = 0}, 0, 1, FLAGS },
|
||||
{NULL}
|
||||
};
|
||||
|
||||
|
@@ -2007,6 +2007,10 @@ static int mxf_write_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
}
|
||||
mxf->edit_units_count++;
|
||||
} else if (!mxf->edit_unit_byte_count && st->index == 1) {
|
||||
if (!mxf->edit_units_count) {
|
||||
av_log(s, AV_LOG_ERROR, "No packets in first stream\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
mxf->index_entries[mxf->edit_units_count-1].slice_offset =
|
||||
mxf->body_offset - mxf->index_entries[mxf->edit_units_count-1].offset;
|
||||
}
|
||||
|
@@ -885,7 +885,7 @@ static int read_sm_data(AVFormatContext *s, AVIOContext *bc, AVPacket *pkt, int

get_str(bc, type_str, sizeof(type_str));
value_len = ffio_read_varlen(bc);
if (avio_tell(bc) + value_len >= maxpos)
if (value_len < 0 || value_len >= maxpos - avio_tell(bc))
return AVERROR_INVALIDDATA;
if (!strcmp(name, "Palette")) {
dst = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, value_len);

@@ -1082,7 +1082,8 @@ static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code)
ret = av_new_packet(pkt, size + nut->header_len[header_idx]);
if (ret < 0)
return ret;
memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]);
if (nut->header[header_idx])
memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]);
pkt->pos = avio_tell(bc); // FIXME
if (stc->last_flags & FLAG_SM_DATA) {
int sm_size;
@@ -171,6 +171,15 @@ static int nuv_header(AVFormatContext *s)
if (aspect > 0.9999 && aspect < 1.0001)
aspect = 4.0 / 3.0;
fps = av_int2double(avio_rl64(pb));
if (fps < 0.0f) {
if (s->error_recognition & AV_EF_EXPLODE) {
av_log(s, AV_LOG_ERROR, "Invalid frame rate %f\n", fps);
return AVERROR_INVALIDDATA;
} else {
av_log(s, AV_LOG_WARNING, "Invalid frame rate %f, setting to 0.\n", fps);
fps = 0.0f;
}
}

// number of packets per stream type, -1 means unknown, e.g. streaming
v_packs = avio_rl32(pb);
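
The negative frame rate only becomes a hard failure when the caller has opted into strict error detection. A minimal sketch (not part of this diff; the file name is illustrative) of requesting that behaviour through the generic "err_detect" option, which maps to AV_EF_EXPLODE:

    AVFormatContext *ic = NULL;
    AVDictionary *opts = NULL;

    /* Treat recoverable header problems, like the invalid fps above,
     * as fatal instead of warn-and-continue. */
    av_dict_set(&opts, "err_detect", "explode", 0);
    if (avformat_open_input(&ic, "input.nuv", NULL, &opts) < 0)
        fprintf(stderr, "open failed (broken header rejected)\n");
    av_dict_free(&opts);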
@@ -2758,10 +2758,14 @@ static int get_std_framerate(int i)
return (i + 1) * 1001;
i -= 30*12;

if (i < 7)
return ((const int[]) { 40, 48, 50, 60, 80, 120, 240})[i] * 1001 * 12;
if (i < 30)
return (i + 31) * 1001 * 12;
i -= 30;

i -= 7;
if (i < 3)
return ((const int[]) { 80, 120, 240})[i] * 1001 * 12;

i -= 3;

return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i] * 1000 * 12;
}
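
Since the old and new lines of this hunk are interleaved above, the patched lookup, reassembled here from the visible lines only (indentation reconstructed; the branch that yields (i + 1) * 1001 lies above the hunk), plausibly reads:

    i -= 30*12;

    if (i < 30)
        return (i + 31) * 1001 * 12;
    i -= 30;

    if (i < 3)
        return ((const int[]) { 80, 120, 240 })[i] * 1001 * 12;
    i -= 3;

    return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i] * 1000 * 12;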
@@ -1468,10 +1468,11 @@ int av_opt_set_dict2(void *obj, AVDictionary **options, int search_flags)
while ((t = av_dict_get(*options, "", t, AV_DICT_IGNORE_SUFFIX))) {
ret = av_opt_set(obj, t->key, t->value, search_flags);
if (ret == AVERROR_OPTION_NOT_FOUND)
av_dict_set(&tmp, t->key, t->value, 0);
else if (ret < 0) {
ret = av_dict_set(&tmp, t->key, t->value, 0);
if (ret < 0) {
av_log(obj, AV_LOG_ERROR, "Error setting option %s to value %s.\n", t->key, t->value);
break;
av_dict_free(&tmp);
return ret;
}
ret = 0;
}
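
A brief usage sketch (not part of this diff) of the function being fixed; the swscale context is just one convenient AVOptions-enabled object, and the unknown option name is made up for illustration:

    struct SwsContext *sws = sws_alloc_context();
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "sws_flags", "bicubic", 0);    /* known option */
    av_dict_set(&opts, "not_a_real_option", "x", 0);  /* unknown, stays in the dict */

    ret = av_opt_set_dict2(sws, &opts, 0);
    if (ret < 0) {
        /* with this patch, a failure inside the fallback av_dict_set()
         * is reported here instead of being silently dropped */
    }
    /* entries that matched no option are left in opts for the caller */
    av_dict_free(&opts);
    sws_freeContext(sws);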
@@ -332,10 +332,10 @@ VECTOR_FMUL_REVERSE
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
shl offsetd, 2
add v1q, offsetq
add v2q, offsetq
neg offsetq
shl offsetq, 2
sub v1q, offsetq
sub v2q, offsetq
xorps xmm0, xmm0
.loop:
movaps xmm1, [v1q+offsetq]
@@ -399,7 +399,7 @@ bad_option:
for (y = 0; y < H; y++)
for (x = 0; x < W * 4; x++)
rgb_data[ x + y * 4 * W] = av_lfg_get(&rand);
sws_scale(sws, rgb_src, rgb_stride, 0, H, src, stride);
sws_scale(sws, rgb_src, rgb_stride, 0, H / 12, src, stride);
sws_freeContext(sws);
av_free(rgb_data);
@@ -895,11 +895,19 @@ int attribute_align_arg sws_scale(struct SwsContext *c,
const uint8_t *src2[4];
uint8_t *dst2[4];
uint8_t *rgb0_tmp = NULL;
int macro_height = isBayer(c->srcFormat) ? 2 : (1 << c->chrSrcVSubSample);

if (!srcStride || !dstStride || !dst || !srcSlice) {
av_log(c, AV_LOG_ERROR, "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
return 0;
}

if ((srcSliceY & (macro_height-1)) ||
((srcSliceH& (macro_height-1)) && srcSliceY + srcSliceH != c->srcH) ||
srcSliceY + srcSliceH > c->srcH) {
av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
return AVERROR(EINVAL);
}
if (c->cascaded_context[0] && srcSliceY == 0 && srcSliceH == c->cascaded_context[0]->srcH) {
ret = sws_scale(c->cascaded_context[0],
srcSlice, srcStride, srcSliceY, srcSliceH,
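
In practice the new checks only reject slice calls whose srcSliceY/srcSliceH are not multiples of the chroma (or Bayer) macro height. A whole-frame call, sketched here with assumed src_*/dst_* variables (not part of this diff), always satisfies them:

    struct SwsContext *sws = sws_getContext(src_w, src_h, AV_PIX_FMT_YUV420P,
                                            dst_w, dst_h, AV_PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (sws) {
        /* srcSliceY == 0 and srcSliceH == full source height */
        sws_scale(sws, (const uint8_t * const *)src_data, src_linesize,
                  0, src_h, dst_data, dst_linesize);
        sws_freeContext(sws);
    }

Slice-based callers must keep srcSliceY, and srcSliceH for every slice but the last, aligned to that macro height (2 for Bayer input).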
@@ -39,7 +39,8 @@

#define STR(s) AV_TOSTRING(s) // AV_STRINGIFY is too long

#define YUVRGB_TABLE_HEADROOM 256
#define YUVRGB_TABLE_HEADROOM 512
#define YUVRGB_TABLE_LUMA_HEADROOM 512

#define MAX_FILTER_SIZE SWS_MAX_FILTER_SIZE
@@ -1058,6 +1058,8 @@ static int bayer_to_rgb24_wrapper(SwsContext *c, const uint8_t* src[], int srcSt
default: return 0;
}

av_assert0(srcSliceH > 1);

copy(srcPtr, srcStride[0], dstPtr, dstStride[0], c->srcW);
srcPtr += 2 * srcStride[0];
dstPtr += 2 * dstStride[0];

@@ -1068,7 +1070,10 @@ static int bayer_to_rgb24_wrapper(SwsContext *c, const uint8_t* src[], int srcSt
dstPtr += 2 * dstStride[0];
}

copy(srcPtr, srcStride[0], dstPtr, dstStride[0], c->srcW);
if (i + 1 == srcSliceH) {
copy(srcPtr, -srcStride[0], dstPtr, -dstStride[0], c->srcW);
} else if (i < srcSliceH)
copy(srcPtr, srcStride[0], dstPtr, dstStride[0], c->srcW);
return srcSliceH;
}

@@ -1104,6 +1109,8 @@ static int bayer_to_yv12_wrapper(SwsContext *c, const uint8_t* src[], int srcStr
default: return 0;
}

av_assert0(srcSliceH > 1);

copy(srcPtr, srcStride[0], dstY, dstU, dstV, dstStride[0], c->srcW, c->input_rgb2yuv_table);
srcPtr += 2 * srcStride[0];
dstY += 2 * dstStride[0];

@@ -1118,7 +1125,10 @@ static int bayer_to_yv12_wrapper(SwsContext *c, const uint8_t* src[], int srcStr
dstV += dstStride[1];
}

copy(srcPtr, srcStride[0], dstY, dstU, dstV, dstStride[0], c->srcW, c->input_rgb2yuv_table);
if (i + 1 == srcSliceH) {
copy(srcPtr, -srcStride[0], dstY, dstU, dstV, -dstStride[0], c->srcW, c->input_rgb2yuv_table);
} else if (i < srcSliceH)
copy(srcPtr, srcStride[0], dstY, dstU, dstV, dstStride[0], c->srcW, c->input_rgb2yuv_table);
return srcSliceH;
}
@@ -1056,6 +1056,12 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
srcW, srcH, dstW, dstH);
return AVERROR(EINVAL);
}
if (flags & SWS_FAST_BILINEAR) {
if (srcW < 8 || dstW < 8) {
flags ^= SWS_FAST_BILINEAR | SWS_BILINEAR;
c->flags = flags;
}
}

if (!dstFilter)
dstFilter = &dummyFilter;

@@ -1532,6 +1538,9 @@ fail: // FIXME replace things by appropriate error codes
int tmpH = sqrt(srcH * (int64_t)dstH);
enum AVPixelFormat tmpFormat = AV_PIX_FMT_YUV420P;

if (isALPHA(srcFormat))
tmpFormat = AV_PIX_FMT_YUVA420P;

if (srcW*(int64_t)srcH <= 4LL*dstW*dstH)
return AVERROR(EINVAL);
@@ -1434,7 +1434,9 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWid
dst+= dstStride;

for (y=1; y<srcHeight; y++) {
const x86_reg mmxSize= srcWidth&~15;
x86_reg mmxSize= srcWidth&~15;

if (mmxSize) {
__asm__ volatile(
"mov %4, %%"REG_a" \n\t"
"movq "MANGLE(mmx_ff)", %%mm0 \n\t"

@@ -1481,6 +1483,11 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWid
NAMED_CONSTRAINTS_ADD(mmx_ff)
: "%"REG_a
);
} else {
mmxSize = 1;
dst[0] = (src[0] * 3 + src[srcStride]) >> 2;
dst[dstStride] = (src[0] + 3 * src[srcStride]) >> 2;
}

for (x=mmxSize-1; x<srcWidth-1; x++) {
dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2;

@@ -1887,8 +1894,9 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
for (h=0; h < height; h++) {
int w;

if (width >= 16)
if (width >= 16) {
#if COMPILE_TEMPLATE_SSE2
if (!((((intptr_t)src1) | ((intptr_t)src2) | ((intptr_t)dest))&15)) {
__asm__(
"xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t"

@@ -1907,7 +1915,8 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
: "memory", XMM_CLOBBERS("xmm0", "xmm1", "xmm2",) "%"REG_a
);
#else
} else
#endif
__asm__(
"xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t"

@@ -1933,7 +1942,8 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
: "memory", "%"REG_a
);
#endif

}
for (w= (width&(~15)); w < width; w++) {
dest[2*w+0] = src1[w];
dest[2*w+1] = src2[w];
@@ -720,7 +720,8 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
uint16_t *y_table16;
uint32_t *y_table32;
int i, base, rbase, gbase, bbase, av_uninit(abase), needAlpha;
const int yoffs = fullRange ? 384 : 326;
const int yoffs = (fullRange ? 384 : 326) + YUVRGB_TABLE_LUMA_HEADROOM;
const int table_plane_size = 1024 + 2*YUVRGB_TABLE_LUMA_HEADROOM;

int64_t crv = inv_table[0];
int64_t cbu = inv_table[1];

@@ -777,10 +778,10 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
return AVERROR(ENOMEM);
switch (bpp) {
case 1:
ALLOC_YUV_TABLE(1024);
ALLOC_YUV_TABLE(table_plane_size);
y_table = c->yuvTable;
yb = -(384 << 16) - oy;
for (i = 0; i < 1024 - 110; i++) {
yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
for (i = 0; i < table_plane_size - 110; i++) {
y_table[i + 110] = av_clip_uint8((yb + 0x8000) >> 16) >> 7;
yb += cy;
}

@@ -792,60 +793,60 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
rbase = isRgb ? 3 : 0;
gbase = 1;
bbase = isRgb ? 0 : 3;
ALLOC_YUV_TABLE(1024 * 3);
ALLOC_YUV_TABLE(table_plane_size * 3);
y_table = c->yuvTable;
yb = -(384 << 16) - oy;
for (i = 0; i < 1024 - 110; i++) {
yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
for (i = 0; i < table_plane_size - 110; i++) {
int yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table[i + 110] = (yval >> 7) << rbase;
y_table[i + 37 + 1024] = ((yval + 43) / 85) << gbase;
y_table[i + 110 + 2048] = (yval >> 7) << bbase;
y_table[i + 37 + table_plane_size] = ((yval + 43) / 85) << gbase;
y_table[i + 110 + 2*table_plane_size] = (yval >> 7) << bbase;
yb += cy;
}
fill_table(c->table_rV, 1, crv, y_table + yoffs);
fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
fill_table(c->table_gU, 1, cgu, y_table + yoffs + table_plane_size);
fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 1, cgv);
break;
case 8:
rbase = isRgb ? 5 : 0;
gbase = isRgb ? 2 : 3;
bbase = isRgb ? 0 : 6;
ALLOC_YUV_TABLE(1024 * 3);
ALLOC_YUV_TABLE(table_plane_size * 3);
y_table = c->yuvTable;
yb = -(384 << 16) - oy;
for (i = 0; i < 1024 - 38; i++) {
yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
for (i = 0; i < table_plane_size - 38; i++) {
int yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table[i + 16] = ((yval + 18) / 36) << rbase;
y_table[i + 16 + 1024] = ((yval + 18) / 36) << gbase;
y_table[i + 37 + 2048] = ((yval + 43) / 85) << bbase;
y_table[i + 16 + table_plane_size] = ((yval + 18) / 36) << gbase;
y_table[i + 37 + 2*table_plane_size] = ((yval + 43) / 85) << bbase;
yb += cy;
}
fill_table(c->table_rV, 1, crv, y_table + yoffs);
fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
fill_table(c->table_gU, 1, cgu, y_table + yoffs + table_plane_size);
fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 1, cgv);
break;
case 12:
rbase = isRgb ? 8 : 0;
gbase = 4;
bbase = isRgb ? 0 : 8;
ALLOC_YUV_TABLE(1024 * 3 * 2);
ALLOC_YUV_TABLE(table_plane_size * 3 * 2);
y_table16 = c->yuvTable;
yb = -(384 << 16) - oy;
for (i = 0; i < 1024; i++) {
yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
for (i = 0; i < table_plane_size; i++) {
uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table16[i] = (yval >> 4) << rbase;
y_table16[i + 1024] = (yval >> 4) << gbase;
y_table16[i + 2048] = (yval >> 4) << bbase;
y_table16[i + table_plane_size] = (yval >> 4) << gbase;
y_table16[i + 2*table_plane_size] = (yval >> 4) << bbase;
yb += cy;
}
if (isNotNe)
for (i = 0; i < 1024 * 3; i++)
for (i = 0; i < table_plane_size * 3; i++)
y_table16[i] = av_bswap16(y_table16[i]);
fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + table_plane_size);
fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 2, cgv);
break;
case 15:

@@ -853,30 +854,30 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
rbase = isRgb ? bpp - 5 : 0;
gbase = 5;
bbase = isRgb ? 0 : (bpp - 5);
ALLOC_YUV_TABLE(1024 * 3 * 2);
ALLOC_YUV_TABLE(table_plane_size * 3 * 2);
y_table16 = c->yuvTable;
yb = -(384 << 16) - oy;
for (i = 0; i < 1024; i++) {
yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
for (i = 0; i < table_plane_size; i++) {
uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table16[i] = (yval >> 3) << rbase;
y_table16[i + 1024] = (yval >> (18 - bpp)) << gbase;
y_table16[i + 2048] = (yval >> 3) << bbase;
y_table16[i + table_plane_size] = (yval >> (18 - bpp)) << gbase;
y_table16[i + 2*table_plane_size] = (yval >> 3) << bbase;
yb += cy;
}
if (isNotNe)
for (i = 0; i < 1024 * 3; i++)
for (i = 0; i < table_plane_size * 3; i++)
y_table16[i] = av_bswap16(y_table16[i]);
fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + table_plane_size);
fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 2, cgv);
break;
case 24:
case 48:
ALLOC_YUV_TABLE(1024);
ALLOC_YUV_TABLE(table_plane_size);
y_table = c->yuvTable;
yb = -(384 << 16) - oy;
for (i = 0; i < 1024; i++) {
yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
for (i = 0; i < table_plane_size; i++) {
y_table[i] = av_clip_uint8((yb + 0x8000) >> 16);
yb += cy;
}

@@ -895,20 +896,20 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
needAlpha = CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat);
if (!needAlpha)
abase = (base + 24) & 31;
ALLOC_YUV_TABLE(1024 * 3 * 4);
ALLOC_YUV_TABLE(table_plane_size * 3 * 4);
y_table32 = c->yuvTable;
yb = -(384 << 16) - oy;
for (i = 0; i < 1024; i++) {
yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
for (i = 0; i < table_plane_size; i++) {
unsigned yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table32[i] = (yval << rbase) +
(needAlpha ? 0 : (255u << abase));
y_table32[i + 1024] = yval << gbase;
y_table32[i + 2048] = yval << bbase;
y_table32[i + table_plane_size] = yval << gbase;
y_table32[i + 2*table_plane_size] = yval << bbase;
yb += cy;
}
fill_table(c->table_rV, 4, crv, y_table32 + yoffs);
fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + 1024);
fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2048);
fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + table_plane_size);
fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 4, cgv);
break;
default: