Compare commits

1 commit: da2186be81
.gitattributes (vendored): 1 line changed

@@ -1 +0,0 @@
*.pnm -diff -text
Changelog: 234 lines changed

@@ -2,240 +2,6 @@ Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version <next>:

version 2.4.8:
- avutil/cpu: add missing check for mmxext to av_force_cpu_flags
- avcodec/msrledec: restructure msrle_decode_pal4() based on the line number instead of the pixel pointer
- avcodec/hevc_ps: Check cropping parameters more correctly
- avcodec/dnxhddec: Check that the frame is interlaced before using cur_field
- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
- avformat/mov: Check for string truncation in mov_open_dref()
- ac3_fixed: fix out-of-bound read
- avcodec/012v: redesign main loop
- avcodec/012v: Check dimensions more completely
- asfenc: fix leaking asf->index_ptr on error
- avcodec/options_table: remove extradata_size from the AVOptions table
- ffmdec: limit the backward seek to the last resync position
- ffmdec: make sure the time base is valid
- ffmdec: fix infinite loop at EOF
- avcodec/tiff: move bpp check to after "end:"
- avcodec/opusdec: Fix delayed sample value
- avcodec/utils: Align YUV411 by as much as the other YUV variants
- vp9: fix segmentation map retention with threading enabled.
- doc/protocols/tcp: fix units of listen_timeout option value, from microseconds to milliseconds
- fix VP9 packet decoder returning 0 instead of the used data size
- avformat/bit: only accept the g729 codec and 1 channel
- avformat/adxdec: check avctx->channels for invalid values
- Fix buffer_size argument to init_put_bits() in multiple encoders.
- mips/acelp_filters: fix incorrect register constraint
- avcodec/hevc_ps: Sanity checks for some log2_* values
- avcodec/zmbv: Check len before reading in decode_frame()
- avcodec/snowdec: Fix ref value check
- swscale/utils: More carefully merge and clear coefficients outside the input
- avcodec/a64multienc: fix use of uninitialized values in to_meta_with_crop
- avcodec/a64multienc: don't set incorrect packet size
- webp: ensure that each transform is only used once
- avcodec/hevc_ps: More complete window reset
- vp9: make above buffer pointer 32-byte aligned.
- avformat/rm: limit packet size
- avcodec/webp: validate the distance prefix code
- avcodec/gif: fix off by one in column offsetting finding


version 2.4.7:
- avcodec/flac_parser: fix handling EOF if no headers are found
- avfilter/vf_framepack: Check and update frame_rate
- avcodec/hevc: Fix handling of skipped_bytes() reallocation failures
- qpeg: avoid pointless invalid memcpy()
- avcodec/arm/videodsp_armv5te: Fix linking failure with "g++ -shared -D__STDC_CONSTANT_MACROS -o test.so ... libavcodec.a"
- avcodec/mjpegdec: Skip blocks which are outside the visible area
- lavc/aarch64: Do not use the neon horizontal chroma loop filter for H.264 4:2:2. (cherry picked from commit 4faea46bd906b3897018736208123aa36c3f45d5)
- avcodec/h264_slice: assert that reinit does not occur after the first slice
- avcodec/h264_slice: ignore SAR changes in slices after the first
- avcodec/h264_slice: Check picture structure before setting the related fields
- avcodec/h264_slice: Do not change frame_num after the first slice
- avutil/opt: Fix type used to access AV_OPT_TYPE_SAMPLE_FMT
- avutil/opt: Fix types used to access AV_OPT_TYPE_PIXEL_FMT
- avcodec/h264: Be more strict on rejecting pps/sps changes
- avcodec/h264: Be more strict on rejecting pps_id changes
- avcodec/h264_ps: More completely check the bit depths
- avformat/thp: Check av_get_packet() for failure not only for partial output
- swscale/utils: Limit filter shifting so as not to read from prior the array
- avcodec/mpegvideo_motion: Fix gmc chroma dimensions
- avcodec/mjpegdec: Check number of components for JPEG-LS
- avcodec/mjpegdec: Check escape sequence validity
- avformat/mpc8: Use uint64_t in *_get_v() to avoid undefined behavior
- avformat/mpc8: fix broken pointer math
- avformat/mpc8: fix hang with fuzzed file
- avformat/tta: fix crash with corrupted files

version 2.4.6:
- doc/examples: fix lib math dep for decoding_encoding
- avformat/movenc: workaround bug in "PathScale EKOPath(tm) Compiler Suite Version 4.0.12.1"
- vp9: fix parser return values in error case
- ffmpeg: Clear error message array at init.
- avcodec/dvdsubdec: fix accessing dangling pointers
- avcodec/dvdsubdec: error on bitmaps with size 0
- avformat/mov: Fix mixed declaration and statement warning
- cmdutils: Use 64bit for file size/offset related variable in cmdutils_read_file()
- avformat/utils: Clear pointer in ff_alloc_extradata() to avoid leaving a stale pointer in memory
- avformat/matroskadec: Use av_freep() to avoid leaving stale pointers in memory
- lavfi: check av_strdup() return value
- mov: Fix negative size calculation in mov_read_default().
- avformat/mov: fix integer overflow in mov_read_udta_string()
- mov: Avoid overflow with mov_metadata_raw()
- avcodec/dvdsubdec: fix out of bounds accesses
- avfilter/vf_sab: fix filtering tiny images
- avformat/flvdec: Increase string array size
- avformat/flvdec: do not inject dts=0 metadata packets which failed to be parsed into a new data stream
- avformat/cdxl: Fix integer overflow of image_size
- avformat/segment: Use av_freep() avoid leaving stale pointers in memory
- avformat/mov: Fix memleaks for duplicate STCO/CO64/STSC atoms
- mov: avoid a memleak when multiple stss boxes are presen

version 2.4.5:
- lavu/frame: fix malloc error path in av_frame_copy_props()
- avformat/utils: Do not update programs streams from program-less streams in update_wrap_reference()
- avformat/aviobuf: Check that avio_seek() target is non negative
- swresample/soxr_resample: fix error handling
- avformat/flvdec: fix potential use of uninitialized variables
- avformat/matroskadec: fix handling of recursive SeekHead elements
- doc/examples/transcoding: check encoder before using it
- swscale/x86/rgb2rgb_template: fix crash with tiny size and nv12 output
- avformat/rmdec: Check codec_data_size
- avformat/aviobuf: Fix infinite loop in ff_get_line()
- vc1: Do not assume seek happens after decoding
- mmvideo: check frame dimensions
- jvdec: check frame dimensions
- avcodec/indeo3: ensure offsets are non negative
- avcodec/h264: Check *log2_weight_denom
- avcodec/hevc_ps: Check diff_cu_qp_delta_depth
- avcodec/h264: Clear delayed_pic on deallocation
- avcodec/hevc: clear filter_slice_edges() on allocation
- avcodec/dcadec: Check that the added xch channel isnt already there
- avcodec/indeo3: use signed variables to avoid underflow
- swscale: increase yuv2rgb table headroom
- avformat/mov: fix integer overflow of size
- avformat/mov: check atom nesting depth
- avcodec/utvideodec: Fix handling of slice_height=0
- avcodec/vmdvideo: Check len before using it in method 3
- avformat/flvdec: Use av_freep() avoid leaving stale pointers in memory
- avformat/hdsenc: Use av_freep() avoid leaving stale pointers in memory
- configure: create the tests directory like the doc directory
- v4l2: Make use of the VIDIOC_ENUM_FRAMESIZES ioctl on OpenBSD
- avcodec/motion_est: use 2x8x8 for interlaced qpel
- Treat all '*.pnm' files as non-text file

version 2.4.4:
- avformat: replace some odd 30-60 rates by higher less odd ones in get_std_framerate()
- swscale: fix yuv2yuvX_8 assembly on x86
- avcodec/hevc_ps: Check num_long_term_ref_pics_sps
- avcodec/mjpegdec: Fix integer overflow in shift
- avcodec/hevc_ps: Check return code from pps_range_extensions()
- avcodec/rawdec: Check the return code of avpicture_get_size()
- avcodec/pngdec: Check IHDR/IDAT order
- avcodec/flacdec: Call ff_flacdsp_init() unconditionally
- avcodec/utils: Check that the data is complete in avpriv_bprint_to_extradata()
- avcodec/mjpegdec: Fix context fields becoming inconsistent
- avcodec/mjpegdec: Check for pixfmtid 0x42111100 || 0x24111100 with more than 8 bits
- swscale/x86/rgb2rgb_template: handle the first 2 lines with C in rgb24toyv12_*()
- doc/APIchanges: Fix some wrong versions
- avformat/hlsenc: Free context after hls_append_segment
- avcodec/mpeg4video_parser: fix spurious extradata parse warnings
- lavu/opt: fix av_opt_get function
- avcodec/wmaprodec: Fix integer overflow in sfb_offsets initialization
- avcodec/utvideodec: fix assumtation that slice_height >= 1
- avcodec/options_table fix min of audio channels and sample rate
- libavutil/thread.h: Support OS/2 threads
- fix Makefile objects for pulseaudio support
- opusdec: make sure all substreams have the same number of coded samples
- lavu: add wrappers for the pthreads mutex API
- avformat/avidec: fix handling dv in avi
- avfilter/vf_lut: gammaval709()
- cinedec: report white balance gain coefficients using metadata
- swscale/utils: support bayer input + scaling, and bayer input + any supported output
- swscale: support internal scaler cascades
- avformat/dtsdec: dts_probe: check reserved bit, check lfe, check sr_code similarity
- avformat/segment: export inner muxer timebase
- Remove fminf() emulation, fix build issues
- avcodec/mpegaudio_parser: fix off by 1 error in bitrate calculation
- Use -fno-optimize-sibling-calls on parisc also for gcc 4.9.
- ffmpeg_opt: store canvas size in decoder context
- avcodec/mpeg12dec: do not trust AVCodecContext input dimensions

version 2.4.3:
- avcodec/svq1dec: zero terminate embedded message before printing
- avcodec/cook: check that the subpacket sizes fit in block_align
- avcodec/g2meet: check tile dimensions to avoid integer overflow
- avcodec/utils: Align dimensions by at least their chroma sub-sampling factors.
- avcodec/dnxhddec: treat pix_fmt like width/height
- avcodec/dxa: check dimensions
- avcodec/dirac_arith: fix integer overflow
- avcodec/diracdec: Tighter checks on CODEBLOCKS_X/Y
- avcodec/diracdec: Use 64bit in calculation of codeblock coordinates
- avcodec/sgidec: fix count check
- avcodec/sgidec: fix linesize for 16bit
- avcodec/hevc_ps: Check default display window bitstream and skip if invalid
- avcodec/tiffenc: properly compute packet size
- lavd: export all symbols with av_ prefix
- avformat/mxfdec: Fix termination of mxf_data_essence_container_uls
- postproc: fix qp count
- postproc/postprocess: fix quant store for fq mode
- vf_drawtext: add missing clear of pointers after av_expr_free()
- utvideoenc: properly set slice height/last line
- swresample: fix sample drop loop end condition
- resample: Avoid off-by-1 errors in PTS calcs.
- imc: fix order of operations in coefficients read
- hevc_mvs: make sure to always initialize the temporal MV fully
- hevc_mvs: initialize the temporal MV in case of missing reference

version 2.4.2:
- avcodec/on2avc: Check number of channels
- avcodec/hevc: fix chroma transform_add size
- avcodec/h264: Check mode before considering mixed mode intra prediction
- avformat/mpegts: use a padded buffer in read_sl_header()
- avformat/mpegts: Check desc_len / get8() return code
- avcodec/vorbisdec: Fix off by 1 error in ptns_to_read
- sdp: add support for H.261
- avcodec/svq3: Do not memcpy AVFrame
- avcodec/smc: fix off by 1 error
- avcodec/qpeg: fix off by 1 error in MV bounds check
- avcodec/gifdec: factorize interleave end handling out
- avcodec/cinepak: fix integer underflow
- avcodec/pngdec: Check bits per pixel before setting monoblack pixel format
- avcodec/pngdec: Calculate MPNG bytewidth more defensively
- avcodec/tiff: more completely check bpp/bppcount
- avcodec/mmvideo: Bounds check 2nd line of HHV Intra blocks
- avcodec/h263dec: Fix decoding messenger.h263
- avcodec/utils: Add case for jv to avcodec_align_dimensions2()
- avcodec/mjpegdec: check bits per pixel for changes similar to dimensions
- avcodec/jpeglsdec: Check run value more completely in ls_decode_line()
- avformat/hlsenc: export inner muxer timebase
- configure: add noexecstack to linker options if supported.
- avcodec/ac3enc_template: fix out of array read
- avutil/x86/cpu: fix cpuid sub-leaf selection
- avformat/img2dec: enable generic seeking for image pipes
- avformat/img2dec: initialize pkt->pos for image pipes
- avformat/img2dec: pass error code and signal EOF
- avformat/img2dec: fix error code at EOF for pipes
- libavutil/opt: fix av_opt_set_channel_layout() to access correct memory address
- tests/fate-run.sh: Cat .err file in case of error with V>0
- avformat/riffenc: Filter out "BottomUp" in ff_put_bmp_header()
- avcodec/webp: fix default palette color 0xff000000 -> 0x00000000
- avcodec/asvenc: fix AAN scaling
- Fix compile error on arm4/arm5 platform


version 2.4.1:
- swscale: Allow chroma samples to be above and to the left of luma samples
- avcodec/libilbc: support for latest git of libilbc
- avcodec/webp: treat out-of-bound palette index as translucent black
- vf_deshake: rename Transform.vector to Transform.vec to avoid compiler confusion
- apetag: Fix APE tag size check
- tools/crypto_bench: fix build when AV_READ_TIME is unavailable


version 2.4:
- Icecast protocol
- ported lenscorrection filter from frei0r filter
- large optimizations in dctdnoiz to make it usable
Makefile: 2 lines changed

@@ -111,7 +111,7 @@ endef

$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))

ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
ffprobe.o cmdutils.o : libavutil/ffversion.h

$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
$(CP) $< $@
@@ -53,8 +53,6 @@
• API for live metadata updates through event flags.
• UTF-16 support in text subtitles formats.
• The ASS muxer now reorders the Dialogue events properly.
• support for H.261 RTP payload format (RFC 4587)
• HEVC/H.265 RTP payload format (draft v6) depacketizer

┌────────────────────────────┐
│ libavfilter │
cmdutils.c: 36 lines changed

@@ -444,7 +444,7 @@ int locate_option(int argc, char **argv, const OptionDef *options,
(po->name && !strcmp(optname, po->name)))
return i;

if (!po->name || po->flags & HAS_ARG)
if (po->flags & HAS_ARG)
i++;
}
return 0;

@@ -1857,7 +1857,7 @@ int read_yesno(void)

int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
{
int64_t ret;
int ret;
FILE *f = av_fopen_utf8(filename, "rb");

if (!f) {

@@ -1865,31 +1865,19 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
strerror(errno));
return AVERROR(errno);
}

ret = fseek(f, 0, SEEK_END);
if (ret == -1) {
ret = AVERROR(errno);
goto out;
fseek(f, 0, SEEK_END);
*size = ftell(f);
fseek(f, 0, SEEK_SET);
if (*size == (size_t)-1) {
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
fclose(f);
return AVERROR(errno);
}

ret = ftell(f);
if (ret < 0) {
ret = AVERROR(errno);
goto out;
}
*size = ret;

ret = fseek(f, 0, SEEK_SET);
if (ret == -1) {
ret = AVERROR(errno);
goto out;
}

*bufptr = av_malloc(*size + 1);
if (!*bufptr) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
ret = AVERROR(ENOMEM);
goto out;
fclose(f);
return AVERROR(ENOMEM);
}
ret = fread(*bufptr, 1, *size, f);
if (ret < *size) {

@@ -1905,8 +1893,6 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
(*bufptr)[(*size)++] = '\0';
}

out:
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", av_err2str(ret));
fclose(f);
return ret;
}
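The cmdutils_read_file() hunk above replaces unchecked fseek()/ftell() calls with checked ones that convert errno into an error code and clean up through a single exit label. For context, a minimal standalone sketch of the same pattern in plain C (illustrative only, not FFmpeg code; the helper name is hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Read a whole file into a malloc'd, NUL-terminated buffer, checking
 * every libc call, as the hardened cmdutils_read_file() does. */
static int read_whole_file(const char *name, char **buf, size_t *size)
{
    FILE *f = fopen(name, "rb");
    long pos;
    int ret = 0;

    if (!f)
        return -errno;
    if (fseek(f, 0, SEEK_END) == -1 || (pos = ftell(f)) < 0 ||
        fseek(f, 0, SEEK_SET) == -1) {
        ret = -errno;
        goto out;
    }
    *size = pos;
    *buf  = malloc(*size + 1);
    if (!*buf) {
        ret = -ENOMEM;
        goto out;
    }
    if (fread(*buf, 1, *size, f) < *size) {
        free(*buf);
        *buf = NULL;
        ret  = -EIO;
    } else {
        (*buf)[*size] = '\0';
    }
out:
    fclose(f);
    return ret;
}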
configure (vendored): 22 lines changed

@@ -1620,6 +1620,7 @@ HEADERS_LIST="
asm_types_h
cdio_paranoia_h
cdio_paranoia_paranoia_h
CL_cl_h
dev_bktr_ioctl_bt848_h
dev_bktr_ioctl_meteor_h
dev_ic_bt8xx_h

@@ -1667,6 +1668,7 @@ MATH_FUNCS="
exp2
exp2f
expf
fminf
isinf
isnan
ldexpf

@@ -1742,7 +1744,6 @@ SYSTEM_FUNCS="
TOOLCHAIN_FEATURES="
as_dn_directive
as_func
as_object_arch
asm_mod_q
attribute_may_alias
attribute_packed

@@ -3933,9 +3934,6 @@ case "$arch" in
;;
x86)
check_64bit x86_32 x86_64 'sizeof(void *) > 4'
# Treat x32 as x64 for now. Note it also needs spic=$shared
test "$subarch" = "x86_32" && check_cpp_condition stddef.h 'defined(__x86_64__)' &&
subarch=x86_64
if test "$subarch" = "x86_64"; then
spic=$shared
fi

@@ -4425,11 +4423,6 @@ if enabled_any arm aarch64 || enabled_all ppc altivec && enabled asm; then
check_as <<EOF && enable as_func
.func test
.endfunc
EOF

# llvm's integrated assembler supports .object_arch from llvm 3.5
enabled arm && test "$objformat" = elf && check_as <<EOF && enable as_object_arch
.object_arch armv4
EOF
fi

@@ -4507,7 +4500,7 @@ elif enabled parisc; then

if enabled gcc; then
case $($cc -dumpversion) in
4.[3-9].*) check_cflags -fno-optimize-sibling-calls ;;
4.[3-8].*) check_cflags -fno-optimize-sibling-calls ;;
esac
fi

@@ -4615,7 +4608,6 @@ fi
check_code cc arm_neon.h "int16x8_t test = vdupq_n_s16(0)" && enable intrinsics_neon

check_ldflags -Wl,--as-needed
check_ldflags -Wl,-z,noexecstack

if check_func dlopen; then
ldl=

@@ -4715,6 +4707,7 @@ check_func_headers glob.h glob
enabled xlib &&
check_func_headers "X11/Xlib.h X11/extensions/Xvlib.h" XvGetPortAttribute -lXv -lX11 -lXext

check_header cl/cl.h
check_header direct.h
check_header dlfcn.h
check_header dxva.h

@@ -4788,6 +4781,7 @@ disabled crystalhd || check_lib libcrystalhd/libcrystalhd_if.h DtsCrystalHDVersi
atan2f_args=2
ldexpf_args=2
powf_args=2
fminf_args=2

for func in $MATH_FUNCS; do
eval check_mathfunc $func \${${func}_args:-1}

@@ -4948,7 +4942,6 @@ check_header linux/videodev2.h
check_code cc linux/videodev2.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete

check_header sys/videoio.h
check_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete

check_func_headers "windows.h vfw.h" capCreateCaptureWindow "$vfwcap_indev_extralibs"
# check that WM_CAP_DRIVER_CONNECT is defined to the proper value

@@ -4988,7 +4981,7 @@ enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio
if enabled libcdio; then
check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio ||
check_lib2 "cdio/paranoia/cdda.h cdio/paranoia/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio ||
die "ERROR: No usable libcdio/cdparanoia found"
die "ERROR: libcdio-paranoia not found"
fi

enabled xlib &&

@@ -5604,7 +5597,7 @@ cat > $TMPH <<EOF
#define FFMPEG_CONFIG_H
#define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
#define FFMPEG_LICENSE "$(c_escape $license)"
#define CONFIG_THIS_YEAR 2015
#define CONFIG_THIS_YEAR 2014
#define FFMPEG_DATADIR "$(eval c_escape $datadir)"
#define AVCONV_DATADIR "$(eval c_escape $datadir)"
#define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"

@@ -5632,7 +5625,6 @@ enabled getenv || echo "#define getenv(x) NULL" >> $TMPH


mkdir -p doc
mkdir -p tests
echo "@c auto-generated by configure" > doc/config.texi

print_config ARCH_ "$config_files" $ARCH_LIST
@@ -148,7 +148,7 @@ API changes, most recent first:
Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
it

2014-06-10 - 5482780 - lavf 55.43.100 - avformat.h
2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
New field int64_t max_analyze_duration2 instead of deprecated
int max_analyze_duration.

@@ -172,7 +172,7 @@ API changes, most recent first:
Add strict_std_compliance and related AVOptions to support experimental
muxing.

2014-05-26 - 55cc60c - lavu 52.87.100 - threadmessage.h
2014-05-26 - xxxxxxx - lavu 52.87.100 - threadmessage.h
Add thread message queue API.

2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h

@@ -182,7 +182,7 @@ API changes, most recent first:
Add av_stream_get_side_data() to access stream-level side data
in the same way as av_packet_get_side_data().

2014-05-20 - 7336e39 - lavu 52.86.100 - fifo.h
2014-05-xx - xxxxxxx - lavu 52.86.100 - fifo.h
Add av_fifo_alloc_array() function.

2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h

@@ -214,10 +214,10 @@ API changes, most recent first:
2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
Add AV_PIX_FMT_VDA for new-style VDA acceleration.

2014-05-07 - 351f611 - lavu 52.82.100 - fifo.h
2014-05-xx - xxxxxxx - lavu 52.82.0 - fifo.h
Add av_fifo_freep() function.

2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
2014-05-02 - ba52fb11 - lavu 52.81.0 - opt.h
Add av_opt_set_dict2() function.

2014-05-01 - e77b985 / a2941c8 - lavc 55.60.103 / 55.50.3 - avcodec.h

@@ -236,14 +236,10 @@ API changes, most recent first:
Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
reference-counted frames to encoders.

2014-04-30 - 617e866 - lavu 52.81.100 - pixdesc.h
Add av_find_best_pix_fmt_of_2(), av_get_pix_fmt_loss()
Deprecate avcodec_get_pix_fmt_loss(), avcodec_find_best_pix_fmt_of_2()

2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
Add AVCodecDescriptor.mime_types field.

2014-04-29 - b804eb4 - lavu 52.80.100 - hash.h
2014-04-29 - xxxxxxx - lavu 52.80.0 - hash.h
Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().

2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h

@@ -255,7 +251,7 @@ API changes, most recent first:
2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
Add AV_CRC_16_ANSI_LE crc variant.

2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
2014-04-XX - xxxxxxx - lavf xx.xx.1xx - avformat.h
Add av_format_inject_global_side_data()

2014-04-12 - 4f698be - lavu 52.76.100 - log.h

@@ -335,7 +331,7 @@ API changes, most recent first:
2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
Deprecate unused AV_OPT_FLAG_METADATA.

2014-02-16 - 81c3f81 - lavd 55.10.100 - avdevice.h
2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
Add avdevice_list_devices() and avdevice_free_list_devices()

2014-02-16 - db3c970 - lavf 55.33.100 - avio.h

@@ -376,7 +372,7 @@ API changes, most recent first:
2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().

2014-01-19 - 3532dd5 - lavu 52.63.100 - rational.h
2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
Add av_make_q() function.

2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
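Several of the APIchanges entries above (av_make_q(), av_fifo_alloc_array(), av_hash_final_*()) only name the new symbols. As a quick, hedged illustration of one of them, a standalone sketch of av_make_q() from lavu 52.63.100 (example code, not from the tree):

#include <stdio.h>
#include "libavutil/rational.h"

int main(void)
{
    /* av_make_q() builds an AVRational without a compound literal. */
    AVRational ntsc_fps = av_make_q(30000, 1001);
    printf("%f fps\n", av_q2d(ntsc_fps)); /* ~29.97 */
    return 0;
}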
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.4.8
PROJECT_NUMBER =

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -29,7 +29,6 @@ OBJS=$(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm
@@ -199,7 +199,8 @@ int main(int argc, char **argv)
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
fclose(dst_file);
if (dst_file)
fclose(dst_file);

if (src_data)
av_freep(&src_data[0]);
@@ -132,7 +132,8 @@ int main(int argc, char **argv)
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
fclose(dst_file);
if (dst_file)
fclose(dst_file);
av_freep(&src_data[0]);
av_freep(&dst_data[0]);
sws_freeContext(sws_ctx);
@@ -116,10 +116,6 @@ static int open_output_file(const char *filename)
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
return AVERROR_INVALIDDATA;
}

/* In this example, we transcode to same properties (picture size,
* sample rate etc.). These properties can be changed for output
@@ -298,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
@code{concat}} protocol designed specifically for that, with examples in the
documentation.

A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
video by merely concatenating the files containing them.

Hence you may concatenate your multimedia files by first transcoding them to
@@ -71,7 +71,7 @@ the HTTP server (configured through the @option{HTTPPort} option), and
configuration file.

Each feed is associated to a file which is stored on disk. This stored
file is used to send pre-recorded data to a player as fast as
file is used to allow to send pre-recorded data to a player as fast as
possible when new content is added in real-time to the stream.

A "live-stream" or "stream" is a resource published by
@@ -3389,7 +3389,7 @@ Set number overlapping pixels for each block. Since the filter can be slow, you
may want to reduce this value, at the cost of a less effective filter and the
risk of various artefacts.

If the overlapping value doesn't permit processing the whole input width or
If the overlapping value doesn't allow to process the whole input width or
height, a warning will be displayed and according borders won't be denoised.

Default value is @var{blocksize}-1, which is the best possible setting.
@@ -23,7 +23,7 @@ Reduce buffering.

@item probesize @var{integer} (@emph{input})
Set probing size in bytes, i.e. the size of the data to analyze to get
stream information. A higher value will enable detecting more
stream information. A higher value will allow to detect more
information in case it is dispersed into the stream, but will increase
latency. Must be an integer not lesser than 32. It is 5000000 by default.

@@ -63,7 +63,7 @@ Default is 0.

@item analyzeduration @var{integer} (@emph{input})
Specify how many microseconds are analyzed to probe the input. A
higher value will enable detecting more accurate information, but will
higher value will allow to detect more accurate information, but will
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.

@item cryptokey @var{hexadecimal string} (@emph{input})
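probesize and analyzeduration are ordinary format options, so the documented behaviour above can also be exercised from code. A small sketch, assuming a caller-supplied URL and purely illustrative values:

#include "libavformat/avformat.h"
#include "libavutil/dict.h"

/* Raise the probing limits before opening an input whose stream parameters
 * are hard to detect: probesize is in bytes, analyzeduration in microseconds. */
static int open_with_probing(AVFormatContext **fmt, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "probesize",       "10000000", 0);
    av_dict_set(&opts, "analyzeduration", "10000000", 0);
    ret = avformat_open_input(fmt, url, NULL, &opts);
    av_dict_free(&opts);
    return ret;
}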
@@ -1,7 +1,7 @@
@chapter Input Devices
@c man begin INPUT DEVICES

Input devices are configured elements in FFmpeg which enable accessing
Input devices are configured elements in FFmpeg which allow to access
the data coming from a multimedia device attached to your system.

When you configure your FFmpeg build, all the supported input devices
@@ -1081,8 +1081,8 @@ Set raise error timeout, expressed in microseconds.
This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.

@item listen_timeout=@var{milliseconds}
Set listen timeout, expressed in milliseconds.
@item listen_timeout=@var{microseconds}
Set listen timeout, expressed in microseconds.
@end table

The following example shows how to setup a listening TCP connection
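The hunk above only corrects the documented unit of listen_timeout (milliseconds, matching the Changelog entry "doc/protocols/tcp: fix units of listen_timeout option value"). A hedged sketch of how the option is passed on a tcp:// URL from the avio API; address, port and timeout are placeholders:

#include "libavformat/avformat.h"
#include "libavformat/avio.h"

/* Accept one inbound TCP connection, giving up after roughly 5000 ms. */
static int wait_for_peer(AVIOContext **io)
{
    avformat_network_init();
    return avio_open(io, "tcp://0.0.0.0:5555?listen=1&listen_timeout=5000",
                     AVIO_FLAG_READ);
}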
@@ -14,9 +14,9 @@
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@@ -844,7 +844,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
Return 1.0 if @var{x} is NAN, 0.0 otherwise.

@item ld(var)
Load the value of the internal variable with number
Allow to load the value of the internal variable with number
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
The function returns the loaded value.

@@ -912,7 +912,7 @@ Compute the square root of @var{expr}. This is equivalent to
Compute expression @code{1/(1 + exp(4*x))}.

@item st(var, expr)
Store the value of the expression @var{expr} in an internal
Allow to store the value of the expression @var{expr} in an internal
variable. @var{var} specifies the number of the variable where to
store the value, and it is a value ranging from 0 to 9. The function
returns the value stored in the internal variable.
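The st()/ld() wording fixes above do not change behaviour: st(var, expr) stores into one of ten internal variables and ld(var) reads it back within the same expression. A hedged sketch using the public expression API; the if() wrapper is only there to force st() to be evaluated before ld(), since operand order of plain arithmetic is not something to rely on:

#include <stdio.h>
#include "libavutil/eval.h"

int main(void)
{
    double v = 0;
    /* st(0, 3) stores 3 in variable 0, then ld(0) loads it back. */
    int ret = av_expr_parse_and_eval(&v, "if(st(0, 3), ld(0) * 2)",
                                     NULL, NULL, NULL, NULL, NULL, NULL,
                                     NULL, 0, NULL);
    if (ret >= 0)
        printf("%f\n", v); /* 6.000000 */
    return 0;
}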
ffmpeg.c: 8 lines changed

@@ -978,8 +978,10 @@ static void do_video_out(AVFormatContext *s,
/* raw pictures are written as AVPicture structure to
avoid any copies. We support temporarily the older
method. */
if (in_picture->interlaced_frame)
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
mux_enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
mux_enc->coded_frame->top_field_first = in_picture->top_field_first;
if (mux_enc->coded_frame->interlaced_frame)
mux_enc->field_order = mux_enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
else
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
pkt.data = (uint8_t *)in_picture;

@@ -2461,7 +2463,7 @@ static int transcode_init(void)
AVFormatContext *oc;
OutputStream *ost;
InputStream *ist;
char error[1024] = {0};
char error[1024];
int want_sdp = 1;

for (i = 0; i < nb_filtergraphs; i++) {
@@ -702,7 +702,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
if (canvas_size &&
av_parse_video_size(&ist->dec_ctx->width, &ist->dec_ctx->height, canvas_size) < 0) {
av_parse_video_size(&dec->width, &dec->height, canvas_size) < 0) {
av_log(NULL, AV_LOG_FATAL, "Invalid canvas size: %s.\n", canvas_size);
exit_program(1);
}
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int line, ret;
int line = 0, ret;
const int width = avctx->width;
AVFrame *pic = data;
uint16_t *y, *u, *v;
const uint8_t *line_end, *src = avpkt->data;
int stride = avctx->width * 8 / 3;

if (width <= 1 || avctx->height <= 0) {
av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
if (width == 1) {
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
return AVERROR_INVALIDDATA;
}

@@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;

y = (uint16_t *)pic->data[0];
u = (uint16_t *)pic->data[1];
v = (uint16_t *)pic->data[2];
line_end = avpkt->data + stride;
for (line = 0; line < avctx->height; line++) {
uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
int x;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);

for (x = 0; x < width; x += 6) {
uint32_t t;

if (width - x < 6 || line_end - src < 16) {
y = y_temp;
u = u_temp;
v = v_temp;
}

if (line_end - src < 4)
break;

t = AV_RL32(src);
while (line++ < avctx->height) {
while (1) {
uint32_t t = AV_RL32(src);
src += 4;
*u++ = t << 6 & 0xFFC0;
*y++ = t >> 4 & 0xFFC0;
*v++ = t >> 14 & 0xFFC0;

if (line_end - src < 4)
if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}

t = AV_RL32(src);
src += 4;
*y++ = t << 6 & 0xFFC0;
*u++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;

if (line_end - src < 4)
if (src >= line_end - 2) {
if (!(width & 1)) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}

t = AV_RL32(src);
src += 4;

@@ -113,8 +113,15 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
*y++ = t >> 4 & 0xFFC0;
*u++ = t >> 14 & 0xFFC0;

if (line_end - src < 4)
if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}

t = AV_RL32(src);
src += 4;

@@ -122,21 +129,18 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
*v++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;

if (width - x < 6)
if (src >= line_end - 2) {
if (width & 1) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}
}

if (x < width) {
y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
memcpy(y, y_temp, sizeof(*y) * (width - x));
memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
}

line_end += stride;
src = line_end - stride;
}

*got_frame = 1;
@@ -206,7 +206,7 @@ OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
OBJS-$(CONFIG_EAC3_DECODER) += eac3_data.o
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o eac3_data.o
OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \

@@ -651,7 +651,7 @@ OBJS-$(CONFIG_VAAPI) += vaapi.o
OBJS-$(CONFIG_VDA) += vda.o
OBJS-$(CONFIG_VDPAU) += vdpau.o

OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
OBJS-$(CONFIG_H263_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o

@@ -660,13 +660,13 @@ OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o
OBJS-$(CONFIG_MPEG1_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG1_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG2_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG4_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_VC1_DXVA2_HWACCEL) += dxva2_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o vaapi_mpeg.o
OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o

# libavformat dependencies
@@ -28,7 +28,6 @@
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

@@ -66,7 +65,7 @@ static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest)
static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
{
int blockx, blocky, x, y;
int luma = 0;

@@ -79,13 +78,9 @@ static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest
for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
if(x < width && y < height) {
if (x + 1 < width) {
/* build average over 2 pixels */
luma = (src[(x + 0 + y * p->linesize[0])] +
src[(x + 1 + y * p->linesize[0])]) / 2;
} else {
luma = src[(x + y * p->linesize[0])];
}
/* build average over 2 pixels */
luma = (src[(x + 0 + y * p->linesize[0])] +
src[(x + 1 + y * p->linesize[0])]) / 2;
/* write blocks as linear data now so they are suitable for elbg */
dest[0] = luma;
}

@@ -191,6 +186,7 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
A64Context *c = avctx->priv_data;
av_frame_free(&avctx->coded_frame);
av_freep(&c->mc_meta_charset);
av_freep(&c->mc_best_cb);
av_freep(&c->mc_charset);

@@ -224,7 +220,7 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
a64_palette[mc_colors[a]][2] * 0.11;
}

if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
if (!(c->mc_meta_charset = av_malloc_array(c->mc_lifetime, 32000 * sizeof(int))) ||
!(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
!(c->mc_charmap = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
!(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||

@@ -242,6 +238,14 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
AV_WB32(avctx->extradata, c->mc_lifetime);
AV_WB32(avctx->extradata + 16, INTERLACED);

avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
a64multi_close_encoder(avctx);
return AVERROR(ENOMEM);
}

avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
if (!avctx->codec_tag)
avctx->codec_tag = AV_RL32("a64m");

@@ -266,9 +270,10 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
}

static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *p, int *got_packet)
const AVFrame *pict, int *got_packet)
{
A64Context *c = avctx->priv_data;
AVFrame *const p = avctx->coded_frame;

int frame;
int x, y;

@@ -299,7 +304,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
}

/* no data, means end encoding asap */
if (!p) {
if (!pict) {
/* all done, end encoding */
if (!c->mc_lifetime) return 0;
/* no more frames in queue, prepare to flush remaining frames */

@@ -312,10 +317,13 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
} else {
/* fill up mc_meta_charset with data until lifetime exceeds */
if (c->mc_frame_counter < c->mc_lifetime) {
*p = *pict;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
c->mc_frame_counter++;
if (c->next_pts == AV_NOPTS_VALUE)
c->next_pts = p->pts;
c->next_pts = pict->pts;
/* lifetime is not reached so wait for next frame first */
return 0;
}

@@ -326,8 +334,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
req_size = 0;
/* any frames to encode? */
if (c->mc_lifetime) {
int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
return ret;
buf = pkt->data;

@@ -343,7 +351,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

/* advance pointers */
buf += charset_size;
req_size += charset_size;
}

/* write x frames to buf */

@@ -380,7 +387,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
pkt->pts = pkt->dts = c->next_pts;
c->next_pts = AV_NOPTS_VALUE;

av_assert0(pkt->size >= req_size);
pkt->size = req_size;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = !!req_size;
@@ -165,7 +165,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
PutBitContext pb;
AACEncContext *s = avctx->priv_data;

init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
put_bits(&pb, 5, 2); //object type - AAC-LC
put_bits(&pb, 4, s->samplerate_index); //sample rate index
put_bits(&pb, 4, s->channels);

@@ -746,10 +746,10 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)

s->chan_map = aac_chan_configs[s->channels-1];

if ((ret = dsp_init(avctx, s)) < 0)
if (ret = dsp_init(avctx, s))
goto fail;

if ((ret = alloc_buffers(avctx, s)) < 0)
if (ret = alloc_buffers(avctx, s))
goto fail;

avctx->extradata_size = 5;

@@ -761,8 +761,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
lengths[1] = ff_aac_num_swb_128[i];
for (i = 0; i < s->chan_map[0]; i++)
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
s->chan_map[0], grouping)) < 0)
if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
goto fail;
s->psypp = ff_psy_preprocess_init(avctx);
s->coder = &ff_aac_coders[s->options.aac_coder];
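The init_put_bits() hunk above (and the matching ADPCM encoder hunks further down) drops an erroneous "* 8": the third argument is a buffer size in bytes, not bits, which is what the Changelog entry "Fix buffer_size argument to init_put_bits() in multiple encoders" refers to. A minimal sketch against FFmpeg's internal PutBit API; put_bits.h is not installed publicly, so this only builds inside the tree, and the function name is hypothetical:

#include "libavcodec/put_bits.h"

static void write_audio_specific_config(uint8_t *extradata, int extradata_size)
{
    PutBitContext pb;
    init_put_bits(&pb, extradata, extradata_size); /* bytes, not bits */
    put_bits(&pb, 5, 2);                           /* AAC-LC object type, as in the hunk */
    flush_put_bits(&pb);
}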
@@ -556,8 +556,7 @@ static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
k = sbr->n_master;
} while (sb != sbr->kx[1] + sbr->m[1]);

if (sbr->num_patches > 1 &&
sbr->patch_num_subbands[sbr->num_patches - 1] < 3)
if (sbr->num_patches > 1 && sbr->patch_num_subbands[sbr->num_patches-1] < 3)
sbr->num_patches--;

return 0;
@@ -78,7 +78,6 @@ av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon;
c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon;
c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon;
if (chroma_format_idc <= 1)
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;

c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16_neon;
@@ -872,7 +872,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
start_subband += start_subband - 7;
end_subband = get_bits(gbc, 3) + 5;
#if USE_FIXED
s->spx_dst_end_freq = end_freq_inv_tab[end_subband-5];
s->spx_dst_end_freq = end_freq_inv_tab[end_subband];
#endif
if (end_subband > 7)
end_subband += end_subband - 7;

@@ -939,7 +939,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
nblend = 0;
sblend = 0x800000;
} else if (nratio > 0x7fffff) {
nblend = 14529495; // sqrt(3) in FP.23
nblend = 0x800000;
sblend = 0;
} else {
nblend = fixed_sqrt(nratio, 23);
@@ -243,19 +243,19 @@ typedef struct AC3DecodeContext {
* Parse the E-AC-3 frame header.
* This parses both the bit stream info and audio frame header.
*/
static int ff_eac3_parse_header(AC3DecodeContext *s);
int ff_eac3_parse_header(AC3DecodeContext *s);

/**
* Decode mantissas in a single channel for the entire frame.
* This is used when AHT mode is enabled.
*/
static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);
void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);

/**
* Apply spectral extension to each channel by copying lower frequency
* coefficients to higher frequency bins and applying side information to
* approximate the original high frequency signal.
*/
static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);
void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);

#endif /* AVCODEC_AC3DEC_H */
@@ -164,7 +164,6 @@ static void ac3_downmix_c_fixed16(int16_t **samples, int16_t (*matrix)[2],
}
}

#include "eac3dec.c"
#include "ac3dec.c"

static const AVOption options[] = {
@@ -28,7 +28,6 @@
* Upmix delay samples from stereo to original channel layout.
*/
#include "ac3dec.h"
#include "eac3dec.c"
#include "ac3dec.c"

static const AVOption options[] = {
@@ -263,7 +263,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
energy_cpl = energy[blk][CPL_CH][bnd];
energy_ch = energy[blk][ch][bnd];
blk1 = blk+1;
while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
if (s->blocks[blk1].cpl_in_use) {
energy_cpl += energy[blk1][CPL_CH][bnd];
energy_ch += energy[blk1][ch][bnd];
@@ -541,7 +541,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
case AV_CODEC_ID_ADPCM_IMA_QT:
{
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size);
init_put_bits(&pb, dst, pkt_size * 8);

for (ch = 0; ch < avctx->channels; ch++) {
ADPCMChannelStatus *status = &c->status[ch];

@@ -571,7 +571,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
case AV_CODEC_ID_ADPCM_SWF:
{
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size);
init_put_bits(&pb, dst, pkt_size * 8);

n = frame->nb_samples - 1;
@@ -438,8 +438,8 @@ static av_cold int aic_decode_init(AVCodecContext *avctx)
ctx->mb_width = FFALIGN(avctx->width, 16) >> 4;
ctx->mb_height = FFALIGN(avctx->height, 16) >> 4;

ctx->num_x_slices = (ctx->mb_width + 15) >> 4;
ctx->slice_width = 16;
ctx->num_x_slices = 16;
ctx->slice_width = ctx->mb_width / 16;
for (i = 1; i < 32; i++) {
if (!(ctx->mb_width % i) && (ctx->mb_width / i < 32)) {
ctx->slice_width = ctx->mb_width / i;
@@ -90,11 +90,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->fg = DEFAULT_FG_COLOR;
s->bg = DEFAULT_BG_COLOR;

if (!avctx->width || !avctx->height) {
int ret = ff_set_dimensions(avctx, 80 << 3, 25 << 4);
if (ret < 0)
return ret;
}
if (!avctx->width || !avctx->height)
ff_set_dimensions(avctx, 80 << 3, 25 << 4);

return 0;
}
@@ -107,10 +107,8 @@ av_cold void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth,
{
int cpu_flags = av_get_cpu_flags();

#if HAVE_ARMV6
if (have_setend(cpu_flags))
c->startcode_find_candidate = ff_startcode_find_candidate_armv6;
#endif
if (have_neon(cpu_flags))
h264dsp_init_neon(c, bit_depth, chroma_format_idc);
}
@@ -28,10 +28,8 @@ av_cold void ff_vc1dsp_init_arm(VC1DSPContext *dsp)
{
int cpu_flags = av_get_cpu_flags();

#if HAVE_ARMV6
if (have_setend(cpu_flags))
dsp->startcode_find_candidate = ff_startcode_find_candidate_armv6;
#endif
if (have_neon(cpu_flags))
ff_vc1dsp_init_neon(dsp);
}
@@ -23,10 +23,9 @@
#include "libavutil/arm/asm.S"

function ff_prefetch_arm, export=1
1:
subs r2, r2, #1
pld [r0]
add r0, r0, r1
bne 1b
bne X(ff_prefetch_arm)
bx lr
endfunc
@@ -26,10 +26,8 @@
#include "libavutil/attributes.h"
#include "libavutil/mem.h"

#include "aandcttab.h"
#include "asv.h"
#include "avcodec.h"
#include "dct.h"
#include "fdctdsp.h"
#include "internal.h"
#include "mathops.h"

@@ -333,13 +331,8 @@ static av_cold int encode_init(AVCodecContext *avctx)
((uint32_t *) avctx->extradata)[1] = av_le2ne32(AV_RL32("ASUS"));

for (i = 0; i < 64; i++) {
if (a->fdsp.fdct == ff_fdct_ifast) {
int q = 32LL * scale * ff_mpeg1_default_intra_matrix[i] * ff_aanscales[i];
a->q_intra_matrix[i] = (((int64_t)a->inv_qscale << 30) + q / 2) / q;
} else {
int q = 32 * scale * ff_mpeg1_default_intra_matrix[i];
a->q_intra_matrix[i] = ((a->inv_qscale << 16) + q / 2) / q;
}
int q = 32 * scale * ff_mpeg1_default_intra_matrix[i];
a->q_intra_matrix[i] = ((a->inv_qscale << 16) + q / 2) / q;
}

return 0;
@@ -165,8 +165,9 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
return AVERROR(ENOMEM);

avctx->pix_fmt = AV_PIX_FMT_PAL8;
ff_set_dimensions(avctx, 318, 198);

return ff_set_dimensions(avctx, 318, 198);
return 0;
}

static av_cold int avs_decode_end(AVCodecContext *avctx)
@@ -30,7 +30,6 @@
#include "golomb.h"
#include "h264chroma.h"
#include "idctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "qpeldsp.h"
#include "cavs.h"

@@ -540,8 +539,8 @@ static inline void scale_mv(AVSContext *h, int *d_x, int *d_y,
{
int den = h->scale_den[FFMAX(src->ref, 0)];

*d_x = (src->x * distp * den + 256 + FF_SIGNBIT(src->x)) >> 9;
*d_y = (src->y * distp * den + 256 + FF_SIGNBIT(src->y)) >> 9;
*d_x = (src->x * distp * den + 256 + (src->x >> 31)) >> 9;
*d_y = (src->y * distp * den + 256 + (src->y >> 31)) >> 9;
}

static inline void mv_pred_median(AVSContext *h,
@@ -467,7 +467,7 @@ static inline void mv_pred_direct(AVSContext *h, cavs_vector *pmv_fw,
{
cavs_vector *pmv_bw = pmv_fw + MV_BWD_OFFS;
int den = h->direct_den[col_mv->ref];
int m = FF_SIGNBIT(col_mv->x);
int m = col_mv->x >> 31;

pmv_fw->dist = h->dist[1];
pmv_bw->dist = h->dist[0];

@@ -476,7 +476,7 @@ static inline void mv_pred_direct(AVSContext *h, cavs_vector *pmv_fw,
/* scale the co-located motion vector according to its temporal span */
pmv_fw->x = (((den + (den * col_mv->x * pmv_fw->dist ^ m) - m - 1) >> 14) ^ m) - m;
pmv_bw->x = m - (((den + (den * col_mv->x * pmv_bw->dist ^ m) - m - 1) >> 14) ^ m);
m = FF_SIGNBIT(col_mv->y);
m = col_mv->y >> 31;
pmv_fw->y = (((den + (den * col_mv->y * pmv_fw->dist ^ m) - m - 1) >> 14) ^ m) - m;
pmv_bw->y = m - (((den + (den * col_mv->y * pmv_bw->dist ^ m) - m - 1) >> 14) ^ m);
}
@@ -135,7 +135,7 @@ static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip,
const uint8_t *eod = (data + size);
uint32_t flag, mask;
uint8_t *cb0, *cb1, *cb2, *cb3;
int x, y;
unsigned int x, y;
char *ip0, *ip1, *ip2, *ip3;

flag = 0;
@@ -1058,7 +1058,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
q->avctx = avctx;

/* Take care of the codec specific extradata. */
if (extradata_size < 8) {
if (extradata_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "Necessary extradata missing!\n");
return AVERROR_INVALIDDATA;
}

@@ -1215,8 +1215,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)

q->num_subpackets++;
s++;
if (s > FFMIN(MAX_SUBPACKETS, avctx->block_align)) {
avpriv_request_sample(avctx, "subpackets > %d", FFMIN(MAX_SUBPACKETS, avctx->block_align));
if (s > MAX_SUBPACKETS) {
avpriv_request_sample(avctx, "subpackets > %d", MAX_SUBPACKETS);
return AVERROR_PATCHWELCOME;
}
}
@@ -2359,10 +2359,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
#else
if (s->xch_present && !s->xch_disable) {
#endif
if (avctx->channel_layout & AV_CH_BACK_CENTER) {
avpriv_request_sample(avctx, "XCh with Back center channel");
return AVERROR_INVALIDDATA;
}
avctx->channel_layout |= AV_CH_BACK_CENTER;
if (s->lfe) {
avctx->channel_layout |= AV_CH_LOW_FREQUENCY;
@@ -171,10 +171,6 @@ static inline int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_c
{
int ret = 1;
while (!dirac_get_arith_bit(c, follow_ctx)) {
if (ret >= 0x40000000) {
av_log(NULL, AV_LOG_ERROR, "dirac_get_arith_uint overflow\n");
return -1;
}
ret <<= 1;
ret += dirac_get_arith_bit(c, data_ctx);
follow_ctx = ff_dirac_next_ctx[follow_ctx];
@@ -612,10 +612,10 @@ static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b

top = 0;
for (cb_y = 0; cb_y < cb_height; cb_y++) {
bottom = (b->height * (cb_y+1LL)) / cb_height;
bottom = (b->height * (cb_y+1)) / cb_height;
left = 0;
for (cb_x = 0; cb_x < cb_width; cb_x++) {
right = (b->width * (cb_x+1LL)) / cb_width;
right = (b->width * (cb_x+1)) / cb_width;
codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
left = right;
}
@@ -1004,8 +1004,8 @@ static int dirac_unpack_idwt_params(DiracContext *s)
/* Codeblock parameters (core syntax only) */
if (get_bits1(gb)) {
for (i = 0; i <= s->wavelet_depth; i++) {
CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
CHECKEDREAD(s->codeblock[i].width , tmp < 1, "codeblock width invalid\n")
CHECKEDREAD(s->codeblock[i].height, tmp < 1, "codeblock height invalid\n")
}

CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
@@ -38,7 +38,6 @@ typedef struct DNXHDContext {
BlockDSPContext bdsp;
int64_t cid; ///< compression id
unsigned int width, height;
enum AVPixelFormat pix_fmt;
unsigned int mb_width, mb_height;
uint32_t mb_scan_index[68]; /* max for 1080p */
int cur_field; ///< current interlaced field
@@ -142,7 +141,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,

ctx->is_444 = 0;
if (buf[0x4] == 0x2) {
ctx->pix_fmt = AV_PIX_FMT_YUV444P10;
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
ctx->avctx->bits_per_raw_sample = 10;
if (ctx->bit_depth != 10) {
ff_blockdsp_init(&ctx->bdsp, ctx->avctx);

@@ -152,7 +151,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
}
ctx->is_444 = 1;
} else if (buf[0x21] & 0x40) {
ctx->pix_fmt = AV_PIX_FMT_YUV422P10;
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
ctx->avctx->bits_per_raw_sample = 10;
if (ctx->bit_depth != 10) {
ff_blockdsp_init(&ctx->bdsp, ctx->avctx);

@@ -161,7 +160,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
ctx->decode_dct_block = dnxhd_decode_dct_block_10;
}
} else {
ctx->pix_fmt = AV_PIX_FMT_YUV422P;
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
ctx->avctx->bits_per_raw_sample = 8;
if (ctx->bit_depth != 8) {
ff_blockdsp_init(&ctx->bdsp, ctx->avctx);

@@ -363,7 +362,7 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame,
dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444));
dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444));

if (frame->interlaced_frame && ctx->cur_field) {
if (ctx->cur_field) {
dest_y += frame->linesize[0];
dest_u += frame->linesize[1];
dest_v += frame->linesize[2];

@@ -447,13 +446,7 @@ decode_coding_unit:
avctx->width, avctx->height, ctx->width, ctx->height);
first_field = 1;
}
if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
first_field = 1;
}

avctx->pix_fmt = ctx->pix_fmt;
ret = ff_set_dimensions(avctx, ctx->width, ctx->height);
if (ret < 0)
return ret;
@@ -117,7 +117,7 @@ static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block,

for (i = 1; i < 64; ++i) {
int j = scantable[i];
int sign = FF_SIGNBIT(block[j]);
int sign = block[j] >> 31;
int level = (block[j] ^ sign) - sign;
level = level * qmat[j] >> DNX10BIT_QMAT_SHIFT;
block[j] = (level ^ sign) - sign;
@@ -37,7 +37,7 @@ typedef struct DVDSubContext
int has_palette;
uint8_t colormap[4];
uint8_t alpha[256];
uint8_t buf[0x10000];
uint8_t *buf;
int buf_size;
#ifdef DEBUG
int sub_id;
@@ -105,12 +105,6 @@ static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
int x, y, len, color;
uint8_t *d;

if (start >= buf_size)
return -1;

if (w <= 0 || h <= 0)
return -1;

bit_len = (buf_size - start) * 8;
init_get_bits(&gb, buf + start, bit_len);
@@ -362,12 +356,10 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect));
sub_header->num_rects = 1;
sub_header->rects[0]->pict.data[0] = bitmap;
if (decode_rle(bitmap, w * 2, w, (h + 1) / 2,
buf, offset1, buf_size, is_8bit) < 0)
goto fail;
if (decode_rle(bitmap + w, w * 2, w, h / 2,
buf, offset2, buf_size, is_8bit) < 0)
goto fail;
decode_rle(bitmap, w * 2, w, (h + 1) / 2,
buf, offset1, buf_size, is_8bit);
decode_rle(bitmap + w, w * 2, w, h / 2,
buf, offset2, buf_size, is_8bit);
sub_header->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
if (is_8bit) {
if (!yuv_palette)
@@ -506,11 +498,15 @@ static int append_to_cached_buf(AVCodecContext *avctx,
{
DVDSubContext *ctx = avctx->priv_data;

if (ctx->buf_size >= sizeof(ctx->buf) - buf_size) {
if (ctx->buf_size > 0xffff - buf_size) {
av_log(avctx, AV_LOG_WARNING, "Attempt to reconstruct "
"too large SPU packets aborted.\n");
av_freep(&ctx->buf);
return AVERROR_INVALIDDATA;
}
ctx->buf = av_realloc(ctx->buf, ctx->buf_size + buf_size);
if (!ctx->buf)
return AVERROR(ENOMEM);
memcpy(ctx->buf + ctx->buf_size, buf, buf_size);
ctx->buf_size += buf_size;
return 0;
@@ -526,7 +522,7 @@ static int dvdsub_decode(AVCodecContext *avctx,
AVSubtitle *sub = data;
int is_menu;

if (ctx->buf_size) {
if (ctx->buf) {
int ret = append_to_cached_buf(avctx, buf, buf_size);
if (ret < 0) {
*data_size = 0;

@@ -565,6 +561,7 @@ static int dvdsub_decode(AVCodecContext *avctx,
}
#endif

av_freep(&ctx->buf);
ctx->buf_size = 0;
*data_size = 1;
return buf_size;
@@ -586,7 +583,6 @@ static int dvdsub_parse_extradata(AVCodecContext *avctx)
{
DVDSubContext *ctx = (DVDSubContext*) avctx->priv_data;
char *dataorig, *data;
int ret = 1;

if (!avctx->extradata || !avctx->extradata_size)
return 1;
@@ -607,9 +603,11 @@ static int dvdsub_parse_extradata(AVCodecContext *avctx)
} else if (strncmp("size:", data, 5) == 0) {
int w, h;
if (sscanf(data + 5, "%dx%d", &w, &h) == 2) {
ret = ff_set_dimensions(avctx, w, h);
if (ret < 0)
goto fail;
int ret = ff_set_dimensions(avctx, w, h);
if (ret < 0) {
av_free(dataorig);
return ret;
}
}
}
@@ -617,9 +615,8 @@ static int dvdsub_parse_extradata(AVCodecContext *avctx)
data += strspn(data, "\n\r");
}

fail:
av_free(dataorig);
return ret;
return 1;
}

static av_cold int dvdsub_init(AVCodecContext *avctx)
@@ -646,6 +643,7 @@ static av_cold int dvdsub_init(AVCodecContext *avctx)

static av_cold int dvdsub_close(AVCodecContext *avctx)
{
DVDSubContext *ctx = avctx->priv_data;
av_freep(&ctx->buf);
ctx->buf_size = 0;
return 0;
}
@@ -329,11 +329,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
{
DxaDecContext * const c = avctx->priv_data;

if (avctx->width%4 || avctx->height%4) {
avpriv_request_sample(avctx, "dimensions are not a multiple of 4");
return AVERROR_INVALIDDATA;
}

c->prev = av_frame_alloc();
if (!c->prev)
return AVERROR(ENOMEM);
@@ -65,7 +65,7 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
int ret;

if (src_size < avctx->width * avctx->height * 9LL / 8) {
if (src_size < avctx->width * avctx->height * 9L / 8) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
@@ -108,7 +108,7 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
uint8_t *Y1, *Y2, *U, *V;
int ret;

if (src_size < avctx->width * avctx->height * 3LL / 2) {
if (src_size < avctx->width * avctx->height * 3L / 2) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
@@ -145,7 +145,7 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
uint8_t *Y, *U, *V;
int ret;

if (src_size < avctx->width * avctx->height * 3LL) {
if (src_size < avctx->width * avctx->height * 3L) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
@@ -63,7 +63,7 @@ typedef enum {
|
||||
|
||||
#define EAC3_SR_CODE_REDUCED 3
|
||||
|
||||
static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
{
|
||||
int bin, bnd, ch, i;
|
||||
uint8_t wrapflag[SPX_MAX_BANDS]={1,0,}, num_copy_sections, copy_sizes[SPX_MAX_BANDS];
|
||||
@@ -101,7 +101,7 @@ static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
for (i = 0; i < num_copy_sections; i++) {
|
||||
memcpy(&s->transform_coeffs[ch][bin],
|
||||
&s->transform_coeffs[ch][s->spx_dst_start_freq],
|
||||
copy_sizes[i]*sizeof(INTFLOAT));
|
||||
copy_sizes[i]*sizeof(float));
|
||||
bin += copy_sizes[i];
|
||||
}
|
||||
|
||||
@@ -124,7 +124,7 @@ static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
bin = s->spx_src_start_freq - 2;
|
||||
for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
|
||||
if (wrapflag[bnd]) {
|
||||
INTFLOAT *coeffs = &s->transform_coeffs[ch][bin];
|
||||
float *coeffs = &s->transform_coeffs[ch][bin];
|
||||
coeffs[0] *= atten_tab[0];
|
||||
coeffs[1] *= atten_tab[1];
|
||||
coeffs[2] *= atten_tab[2];
|
||||
@@ -142,11 +142,6 @@ static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
|
||||
float nscale = s->spx_noise_blend[ch][bnd] * rms_energy[bnd] * (1.0f / INT32_MIN);
|
||||
float sscale = s->spx_signal_blend[ch][bnd];
|
||||
#if USE_FIXED
|
||||
// spx_noise_blend and spx_signal_blend are both FP.23
|
||||
nscale *= 1.0 / (1<<23);
|
||||
sscale *= 1.0 / (1<<23);
|
||||
#endif
|
||||
for (i = 0; i < s->spx_band_sizes[bnd]; i++) {
|
||||
float noise = nscale * (int32_t)av_lfg_get(&s->dith_state);
|
||||
s->transform_coeffs[ch][bin] *= sscale;
|
||||
@@ -200,7 +195,7 @@ static void idct6(int pre_mant[6])
|
||||
pre_mant[5] = even0 - odd0;
|
||||
}
|
||||
|
||||
static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
|
||||
void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
|
||||
{
|
||||
int bin, blk, gs;
|
||||
int end_bap, gaq_mode;
|
||||
@@ -293,7 +288,7 @@ static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
|
||||
}
|
||||
}
|
||||
|
||||
static int ff_eac3_parse_header(AC3DecodeContext *s)
|
||||
int ff_eac3_parse_header(AC3DecodeContext *s)
|
||||
{
|
||||
int i, blk, ch;
|
||||
int ac3_exponent_strategy, parse_aht_info, parse_spx_atten_data;
|
||||
|
@@ -151,11 +151,6 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
|
||||
break;
|
||||
} else if (level != 0) {
|
||||
i += run;
|
||||
if (i > 63) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
|
||||
return -1;
|
||||
}
|
||||
j = scantable[i];
|
||||
level = (level*quant_matrix[j]) >> 4;
|
||||
level = (level-1)|1;
|
||||
@@ -170,11 +165,6 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
|
||||
run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
|
||||
|
||||
i += run;
|
||||
if (i > 63) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
|
||||
return -1;
|
||||
}
|
||||
j = scantable[i];
|
||||
if (level < 0) {
|
||||
level = -level;
|
||||
@@ -186,6 +176,10 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
|
||||
level = (level-1)|1;
|
||||
}
|
||||
}
|
||||
if (i > 63) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
|
||||
return -1;
|
||||
}
|
||||
|
||||
block[j] = level;
|
||||
}
|
||||
|
@@ -251,7 +251,7 @@ static void put_line(uint8_t *dst, int size, int width, const int *runs)
|
||||
PutBitContext pb;
|
||||
int run, mode = ~0, pix_left = width, run_idx = 0;
|
||||
|
||||
init_put_bits(&pb, dst, size);
|
||||
init_put_bits(&pb, dst, size * 8);
|
||||
while (pix_left > 0) {
|
||||
run = runs[run_idx++];
|
||||
mode = ~mode;
|
||||
|
@@ -166,7 +166,7 @@ static void find_best_state(uint8_t best_state[256][256],
|
||||
best_len[k] = len;
|
||||
best_state[i][k] = j;
|
||||
}
|
||||
for (m = 1; m < 256; m++)
|
||||
for (m = 0; m < 256; m++)
|
||||
if (occ[m]) {
|
||||
newocc[ one_state[ m]] += occ[m] * p;
|
||||
newocc[256 - one_state[256 - m]] += occ[m] * (1 - p);
|
||||
|
@@ -697,7 +697,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
|
||||
handle_error:
|
||||
*poutbuf = NULL;
|
||||
*poutbuf_size = 0;
|
||||
return buf_size ? read_end - buf : 0;
|
||||
return read_end - buf;
|
||||
}
|
||||
|
||||
static av_cold int flac_parse_init(AVCodecParserContext *c)
|
||||
|
@@ -473,10 +473,10 @@ static int decode_frame(FLACContext *s)
|
||||
ret = allocate_buffers(s);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
|
||||
s->got_streaminfo = 1;
|
||||
dump_headers(s->avctx, (FLACStreaminfo *)s);
|
||||
}
|
||||
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
|
||||
|
||||
// dump_headers(s->avctx, (FLACStreaminfo *)s);
|
||||
|
||||
|
@@ -287,7 +287,7 @@ static int write_header(FlashSV2Context * s, uint8_t * buf, int buf_size)
|
||||
if (buf_size < 5)
|
||||
return -1;
|
||||
|
||||
init_put_bits(&pb, buf, buf_size);
|
||||
init_put_bits(&pb, buf, buf_size * 8);
|
||||
|
||||
put_bits(&pb, 4, (s->block_width >> 4) - 1);
|
||||
put_bits(&pb, 12, s->image_width);
|
||||
|
@@ -151,7 +151,7 @@ static int encode_bitstream(FlashSVContext *s, const AVFrame *p, uint8_t *buf,
|
||||
int buf_pos, res;
|
||||
int pred_blocks = 0;
|
||||
|
||||
init_put_bits(&pb, buf, buf_size);
|
||||
init_put_bits(&pb, buf, buf_size * 8);
|
||||
|
||||
put_bits(&pb, 4, block_width / 16 - 1);
|
||||
put_bits(&pb, 12, s->image_width);
|
||||
|
@@ -736,10 +736,8 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
c->tile_width = bytestream2_get_be32(&bc);
|
||||
c->tile_height = bytestream2_get_be32(&bc);
|
||||
if (c->tile_width <= 0 || c->tile_height <= 0 ||
|
||||
((c->tile_width | c->tile_height) & 0xF) ||
|
||||
c->tile_width * 4LL * c->tile_height >= INT_MAX
|
||||
) {
|
||||
if (!c->tile_width || !c->tile_height ||
|
||||
((c->tile_width | c->tile_height) & 0xF)) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid tile dimensions %dx%d\n",
|
||||
c->tile_width, c->tile_height);
|
||||
|
@@ -105,7 +105,7 @@ static int gif_image_write_image(AVCodecContext *avctx,
|
||||
/* skip common columns */
|
||||
while (x_start < x_end) {
|
||||
int same_column = 1;
|
||||
for (y = y_start; y <= y_end; y++) {
|
||||
for (y = y_start; y < y_end; y++) {
|
||||
if (ref[y*ref_linesize + x_start] != buf[y*linesize + x_start]) {
|
||||
same_column = 0;
|
||||
break;
|
||||
@@ -117,7 +117,7 @@ static int gif_image_write_image(AVCodecContext *avctx,
|
||||
}
|
||||
while (x_end > x_start) {
|
||||
int same_column = 1;
|
||||
for (y = y_start; y <= y_end; y++) {
|
||||
for (y = y_start; y < y_end; y++) {
|
||||
if (ref[y*ref_linesize + x_end] != buf[y*linesize + x_end]) {
|
||||
same_column = 0;
|
||||
break;
|
||||
|
@@ -271,21 +271,26 @@ static int gif_read_image(GifState *s, AVFrame *frame)
|
||||
case 1:
|
||||
y1 += 8;
|
||||
ptr += linesize * 8;
|
||||
if (y1 >= height) {
|
||||
y1 = pass ? 2 : 4;
|
||||
ptr = ptr1 + linesize * y1;
|
||||
pass++;
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
y1 += 4;
|
||||
ptr += linesize * 4;
|
||||
if (y1 >= height) {
|
||||
y1 = 1;
|
||||
ptr = ptr1 + linesize;
|
||||
pass++;
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
y1 += 2;
|
||||
ptr += linesize * 2;
|
||||
break;
|
||||
}
|
||||
while (y1 >= height) {
|
||||
y1 = 4 >> pass;
|
||||
ptr = ptr1 + linesize * y1;
|
||||
pass++;
|
||||
}
|
||||
} else {
|
||||
ptr += linesize;
|
||||
}
|
||||
|
@@ -336,14 +336,6 @@ static int decode_slice(MpegEncContext *s)
|
||||
s->padding_bug_score += 32;
|
||||
}
|
||||
|
||||
if (s->codec_id == AV_CODEC_ID_H263 &&
|
||||
(s->workaround_bugs & FF_BUG_AUTODETECT) &&
|
||||
get_bits_left(&s->gb) >= 64 &&
|
||||
AV_RB64(s->gb.buffer_end - 8) == 0xCDCDCDCDFC7F0000) {
|
||||
|
||||
s->padding_bug_score += 32;
|
||||
}
|
||||
|
||||
if (s->workaround_bugs & FF_BUG_AUTODETECT) {
|
||||
if (s->padding_bug_score > -2 && !s->data_partitioning)
|
||||
s->workaround_bugs |= FF_BUG_NO_PADDING;
|
||||
|
@@ -215,18 +215,18 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
|
||||
|
||||
if ((h->left_samples_available & 0x8080) != 0x8080) {
|
||||
mode = left[mode];
|
||||
if (mode < 0) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"left block unavailable for requested intra mode at %d %d\n",
|
||||
h->mb_x, h->mb_y);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (is_chroma && (h->left_samples_available & 0x8080)) {
|
||||
// mad cow disease mode, aka MBAFF + constrained_intra_pred
|
||||
mode = ALZHEIMER_DC_L0T_PRED8x8 +
|
||||
(!(h->left_samples_available & 0x8000)) +
|
||||
2 * (mode == DC_128_PRED8x8);
|
||||
}
|
||||
if (mode < 0) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"left block unavailable for requested intra mode at %d %d\n",
|
||||
h->mb_x, h->mb_y);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
return mode;
|
||||
@@ -391,7 +391,6 @@ void ff_h264_free_tables(H264Context *h, int free_rbsp)
|
||||
if (free_rbsp && h->DPB) {
|
||||
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
|
||||
ff_h264_unref_picture(h, &h->DPB[i]);
|
||||
memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
|
||||
av_freep(&h->DPB);
|
||||
} else if (h->DPB) {
|
||||
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
|
||||
@@ -727,9 +726,8 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
|
||||
memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
|
||||
memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
|
||||
|
||||
h->avctx = avctx;
|
||||
h->rbsp_buffer[0] = NULL;
|
||||
h->rbsp_buffer[1] = NULL;
|
||||
h->rbsp_buffer[0] = NULL;
|
||||
h->rbsp_buffer[1] = NULL;
|
||||
h->rbsp_buffer_size[0] = 0;
|
||||
h->rbsp_buffer_size[1] = 0;
|
||||
h->context_initialized = 0;
|
||||
@@ -879,7 +877,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
|
||||
if (rotation) {
|
||||
av_display_rotation_set((int32_t *)rotation->data, angle);
|
||||
av_display_matrix_flip((int32_t *)rotation->data,
|
||||
h->sei_hflip, h->sei_vflip);
|
||||
h->sei_vflip, h->sei_hflip);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -992,16 +990,6 @@ int ff_pred_weight_table(H264Context *h)
|
||||
h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
|
||||
if (h->sps.chroma_format_idc)
|
||||
h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
|
||||
|
||||
if (h->luma_log2_weight_denom > 7U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", h->luma_log2_weight_denom);
|
||||
h->luma_log2_weight_denom = 0;
|
||||
}
|
||||
if (h->chroma_log2_weight_denom > 7U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", h->chroma_log2_weight_denom);
|
||||
h->chroma_log2_weight_denom = 0;
|
||||
}
|
||||
|
||||
luma_def = 1 << h->luma_log2_weight_denom;
|
||||
chroma_def = 1 << h->chroma_log2_weight_denom;
|
||||
|
||||
@@ -1342,6 +1330,43 @@ int ff_set_ref_count(H264Context *h)
|
||||
|
||||
static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
|
||||
|
||||
static int find_start_code(const uint8_t *buf, int buf_size,
|
||||
int buf_index, int next_avc)
|
||||
{
|
||||
// start code prefix search
|
||||
for (; buf_index + 3 < next_avc; buf_index++)
|
||||
// This should always succeed in the first iteration.
|
||||
if (buf[buf_index] == 0 &&
|
||||
buf[buf_index + 1] == 0 &&
|
||||
buf[buf_index + 2] == 1)
|
||||
break;
|
||||
|
||||
buf_index += 3;
|
||||
|
||||
if (buf_index >= buf_size)
|
||||
return buf_size;
|
||||
|
||||
return buf_index;
|
||||
}
|
||||
|
||||
static int get_avc_nalsize(H264Context *h, const uint8_t *buf,
|
||||
int buf_size, int *buf_index)
|
||||
{
|
||||
int i, nalsize = 0;
|
||||
|
||||
if (*buf_index >= buf_size - h->nal_length_size)
|
||||
return -1;
|
||||
|
||||
for (i = 0; i < h->nal_length_size; i++)
|
||||
nalsize = (nalsize << 8) | buf[(*buf_index)++];
|
||||
if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"AVC: nal size %d\n", nalsize);
|
||||
return -1;
|
||||
}
|
||||
return nalsize;
|
||||
}
|
||||
|
||||
static int get_bit_length(H264Context *h, const uint8_t *buf,
|
||||
const uint8_t *ptr, int dst_length,
|
||||
int i, int next_avc)
|
||||
@@ -1516,8 +1541,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
|
||||
continue;
|
||||
|
||||
again:
|
||||
if ( (!(avctx->active_thread_type & FF_THREAD_FRAME) || nals_needed >= nal_index)
|
||||
&& !h->current_slice)
|
||||
if ( !(avctx->active_thread_type & FF_THREAD_FRAME)
|
||||
|| nals_needed >= nal_index)
|
||||
h->au_pps_id = -1;
|
||||
/* Ignore per frame NAL unit type during extradata
|
||||
* parsing. Decoding slices is not possible in codec init
|
||||
|
@@ -36,7 +36,6 @@
|
||||
#include "h264dsp.h"
|
||||
#include "h264pred.h"
|
||||
#include "h264qpel.h"
|
||||
#include "internal.h" // for avpriv_find_start_code()
|
||||
#include "me_cmp.h"
|
||||
#include "mpegutils.h"
|
||||
#include "parser.h"
|
||||
@@ -338,7 +337,6 @@ typedef struct H264Picture {
|
||||
* H264Context
|
||||
*/
|
||||
typedef struct H264Context {
|
||||
AVClass *av_class;
|
||||
AVCodecContext *avctx;
|
||||
MECmpContext mecc;
|
||||
VideoDSPContext vdsp;
|
||||
@@ -1094,34 +1092,6 @@ static av_always_inline int get_dct8x8_allowed(H264Context *h)
|
||||
0x0001000100010001ULL));
|
||||
}
|
||||
|
||||
static inline int find_start_code(const uint8_t *buf, int buf_size,
|
||||
int buf_index, int next_avc)
|
||||
{
|
||||
uint32_t state = -1;
|
||||
|
||||
buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;
|
||||
|
||||
return FFMIN(buf_index, buf_size);
|
||||
}
|
||||
|
||||
static inline int get_avc_nalsize(H264Context *h, const uint8_t *buf,
|
||||
int buf_size, int *buf_index)
|
||||
{
|
||||
int i, nalsize = 0;
|
||||
|
||||
if (*buf_index >= buf_size - h->nal_length_size)
|
||||
return -1;
|
||||
|
||||
for (i = 0; i < h->nal_length_size; i++)
|
||||
nalsize = ((unsigned)nalsize << 8) | buf[(*buf_index)++];
|
||||
if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"AVC: nal size %d\n", nalsize);
|
||||
return -1;
|
||||
}
|
||||
return nalsize;
|
||||
}
|
||||
|
||||
int ff_h264_field_end(H264Context *h, int in_setup);
|
||||
|
||||
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
|
||||
|
@@ -1281,7 +1281,7 @@ void ff_h264_init_cabac_states(H264Context *h) {
|
||||
}
|
||||
|
||||
static int decode_cabac_field_decoding_flag(H264Context *h) {
|
||||
const int mbb_xy = h->mb_xy - 2*h->mb_stride;
|
||||
const long mbb_xy = h->mb_xy - 2L*h->mb_stride;
|
||||
|
||||
unsigned long ctx = 0;
|
||||
|
||||
@@ -1713,7 +1713,7 @@ decode_cabac_residual_internal(H264Context *h, int16_t *block,
|
||||
\
|
||||
if( coeff_abs >= 15 ) { \
|
||||
int j = 0; \
|
||||
while (get_cabac_bypass(CC) && j < 30) { \
|
||||
while(get_cabac_bypass( CC ) && j<30) { \
|
||||
j++; \
|
||||
} \
|
||||
\
|
||||
|
@@ -202,10 +202,10 @@ static int scan_mmco_reset(AVCodecParserContext *s)
|
||||
*/
|
||||
static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
const uint8_t * const buf, int buf_size)
|
||||
const uint8_t *buf, int buf_size)
|
||||
{
|
||||
H264Context *h = s->priv_data;
|
||||
int buf_index, next_avc;
|
||||
const uint8_t *buf_end = buf + buf_size;
|
||||
unsigned int pps_id;
|
||||
unsigned int slice_type;
|
||||
int state = -1, got_reset = 0;
|
||||
@@ -225,26 +225,26 @@ static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
if (!buf_size)
|
||||
return 0;
|
||||
|
||||
buf_index = 0;
|
||||
next_avc = h->is_avc ? 0 : buf_size;
|
||||
for (;;) {
|
||||
int src_length, dst_length, consumed, nalsize = 0;
|
||||
|
||||
if (buf_index >= next_avc) {
|
||||
nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
|
||||
if (nalsize < 0)
|
||||
if (h->is_avc) {
|
||||
int i;
|
||||
if (h->nal_length_size >= buf_end - buf) break;
|
||||
nalsize = 0;
|
||||
for (i = 0; i < h->nal_length_size; i++)
|
||||
nalsize = (nalsize << 8) | *buf++;
|
||||
if (nalsize <= 0 || nalsize > buf_end - buf) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
|
||||
break;
|
||||
next_avc = buf_index + nalsize;
|
||||
}
|
||||
src_length = nalsize;
|
||||
} else {
|
||||
buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
|
||||
if (buf_index >= buf_size)
|
||||
break;
|
||||
if (buf_index >= next_avc)
|
||||
continue;
|
||||
buf = avpriv_find_start_code(buf, buf_end, &state);
|
||||
if (buf >= buf_end)
|
||||
break;
|
||||
--buf;
|
||||
src_length = buf_end - buf;
|
||||
}
|
||||
src_length = next_avc - buf_index;
|
||||
|
||||
state = buf[buf_index];
|
||||
switch (state & 0x1f) {
|
||||
case NAL_SLICE:
|
||||
case NAL_IDR_SLICE:
|
||||
@@ -261,13 +261,10 @@ static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
}
|
||||
break;
|
||||
}
|
||||
ptr = ff_h264_decode_nal(h, buf + buf_index, &dst_length,
|
||||
&consumed, src_length);
|
||||
ptr = ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length);
|
||||
if (!ptr || dst_length < 0)
|
||||
break;
|
||||
|
||||
buf_index += consumed;
|
||||
|
||||
init_get_bits(&h->gb, ptr, 8 * dst_length);
|
||||
switch (h->nal_unit_type) {
|
||||
case NAL_SPS:
|
||||
@@ -442,6 +439,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
|
||||
return 0; /* no need to evaluate the rest */
|
||||
}
|
||||
buf += h->is_avc ? nalsize : consumed;
|
||||
}
|
||||
if (q264)
|
||||
return 0;
|
||||
|
@@ -391,8 +391,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h)
|
||||
"Different chroma and luma bit depth");
|
||||
goto fail;
|
||||
}
|
||||
if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
|
||||
sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) {
|
||||
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
|
||||
sps->bit_depth_luma, sps->bit_depth_chroma);
|
||||
goto fail;
|
||||
@@ -480,10 +479,10 @@ int ff_h264_decode_seq_parameter_set(H264Context *h)
|
||||
#endif
|
||||
sps->crop = get_bits1(&h->gb);
|
||||
if (sps->crop) {
|
||||
unsigned int crop_left = get_ue_golomb(&h->gb);
|
||||
unsigned int crop_right = get_ue_golomb(&h->gb);
|
||||
unsigned int crop_top = get_ue_golomb(&h->gb);
|
||||
unsigned int crop_bottom = get_ue_golomb(&h->gb);
|
||||
int crop_left = get_ue_golomb(&h->gb);
|
||||
int crop_right = get_ue_golomb(&h->gb);
|
||||
int crop_top = get_ue_golomb(&h->gb);
|
||||
int crop_bottom = get_ue_golomb(&h->gb);
|
||||
int width = 16 * sps->mb_width;
|
||||
int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag);
|
||||
|
||||
|
@@ -281,7 +281,7 @@ static int decode_display_orientation(H264Context *h)
|
||||
|
||||
int ff_h264_decode_sei(H264Context *h)
|
||||
{
|
||||
while (get_bits_left(&h->gb) > 16 && show_bits(&h->gb, 16)) {
|
||||
while (get_bits_left(&h->gb) > 16) {
|
||||
int type = 0;
|
||||
unsigned size = 0;
|
||||
unsigned next;
|
||||
|
@@ -585,17 +585,6 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
|
||||
h->mb_type_pool = NULL;
|
||||
h->ref_index_pool = NULL;
|
||||
h->motion_val_pool = NULL;
|
||||
h->intra4x4_pred_mode= NULL;
|
||||
h->non_zero_count = NULL;
|
||||
h->slice_table_base = NULL;
|
||||
h->slice_table = NULL;
|
||||
h->cbp_table = NULL;
|
||||
h->chroma_pred_mode_table = NULL;
|
||||
memset(h->mvd_table, 0, sizeof(h->mvd_table));
|
||||
h->direct_table = NULL;
|
||||
h->list_counts = NULL;
|
||||
h->mb2b_xy = NULL;
|
||||
h->mb2br_xy = NULL;
|
||||
for (i = 0; i < 2; i++) {
|
||||
h->rbsp_buffer[i] = NULL;
|
||||
h->rbsp_buffer_size[i] = 0;
|
||||
@@ -647,11 +636,8 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
|
||||
|
||||
h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
|
||||
ff_h264_unref_picture(h, &h->cur_pic);
|
||||
if (h1->cur_pic.f.buf[0]) {
|
||||
ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
if (h1->cur_pic.f.buf[0] && (ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
|
||||
return ret;
|
||||
|
||||
h->workaround_bugs = h1->workaround_bugs;
|
||||
h->low_delay = h1->low_delay;
|
||||
@@ -1039,79 +1025,76 @@ static int clone_slice(H264Context *dst, H264Context *src)
|
||||
|
||||
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
||||
{
|
||||
enum AVPixelFormat pix_fmts[2];
|
||||
const enum AVPixelFormat *choices = pix_fmts;
|
||||
int i;
|
||||
|
||||
pix_fmts[1] = AV_PIX_FMT_NONE;
|
||||
|
||||
switch (h->sps.bit_depth_luma) {
|
||||
case 9:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP9;
|
||||
return AV_PIX_FMT_GBRP9;
|
||||
} else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P9;
|
||||
return AV_PIX_FMT_YUV444P9;
|
||||
} else if (CHROMA422(h))
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P9;
|
||||
return AV_PIX_FMT_YUV422P9;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P9;
|
||||
return AV_PIX_FMT_YUV420P9;
|
||||
break;
|
||||
case 10:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP10;
|
||||
return AV_PIX_FMT_GBRP10;
|
||||
} else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P10;
|
||||
return AV_PIX_FMT_YUV444P10;
|
||||
} else if (CHROMA422(h))
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P10;
|
||||
return AV_PIX_FMT_YUV422P10;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P10;
|
||||
return AV_PIX_FMT_YUV420P10;
|
||||
break;
|
||||
case 12:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP12;
|
||||
return AV_PIX_FMT_GBRP12;
|
||||
} else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P12;
|
||||
return AV_PIX_FMT_YUV444P12;
|
||||
} else if (CHROMA422(h))
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P12;
|
||||
return AV_PIX_FMT_YUV422P12;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P12;
|
||||
return AV_PIX_FMT_YUV420P12;
|
||||
break;
|
||||
case 14:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP14;
|
||||
return AV_PIX_FMT_GBRP14;
|
||||
} else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P14;
|
||||
return AV_PIX_FMT_YUV444P14;
|
||||
} else if (CHROMA422(h))
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P14;
|
||||
return AV_PIX_FMT_YUV422P14;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P14;
|
||||
return AV_PIX_FMT_YUV420P14;
|
||||
break;
|
||||
case 8:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_YCGCO)
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
av_log(h->avctx, AV_LOG_DEBUG, "Detected GBR colorspace.\n");
|
||||
return AV_PIX_FMT_GBR24P;
|
||||
} else if (h->avctx->colorspace == AVCOL_SPC_YCGCO) {
|
||||
av_log(h->avctx, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB)
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP;
|
||||
else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
|
||||
pix_fmts[0] = AV_PIX_FMT_YUVJ444P;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P;
|
||||
}
|
||||
return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ444P
|
||||
: AV_PIX_FMT_YUV444P;
|
||||
} else if (CHROMA422(h)) {
|
||||
if (h->avctx->color_range == AVCOL_RANGE_JPEG)
|
||||
pix_fmts[0] = AV_PIX_FMT_YUVJ422P;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P;
|
||||
return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P
|
||||
: AV_PIX_FMT_YUV422P;
|
||||
} else {
|
||||
if (h->avctx->codec->pix_fmts)
|
||||
choices = h->avctx->codec->pix_fmts;
|
||||
else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
|
||||
choices = h264_hwaccel_pixfmt_list_jpeg_420;
|
||||
else
|
||||
choices = h264_hwaccel_pixfmt_list_420;
|
||||
int i;
|
||||
const enum AVPixelFormat * fmt = h->avctx->codec->pix_fmts ?
|
||||
h->avctx->codec->pix_fmts :
|
||||
h->avctx->color_range == AVCOL_RANGE_JPEG ?
|
||||
h264_hwaccel_pixfmt_list_jpeg_420 :
|
||||
h264_hwaccel_pixfmt_list_420;
|
||||
|
||||
for (i=0; fmt[i] != AV_PIX_FMT_NONE; i++)
|
||||
if (fmt[i] == h->avctx->pix_fmt && !force_callback)
|
||||
return fmt[i];
|
||||
return ff_thread_get_format(h->avctx, fmt);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@@ -1119,11 +1102,6 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
||||
"Unsupported bit depth %d\n", h->sps.bit_depth_luma);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
|
||||
if (choices[i] == h->avctx->pix_fmt && !force_callback)
|
||||
return choices[i];
|
||||
return ff_thread_get_format(h->avctx, choices);
|
||||
}
|
||||
|
||||
/* export coded and cropped frame dimensions to AVCodecContext */
|
||||
@@ -1308,9 +1286,6 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
int must_reinit;
|
||||
int needs_reinit = 0;
|
||||
int field_pic_flag, bottom_field_flag;
|
||||
int first_slice = h == h0 && !h0->current_slice;
|
||||
int frame_num, picture_structure, droppable;
|
||||
PPS *pps;
|
||||
|
||||
h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
|
||||
h->qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;
|
||||
@@ -1384,27 +1359,18 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
h0->au_pps_id, pps_id);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
h->pps = *h0->pps_buffers[pps_id];
|
||||
|
||||
pps = h0->pps_buffers[pps_id];
|
||||
|
||||
if (!h0->sps_buffers[pps->sps_id]) {
|
||||
if (!h0->sps_buffers[h->pps.sps_id]) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"non-existing SPS %u referenced\n",
|
||||
h->pps.sps_id);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (first_slice)
|
||||
h->pps = *h0->pps_buffers[pps_id];
|
||||
|
||||
if (pps->sps_id != h->sps.sps_id ||
|
||||
pps->sps_id != h->current_sps_id ||
|
||||
h0->sps_buffers[pps->sps_id]->new) {
|
||||
|
||||
if (!first_slice) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"SPS changed in the middle of the frame\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (h->pps.sps_id != h->sps.sps_id ||
|
||||
h->pps.sps_id != h->current_sps_id ||
|
||||
h0->sps_buffers[h->pps.sps_id]->new) {
|
||||
|
||||
h->sps = *h0->sps_buffers[h->pps.sps_id];
|
||||
|
||||
@@ -1434,15 +1400,13 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
|| 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
|
||||
|| h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|
||||
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|
||||
|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
|
||||
|| h->mb_width != h->sps.mb_width
|
||||
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
|
||||
));
|
||||
if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0)))
|
||||
must_reinit = 1;
|
||||
|
||||
if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
|
||||
must_reinit = 1;
|
||||
|
||||
h->mb_width = h->sps.mb_width;
|
||||
h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
|
||||
h->mb_num = h->mb_width * h->mb_height;
|
||||
@@ -1483,8 +1447,6 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
av_assert1(first_slice);
|
||||
|
||||
ff_h264_flush_change(h);
|
||||
|
||||
if ((ret = get_pixel_format(h, 1)) < 0)
|
||||
@@ -1518,48 +1480,44 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
}
|
||||
}
|
||||
|
||||
if (first_slice && h->dequant_coeff_pps != pps_id) {
|
||||
if (h == h0 && h->dequant_coeff_pps != pps_id) {
|
||||
h->dequant_coeff_pps = pps_id;
|
||||
h264_init_dequant_tables(h);
|
||||
}
|
||||
|
||||
frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
|
||||
if (!first_slice) {
|
||||
if (h0->frame_num != frame_num) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
|
||||
h0->frame_num, frame_num);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
|
||||
|
||||
h->mb_mbaff = 0;
|
||||
h->mb_aff_frame = 0;
|
||||
last_pic_structure = h0->picture_structure;
|
||||
last_pic_droppable = h0->droppable;
|
||||
droppable = h->nal_ref_idc == 0;
|
||||
h->droppable = h->nal_ref_idc == 0;
|
||||
if (h->sps.frame_mbs_only_flag) {
|
||||
picture_structure = PICT_FRAME;
|
||||
h->picture_structure = PICT_FRAME;
|
||||
} else {
|
||||
if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
|
||||
return -1;
|
||||
}
|
||||
field_pic_flag = get_bits1(&h->gb);
|
||||
|
||||
if (field_pic_flag) {
|
||||
bottom_field_flag = get_bits1(&h->gb);
|
||||
picture_structure = PICT_TOP_FIELD + bottom_field_flag;
|
||||
h->picture_structure = PICT_TOP_FIELD + bottom_field_flag;
|
||||
} else {
|
||||
picture_structure = PICT_FRAME;
|
||||
h->picture_structure = PICT_FRAME;
|
||||
h->mb_aff_frame = h->sps.mb_aff;
|
||||
}
|
||||
}
|
||||
if (h0->current_slice) {
|
||||
if (last_pic_structure != picture_structure ||
|
||||
last_pic_droppable != droppable) {
|
||||
h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
|
||||
|
||||
if (h0->current_slice != 0) {
|
||||
if (last_pic_structure != h->picture_structure ||
|
||||
last_pic_droppable != h->droppable) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"Changing field mode (%d -> %d) between slices is not allowed\n",
|
||||
last_pic_structure, h->picture_structure);
|
||||
h->picture_structure = last_pic_structure;
|
||||
h->droppable = last_pic_droppable;
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else if (!h0->cur_pic_ptr) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
@@ -1567,14 +1525,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
h0->current_slice + 1);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
h->picture_structure = picture_structure;
|
||||
h->droppable = droppable;
|
||||
h->frame_num = frame_num;
|
||||
h->mb_field_decoding_flag = picture_structure != PICT_FRAME;
|
||||
|
||||
if (h0->current_slice == 0) {
|
||||
} else {
|
||||
/* Shorten frame num gaps so we don't have to allocate reference
|
||||
* frames just to throw them away */
|
||||
if (h->frame_num != h->prev_frame_num) {
|
||||
|
@@ -108,7 +108,7 @@ static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
|
||||
if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
|
||||
goto fail;
|
||||
|
||||
s->filter_slice_edges = av_mallocz(ctb_count);
|
||||
s->filter_slice_edges = av_malloc(ctb_count);
|
||||
s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
|
||||
sizeof(*s->tab_slice_address));
|
||||
s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
|
||||
@@ -144,7 +144,7 @@ static void pred_weight_table(HEVCContext *s, GetBitContext *gb)
|
||||
uint8_t luma_weight_l1_flag[16];
|
||||
uint8_t chroma_weight_l1_flag[16];
|
||||
|
||||
s->sh.luma_log2_weight_denom = av_clip_c(get_ue_golomb_long(gb), 0, 7);
|
||||
s->sh.luma_log2_weight_denom = get_ue_golomb_long(gb);
|
||||
if (s->sps->chroma_format_idc != 0) {
|
||||
int delta = get_se_golomb(gb);
|
||||
s->sh.chroma_log2_weight_denom = av_clip(s->sh.luma_log2_weight_denom + delta, 0, 7);
|
||||
@@ -981,7 +981,7 @@ static int hls_transform_unit(HEVCContext *s, int x0, int y0,
|
||||
for (i = 0; i < (size * size); i++) {
|
||||
coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
|
||||
}
|
||||
s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride);
|
||||
s->hevcdsp.transform_add[log2_trafo_size-2](dst, coeffs, stride);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1010,7 +1010,7 @@ static int hls_transform_unit(HEVCContext *s, int x0, int y0,
|
||||
for (i = 0; i < (size * size); i++) {
|
||||
coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
|
||||
}
|
||||
s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride);
|
||||
s->hevcdsp.transform_add[log2_trafo_size-2](dst, coeffs, stride);
|
||||
}
|
||||
}
|
||||
} else if (blk_idx == 3) {
|
||||
@@ -2532,7 +2532,7 @@ static int set_side_data(HEVCContext *s)
|
||||
|
||||
av_display_rotation_set((int32_t *)rotation->data, angle);
|
||||
av_display_matrix_flip((int32_t *)rotation->data,
|
||||
s->sei_hflip, s->sei_vflip);
|
||||
s->sei_vflip, s->sei_hflip);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -2888,30 +2888,17 @@ static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
|
||||
|
||||
if (s->nals_allocated < s->nb_nals + 1) {
|
||||
int new_size = s->nals_allocated + 1;
|
||||
void *tmp = av_realloc_array(s->nals, new_size, sizeof(*s->nals));
|
||||
ret = AVERROR(ENOMEM);
|
||||
HEVCNAL *tmp = av_realloc_array(s->nals, new_size, sizeof(*tmp));
|
||||
if (!tmp) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
s->nals = tmp;
|
||||
memset(s->nals + s->nals_allocated, 0,
|
||||
(new_size - s->nals_allocated) * sizeof(*s->nals));
|
||||
|
||||
tmp = av_realloc_array(s->skipped_bytes_nal, new_size, sizeof(*s->skipped_bytes_nal));
|
||||
if (!tmp)
|
||||
goto fail;
|
||||
s->skipped_bytes_nal = tmp;
|
||||
|
||||
tmp = av_realloc_array(s->skipped_bytes_pos_size_nal, new_size, sizeof(*s->skipped_bytes_pos_size_nal));
|
||||
if (!tmp)
|
||||
goto fail;
|
||||
s->skipped_bytes_pos_size_nal = tmp;
|
||||
|
||||
tmp = av_realloc_array(s->skipped_bytes_pos_nal, new_size, sizeof(*s->skipped_bytes_pos_nal));
|
||||
if (!tmp)
|
||||
goto fail;
|
||||
s->skipped_bytes_pos_nal = tmp;
|
||||
|
||||
(new_size - s->nals_allocated) * sizeof(*tmp));
|
||||
av_reallocp_array(&s->skipped_bytes_nal, new_size, sizeof(*s->skipped_bytes_nal));
|
||||
av_reallocp_array(&s->skipped_bytes_pos_size_nal, new_size, sizeof(*s->skipped_bytes_pos_size_nal));
|
||||
av_reallocp_array(&s->skipped_bytes_pos_nal, new_size, sizeof(*s->skipped_bytes_pos_nal));
|
||||
s->skipped_bytes_pos_size_nal[s->nals_allocated] = 1024; // initial buffer size
|
||||
s->skipped_bytes_pos_nal[s->nals_allocated] = av_malloc_array(s->skipped_bytes_pos_size_nal[s->nals_allocated], sizeof(*s->skipped_bytes_pos));
|
||||
s->nals_allocated = new_size;
|
||||
|
@@ -298,10 +298,10 @@ typedef struct RefPicListTab {
|
||||
} RefPicListTab;
|
||||
|
||||
typedef struct HEVCWindow {
|
||||
unsigned int left_offset;
|
||||
unsigned int right_offset;
|
||||
unsigned int top_offset;
|
||||
unsigned int bottom_offset;
|
||||
int left_offset;
|
||||
int right_offset;
|
||||
int top_offset;
|
||||
int bottom_offset;
|
||||
} HEVCWindow;
|
||||
|
||||
typedef struct VUI {
|
||||
|
@@ -227,10 +227,8 @@ static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
|
||||
|
||||
HEVCFrame *ref = s->ref->collocated_ref;
|
||||
|
||||
if (!ref) {
|
||||
memset(mvLXCol, 0, sizeof(*mvLXCol));
|
||||
if (!ref)
|
||||
return 0;
|
||||
}
|
||||
|
||||
tab_mvf = ref->tab_mvf;
|
||||
colPic = ref->poc;
|
||||
@@ -417,10 +415,14 @@ static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
|
||||
|
||||
if (available_l0 || available_l1) {
|
||||
mergecandlist[nb_merge_cand].pred_flag = available_l0 + (available_l1 << 1);
|
||||
AV_ZERO16(mergecandlist[nb_merge_cand].ref_idx);
|
||||
mergecandlist[nb_merge_cand].mv[0] = mv_l0_col;
|
||||
mergecandlist[nb_merge_cand].mv[1] = mv_l1_col;
|
||||
|
||||
if (available_l0) {
|
||||
mergecandlist[nb_merge_cand].mv[0] = mv_l0_col;
|
||||
mergecandlist[nb_merge_cand].ref_idx[0] = 0;
|
||||
}
|
||||
if (available_l1) {
|
||||
mergecandlist[nb_merge_cand].mv[1] = mv_l1_col;
|
||||
mergecandlist[nb_merge_cand].ref_idx[1] = 0;
|
||||
}
|
||||
if (merge_idx == nb_merge_cand)
|
||||
return;
|
||||
nb_merge_cand++;
|
||||
|
@@ -525,11 +525,7 @@ static void decode_vui(HEVCContext *s, HEVCSPS *sps)
|
||||
vui->field_seq_flag = get_bits1(gb);
|
||||
vui->frame_field_info_present_flag = get_bits1(gb);
|
||||
|
||||
if (get_bits_left(gb) >= 68 && show_bits_long(gb, 21) == 0x100000) {
|
||||
vui->default_display_window_flag = 0;
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Invalid default display window\n");
|
||||
} else
|
||||
vui->default_display_window_flag = get_bits1(gb);
|
||||
vui->default_display_window_flag = get_bits1(gb);
|
||||
// Backup context in case an alternate header is detected
|
||||
memcpy(&backup, gb, sizeof(backup));
|
||||
|
||||
@@ -895,30 +891,11 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
||||
sps->log2_max_trafo_size = log2_diff_max_min_transform_block_size +
|
||||
sps->log2_min_tb_size;
|
||||
|
||||
if (sps->log2_min_cb_size < 3 || sps->log2_min_cb_size > 30) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_min_cb_size", sps->log2_min_cb_size);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (sps->log2_diff_max_min_coding_block_size > 30) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_diff_max_min_coding_block_size", sps->log2_diff_max_min_coding_block_size);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (sps->log2_min_tb_size >= sps->log2_min_cb_size || sps->log2_min_tb_size < 2) {
|
||||
if (sps->log2_min_tb_size >= sps->log2_min_cb_size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid value for log2_min_tb_size");
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (log2_diff_max_min_transform_block_size < 0 || log2_diff_max_min_transform_block_size > 30) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_diff_max_min_transform_block_size", log2_diff_max_min_transform_block_size);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
|
||||
sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb);
|
||||
sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb);
|
||||
|
||||
@@ -970,11 +947,6 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
||||
sps->long_term_ref_pics_present_flag = get_bits1(gb);
|
||||
if (sps->long_term_ref_pics_present_flag) {
|
||||
sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb);
|
||||
if (sps->num_long_term_ref_pics_sps > 31U) {
|
||||
av_log(0, AV_LOG_ERROR, "num_long_term_ref_pics_sps %d is out of range.\n",
|
||||
sps->num_long_term_ref_pics_sps);
|
||||
goto err;
|
||||
}
|
||||
for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
|
||||
sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb);
|
||||
sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb);
|
||||
@@ -1040,8 +1012,7 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
||||
(sps->output_window.left_offset + sps->output_window.right_offset);
|
||||
sps->output_height = sps->height -
|
||||
(sps->output_window.top_offset + sps->output_window.bottom_offset);
|
||||
if (sps->width <= sps->output_window.left_offset + (int64_t)sps->output_window.right_offset ||
|
||||
sps->height <= sps->output_window.top_offset + (int64_t)sps->output_window.bottom_offset) {
|
||||
if (sps->output_width <= 0 || sps->output_height <= 0) {
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Invalid visible frame dimensions: %dx%d.\n",
|
||||
sps->output_width, sps->output_height);
|
||||
if (s->avctx->err_recognition & AV_EF_EXPLODE) {
|
||||
@@ -1050,8 +1021,10 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
||||
}
|
||||
av_log(s->avctx, AV_LOG_WARNING,
|
||||
"Displaying the whole video surface.\n");
|
||||
memset(&sps->pic_conf_win, 0, sizeof(sps->pic_conf_win));
|
||||
memset(&sps->output_window, 0, sizeof(sps->output_window));
|
||||
sps->pic_conf_win.left_offset =
|
||||
sps->pic_conf_win.right_offset =
|
||||
sps->pic_conf_win.top_offset =
|
||||
sps->pic_conf_win.bottom_offset = 0;
|
||||
sps->output_width = sps->width;
|
||||
sps->output_height = sps->height;
|
||||
}
|
||||
@@ -1275,14 +1248,6 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
|
||||
if (pps->cu_qp_delta_enabled_flag)
|
||||
pps->diff_cu_qp_delta_depth = get_ue_golomb_long(gb);
|
||||
|
||||
if (pps->diff_cu_qp_delta_depth < 0 ||
|
||||
pps->diff_cu_qp_delta_depth > sps->log2_diff_max_min_coding_block_size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "diff_cu_qp_delta_depth %d is invalid\n",
|
||||
pps->diff_cu_qp_delta_depth);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
|
||||
pps->cb_qp_offset = get_se_golomb(gb);
|
||||
if (pps->cb_qp_offset < -12 || pps->cb_qp_offset > 12) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "pps_cb_qp_offset out of range: %d\n",
|
||||
@@ -1406,8 +1371,7 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
|
||||
int pps_range_extensions_flag = get_bits1(gb);
|
||||
/* int pps_extension_7bits = */ get_bits(gb, 7);
|
||||
if (sps->ptl.general_ptl.profile_idc == FF_PROFILE_HEVC_REXT && pps_range_extensions_flag) {
|
||||
if ((ret = pps_range_extensions(s, pps, sps)) < 0)
|
||||
goto err;
|
||||
pps_range_extensions(s, pps, sps);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -22,7 +22,6 @@
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "libavutil/mem.h"
|
||||
@@ -454,7 +453,6 @@ static int hnm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
static av_cold int hnm_decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
Hnm4VideoContext *hnm = avctx->priv_data;
|
||||
int ret;
|
||||
|
||||
if (avctx->extradata_size < 1) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
@@ -462,10 +460,6 @@ static av_cold int hnm_decode_init(AVCodecContext *avctx)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
hnm->version = avctx->extradata[0];
|
||||
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
||||
hnm->width = avctx->width;
|
||||
|
@@ -887,14 +887,14 @@ static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch)
|
||||
|
||||
flag = get_bits1(&q->gb);
|
||||
if (stream_format_code & 0x1)
|
||||
imc_decode_level_coefficients_raw(q, chctx->levlCoeffBuf,
|
||||
chctx->flcoeffs1, chctx->flcoeffs2);
|
||||
else if (stream_format_code & 0x1)
|
||||
imc_read_level_coeffs_raw(q, stream_format_code, chctx->levlCoeffBuf);
|
||||
else
|
||||
imc_read_level_coeffs(q, stream_format_code, chctx->levlCoeffBuf);
|
||||
|
||||
if (stream_format_code & 0x1)
|
||||
imc_decode_level_coefficients_raw(q, chctx->levlCoeffBuf,
|
||||
chctx->flcoeffs1, chctx->flcoeffs2);
|
||||
else if (stream_format_code & 0x4)
|
||||
if (stream_format_code & 0x4)
|
||||
imc_decode_level_coefficients(q, chctx->levlCoeffBuf,
|
||||
chctx->flcoeffs1, chctx->flcoeffs2);
|
||||
else
|
||||
|
@@ -94,7 +94,7 @@ typedef struct Indeo3DecodeContext {
|
||||
|
||||
int16_t width, height;
|
||||
uint32_t frame_num; ///< current frame number (zero-based)
|
||||
int data_size; ///< size of the frame data in bytes
|
||||
uint32_t data_size; ///< size of the frame data in bytes
|
||||
uint16_t frame_flags; ///< frame properties
|
||||
uint8_t cb_offset; ///< needed for selecting VQ tables
|
||||
uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary
|
||||
@@ -899,8 +899,7 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
GetByteContext gb;
|
||||
const uint8_t *bs_hdr;
|
||||
uint32_t frame_num, word2, check_sum, data_size;
|
||||
int y_offset, u_offset, v_offset;
|
||||
uint32_t starts[3], ends[3];
|
||||
uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
|
||||
uint16_t height, width;
|
||||
int i, j;
|
||||
|
||||
@@ -982,8 +981,7 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
ctx->y_data_size = ends[0] - starts[0];
|
||||
ctx->v_data_size = ends[1] - starts[1];
|
||||
ctx->u_data_size = ends[2] - starts[2];
|
||||
if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
|
||||
FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
|
||||
if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
|
||||
FFMIN3(y_offset, v_offset, u_offset) < gb.buffer - bs_hdr + 16 ||
|
||||
FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
|
||||
|
@@ -35,8 +35,6 @@
|
||||
|
||||
#define FF_SANE_NB_CHANNELS 63U
|
||||
|
||||
#define FF_SIGNBIT(x) (x >> CHAR_BIT * sizeof(x) - 1)
|
||||
|
||||
#if HAVE_AVX
|
||||
# define STRIDE_ALIGN 32
|
||||
#elif HAVE_SIMD_ALIGN_16
|
||||
|
@@ -269,11 +269,6 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s,
|
||||
x += stride;
|
||||
}
|
||||
|
||||
if (x >= w) {
|
||||
av_log(NULL, AV_LOG_ERROR, "run overflow\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* decode run termination value */
|
||||
Rb = R(last, x);
|
||||
RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
|
||||
|
@@ -43,13 +43,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
{
JvContext *s = avctx->priv_data;

if (!avctx->width || !avctx->height ||
(avctx->width & 7) || (avctx->height & 7)) {
av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
avctx->width, avctx->height);
return AVERROR(EINVAL);
}

s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
@@ -96,7 +96,8 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;

WebRtcIlbcfix_DecodeImpl((int16_t *) frame->data[0], (const uint16_t *) buf, &s->decoder, 1);
WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
(const WebRtc_UWord16*) buf, &s->decoder, 1);

*got_frame_ptr = 1;

@@ -169,7 +170,7 @@ static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
return ret;

WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);
WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);

avpkt->size = s->encoder.no_of_bytes;
*got_packet_ptr = 1;
@@ -170,11 +170,10 @@ static av_cold int libopus_encode_init(AVCodecContext *avctx)

/* FIXME: Opus can handle up to 255 channels. However, the mapping for
* anything greater than 8 is undefined. */
if (avctx->channels > 8) {
av_log(avctx, AV_LOG_ERROR,
if (avctx->channels > 8)
av_log(avctx, AV_LOG_WARNING,
"Channel layout undefined for %d channels.\n", avctx->channels);
return AVERROR_PATCHWELCOME;
}

if (!avctx->bit_rate) {
/* Sane default copied from opusenc */
avctx->bit_rate = 64000 * opus->stream_count +
@@ -152,8 +152,8 @@ static int twolame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if (ret < 0) // twolame error
return AVERROR_UNKNOWN;

avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
if (frame) {
avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
if (frame->pts != AV_NOPTS_VALUE)
avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);
} else {
@@ -88,12 +88,7 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
if (level == 127) {
break;
} else if (level != 0) {
i += run;
if (i > 63) {
av_log(a->avctx, AV_LOG_ERROR,
"ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
return AVERROR_INVALIDDATA;
}
i += run;
j = scantable[i];
level = (level * qscale * quant_matrix[j]) >> 3;
level = (level ^ SHOW_SBITS(re, &a->gb, 1)) - SHOW_SBITS(re, &a->gb, 1);

@@ -103,13 +98,8 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
run = SHOW_UBITS(re, &a->gb, 6)+1; LAST_SKIP_BITS(re, &a->gb, 6);
UPDATE_CACHE(re, &a->gb);
level = SHOW_SBITS(re, &a->gb, 10); SKIP_BITS(re, &a->gb, 10);
i += run;
if (i > 63) {
av_log(a->avctx, AV_LOG_ERROR,
"ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
return AVERROR_INVALIDDATA;
}
j = scantable[i];
i += run;
j = scantable[i];
if (level < 0) {
level = -level;
level = (level * qscale * quant_matrix[j]) >> 3;

@@ -120,6 +110,10 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
level = (level - 1) | 1;
}
}
if (i > 63) {
av_log(a->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
return AVERROR_INVALIDDATA;
}

block[j] = level;
}
@@ -89,7 +89,7 @@ static void ff_acelp_interpolatef_mips(float *out, const float *in,
"addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \n\t"
"madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"

: [v] "+&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
: [v] "=&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
[p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
[in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
[fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
@@ -244,10 +244,9 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)

int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
{
int len, nb_components, i, width, height, bits, ret;
unsigned pix_fmt_id;
int h_count[MAX_COMPONENTS] = { 0 };
int v_count[MAX_COMPONENTS] = { 0 };
int len, nb_components, i, width, height, pix_fmt_id, ret;
int h_count[MAX_COMPONENTS];
int v_count[MAX_COMPONENTS];

s->cur_scan = 0;
s->upscale_h = s->upscale_v = 0;

@@ -255,11 +254,11 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
s->avctx->bits_per_raw_sample =
bits = get_bits(&s->gb, 8);
s->bits = get_bits(&s->gb, 8);

if (s->pegasus_rct)
bits = 9;
if (bits == 9 && !s->pegasus_rct)
s->bits = 9;
if (s->bits == 9 && !s->pegasus_rct)
s->rct = 1; // FIXME ugly

if(s->lossless && s->avctx->lowres){

@@ -292,7 +291,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
return AVERROR_INVALIDDATA;
}
}
if (s->ls && !(bits <= 8 || nb_components == 1)) {
if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
avpriv_report_missing_feature(s->avctx,
"JPEG-LS that is not <= 8 "
"bits/component or 16-bit gray");

@@ -301,6 +300,8 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
s->nb_components = nb_components;
s->h_max = 1;
s->v_max = 1;
memset(h_count, 0, sizeof(h_count));
memset(v_count, 0, sizeof(v_count));
for (i = 0; i < nb_components; i++) {
/* component id */
s->component_id[i] = get_bits(&s->gb, 8) - 1;
@@ -335,13 +336,12 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)

/* if different size, realloc/alloc picture */
if (width != s->width || height != s->height || bits != s->bits ||
memcmp(s->h_count, h_count, sizeof(h_count)) ||
memcmp(s->v_count, v_count, sizeof(v_count))) {
if ( width != s->width || height != s->height
|| memcmp(s->h_count, h_count, sizeof(h_count))
|| memcmp(s->v_count, v_count, sizeof(v_count))) {

s->width = width;
s->height = height;
s->bits = bits;
memcpy(s->h_count, h_count, sizeof(h_count));
memcpy(s->v_count, v_count, sizeof(v_count));
s->interlaced = 0;

@@ -376,7 +376,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
else if (!s->lossless)
s->rgb = 0;
/* XXX: not complete test ! */
pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) |
(s->h_count[1] << 20) | (s->v_count[1] << 16) |
(s->h_count[2] << 12) | (s->v_count[2] << 8) |
(s->h_count[3] << 4) | s->v_count[3];

@@ -512,8 +512,6 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
if (pix_fmt_id == 0x42111100) {
if (s->bits > 8)
goto unk_pixfmt;
s->upscale_h = 6;
s->chroma_height = (s->height + 1) / 2;
}

@@ -536,12 +534,9 @@ unk_pixfmt:
}
if (s->ls) {
s->upscale_h = s->upscale_v = 0;
if (s->nb_components == 3) {
if (s->nb_components > 1)
s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
} else if (s->nb_components != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
return AVERROR_PATCHWELCOME;
} else if (s->palette_index && s->bits <= 8)
else if (s->palette_index && s->bits <= 8)
s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
else if (s->bits <= 8)
s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
@@ -1226,18 +1221,13 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,

if (s->interlaced && s->bottom_field)
block_offset += linesize[c] >> 1;
if ( 8*(h * mb_x + x) < s->width
&& 8*(v * mb_y + y) < s->height) {
ptr = data[c] + block_offset;
} else
ptr = NULL;
ptr = data[c] + block_offset;
if (!s->progressive) {
if (copy_mb) {
if (ptr)
mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
linesize[c], s->avctx->lowres);
if (copy_mb)
mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
linesize[c], s->avctx->lowres);

} else {
else {
s->bdsp.clear_block(s->block);
if (decode_block(s, s->block, i,
s->dc_index[i], s->ac_index[i],

@@ -1246,11 +1236,9 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
"error y=%d x=%d\n", mb_y, mb_x);
return AVERROR_INVALIDDATA;
}
if (ptr) {
s->idsp.idct_put(ptr, linesize[c], s->block);
if (s->bits & 7)
shift_output(s, ptr, linesize[c]);
}
s->idsp.idct_put(ptr, linesize[c], s->block);
if (s->bits & 7)
shift_output(s, ptr, linesize[c]);
}
} else {
int block_idx = s->block_stride[c] * (v * mb_y + y) +
@@ -1604,8 +1592,6 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
}

if (id == AV_RB32("LJIF")) {
int rgb = s->rgb;
int pegasus_rct = s->pegasus_rct;
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(s->avctx, AV_LOG_INFO,
"Pegasus lossless jpeg header found\n");

@@ -1615,27 +1601,17 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
skip_bits(&s->gb, 16); /* unknown always 0? */
switch (i=get_bits(&s->gb, 8)) {
case 1:
rgb = 1;
pegasus_rct = 0;
s->rgb = 1;
s->pegasus_rct = 0;
break;
case 2:
rgb = 1;
pegasus_rct = 1;
s->rgb = 1;
s->pegasus_rct = 1;
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
}

len -= 9;
if (s->got_picture)
if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
goto out;
}

s->rgb = rgb;
s->pegasus_rct = pegasus_rct;

goto out;
}
if (id == AV_RL32("colr") && len > 0) {
@@ -1887,10 +1863,6 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
put_bits(&pb, 8, x);
if (x == 0xFF) {
x = src[b++];
if (x & 0x80) {
av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
x &= 0x7f;
}
put_bits(&pb, 7, x);
bit_count--;
}
@@ -241,7 +241,7 @@ AVCodec ff_amv_encoder = {
.encode2 = amv_encode_picture,
.close = ff_mpv_encode_end,
.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_NONE
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_NONE
},
};
#endif
@@ -61,13 +61,6 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)

avctx->pix_fmt = AV_PIX_FMT_PAL8;

if (!avctx->width || !avctx->height ||
(avctx->width & 1) || (avctx->height & 1)) {
av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
avctx->width, avctx->height);
return AVERROR(EINVAL);
}

s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);

@@ -116,7 +109,7 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)

if (color) {
memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
if (half_vert && y + half_vert < s->avctx->height)
if (half_vert)
memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
}
x+= run_length;
@@ -193,13 +193,7 @@ static av_always_inline int cmp_inline(MpegEncContext *s, const int x, const int
int uvdxy; /* no, it might not be used uninitialized */
if(dxy){
if(qpel){
if (h << size == 16) {
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
} else if (size == 0 && h == 8) {
c->qpel_put[1][dxy](c->temp , ref[0] + x + y*stride , stride);
c->qpel_put[1][dxy](c->temp + 8, ref[0] + x + y*stride + 8, stride);
} else
av_assert2(0);
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
if(chroma){
int cx= hx/2;
int cy= hy/2;
@@ -1120,10 +1120,6 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
MpegEncContext *s2 = &s->mpeg_enc_ctx;

ff_mpv_decode_defaults(s2);

if ( avctx->codec_tag != AV_RL32("VCR2")
&& avctx->codec_tag != AV_RL32("BW10"))
avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
ff_mpv_decode_init(s2, avctx);

s->mpeg_enc_ctx.avctx = avctx;

@@ -1213,16 +1209,6 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
AV_PIX_FMT_NONE
};

static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_NONE
};

static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE
};

static inline int uses_vdpau(AVCodecContext *avctx) {
return avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG1 || avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG2;
}

@@ -1231,18 +1217,16 @@ static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
{
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
const enum AVPixelFormat *pix_fmts;

if (s->chroma_format < 2)
pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
return ff_thread_get_format(avctx,
avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
mpeg1_hwaccel_pixfmt_list_420 :
mpeg2_hwaccel_pixfmt_list_420;
mpeg2_hwaccel_pixfmt_list_420);
else if (s->chroma_format == 2)
pix_fmts = mpeg12_pixfmt_list_422;
return AV_PIX_FMT_YUV422P;
else
pix_fmts = mpeg12_pixfmt_list_444;

return ff_thread_get_format(avctx, pix_fmts);
return AV_PIX_FMT_YUV444P;
}

static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
@@ -82,15 +82,10 @@ int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf,
int bit_size, int sync_extension)
{
GetBitContext gb;
int specific_config_bitindex, ret;
int specific_config_bitindex;

if (bit_size <= 0)
if (bit_size <= 0 || init_get_bits(&gb, buf, bit_size) < 0)
return AVERROR_INVALIDDATA;

ret = init_get_bits(&gb, buf, bit_size);
if (ret < 0)
return ret;

c->object_type = get_object_type(&gb);
c->sample_rate = get_sample_rate(&gb, &c->sampling_index);
c->chan_config = get_bits(&gb, 4);
@@ -88,7 +88,7 @@ static int mpeg4_decode_header(AVCodecParserContext *s1, AVCodecContext *avctx,
if (avctx->extradata_size && pc->first_picture) {
init_get_bits(gb, avctx->extradata, avctx->extradata_size * 8);
ret = ff_mpeg4_decode_picture_header(dec_ctx, gb);
if (ret < -1)
if (ret < 0)
av_log(avctx, AV_LOG_WARNING, "Failed to parse extradata\n");
}
@@ -2793,7 +2793,7 @@ AVCodec ff_mpeg4_vdpau_decoder = {
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MPEG4,
.priv_data_size = sizeof(Mpeg4DecContext),
.priv_data_size = sizeof(MpegEncContext),
.init = decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
@@ -73,21 +73,20 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
if (i > 4)
s->header_count = -2;
} else {
int header_threshold = avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec_id;
if((state&SAME_HEADER_MASK) != (s->header&SAME_HEADER_MASK) && s->header)
s->header_count= -3;
s->header= state;
s->header_count++;
s->frame_size = ret-4;

if (s->header_count > header_threshold) {
if (s->header_count > 0 + (avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec_id)) {
avctx->sample_rate= sr;
avctx->channels = channels;
s1->duration = frame_size;
avctx->codec_id = codec_id;
if (s->no_bitrate || !avctx->bit_rate) {
s->no_bitrate = 1;
avctx->bit_rate += (bit_rate - avctx->bit_rate) / (s->header_count - header_threshold);
avctx->bit_rate += (bit_rate - avctx->bit_rate) / s->header_count;
}
}
break;
@@ -851,7 +851,7 @@ extern const uint8_t ff_aic_dc_scale_table[32];
extern const uint8_t ff_h263_chroma_qscale_table[32];

/* rv10.c */
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
int ff_rv_decode_dc(MpegEncContext *s, int n);
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number);
@@ -389,18 +389,18 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
switch(avctx->codec_id) {
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
break;
case AV_CODEC_ID_MPEG4:
case AV_CODEC_ID_MSMPEG4V1:
case AV_CODEC_ID_MSMPEG4V2:
case AV_CODEC_ID_MSMPEG4V3:
if (avctx->rc_max_rate >= 15000000) {
avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
} else if(avctx->rc_max_rate >= 2000000) {
avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
} else if(avctx->rc_max_rate >= 384000) {
avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
} else
avctx->rc_buffer_size = 40;
avctx->rc_buffer_size *= 16384;

@@ -3657,11 +3657,8 @@ static int encode_picture(MpegEncContext *s, int picture_number)
ff_msmpeg4_encode_picture_header(s, picture_number);
else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
ff_mpeg4_encode_picture_header(s, picture_number);
else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
ret = ff_rv10_encode_picture_header(s, picture_number);
if (ret < 0)
return ret;
}
else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
ff_rv10_encode_picture_header(s, picture_number);
else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
ff_rv20_encode_picture_header(s, picture_number);
else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
@@ -178,7 +178,7 @@ static void gmc_motion(MpegEncContext *s,
s->sprite_delta[0][0], s->sprite_delta[0][1],
s->sprite_delta[1][0], s->sprite_delta[1][1],
a + 1, (1 << (2 * a + 1)) - s->no_rounding,
(s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
s->h_edge_pos >> 1, s->v_edge_pos >> 1);

ptr = ref_picture[2];
s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,

@@ -186,7 +186,7 @@ static void gmc_motion(MpegEncContext *s,
s->sprite_delta[0][0], s->sprite_delta[0][1],
s->sprite_delta[1][0], s->sprite_delta[1][1],
a + 1, (1 << (2 * a + 1)) - s->no_rounding,
(s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
}

static inline int hpel_motion(MpegEncContext *s,
Some files were not shown because too many files have changed in this diff.