Compare commits
214 Commits
| SHA1 |
|---|
| d0599a3516 |
| 742d7e9a6e |
| eb6f2a183a |
| 1e86b7108e |
| 4be1cc7b1d |
| 61dbd3f3d0 |
| 38d6ff31b7 |
| b0cd6fb590 |
| 50fd06ea32 |
| 808d5444c4 |
| 6915dd49c7 |
| c657b08fd7 |
| 6d14bea8b5 |
| 749cd89ca9 |
| 2b408d257f |
| 8d853dc341 |
| 86960b1101 |
| 94354e368d |
| 93a0682b1d |
| 0e16c3843a |
| 819955f0c6 |
| b36bda3c82 |
| 25b8d52fdd |
| 1a2aaa7497 |
| f18fc45d18 |
| 2684ff3573 |
| ea0f616a57 |
| 51b911e948 |
| bb508ddb8b |
| 9246eb1ec5 |
| c5b2ef3bdf |
| 07df052d8d |
| 9bb7e2bd90 |
| 90fa2460c0 |
| 50f5037947 |
| 21533730fc |
| 032476f830 |
| 57c7922331 |
| 73dd8f0a24 |
| 55637b2e5e |
| e6bc1fe10c |
| e6b18f5700 |
| 3791436eb5 |
| 61147f58ab |
| 29e435ca33 |
| 0540d5c5fc |
| 7f97231d97 |
| 4005a71def |
| 429347afa7 |
| a81b6a662a |
| 6168fe32f1 |
| 711374b626 |
| 9a63a36dc6 |
| db1a99a209 |
| fe8c81a0f3 |
| 728051d9b1 |
| 99d58a0da4 |
| 9783f9fb98 |
| 2ed0a77b7b |
| 804e1e1610 |
| 99d2d1404c |
| cb1c9294f3 |
| 84341627d7 |
| 4f694182e0 |
| a2cfb784fb |
| 6faf18acbd |
| 96807933d8 |
| 4ef32aa2a6 |
| 727730e279 |
| 5a829ee69e |
| 7fe22c3fe6 |
| c7565b143c |
| 303ecfc373 |
| 1456ed2dd5 |
| 79c9d9b134 |
| d0aa3d13fa |
| e5cc73e0a5 |
| 3429714f3d |
| ee902d3d2d |
| 9bc62da980 |
| 2a6d16ba5f |
| c7e967a7cb |
| 5262c88bb0 |
| bd78b9416d |
| 48ae72e501 |
| 0f671dfeac |
| 345962121d |
| 0cbf53bdf5 |
| b20409c690 |
| 7997ec54c9 |
| e805826903 |
| 137a000377 |
| 78c314e39e |
| 756d85dc14 |
| eeab3e1b20 |
| a75787a71a |
| 09425294c9 |
| 3572eaaf02 |
| 25da8d84a4 |
| 0f642909d8 |
| 6252e9141b |
| c65a731b6f |
| 58096b70fa |
| 0ae93844d0 |
| 657dc91b44 |
| 352d17086f |
| 193440f566 |
| 563e542b31 |
| b0b6d8de7e |
| 2f5c5767d1 |
| 979a54ed18 |
| d11bca8043 |
| a3ef410b9c |
| 260e5c6dbe |
| bac6554c74 |
| 1e366c15ed |
| 675fb3a8af |
| 25fc0faccb |
| 164083434e |
| 8026606497 |
| 6ac8ac0109 |
| f07e2ff697 |
| b62b3e1a25 |
| a45b8af839 |
| a443b48ccf |
| a45402d4c0 |
| 0bdc64e8b9 |
| 5fbf63ea39 |
| 4d74bb24e3 |
| 7f8d0cf93a |
| 28fba55306 |
| 7caee17204 |
| edec2a4da3 |
| 65074a5dae |
| e3b6144e0c |
| 8acbba0ec3 |
| 9e9cde6afa |
| 4f8814964c |
| 8a6770a214 |
| 763e6ecf83 |
| 07d508e4f5 |
| b089b58250 |
| a7dd37169c |
| 3032291b3a |
| f9bbc26e69 |
| b895e29941 |
| 3d71024f8a |
| b834dc14da |
| d0041dc8c4 |
| b3c082412c |
| 918ed73b70 |
| 2791eba1d7 |
| 0d3a07852c |
| c7a2ac6b6b |
| 0ce35b8ce8 |
| 25312a427b |
| 9143ab0e5a |
| 022bfd3dd4 |
| e0a12b3dc3 |
| 252ba4a925 |
| 5bb31e856d |
| eac0451e47 |
| 68c6347089 |
| bcd7f35717 |
| 93cbdcd4d3 |
| 959ab06c68 |
| 46db3121c6 |
| c96c755320 |
| 9f8cdd520b |
| f5631d23e0 |
| 50f4543c6b |
| 1344e91f33 |
| f13e6ec7a6 |
| bf2c9e1ad4 |
| 0663aab1d9 |
| e911f125fc |
| 5aead5ee05 |
| 3a5b749d7c |
| 4b4d0b0290 |
| c9b25252cb |
| 4400385d5f |
| d85e25fe0b |
| 3d1972d182 |
| 71b1abe638 |
| 991ef3a67e |
| b850b01533 |
| 23a17b4a3d |
| 3a8ad4b878 |
| b85a939633 |
| 7e130ca5b4 |
| f295f9488a |
| 2c01dd2ea5 |
| 5e26152ee4 |
| 135c733125 |
| 3b3a3c3d44 |
| 13244abcd3 |
| 9783d5bfda |
| 0e4efad93c |
| 0103bc67fd |
| d5af3fb1c5 |
| 6ec5a199ea |
| f7b5366657 |
| 542332e523 |
| aa24dd487f |
| 3bcd1daad7 |
| 91f88eab32 |
| aac467ae17 |
| 968e733b2e |
| 192d46e6d1 |
| bb8614cb7b |
| d9bdf7d9ae |
| a588e1e560 |
| 36e7385d0e |
| 5108323aa9 |
Changelog (156)
@@ -1,7 +1,159 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.
 
-version <next>:
+version 2.5.5:
+- vp9: make above buffer pointer 32-byte aligned.
+- avcodec/dnxhddec: Check that the frame is interlaced before using cur_field
+- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
+- avformat/mov: Check for string truncation in mov_open_dref()
+- avformat/mov: Use sizeof(filename) instead of a literal number
+- eac3dec: fix scaling
+- ac3_fixed: fix computation of spx_noise_blend
+- ac3_fixed: fix out-of-bound read
+- ac3dec_fixed: always use the USE_FIXED=1 variant of the AC3DecodeContext
+- avcodec/012v: redesign main loop
+- avcodec/012v: Check dimensions more completely
+- asfenc: fix leaking asf->index_ptr on error
+- avcodec/options_table: remove extradata_size from the AVOptions table
+- ffmdec: limit the backward seek to the last resync position
+- ffmdec: make sure the time base is valid
+- ffmdec: fix infinite loop at EOF
+- ffmdec: initialize f_cprv, f_stvi and f_stau
+- avformat/rm: limit packet size
+- avcodec/webp: validate the distance prefix code
+- avcodec/rv10: check size of s->mb_width * s->mb_height
+- eamad: check for out of bounds read
+- mdec: check for out of bounds read
+- arm: Suppress tags about used cpu arch and extensions
+- aic: Fix decoding files with odd dimensions
+- avcodec/tiff: move bpp check to after "end:"
+- mxfdec: Fix the error handling for when strftime fails
+- avcodec/opusdec: Fix delayed sample value
+- avcodec/opusdec: Clear out pointers per packet
+- avcodec/utils: Align YUV411 by as much as the other YUV variants
+- vp9: fix segmentation map retention with threading enabled.
+- webp: ensure that each transform is only used once
+- doc/protocols/tcp: fix units of listen_timeout option value, from microseconds to milliseconds
+- fix VP9 packet decoder returning 0 instead of the used data size
+- avformat/flvenc: check that the codec_tag fits in the available bits
+- avcodec/utils: use correct printf specifier in ff_set_sar
+- avutil/imgutils: correctly check for negative SAR components
+- swscale/utils: clear formatConvBuffer on allocation
+- avformat/bit: only accept the g729 codec and 1 channel
+- avformat/bit: check that pkt->size is 10 in write_packet
+- avformat/adxdec: check avctx->channels for invalid values
+- avformat/adxdec: set avctx->channels in adx_read_header
+- Fix buffer_size argument to init_put_bits() in multiple encoders.
+- mips/acelp_filters: fix incorrect register constraint
+- avcodec/hevc_ps: Sanity checks for some log2_* values
+- avcodec/zmbv: Check len before reading in decode_frame()
+- avcodec/h264: Only reinit quant tables if a new PPS is allowed
+- avcodec/snowdec: Fix ref value check
+- swscale/utils: More carefully merge and clear coefficients outside the input
+- avcodec/a64multienc: Assert that the Packet size does not grow
+- avcodec/a64multienc: simplify frame handling code
+- avcodec/a64multienc: fix use of uninitialized values in to_meta_with_crop
+- avcodec/a64multienc: initialize mc_meta_charset to zero
+- avcodec/a64multienc: don't set incorrect packet size
+- avcodec/a64multienc: use av_frame_ref instead of copying the frame
+- avcodec/x86/mlpdsp_init: Simplify mlp_filter_channel_x86()
+- h264: initialize H264Context.avctx in init_thread_copy
+- wtvdec: fix integer overflow resulting in errors with large files
+- avcodec/gif: fix off by one in column offsetting finding
+
+
+version 2.5.4:
+- avcodec/arm/videodsp_armv5te: Fix linking failure with shared libs
+- avcodec/mjpegdec: Skip blocks which are outside the visible area
+- avcodec/h264_slice: ignore SAR changes in slices after the first
+- avcodec/h264_slice: Check picture structure before setting the related fields
+- avcodec/h264_slice: Do not change frame_num after the first slice
+- avutil/opt: Fix type used to access AV_OPT_TYPE_SAMPLE_FMT
+- avutil/opt: Fix types used to access AV_OPT_TYPE_PIXEL_FMT
+- avcodec/h264: Be more strict on rejecting pps/sps changes
+- avcodec/h264: Be more strict on rejecting pps_id changes
+- avcodec/h264_ps: More completely check the bit depths
+- avformat/thp: Check av_get_packet() for failure not only for partial output
+- swscale/utils: Limit filter shifting so as not to read from prior the array
+- avcodec/mpegvideo_motion: Fix gmc chroma dimensions
+- avcodec/mjpegdec: Check number of components for JPEG-LS
+- avcodec/mjpegdec: Check escape sequence validity
+- avformat/mpc8: Use uint64_t in *_get_v() to avoid undefined behavior
+- avformat/mpc8: fix broken pointer math
+- avformat/mpc8: fix hang with fuzzed file
+- avformat/tta: fix crash with corrupted files
+- avcodec/ppc/idctdsp.c: POWER LE support in idct_add_altivec()
+- swscale/input: fix rgba64 alpha non native
+- swscale/input: Fix alpha of YA16 input
+- libavcodec/ppc/mpegvideoencdsp.c: fix stack smashing in pix_norm1_altivec() and pix_sum_altivec()
+- avformat/rmdec: Check for overflow in ff_rm_read_mdpr_codecdata()
+- avformat/mpeg: do not count PES packets inside PES packets during probing
+- hevc: always clip luma_log2_weight_denom
+- rtpdec_h263_rfc2190: Clear the stored bits if discarding buffered data
+- aacenc: correctly check returned value
+- swscale: check memory allocations
+- opt: check memory allocation
+- avformat/utils: check for malloc failure
+- avcodec/flac_parser: fix handling EOF if no headers are found
+- avfilter/vf_framepack: Check and update frame_rate
+- vp8: improve memory allocation checks
+- configure: enable vsx together with altivec for ppc64el
+- avcodec/hevc: Fix handling of skipped_bytes() reallocation failures
+- qpeg: avoid pointless invalid memcpy()
+
+
+version 2.5.3:
+- vp9: fix parser return values in error case
+- ffmpeg: Clear error message array at init.
+- avcodec/dvdsubdec: fix accessing dangling pointers
+- avcodec/dvdsubdec: error on bitmaps with size 0
+- cmdutils: Use 64bit for file size/offset related variable in cmdutils_read_file()
+- mov: Fix negative size calculation in mov_read_default().
+- avformat/mov: fix integer overflow in mov_read_udta_string()
+- mov: Fix overflow and error handling in read_tfra().
+- mov: Avoid overflow with mov_metadata_raw()
+- avcodec/dvdsubdec: fix out of bounds accesses
+- avfilter/vf_sab: fix filtering tiny images
+- avformat/flvdec: Increase string array size
+- avformat/flvdec: do not inject dts=0 metadata packets which failed to be parsed into a new data stream
+- avformat/cdxl: Fix integer overflow of image_size
+- libavformat: Build hevc.o when building the RTP muxer
+
+version 2.5.2:
+- avcodec/indeo3: ensure offsets are non negative
+- avcodec/h264: Check *log2_weight_denom
+- avcodec/hevc_ps: Check diff_cu_qp_delta_depth
+- avcodec/h264: Clear delayed_pic on deallocation
+- avcodec/hevc: clear filter_slice_edges() on allocation
+- avcodec/dcadec: Check that the added xch channel isnt already there
+- avcodec/indeo3: use signed variables to avoid underflow
+- swscale: increase yuv2rgb table headroom
+- avformat/mov: fix integer overflow of size
+- avformat/mov: check atom nesting depth
+- avcodec/utvideodec: Fix handling of slice_height=0
+- avcodec/xface: correct the XFACE_MAX_* values
+- avcodec/vmdvideo: Check len before using it in method 3
+- configure: create the tests directory like the doc directory
+- mmvideo: check frame dimensions
+- jvdec: check frame dimensions
+
+version 2.5.1:
+- lavu/frame: fix malloc error path in av_frame_copy_props()
+- avformat/aviobuf: Check that avio_seek() target is non negative
+- swresample/soxr_resample: fix error handling
+- avformat/flvdec: fix potential use of uninitialized variables
+- avformat/crypto: fix key vs iv typo
+- configure: use use_pkg_config() instead of check_pkg_config() for libsmbclient
+- avcodec/ppc/vp3dsp_altivec: POWER LE support to vp3_idct_add_altivec()
+- avformat/matroskadec: fix handling of recursive SeekHead elements
+- doc/examples/filtering_video: fix frame rate
+- avcodec/mpegaudiodec_template: only allocate fdsp when its used
+- doc/examples/transcoding: check encoder before using it
+- update MAINTAINERS file
+- POWER LE support in put_vp8_epel_h_altivec_core() put_vp8_epel_v_altivec_core() put_vp8_pixels16_altivec()
+- POWER LE support in vc1_inv_trans_8x4_altivec()
+
+version 2.5:
 - HEVC/H.265 RTP payload format (draft v6) packetizer
 - SUP/PGS subtitle demuxer
 - ffprobe -show_pixel_formats option
@@ -16,7 +168,7 @@ version <next>:
 - creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
 - WebP muxer with animated WebP support
 - zygoaudio decoding support
-- APNG demuxer
+- APNG decoder and demuxer
 - postproc visualization support
 
 
@@ -537,6 +537,7 @@ x86 Michael Niedermayer
 Releases
 ========
 
+2.5 Michael Niedermayer
 2.4 Michael Niedermayer
 2.2 Michael Niedermayer
 1.2 Michael Niedermayer
Makefile (2)
@@ -112,7 +112,7 @@ endef
 
 $(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))
 
-ffprobe.o cmdutils.o : libavutil/ffversion.h
+ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
 
 $(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
 	$(CP) $< $@
@@ -2,9 +2,13 @@
 │ RELEASE NOTES for FFmpeg 2.5 "Bohr" │
 └────────────────────────────────────────┘
 
-The FFmpeg Project proudly presents FFmpeg 2.5 "Bohr", just 2.5 months
-after the release of 2.4. Since this wasn't a long time ago, the Changelog
-is a bit short this time.
+The FFmpeg Project proudly presents FFmpeg 2.5 "Bohr", 2.5 months after the
+release of 2.4.
+
+The most important new features are AVFoundation screen-grabbing support,
+animated WebP decoding support, and Animated PNG support. In addition, many
+exciting features for video streaming are also implemented, including MPEG-
+DASH fragmenting muxer, HEVC RTP payload muxer, and UDP Lite support.
 
 As usual, if you have any question on this release or any FFmpeg related
 topic, feel free to join us on the #ffmpeg IRC channel (on
@@ -56,6 +60,7 @@
 • libutvideo YUV 4:2:2 10bit support
 • animated WebP decoding support
 • zygoaudio decoding support
+• APNG decoder
 
 ┌────────────────────────────┐
 │ libavdevice │
@@ -72,7 +77,8 @@
 • SUP/PGS subtitle demuxer
 • STL subtitle demuxer
 • UDP-Lite support (RFC 3828)
-• creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
+• MPEG-DASH segmenting muxer, which allows creating DASH compatible
+  fragmented MP4
 • WebP muxer
 • APNG demuxer
 
@@ -93,7 +99,3 @@
 └────────────────────────────┘
 
 • visualization support
-
-┌────────────────────────────┐
-│ ⚠ Behaviour changes │
-└────────────────────────────┘
@@ -1860,7 +1860,7 @@ int read_yesno(void)
 
 int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
 {
-    int ret;
+    int64_t ret;
     FILE *f = av_fopen_utf8(filename, "rb");
 
     if (!f) {
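For context, the int to int64_t change above matters because a file size read into a 32-bit int overflows past 2 GiB, after which every later size check misbehaves. A minimal sketch of the failure mode, using only standard C (the variable names are illustrative, not taken from cmdutils.c):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* e.g. the result of ftello()/avio_size() for a 3 GiB file */
    int64_t real_size = 3LL * 1024 * 1024 * 1024;
    int     narrowed  = (int)real_size;   /* overflows a 32-bit int */

    printf("64-bit size: %lld, after narrowing to int: %d\n",
           (long long)real_size, narrowed);
    return 0;
}
```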
configure (15, vendored)
@@ -1635,7 +1635,6 @@ HEADERS_LIST="
     asm_types_h
     cdio_paranoia_h
     cdio_paranoia_paranoia_h
-    CL_cl_h
     dev_bktr_ioctl_bt848_h
     dev_bktr_ioctl_meteor_h
     dev_ic_bt8xx_h
@@ -1761,6 +1760,7 @@ SYSTEM_FUNCS="
 TOOLCHAIN_FEATURES="
     as_dn_directive
     as_func
+    as_object_arch
     asm_mod_q
     attribute_may_alias
     attribute_packed
@@ -4413,7 +4413,7 @@ unsigned int endian = 'B' << 24 | 'I' << 16 | 'G' << 8 | 'E';
 EOF
 od -t x1 $TMPO | grep -q '42 *49 *47 *45' && enable bigendian
 
-if [ "$cpu" = "power7" ] || [ "$cpu" = "power8" ] ;then
+if [ "$cpu" = "power7" ] || [ "$cpu" = "power8" ] || enabled ppc64; then
     if ! enabled bigendian && enabled altivec ;then
         enable vsx
     fi
@@ -4520,6 +4520,11 @@ EOF
 check_as <<EOF && enable as_dn_directive
 ra .dn d0.i16
 .unreq ra
+EOF
+
+# llvm's integrated assembler supports .object_arch from llvm 3.5
+[ "$objformat" = elf ] && check_as <<EOF && enable as_object_arch
+.object_arch armv4
 EOF
 
 [ $target_os != win32 ] && enabled_all armv6t2 shared !pic && enable_weak_pic
@@ -4756,7 +4761,6 @@ check_func_headers glob.h glob
 enabled xlib &&
     check_func_headers "X11/Xlib.h X11/extensions/Xvlib.h" XvGetPortAttribute -lXv -lX11 -lXext
 
-check_header cl/cl.h
 check_header direct.h
 check_header dlfcn.h
 check_header dxva.h
@@ -4883,7 +4887,7 @@ enabled libquvi && require_pkg_config libquvi quvi/quvi.h quvi_init
 enabled librtmp && require_pkg_config librtmp librtmp/rtmp.h RTMP_Socket
 enabled libschroedinger && require_pkg_config schroedinger-1.0 schroedinger/schro.h schro_init
 enabled libshine && require_pkg_config shine shine/layer3.h shine_encode_buffer
-enabled libsmbclient && { check_pkg_config smbclient libsmbclient.h smbc_init ||
+enabled libsmbclient && { use_pkg_config smbclient libsmbclient.h smbc_init ||
                           require smbclient libsmbclient.h smbc_init -lsmbclient; }
 enabled libsoxr && require libsoxr soxr.h soxr_create -lsoxr
 enabled libssh && require_pkg_config libssh libssh/sftp.h sftp_init
@@ -5678,7 +5682,7 @@ cat > $TMPH <<EOF
 #define FFMPEG_CONFIG_H
 #define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
 #define FFMPEG_LICENSE "$(c_escape $license)"
-#define CONFIG_THIS_YEAR 2014
+#define CONFIG_THIS_YEAR 2015
 #define FFMPEG_DATADIR "$(eval c_escape $datadir)"
 #define AVCONV_DATADIR "$(eval c_escape $datadir)"
 #define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"
@@ -5706,6 +5710,7 @@ enabled getenv || echo "#define getenv(x) NULL" >> $TMPH
 
 
 mkdir -p doc
+mkdir -p tests
 echo "@c auto-generated by configure" > doc/config.texi
 
 print_config ARCH_ "$config_files" $ARCH_LIST
@@ -200,7 +200,7 @@ API changes, most recent first:
   Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
   it
 
-2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
+2014-06-10 - 5482780 - lavf 55.43.100 - avformat.h
   New field int64_t max_analyze_duration2 instead of deprecated
   int max_analyze_duration.
 
@@ -224,7 +224,7 @@ API changes, most recent first:
   Add strict_std_compliance and related AVOptions to support experimental
   muxing.
 
-2014-05-26 - xxxxxxx - lavu 52.87.100 - threadmessage.h
+2014-05-26 - 55cc60c - lavu 52.87.100 - threadmessage.h
   Add thread message queue API.
 
 2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h
@@ -234,7 +234,7 @@ API changes, most recent first:
   Add av_stream_get_side_data() to access stream-level side data
   in the same way as av_packet_get_side_data().
 
-2014-05-xx - xxxxxxx - lavu 52.86.100 - fifo.h
+2014-05-20 - 7336e39 - lavu 52.86.100 - fifo.h
   Add av_fifo_alloc_array() function.
 
 2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h
@@ -266,7 +266,7 @@ API changes, most recent first:
 2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
   Add AV_PIX_FMT_VDA for new-style VDA acceleration.
 
-2014-05-xx - xxxxxxx - lavu 52.82.100 - fifo.h
+2014-05-07 - 351f611 - lavu 52.82.100 - fifo.h
   Add av_fifo_freep() function.
 
 2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
@@ -288,10 +288,14 @@ API changes, most recent first:
   Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
   reference-counted frames to encoders.
 
+2014-04-30 - 617e866 - lavu 52.81.100 - pixdesc.h
+  Add av_find_best_pix_fmt_of_2(), av_get_pix_fmt_loss()
+  Deprecate avcodec_get_pix_fmt_loss(), avcodec_find_best_pix_fmt_of_2()
+
 2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
   Add AVCodecDescriptor.mime_types field.
 
-2014-04-29 - xxxxxxx - lavu 52.80.0 - hash.h
+2014-04-29 - b804eb4 - lavu 52.80.100 - hash.h
   Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().
 
 2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h
@@ -303,7 +307,7 @@ API changes, most recent first:
 2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
   Add AV_CRC_16_ANSI_LE crc variant.
 
-2014-04-XX - xxxxxxx - lavf xx.xx.1xx - avformat.h
+2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
   Add av_format_inject_global_side_data()
 
 2014-04-12 - 4f698be - lavu 52.76.100 - log.h
@@ -383,7 +387,7 @@ API changes, most recent first:
 2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
   Deprecate unused AV_OPT_FLAG_METADATA.
 
-2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
+2014-02-16 - 81c3f81 - lavd 55.10.100 - avdevice.h
   Add avdevice_list_devices() and avdevice_free_list_devices()
 
 2014-02-16 - db3c970 - lavf 55.33.100 - avio.h
@@ -424,7 +428,7 @@ API changes, most recent first:
 2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
   Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().
 
-2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
+2014-01-19 - 3532dd5 - lavu 52.63.100 - rational.h
   Add av_make_q() function.
 
 2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER =
+PROJECT_NUMBER = 2.5.5
 
 # With the PROJECT_LOGO tag one can specify a logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
@@ -29,6 +29,7 @@ OBJS=$(addsuffix .o,$(EXAMPLES))
 
 # the following examples make explicit use of the math library
 avcodec: LDLIBS += -lm
+decoding_encoding: LDLIBS += -lm
 muxing: LDLIBS += -lm
 resampling_audio: LDLIBS += -lm
 
@@ -90,6 +90,7 @@ static int init_filters(const char *filters_descr)
     AVFilter *buffersink = avfilter_get_by_name("buffersink");
     AVFilterInOut *outputs = avfilter_inout_alloc();
     AVFilterInOut *inputs = avfilter_inout_alloc();
+    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
     enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
 
     filter_graph = avfilter_graph_alloc();
@@ -102,7 +103,7 @@ static int init_filters(const char *filters_descr)
     snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
-            dec_ctx->time_base.num, dec_ctx->time_base.den,
+            time_base.num, time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
 
     ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
@@ -116,6 +116,10 @@ static int open_output_file(const char *filename)
             || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
             /* in this example, we choose transcoding to same codec */
             encoder = avcodec_find_encoder(dec_ctx->codec_id);
+            if (!encoder) {
+                av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
+                return AVERROR_INVALIDDATA;
+            }
 
             /* In this example, we transcode to same properties (picture size,
              * sample rate etc.). These properties can be changed for output
@@ -298,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
 @code{concat}} protocol designed specifically for that, with examples in the
 documentation.
 
-A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
+A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
 video by merely concatenating the files containing them.
 
 Hence you may concatenate your multimedia files by first transcoding them to
@@ -71,7 +71,7 @@ the HTTP server (configured through the @option{HTTPPort} option), and
 configuration file.
 
 Each feed is associated to a file which is stored on disk. This stored
-file is used to allow to send pre-recorded data to a player as fast as
+file is used to send pre-recorded data to a player as fast as
 possible when new content is added in real-time to the stream.
 
 A "live-stream" or "stream" is a resource published by
@@ -3378,7 +3378,7 @@ Set number overlapping pixels for each block. Since the filter can be slow, you
 may want to reduce this value, at the cost of a less effective filter and the
 risk of various artefacts.
 
-If the overlapping value doesn't allow to process the whole input width or
+If the overlapping value doesn't permit processing the whole input width or
 height, a warning will be displayed and according borders won't be denoised.
 
 Default value is @var{blocksize}-1, which is the best possible setting.
@@ -23,7 +23,7 @@ Reduce buffering.
 
 @item probesize @var{integer} (@emph{input})
 Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value will allow to detect more
+stream information. A higher value will enable detecting more
 information in case it is dispersed into the stream, but will increase
 latency. Must be an integer not lesser than 32. It is 5000000 by default.
 
@@ -67,7 +67,7 @@ Default is 0.
 
 @item analyzeduration @var{integer} (@emph{input})
 Specify how many microseconds are analyzed to probe the input. A
-higher value will allow to detect more accurate information, but will
+higher value will enable detecting more accurate information, but will
 increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
 
 @item cryptokey @var{hexadecimal string} (@emph{input})
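For context, the probesize and analyzeduration options documented in the hunk above are demuxer options that can also be raised programmatically. A minimal sketch, not part of this diff, of passing them to the demuxer through an options dictionary (the file path and sizes are placeholders):

```c
#include <libavformat/avformat.h>

/* Open an input with larger probing limits so stream info is detected
 * even when it is dispersed far into the stream. */
int probe_more(const char *path)
{
    AVFormatContext *fmt = NULL;
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "probesize",       "10000000", 0); /* bytes */
    av_dict_set(&opts, "analyzeduration", "10000000", 0); /* microseconds */

    ret = avformat_open_input(&fmt, path, NULL, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        return ret;

    ret = avformat_find_stream_info(fmt, NULL);
    avformat_close_input(&fmt);
    return ret;
}
```

The trade-off is exactly the one the documentation states: larger values detect more information but increase startup latency.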
@@ -1,7 +1,7 @@
 @chapter Input Devices
 @c man begin INPUT DEVICES
 
-Input devices are configured elements in FFmpeg which allow to access
+Input devices are configured elements in FFmpeg which enable accessing
 the data coming from a multimedia device attached to your system.
 
 When you configure your FFmpeg build, all the supported input devices
@@ -1081,8 +1081,8 @@ Set raise error timeout, expressed in microseconds.
 This option is only relevant in read mode: if no data arrived in more
 than this time interval, raise error.
 
-@item listen_timeout=@var{microseconds}
-Set listen timeout, expressed in microseconds.
+@item listen_timeout=@var{milliseconds}
+Set listen timeout, expressed in milliseconds.
 @end table
 
 The following example shows how to setup a listening TCP connection
@@ -844,7 +844,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
 Return 1.0 if @var{x} is NAN, 0.0 otherwise.
 
 @item ld(var)
-Allow to load the value of the internal variable with number
+Load the value of the internal variable with number
 @var{var}, which was previously stored with st(@var{var}, @var{expr}).
 The function returns the loaded value.
 
@@ -912,7 +912,7 @@ Compute the square root of @var{expr}. This is equivalent to
 Compute expression @code{1/(1 + exp(4*x))}.
 
 @item st(var, expr)
-Allow to store the value of the expression @var{expr} in an internal
+Store the value of the expression @var{expr} in an internal
 variable. @var{var} specifies the number of the variable where to
 store the value, and it is a value ranging from 0 to 9. The function
 returns the value stored in the internal variable.
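As a side note, the st() and ld() expression functions documented in the hunk above can be exercised directly through libavutil's expression evaluator. A small sketch, not part of this diff; the expression string and names are illustrative:

```c
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    double v = 0.0;
    /* st(0, 4) stores 4 in variable 0 and returns it (nonzero), so the
     * second argument of if() is evaluated: ld(0) + 1 == 5. */
    int ret = av_expr_parse_and_eval(&v, "if(st(0, 4), ld(0) + 1)",
                                     NULL, NULL,              /* no named constants */
                                     NULL, NULL, NULL, NULL,  /* no custom functions */
                                     NULL, 0, NULL);
    if (ret < 0)
        return 1;
    printf("result: %f\n", v); /* 5.000000 */
    return 0;
}
```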
|
2
ffmpeg.c
2
ffmpeg.c
@ -2521,7 +2521,7 @@ static int transcode_init(void)
|
|||||||
AVFormatContext *oc;
|
AVFormatContext *oc;
|
||||||
OutputStream *ost;
|
OutputStream *ost;
|
||||||
InputStream *ist;
|
InputStream *ist;
|
||||||
char error[1024];
|
char error[1024] = {0};
|
||||||
int want_sdp = 1;
|
int want_sdp = 1;
|
||||||
|
|
||||||
for (i = 0; i < nb_filtergraphs; i++) {
|
for (i = 0; i < nb_filtergraphs; i++) {
|
||||||
|
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
 static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame, AVPacket *avpkt)
 {
-    int line = 0, ret;
+    int line, ret;
     const int width = avctx->width;
     AVFrame *pic = data;
     uint16_t *y, *u, *v;
     const uint8_t *line_end, *src = avpkt->data;
     int stride = avctx->width * 8 / 3;
 
-    if (width == 1) {
-        av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
+    if (width <= 1 || avctx->height <= 0) {
+        av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
         return AVERROR_INVALIDDATA;
     }
 
@@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
     pic->pict_type = AV_PICTURE_TYPE_I;
     pic->key_frame = 1;
 
-    y = (uint16_t *)pic->data[0];
-    u = (uint16_t *)pic->data[1];
-    v = (uint16_t *)pic->data[2];
     line_end = avpkt->data + stride;
-    while (line++ < avctx->height) {
-        while (1) {
-            uint32_t t = AV_RL32(src);
+    for (line = 0; line < avctx->height; line++) {
+        uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
+        uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
+        uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
+        int x;
+        y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+        u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+        v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+
+        for (x = 0; x < width; x += 6) {
+            uint32_t t;
+
+            if (width - x < 6 || line_end - src < 16) {
+                y = y_temp;
+                u = u_temp;
+                v = v_temp;
+            }
+
+            if (line_end - src < 4)
+                break;
+
+            t = AV_RL32(src);
             src += 4;
             *u++ = t << 6 & 0xFFC0;
             *y++ = t >> 4 & 0xFFC0;
             *v++ = t >> 14 & 0xFFC0;
 
-            if (src >= line_end - 1) {
-                *y = 0x80;
-                src++;
-                line_end += stride;
-                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+            if (line_end - src < 4)
                 break;
-            }
 
             t = AV_RL32(src);
             src += 4;
             *y++ = t << 6 & 0xFFC0;
             *u++ = t >> 4 & 0xFFC0;
             *y++ = t >> 14 & 0xFFC0;
-            if (src >= line_end - 2) {
-                if (!(width & 1)) {
-                    *y = 0x80;
-                    src += 2;
-                }
-                line_end += stride;
-                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+
+            if (line_end - src < 4)
                 break;
-            }
 
             t = AV_RL32(src);
             src += 4;
@@ -113,15 +113,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
             *y++ = t >> 4 & 0xFFC0;
             *u++ = t >> 14 & 0xFFC0;
 
-            if (src >= line_end - 1) {
-                *y = 0x80;
-                src++;
-                line_end += stride;
-                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+            if (line_end - src < 4)
                 break;
-            }
 
             t = AV_RL32(src);
             src += 4;
@@ -129,18 +122,21 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
             *v++ = t >> 4 & 0xFFC0;
             *y++ = t >> 14 & 0xFFC0;
 
-            if (src >= line_end - 2) {
-                if (width & 1) {
-                    *y = 0x80;
-                    src += 2;
-                }
-                line_end += stride;
-                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+            if (width - x < 6)
                 break;
-            }
         }
 
+        if (x < width) {
+            y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+            u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+            v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+            memcpy(y, y_temp, sizeof(*y) * (width - x));
+            memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
+            memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
+        }
+
+        line_end += stride;
+        src = line_end - stride;
     }
 
     *got_frame = 1;
@@ -210,7 +210,7 @@ OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
 OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
 OBJS-$(CONFIG_DXA_DECODER) += dxa.o
 OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
-OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
+OBJS-$(CONFIG_EAC3_DECODER) += eac3_data.o
 OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o eac3_data.o
 OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
 OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
@@ -28,6 +28,7 @@
 #include "a64tables.h"
 #include "elbg.h"
 #include "internal.h"
+#include "libavutil/avassert.h"
 #include "libavutil/common.h"
 #include "libavutil/intreadwrite.h"
 
@@ -65,7 +66,7 @@ static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
 //static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
 //static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};
 
-static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
+static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest)
 {
     int blockx, blocky, x, y;
     int luma = 0;
@@ -78,9 +79,13 @@ static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest)
         for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
             for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                 if(x < width && y < height) {
-                    /* build average over 2 pixels */
-                    luma = (src[(x + 0 + y * p->linesize[0])] +
-                            src[(x + 1 + y * p->linesize[0])]) / 2;
+                    if (x + 1 < width) {
+                        /* build average over 2 pixels */
+                        luma = (src[(x + 0 + y * p->linesize[0])] +
+                                src[(x + 1 + y * p->linesize[0])]) / 2;
+                    } else {
+                        luma = src[(x + y * p->linesize[0])];
+                    }
                     /* write blocks as linear data now so they are suitable for elbg */
                     dest[0] = luma;
                 }
@@ -186,7 +191,6 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
 static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
 {
     A64Context *c = avctx->priv_data;
-    av_frame_free(&avctx->coded_frame);
     av_freep(&c->mc_meta_charset);
     av_freep(&c->mc_best_cb);
     av_freep(&c->mc_charset);
@@ -220,7 +224,7 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
                           a64_palette[mc_colors[a]][2] * 0.11;
     }
 
-    if (!(c->mc_meta_charset = av_malloc_array(c->mc_lifetime, 32000 * sizeof(int))) ||
+    if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
        !(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
        !(c->mc_charmap = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
        !(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||
@@ -238,14 +242,6 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
     AV_WB32(avctx->extradata, c->mc_lifetime);
     AV_WB32(avctx->extradata + 16, INTERLACED);
 
-    avctx->coded_frame = av_frame_alloc();
-    if (!avctx->coded_frame) {
-        a64multi_close_encoder(avctx);
-        return AVERROR(ENOMEM);
-    }
-
-    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
-    avctx->coded_frame->key_frame = 1;
     if (!avctx->codec_tag)
          avctx->codec_tag = AV_RL32("a64m");
 
@@ -270,10 +266,9 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
 }
 
 static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
-                                 const AVFrame *pict, int *got_packet)
+                                 const AVFrame *p, int *got_packet)
 {
     A64Context *c = avctx->priv_data;
-    AVFrame *const p = avctx->coded_frame;
 
     int frame;
     int x, y;
@@ -304,7 +299,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     }
 
     /* no data, means end encoding asap */
-    if (!pict) {
+    if (!p) {
         /* all done, end encoding */
         if (!c->mc_lifetime) return 0;
         /* no more frames in queue, prepare to flush remaining frames */
@@ -317,13 +312,10 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     } else {
         /* fill up mc_meta_charset with data until lifetime exceeds */
         if (c->mc_frame_counter < c->mc_lifetime) {
-            *p = *pict;
-            p->pict_type = AV_PICTURE_TYPE_I;
-            p->key_frame = 1;
             to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
             c->mc_frame_counter++;
             if (c->next_pts == AV_NOPTS_VALUE)
-                c->next_pts = pict->pts;
+                c->next_pts = p->pts;
             /* lifetime is not reached so wait for next frame first */
             return 0;
         }
@@ -334,8 +326,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     req_size = 0;
     /* any frames to encode? */
     if (c->mc_lifetime) {
-        req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
-        if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
+        int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
+        if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
             return ret;
         buf = pkt->data;
 
@@ -351,6 +343,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 
         /* advance pointers */
         buf += charset_size;
+        req_size += charset_size;
     }
 
     /* write x frames to buf */
@@ -387,6 +380,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         pkt->pts = pkt->dts = c->next_pts;
         c->next_pts = AV_NOPTS_VALUE;
 
+        av_assert0(pkt->size >= req_size);
         pkt->size = req_size;
         pkt->flags |= AV_PKT_FLAG_KEY;
         *got_packet = !!req_size;
|
@ -2780,7 +2780,7 @@ static void spectral_to_sample(AACContext *ac)
|
|||||||
apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
|
apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
|
||||||
che->present = 0;
|
che->present = 0;
|
||||||
} else if (che) {
|
} else if (che) {
|
||||||
av_log(ac->avctx, AV_LOG_WARNING, "ChannelElement %d.%d missing \n", type, i);
|
av_log(ac->avctx, AV_LOG_VERBOSE, "ChannelElement %d.%d missing \n", type, i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -165,7 +165,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
|
|||||||
PutBitContext pb;
|
PutBitContext pb;
|
||||||
AACEncContext *s = avctx->priv_data;
|
AACEncContext *s = avctx->priv_data;
|
||||||
|
|
||||||
init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
|
init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
|
||||||
put_bits(&pb, 5, 2); //object type - AAC-LC
|
put_bits(&pb, 5, 2); //object type - AAC-LC
|
||||||
put_bits(&pb, 4, s->samplerate_index); //sample rate index
|
put_bits(&pb, 4, s->samplerate_index); //sample rate index
|
||||||
put_bits(&pb, 4, s->channels);
|
put_bits(&pb, 4, s->channels);
|
||||||
@ -753,10 +753,10 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
|
|||||||
|
|
||||||
s->chan_map = aac_chan_configs[s->channels-1];
|
s->chan_map = aac_chan_configs[s->channels-1];
|
||||||
|
|
||||||
if (ret = dsp_init(avctx, s))
|
if ((ret = dsp_init(avctx, s)) < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
if (ret = alloc_buffers(avctx, s))
|
if ((ret = alloc_buffers(avctx, s)) < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
avctx->extradata_size = 5;
|
avctx->extradata_size = 5;
|
||||||
@ -768,7 +768,8 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
|
|||||||
lengths[1] = ff_aac_num_swb_128[i];
|
lengths[1] = ff_aac_num_swb_128[i];
|
||||||
for (i = 0; i < s->chan_map[0]; i++)
|
for (i = 0; i < s->chan_map[0]; i++)
|
||||||
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
|
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
|
||||||
if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
|
if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
|
||||||
|
s->chan_map[0], grouping)) < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
s->psypp = ff_psy_preprocess_init(avctx);
|
s->psypp = ff_psy_preprocess_init(avctx);
|
||||||
s->coder = &ff_aac_coders[s->options.aac_coder];
|
s->coder = &ff_aac_coders[s->options.aac_coder];
|
||||||
|
@@ -78,6 +78,7 @@ av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
 c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon;
 c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon;
 c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon;
+if (chroma_format_idc <= 1)
 c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;

 c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16_neon;
@@ -872,7 +872,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
 start_subband += start_subband - 7;
 end_subband = get_bits(gbc, 3) + 5;
 #if USE_FIXED
-s->spx_dst_end_freq = end_freq_inv_tab[end_subband];
+s->spx_dst_end_freq = end_freq_inv_tab[end_subband-5];
 #endif
 if (end_subband > 7)
 end_subband += end_subband - 7;

@@ -939,7 +939,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
 nblend = 0;
 sblend = 0x800000;
 } else if (nratio > 0x7fffff) {
-nblend = 0x800000;
+nblend = 14529495; // sqrt(3) in FP.23
 sblend = 0;
 } else {
 nblend = fixed_sqrt(nratio, 23);
@@ -243,19 +243,19 @@ typedef struct AC3DecodeContext {
 * Parse the E-AC-3 frame header.
 * This parses both the bit stream info and audio frame header.
 */
-int ff_eac3_parse_header(AC3DecodeContext *s);
+static int ff_eac3_parse_header(AC3DecodeContext *s);

 /**
 * Decode mantissas in a single channel for the entire frame.
 * This is used when AHT mode is enabled.
 */
-void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);
+static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);

 /**
 * Apply spectral extension to each channel by copying lower frequency
 * coefficients to higher frequency bins and applying side information to
 * approximate the original high frequency signal.
 */
-void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);
+static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);

 #endif /* AVCODEC_AC3DEC_H */
@@ -164,6 +164,7 @@ static void ac3_downmix_c_fixed16(int16_t **samples, int16_t (*matrix)[2],
 }
 }

+#include "eac3dec.c"
 #include "ac3dec.c"

 static const AVOption options[] = {

@@ -28,6 +28,7 @@
 * Upmix delay samples from stereo to original channel layout.
 */
 #include "ac3dec.h"
+#include "eac3dec.c"
 #include "ac3dec.c"

 static const AVOption options[] = {
@@ -541,7 +541,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 case AV_CODEC_ID_ADPCM_IMA_QT:
 {
 PutBitContext pb;
-init_put_bits(&pb, dst, pkt_size * 8);
+init_put_bits(&pb, dst, pkt_size);

 for (ch = 0; ch < avctx->channels; ch++) {
 ADPCMChannelStatus *status = &c->status[ch];

@@ -571,7 +571,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 case AV_CODEC_ID_ADPCM_SWF:
 {
 PutBitContext pb;
-init_put_bits(&pb, dst, pkt_size * 8);
+init_put_bits(&pb, dst, pkt_size);

 n = frame->nb_samples - 1;

@@ -438,8 +438,8 @@ static av_cold int aic_decode_init(AVCodecContext *avctx)
 ctx->mb_width = FFALIGN(avctx->width, 16) >> 4;
 ctx->mb_height = FFALIGN(avctx->height, 16) >> 4;

-ctx->num_x_slices = 16;
-ctx->slice_width = ctx->mb_width / 16;
+ctx->num_x_slices = (ctx->mb_width + 15) >> 4;
+ctx->slice_width = 16;
 for (i = 1; i < 32; i++) {
 if (!(ctx->mb_width % i) && (ctx->mb_width / i < 32)) {
 ctx->slice_width = ctx->mb_width / i;
@@ -23,9 +23,10 @@
 #include "libavutil/arm/asm.S"

 function ff_prefetch_arm, export=1
+1:
 subs r2, r2, #1
 pld [r0]
 add r0, r0, r1
-bne X(ff_prefetch_arm)
+bne 1b
 bx lr
 endfunc
@@ -2360,6 +2360,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
 #else
 if (s->xch_present && !s->xch_disable) {
 #endif
+if (avctx->channel_layout & AV_CH_BACK_CENTER) {
+avpriv_request_sample(avctx, "XCh with Back center channel");
+return AVERROR_INVALIDDATA;
+}
 avctx->channel_layout |= AV_CH_BACK_CENTER;
 if (s->lfe) {
 avctx->channel_layout |= AV_CH_LOW_FREQUENCY;
@@ -363,7 +363,7 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame,
 dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444));
 dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444));

-if (ctx->cur_field) {
+if (frame->interlaced_frame && ctx->cur_field) {
 dest_y += frame->linesize[0];
 dest_u += frame->linesize[1];
 dest_v += frame->linesize[2];
@@ -39,7 +39,7 @@ typedef struct DVDSubContext
 int has_palette;
 uint8_t colormap[4];
 uint8_t alpha[256];
-uint8_t *buf;
+uint8_t buf[0x10000];
 int buf_size;
 int forced_subs_only;
 #ifdef DEBUG

@@ -108,6 +108,12 @@ static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
 int x, y, len, color;
 uint8_t *d;

+if (start >= buf_size)
+return -1;

+if (w <= 0 || h <= 0)
+return -1;

 bit_len = (buf_size - start) * 8;
 init_get_bits(&gb, buf + start, bit_len);

@@ -359,10 +365,12 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
 sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect));
 sub_header->num_rects = 1;
 sub_header->rects[0]->pict.data[0] = bitmap;
-decode_rle(bitmap, w * 2, w, (h + 1) / 2,
-buf, offset1, buf_size, is_8bit);
-decode_rle(bitmap + w, w * 2, w, h / 2,
-buf, offset2, buf_size, is_8bit);
+if (decode_rle(bitmap, w * 2, w, (h + 1) / 2,
+buf, offset1, buf_size, is_8bit) < 0)
+goto fail;
+if (decode_rle(bitmap + w, w * 2, w, h / 2,
+buf, offset2, buf_size, is_8bit) < 0)
+goto fail;
 sub_header->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
 if (is_8bit) {
 if (!yuv_palette)

@@ -501,15 +509,11 @@ static int append_to_cached_buf(AVCodecContext *avctx,
 {
 DVDSubContext *ctx = avctx->priv_data;

-if (ctx->buf_size > 0xffff - buf_size) {
+if (ctx->buf_size >= sizeof(ctx->buf) - buf_size) {
 av_log(avctx, AV_LOG_WARNING, "Attempt to reconstruct "
 "too large SPU packets aborted.\n");
-av_freep(&ctx->buf);
 return AVERROR_INVALIDDATA;
 }
-ctx->buf = av_realloc(ctx->buf, ctx->buf_size + buf_size);
-if (!ctx->buf)
-return AVERROR(ENOMEM);
 memcpy(ctx->buf + ctx->buf_size, buf, buf_size);
 ctx->buf_size += buf_size;
 return 0;

@@ -525,7 +529,7 @@ static int dvdsub_decode(AVCodecContext *avctx,
 AVSubtitle *sub = data;
 int is_menu;

-if (ctx->buf) {
+if (ctx->buf_size) {
 int ret = append_to_cached_buf(avctx, buf, buf_size);
 if (ret < 0) {
 *data_size = 0;

@@ -567,7 +571,6 @@ static int dvdsub_decode(AVCodecContext *avctx,
 }
 #endif

-av_freep(&ctx->buf);
 ctx->buf_size = 0;
 *data_size = 1;
 return buf_size;

@@ -711,7 +714,6 @@ static av_cold int dvdsub_init(AVCodecContext *avctx)
 static av_cold int dvdsub_close(AVCodecContext *avctx)
 {
 DVDSubContext *ctx = avctx->priv_data;
-av_freep(&ctx->buf);
 ctx->buf_size = 0;
 return 0;
 }
@@ -65,7 +65,7 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
 uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
 int ret;

-if (src_size < avctx->width * avctx->height * 9L / 8) {
+if (src_size < avctx->width * avctx->height * 9LL / 8) {
 av_log(avctx, AV_LOG_ERROR, "packet too small\n");
 return AVERROR_INVALIDDATA;
 }

@@ -108,7 +108,7 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
 uint8_t *Y1, *Y2, *U, *V;
 int ret;

-if (src_size < avctx->width * avctx->height * 3L / 2) {
+if (src_size < avctx->width * avctx->height * 3LL / 2) {
 av_log(avctx, AV_LOG_ERROR, "packet too small\n");
 return AVERROR_INVALIDDATA;
 }

@@ -145,7 +145,7 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
 uint8_t *Y, *U, *V;
 int ret;

-if (src_size < avctx->width * avctx->height * 3L) {
+if (src_size < avctx->width * avctx->height * 3LL) {
 av_log(avctx, AV_LOG_ERROR, "packet too small\n");
 return AVERROR_INVALIDDATA;
 }
@@ -63,7 +63,7 @@ typedef enum {

 #define EAC3_SR_CODE_REDUCED 3

-void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
+static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
 {
 int bin, bnd, ch, i;
 uint8_t wrapflag[SPX_MAX_BANDS]={1,0,}, num_copy_sections, copy_sizes[SPX_MAX_BANDS];

@@ -101,7 +101,7 @@ void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
 for (i = 0; i < num_copy_sections; i++) {
 memcpy(&s->transform_coeffs[ch][bin],
 &s->transform_coeffs[ch][s->spx_dst_start_freq],
-copy_sizes[i]*sizeof(float));
+copy_sizes[i]*sizeof(INTFLOAT));
 bin += copy_sizes[i];
 }

@@ -124,7 +124,7 @@ void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
 bin = s->spx_src_start_freq - 2;
 for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
 if (wrapflag[bnd]) {
-float *coeffs = &s->transform_coeffs[ch][bin];
+INTFLOAT *coeffs = &s->transform_coeffs[ch][bin];
 coeffs[0] *= atten_tab[0];
 coeffs[1] *= atten_tab[1];
 coeffs[2] *= atten_tab[2];

@@ -142,6 +142,11 @@ void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
 for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
 float nscale = s->spx_noise_blend[ch][bnd] * rms_energy[bnd] * (1.0f / INT32_MIN);
 float sscale = s->spx_signal_blend[ch][bnd];
+#if USE_FIXED
+// spx_noise_blend and spx_signal_blend are both FP.23
+nscale *= 1.0 / (1<<23);
+sscale *= 1.0 / (1<<23);
+#endif
 for (i = 0; i < s->spx_band_sizes[bnd]; i++) {
 float noise = nscale * (int32_t)av_lfg_get(&s->dith_state);
 s->transform_coeffs[ch][bin] *= sscale;

@@ -195,7 +200,7 @@ static void idct6(int pre_mant[6])
 pre_mant[5] = even0 - odd0;
 }

-void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
+static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
 {
 int bin, blk, gs;
 int end_bap, gaq_mode;

@@ -288,7 +293,7 @@ void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
 }
 }

-int ff_eac3_parse_header(AC3DecodeContext *s)
+static int ff_eac3_parse_header(AC3DecodeContext *s)
 {
 int i, blk, ch;
 int ac3_exponent_strategy, parse_aht_info, parse_spx_atten_data;
@@ -151,6 +151,11 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
 break;
 } else if (level != 0) {
 i += run;
+if (i > 63) {
+av_log(s->avctx, AV_LOG_ERROR,
+"ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+return -1;
+}
 j = scantable[i];
 level = (level*quant_matrix[j]) >> 4;
 level = (level-1)|1;

@@ -165,6 +170,11 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
 run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);

 i += run;
+if (i > 63) {
+av_log(s->avctx, AV_LOG_ERROR,
+"ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+return -1;
+}
 j = scantable[i];
 if (level < 0) {
 level = -level;

@@ -176,10 +186,6 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
 level = (level-1)|1;
 }
 }
-if (i > 63) {
-av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
-return -1;
-}

 block[j] = level;
 }
@@ -251,7 +251,7 @@ static void put_line(uint8_t *dst, int size, int width, const int *runs)
 PutBitContext pb;
 int run, mode = ~0, pix_left = width, run_idx = 0;

-init_put_bits(&pb, dst, size * 8);
+init_put_bits(&pb, dst, size);
 while (pix_left > 0) {
 run = runs[run_idx++];
 mode = ~mode;
@@ -697,7 +697,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
 handle_error:
 *poutbuf = NULL;
 *poutbuf_size = 0;
-return read_end - buf;
+return buf_size ? read_end - buf : 0;
 }

 static av_cold int flac_parse_init(AVCodecParserContext *c)
@@ -287,7 +287,7 @@ static int write_header(FlashSV2Context * s, uint8_t * buf, int buf_size)
 if (buf_size < 5)
 return -1;

-init_put_bits(&pb, buf, buf_size * 8);
+init_put_bits(&pb, buf, buf_size);

 put_bits(&pb, 4, (s->block_width >> 4) - 1);
 put_bits(&pb, 12, s->image_width);

@@ -151,7 +151,7 @@ static int encode_bitstream(FlashSVContext *s, const AVFrame *p, uint8_t *buf,
 int buf_pos, res;
 int pred_blocks = 0;

-init_put_bits(&pb, buf, buf_size * 8);
+init_put_bits(&pb, buf, buf_size);

 put_bits(&pb, 4, block_width / 16 - 1);
 put_bits(&pb, 12, s->image_width);
@@ -105,7 +105,7 @@ static int gif_image_write_image(AVCodecContext *avctx,
 /* skip common columns */
 while (x_start < x_end) {
 int same_column = 1;
-for (y = y_start; y < y_end; y++) {
+for (y = y_start; y <= y_end; y++) {
 if (ref[y*ref_linesize + x_start] != buf[y*linesize + x_start]) {
 same_column = 0;
 break;

@@ -117,7 +117,7 @@ static int gif_image_write_image(AVCodecContext *avctx,
 }
 while (x_end > x_start) {
 int same_column = 1;
-for (y = y_start; y < y_end; y++) {
+for (y = y_start; y <= y_end; y++) {
 if (ref[y*ref_linesize + x_end] != buf[y*linesize + x_end]) {
 same_column = 0;
 break;
@@ -391,6 +391,7 @@ void ff_h264_free_tables(H264Context *h, int free_rbsp)
 if (free_rbsp && h->DPB) {
 for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
 ff_h264_unref_picture(h, &h->DPB[i]);
+memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
 av_freep(&h->DPB);
 } else if (h->DPB) {
 for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)

@@ -726,6 +727,7 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
 memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
 memset(h->pps_buffers, 0, sizeof(h->pps_buffers));

+h->avctx = avctx;
 h->rbsp_buffer[0] = NULL;
 h->rbsp_buffer[1] = NULL;
 h->rbsp_buffer_size[0] = 0;

@@ -990,6 +992,16 @@ int ff_pred_weight_table(H264Context *h)
 h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
 if (h->sps.chroma_format_idc)
 h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);

+if (h->luma_log2_weight_denom > 7U) {
+av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", h->luma_log2_weight_denom);
+h->luma_log2_weight_denom = 0;
+}
+if (h->chroma_log2_weight_denom > 7U) {
+av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", h->chroma_log2_weight_denom);
+h->chroma_log2_weight_denom = 0;
+}

 luma_def = 1 << h->luma_log2_weight_denom;
 chroma_def = 1 << h->chroma_log2_weight_denom;

@@ -1504,8 +1516,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
 continue;

 again:
-if ( !(avctx->active_thread_type & FF_THREAD_FRAME)
-|| nals_needed >= nal_index)
+if ( (!(avctx->active_thread_type & FF_THREAD_FRAME) || nals_needed >= nal_index)
+&& !h->current_slice)
 h->au_pps_id = -1;
 /* Ignore per frame NAL unit type during extradata
 * parsing. Decoding slices is not possible in codec init
@@ -338,6 +338,7 @@ typedef struct H264Picture {
 * H264Context
 */
 typedef struct H264Context {
+AVClass *av_class;
 AVCodecContext *avctx;
 MECmpContext mecc;
 VideoDSPContext vdsp;
@@ -1282,7 +1282,7 @@ void ff_h264_init_cabac_states(H264Context *h) {
 }

 static int decode_cabac_field_decoding_flag(H264Context *h) {
-const long mbb_xy = h->mb_xy - 2L*h->mb_stride;
+const int mbb_xy = h->mb_xy - 2*h->mb_stride;

 unsigned long ctx = 0;

@@ -371,7 +371,8 @@ int ff_h264_decode_seq_parameter_set(H264Context *h)
 "Different chroma and luma bit depth");
 goto fail;
 }
-if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U) {
+if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
+sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) {
 av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
 sps->bit_depth_luma, sps->bit_depth_chroma);
 goto fail;
@@ -1305,6 +1305,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
 int must_reinit;
 int needs_reinit = 0;
 int field_pic_flag, bottom_field_flag;
+int first_slice = h == h0 && !h0->current_slice;
+int frame_num, picture_structure, droppable;
+PPS *pps;

 h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
 h->qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;

@@ -1378,18 +1381,27 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
 h0->au_pps_id, pps_id);
 return AVERROR_INVALIDDATA;
 }
-h->pps = *h0->pps_buffers[pps_id];

-if (!h0->sps_buffers[h->pps.sps_id]) {
+pps = h0->pps_buffers[pps_id];

+if (!h0->sps_buffers[pps->sps_id]) {
 av_log(h->avctx, AV_LOG_ERROR,
 "non-existing SPS %u referenced\n",
 h->pps.sps_id);
 return AVERROR_INVALIDDATA;
 }
+if (first_slice)
+h->pps = *h0->pps_buffers[pps_id];

-if (h->pps.sps_id != h->sps.sps_id ||
-h->pps.sps_id != h->current_sps_id ||
-h0->sps_buffers[h->pps.sps_id]->new) {
+if (pps->sps_id != h->sps.sps_id ||
+pps->sps_id != h->current_sps_id ||
+h0->sps_buffers[pps->sps_id]->new) {

+if (!first_slice) {
+av_log(h->avctx, AV_LOG_ERROR,
+"SPS changed in the middle of the frame\n");
+return AVERROR_INVALIDDATA;
+}

 h->sps = *h0->sps_buffers[h->pps.sps_id];

@@ -1419,13 +1431,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
 || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
 || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
 || h->cur_chroma_format_idc != h->sps.chroma_format_idc
-|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
 || h->mb_width != h->sps.mb_width
 || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
 ));
 if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0)))
 must_reinit = 1;

+if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
+must_reinit = 1;

 h->mb_width = h->sps.mb_width;
 h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
 h->mb_num = h->mb_width * h->mb_height;

@@ -1466,6 +1480,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
 return AVERROR_INVALIDDATA;
 }

+av_assert1(first_slice);

 ff_h264_flush_change(h);

 if ((ret = get_pixel_format(h, 1)) < 0)

@@ -1499,44 +1515,48 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
 }
 }

-if (h == h0 && h->dequant_coeff_pps != pps_id) {
+if (first_slice && h->dequant_coeff_pps != pps_id) {
 h->dequant_coeff_pps = pps_id;
 h264_init_dequant_tables(h);
 }

-h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
+frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
+if (!first_slice) {
+if (h0->frame_num != frame_num) {
+av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
+h0->frame_num, frame_num);
+return AVERROR_INVALIDDATA;
+}
+}

 h->mb_mbaff = 0;
 h->mb_aff_frame = 0;
 last_pic_structure = h0->picture_structure;
 last_pic_droppable = h0->droppable;
-h->droppable = h->nal_ref_idc == 0;
+droppable = h->nal_ref_idc == 0;
 if (h->sps.frame_mbs_only_flag) {
-h->picture_structure = PICT_FRAME;
+picture_structure = PICT_FRAME;
 } else {
 if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
 av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
 return -1;
 }
 field_pic_flag = get_bits1(&h->gb);

 if (field_pic_flag) {
 bottom_field_flag = get_bits1(&h->gb);
-h->picture_structure = PICT_TOP_FIELD + bottom_field_flag;
+picture_structure = PICT_TOP_FIELD + bottom_field_flag;
 } else {
-h->picture_structure = PICT_FRAME;
+picture_structure = PICT_FRAME;
 h->mb_aff_frame = h->sps.mb_aff;
 }
 }
-h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
-if (h0->current_slice != 0) {
-if (last_pic_structure != h->picture_structure ||
-last_pic_droppable != h->droppable) {
+if (h0->current_slice) {
+if (last_pic_structure != picture_structure ||
+last_pic_droppable != droppable) {
 av_log(h->avctx, AV_LOG_ERROR,
 "Changing field mode (%d -> %d) between slices is not allowed\n",
 last_pic_structure, h->picture_structure);
-h->picture_structure = last_pic_structure;
-h->droppable = last_pic_droppable;
 return AVERROR_INVALIDDATA;
 } else if (!h0->cur_pic_ptr) {
 av_log(h->avctx, AV_LOG_ERROR,

@@ -1544,7 +1564,14 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
 h0->current_slice + 1);
 return AVERROR_INVALIDDATA;
 }
-} else {
+}

+h->picture_structure = picture_structure;
+h->droppable = droppable;
+h->frame_num = frame_num;
+h->mb_field_decoding_flag = picture_structure != PICT_FRAME;

+if (h0->current_slice == 0) {
 /* Shorten frame num gaps so we don't have to allocate reference
 * frames just to throw them away */
 if (h->frame_num != h->prev_frame_num) {
@@ -108,7 +108,7 @@ static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
 if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
 goto fail;

-s->filter_slice_edges = av_malloc(ctb_count);
+s->filter_slice_edges = av_mallocz(ctb_count);
 s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
 sizeof(*s->tab_slice_address));
 s->qp_y_tab = av_malloc_array(pic_size_in_ctb,

@@ -144,7 +144,7 @@ static void pred_weight_table(HEVCContext *s, GetBitContext *gb)
 uint8_t luma_weight_l1_flag[16];
 uint8_t chroma_weight_l1_flag[16];

-s->sh.luma_log2_weight_denom = get_ue_golomb_long(gb);
+s->sh.luma_log2_weight_denom = av_clip_c(get_ue_golomb_long(gb), 0, 7);
 if (s->sps->chroma_format_idc != 0) {
 int delta = get_se_golomb(gb);
 s->sh.chroma_log2_weight_denom = av_clip(s->sh.luma_log2_weight_denom + delta, 0, 7);

@@ -2870,17 +2870,30 @@ static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)

 if (s->nals_allocated < s->nb_nals + 1) {
 int new_size = s->nals_allocated + 1;
-HEVCNAL *tmp = av_realloc_array(s->nals, new_size, sizeof(*tmp));
+void *tmp = av_realloc_array(s->nals, new_size, sizeof(*s->nals));
+ret = AVERROR(ENOMEM);
 if (!tmp) {
-ret = AVERROR(ENOMEM);
 goto fail;
 }
 s->nals = tmp;
 memset(s->nals + s->nals_allocated, 0,
-(new_size - s->nals_allocated) * sizeof(*tmp));
+(new_size - s->nals_allocated) * sizeof(*s->nals));
-av_reallocp_array(&s->skipped_bytes_nal, new_size, sizeof(*s->skipped_bytes_nal));
-av_reallocp_array(&s->skipped_bytes_pos_size_nal, new_size, sizeof(*s->skipped_bytes_pos_size_nal));
-av_reallocp_array(&s->skipped_bytes_pos_nal, new_size, sizeof(*s->skipped_bytes_pos_nal));
+tmp = av_realloc_array(s->skipped_bytes_nal, new_size, sizeof(*s->skipped_bytes_nal));
+if (!tmp)
+goto fail;
+s->skipped_bytes_nal = tmp;

+tmp = av_realloc_array(s->skipped_bytes_pos_size_nal, new_size, sizeof(*s->skipped_bytes_pos_size_nal));
+if (!tmp)
+goto fail;
+s->skipped_bytes_pos_size_nal = tmp;

+tmp = av_realloc_array(s->skipped_bytes_pos_nal, new_size, sizeof(*s->skipped_bytes_pos_nal));
+if (!tmp)
+goto fail;
+s->skipped_bytes_pos_nal = tmp;

 s->skipped_bytes_pos_size_nal[s->nals_allocated] = 1024; // initial buffer size
 s->skipped_bytes_pos_nal[s->nals_allocated] = av_malloc_array(s->skipped_bytes_pos_size_nal[s->nals_allocated], sizeof(*s->skipped_bytes_pos));
 s->nals_allocated = new_size;
@@ -298,10 +298,10 @@ typedef struct RefPicListTab {
 } RefPicListTab;

 typedef struct HEVCWindow {
-int left_offset;
-int right_offset;
-int top_offset;
-int bottom_offset;
+unsigned int left_offset;
+unsigned int right_offset;
+unsigned int top_offset;
+unsigned int bottom_offset;
 } HEVCWindow;

 typedef struct VUI {
@@ -895,11 +895,30 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
 sps->log2_max_trafo_size = log2_diff_max_min_transform_block_size +
 sps->log2_min_tb_size;

-if (sps->log2_min_tb_size >= sps->log2_min_cb_size) {
+if (sps->log2_min_cb_size < 3 || sps->log2_min_cb_size > 30) {
+av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_min_cb_size", sps->log2_min_cb_size);
+ret = AVERROR_INVALIDDATA;
+goto err;
+}

+if (sps->log2_diff_max_min_coding_block_size > 30) {
+av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_diff_max_min_coding_block_size", sps->log2_diff_max_min_coding_block_size);
+ret = AVERROR_INVALIDDATA;
+goto err;
+}

+if (sps->log2_min_tb_size >= sps->log2_min_cb_size || sps->log2_min_tb_size < 2) {
 av_log(s->avctx, AV_LOG_ERROR, "Invalid value for log2_min_tb_size");
 ret = AVERROR_INVALIDDATA;
 goto err;
 }

+if (log2_diff_max_min_transform_block_size < 0 || log2_diff_max_min_transform_block_size > 30) {
+av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_diff_max_min_transform_block_size", log2_diff_max_min_transform_block_size);
+ret = AVERROR_INVALIDDATA;
+goto err;
+}

 sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb);
 sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb);

@@ -1021,7 +1040,8 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
 (sps->output_window.left_offset + sps->output_window.right_offset);
 sps->output_height = sps->height -
 (sps->output_window.top_offset + sps->output_window.bottom_offset);
-if (sps->output_width <= 0 || sps->output_height <= 0) {
+if (sps->width <= sps->output_window.left_offset + (int64_t)sps->output_window.right_offset ||
+sps->height <= sps->output_window.top_offset + (int64_t)sps->output_window.bottom_offset) {
 av_log(s->avctx, AV_LOG_WARNING, "Invalid visible frame dimensions: %dx%d.\n",
 sps->output_width, sps->output_height);
 if (s->avctx->err_recognition & AV_EF_EXPLODE) {

@@ -1255,6 +1275,14 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
 if (pps->cu_qp_delta_enabled_flag)
 pps->diff_cu_qp_delta_depth = get_ue_golomb_long(gb);

+if (pps->diff_cu_qp_delta_depth < 0 ||
+pps->diff_cu_qp_delta_depth > sps->log2_diff_max_min_coding_block_size) {
+av_log(s->avctx, AV_LOG_ERROR, "diff_cu_qp_delta_depth %d is invalid\n",
+pps->diff_cu_qp_delta_depth);
+ret = AVERROR_INVALIDDATA;
+goto err;
+}

 pps->cb_qp_offset = get_se_golomb(gb);
 if (pps->cb_qp_offset < -12 || pps->cb_qp_offset > 12) {
 av_log(s->avctx, AV_LOG_ERROR, "pps_cb_qp_offset out of range: %d\n",
@@ -94,7 +94,7 @@ typedef struct Indeo3DecodeContext {

 int16_t width, height;
 uint32_t frame_num; ///< current frame number (zero-based)
-uint32_t data_size; ///< size of the frame data in bytes
+int data_size; ///< size of the frame data in bytes
 uint16_t frame_flags; ///< frame properties
 uint8_t cb_offset; ///< needed for selecting VQ tables
 uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary

@@ -899,7 +899,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
 GetByteContext gb;
 const uint8_t *bs_hdr;
 uint32_t frame_num, word2, check_sum, data_size;
-uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
+int y_offset, u_offset, v_offset;
+uint32_t starts[3], ends[3];
 uint16_t height, width;
 int i, j;

@@ -981,7 +982,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
 ctx->y_data_size = ends[0] - starts[0];
 ctx->v_data_size = ends[1] - starts[1];
 ctx->u_data_size = ends[2] - starts[2];
-if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
+if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
+FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
 FFMIN3(y_offset, v_offset, u_offset) < gb.buffer - bs_hdr + 16 ||
 FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
 av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
@@ -43,6 +43,13 @@ static av_cold int decode_init(AVCodecContext *avctx)
 {
 JvContext *s = avctx->priv_data;

+if (!avctx->width || !avctx->height ||
+(avctx->width & 7) || (avctx->height & 7)) {
+av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
+avctx->width, avctx->height);
+return AVERROR(EINVAL);
+}

 s->frame = av_frame_alloc();
 if (!s->frame)
 return AVERROR(ENOMEM);
@@ -88,7 +88,12 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
 if (level == 127) {
 break;
 } else if (level != 0) {
 i += run;
+if (i > 63) {
+av_log(a->avctx, AV_LOG_ERROR,
+"ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
+return AVERROR_INVALIDDATA;
+}
 j = scantable[i];
 level = (level * qscale * quant_matrix[j]) >> 3;
 level = (level ^ SHOW_SBITS(re, &a->gb, 1)) - SHOW_SBITS(re, &a->gb, 1);

@@ -98,8 +103,13 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
 run = SHOW_UBITS(re, &a->gb, 6)+1; LAST_SKIP_BITS(re, &a->gb, 6);
 UPDATE_CACHE(re, &a->gb);
 level = SHOW_SBITS(re, &a->gb, 10); SKIP_BITS(re, &a->gb, 10);
 i += run;
-j = scantable[i];
+if (i > 63) {
+av_log(a->avctx, AV_LOG_ERROR,
+"ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
+return AVERROR_INVALIDDATA;
+}
+j = scantable[i];
 if (level < 0) {
 level = -level;
 level = (level * qscale * quant_matrix[j]) >> 3;

@@ -110,10 +120,6 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
 level = (level - 1) | 1;
 }
 }
-if (i > 63) {
-av_log(a->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
-return AVERROR_INVALIDDATA;
-}

 block[j] = level;
 }
@@ -89,7 +89,7 @@ static void ff_acelp_interpolatef_mips(float *out, const float *in,
 "addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \n\t"
 "madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"

-: [v] "=&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
+: [v] "+&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
 [p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
 [in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
 [fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
@@ -561,9 +561,12 @@ unk_pixfmt:
 }
 if (s->ls) {
 s->upscale_h = s->upscale_v = 0;
-if (s->nb_components > 1)
+if (s->nb_components == 3) {
 s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
-else if (s->palette_index && s->bits <= 8)
+} else if (s->nb_components != 1) {
+av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
+return AVERROR_PATCHWELCOME;
+} else if (s->palette_index && s->bits <= 8)
 s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
 else if (s->bits <= 8)
 s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;

@@ -1248,13 +1251,18 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,

 if (s->interlaced && s->bottom_field)
 block_offset += linesize[c] >> 1;
-ptr = data[c] + block_offset;
+if ( 8*(h * mb_x + x) < s->width
+&& 8*(v * mb_y + y) < s->height) {
+ptr = data[c] + block_offset;
+} else
+ptr = NULL;
 if (!s->progressive) {
-if (copy_mb)
-mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
-linesize[c], s->avctx->lowres);
+if (copy_mb) {
+if (ptr)
+mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
+linesize[c], s->avctx->lowres);

-else {
+} else {
 s->bdsp.clear_block(s->block);
 if (decode_block(s, s->block, i,
 s->dc_index[i], s->ac_index[i],

@@ -1263,9 +1271,11 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
 "error y=%d x=%d\n", mb_y, mb_x);
 return AVERROR_INVALIDDATA;
 }
-s->idsp.idct_put(ptr, linesize[c], s->block);
-if (s->bits & 7)
-shift_output(s, ptr, linesize[c]);
+if (ptr) {
+s->idsp.idct_put(ptr, linesize[c], s->block);
+if (s->bits & 7)
+shift_output(s, ptr, linesize[c]);
+}
 }
 } else {
 int block_idx = s->block_stride[c] * (v * mb_y + y) +

@@ -1904,6 +1914,10 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
 put_bits(&pb, 8, x);
 if (x == 0xFF) {
 x = src[b++];
+if (x & 0x80) {
+av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
+x &= 0x7f;
+}
 put_bits(&pb, 7, x);
 bit_count--;
 }
@@ -61,6 +61,13 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)

 avctx->pix_fmt = AV_PIX_FMT_PAL8;

+if (!avctx->width || !avctx->height ||
+(avctx->width & 1) || (avctx->height & 1)) {
+av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
+avctx->width, avctx->height);
+return AVERROR(EINVAL);
+}

 s->frame = av_frame_alloc();
 if (!s->frame)
 return AVERROR(ENOMEM);
@@ -428,9 +428,11 @@ static av_cold int decode_init(AVCodecContext * avctx)

 s->avctx = avctx;

+#if USE_FLOATS
 s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
 if (!s->fdsp)
 return AVERROR(ENOMEM);
+#endif

 ff_mpadsp_init(&s->mpadsp);

@@ -883,7 +883,7 @@ extern const uint8_t ff_aic_dc_scale_table[32];
 extern const uint8_t ff_h263_chroma_qscale_table[32];

 /* rv10.c */
-void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
+int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
 int ff_rv_decode_dc(MpegEncContext *s, int n);
 void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number);

@@ -395,18 +395,18 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 switch(avctx->codec_id) {
 case AV_CODEC_ID_MPEG1VIDEO:
 case AV_CODEC_ID_MPEG2VIDEO:
-avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
+avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
 break;
 case AV_CODEC_ID_MPEG4:
 case AV_CODEC_ID_MSMPEG4V1:
 case AV_CODEC_ID_MSMPEG4V2:
 case AV_CODEC_ID_MSMPEG4V3:
 if (avctx->rc_max_rate >= 15000000) {
-avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
+avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
 } else if(avctx->rc_max_rate >= 2000000) {
-avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
+avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
 } else if(avctx->rc_max_rate >= 384000) {
-avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
+avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
 } else
 avctx->rc_buffer_size = 40;
 avctx->rc_buffer_size *= 16384;

@@ -3706,8 +3706,11 @@ static int encode_picture(MpegEncContext *s, int picture_number)
 ff_msmpeg4_encode_picture_header(s, picture_number);
 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
 ff_mpeg4_encode_picture_header(s, picture_number);
-else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
-ff_rv10_encode_picture_header(s, picture_number);
+else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
+ret = ff_rv10_encode_picture_header(s, picture_number);
+if (ret < 0)
+return ret;
+}
 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
 ff_rv20_encode_picture_header(s, picture_number);
 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
@ -178,7 +178,7 @@ static void gmc_motion(MpegEncContext *s,
|
|||||||
s->sprite_delta[0][0], s->sprite_delta[0][1],
|
s->sprite_delta[0][0], s->sprite_delta[0][1],
|
||||||
s->sprite_delta[1][0], s->sprite_delta[1][1],
|
s->sprite_delta[1][0], s->sprite_delta[1][1],
|
||||||
a + 1, (1 << (2 * a + 1)) - s->no_rounding,
|
a + 1, (1 << (2 * a + 1)) - s->no_rounding,
|
||||||
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
|
(s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
|
||||||
|
|
||||||
ptr = ref_picture[2];
|
ptr = ref_picture[2];
|
||||||
s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
|
s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
|
||||||
@ -186,7 +186,7 @@ static void gmc_motion(MpegEncContext *s,
|
|||||||
s->sprite_delta[0][0], s->sprite_delta[0][1],
|
s->sprite_delta[0][0], s->sprite_delta[0][1],
|
||||||
s->sprite_delta[1][0], s->sprite_delta[1][1],
|
s->sprite_delta[1][0], s->sprite_delta[1][1],
|
||||||
a + 1, (1 << (2 * a + 1)) - s->no_rounding,
|
a + 1, (1 << (2 * a + 1)) - s->no_rounding,
|
||||||
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
|
(s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int hpel_motion(MpegEncContext *s,
|
static inline int hpel_motion(MpegEncContext *s,
|
||||||
|
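Both gmc_motion hunks round the chroma edge positions up instead of truncating. For odd luma dimensions, `x >> 1` loses the last chroma column or row while `(x + 1) >> 1` keeps it. A tiny arithmetic sketch (the value is illustrative):

```c
#include <stdio.h>

int main(void)
{
    int h_edge_pos = 175;                                /* odd luma width, for illustration */
    printf("truncating:  %d\n", h_edge_pos >> 1);        /* 87 */
    printf("rounding up: %d\n", (h_edge_pos + 1) >> 1);  /* 88 */
    return 0;
}
```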
@@ -308,7 +308,7 @@ static void encode_block(NellyMoserEncodeContext *s, unsigned char *output, int

     apply_mdct(s);

-    init_put_bits(&pb, output, output_size * 8);
+    init_put_bits(&pb, output, output_size);

     i = 0;
     for (band = 0; band < NELLY_BANDS; band++) {
@@ -103,7 +103,6 @@ static const AVOption avcodec_options[] = {
 {"hex", "hex motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_HEX }, INT_MIN, INT_MAX, V|E, "me_method" },
 {"umh", "umh motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_UMH }, INT_MIN, INT_MAX, V|E, "me_method" },
 {"iter", "iter motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_ITER }, INT_MIN, INT_MAX, V|E, "me_method" },
-{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
 {"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
 {"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
 {"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
@@ -451,6 +451,14 @@ static int opus_decode_packet(AVCodecContext *avctx, void *data,
     int coded_samples = 0;
     int decoded_samples = 0;
     int i, ret;
+    int delayed_samples = 0;
+
+    for (i = 0; i < c->nb_streams; i++) {
+        OpusStreamContext *s = &c->streams[i];
+        s->out[0] =
+        s->out[1] = NULL;
+        delayed_samples = FFMAX(delayed_samples, s->delayed_samples);
+    }

     /* decode the header of the first sub-packet to find out the sample count */
     if (buf) {
@@ -464,7 +472,7 @@ static int opus_decode_packet(AVCodecContext *avctx, void *data,
         c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
     }

-    frame->nb_samples = coded_samples + c->streams[0].delayed_samples;
+    frame->nb_samples = coded_samples + delayed_samples;

     /* no input or buffered data => nothing to do */
     if (!frame->nb_samples) {
@@ -209,16 +209,26 @@ static void idct_add_altivec(uint8_t *dest, int stride, int16_t *blk)

     IDCT;

+#if HAVE_BIGENDIAN
     p0 = vec_lvsl(0, dest);
     p1 = vec_lvsl(stride, dest);
     p = vec_splat_u8(-1);
     perm0 = vec_mergeh(p, p0);
     perm1 = vec_mergeh(p, p1);
+#endif

+#if HAVE_BIGENDIAN
+#define GET_TMP2(dest, prm) \
+    tmp  = vec_ld(0, dest); \
+    tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, prm);
+#else
+#define GET_TMP2(dest, prm) \
+    tmp  = vec_vsx_ld(0, dest); \
+    tmp2 = (vec_s16) vec_mergeh(tmp, (vec_u8) zero)
+#endif

 #define ADD(dest, src, perm) \
-    /* *(uint64_t *) &tmp = *(uint64_t *) dest; */ \
-    tmp  = vec_ld(0, dest); \
-    tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, perm); \
+    GET_TMP2(dest, perm); \
     tmp3 = vec_adds(tmp2, src); \
     tmp  = vec_packsu(tmp3, tmp3); \
     vec_ste((vec_u32) tmp, 0, (unsigned int *) dest); \
@@ -55,7 +55,7 @@ static int pix_norm1_altivec(uint8_t *pix, int line_size)
     /* Sum up the four partial sums, and put the result into s. */
     sum = vec_sums((vector signed int) sv, (vector signed int) zero);
     sum = vec_splat(sum, 3);
-    vec_vsx_st(sum, 0, &s);
+    vec_ste(sum, 0, &s);
     return s;
 }
 #else
@@ -113,7 +113,7 @@ static int pix_sum_altivec(uint8_t *pix, int line_size)
     /* Sum up the four partial sums, and put the result into s. */
     sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
     sumdiffs = vec_splat(sumdiffs, 3);
-    vec_vsx_st(sumdiffs, 0, &s);
+    vec_ste(sumdiffs, 0, &s);
     return s;
 }
 #else
@@ -304,16 +304,23 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
     src2 = vec_pack(s2, sA);
     src3 = vec_pack(s3, sB);

+#if HAVE_BIGENDIAN
     p0 = vec_lvsl (0, dest);
     p1 = vec_lvsl (stride, dest);
     p = vec_splat_u8 (-1);
     perm0 = vec_mergeh (p, p0);
     perm1 = vec_mergeh (p, p1);
+#define GET_TMP2(dst, p) \
+    tmp  = vec_ld (0, dest); \
+    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), p);
+#else
+#define GET_TMP2(dst,p) \
+    tmp  = vec_vsx_ld (0, dst); \
+    tmp2 = (vector signed short)vec_mergeh (tmp, vec_splat_u8(0));
+#endif

 #define ADD(dest,src,perm) \
-    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
-    tmp = vec_ld (0, dest); \
-    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm); \
+    GET_TMP2(dest, perm); \
     tmp3 = vec_adds (tmp2, src); \
     tmp = vec_packsu (tmp3, tmp3); \
     vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest); \
@@ -32,8 +32,13 @@

 static const vec_s16 constants =
     {0, 64277, 60547, 54491, 46341, 36410, 25080, 12785};
+#if HAVE_BIGENDIAN
 static const vec_u8 interleave_high =
     {0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29};
+#else
+static const vec_u8 interleave_high =
+    {2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31};
+#endif

 #define IDCT_START \
     vec_s16 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;\
@@ -156,9 +161,18 @@ static void vp3_idct_add_altivec(uint8_t *dst, int stride, int16_t block[64])
     TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
     IDCT_1D(ADD8, SHIFT4)

-#define ADD(a)\
+#if HAVE_BIGENDIAN
+#define GET_VDST16\
     vdst = vec_ld(0, dst);\
-    vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);\
+    vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);
+#else
+#define GET_VDST16\
+    vdst = vec_vsx_ld(0,dst);\
+    vdst_16 = (vec_s16)vec_mergeh(vdst, zero_u8v);
+#endif
+
+#define ADD(a)\
+    GET_VDST16;\
     vdst_16 = vec_adds(a, vdst_16);\
     t = vec_packsu(vdst_16, vdst_16);\
     vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
@@ -59,17 +59,30 @@ static const vec_s8 h_subpel_filters_outer[3] =
     vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
     vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)

+#if HAVE_BIGENDIAN
+#define GET_PIXHL(offset) \
+    a = vec_ld((offset)-is6tap-1, src); \
+    b = vec_ld((offset)-is6tap-1+15, src); \
+    pixh = vec_perm(a, b, permh##offset); \
+    pixl = vec_perm(a, b, perml##offset)
+
+#define GET_OUTER(offset) outer = vec_perm(a, b, perm_6tap##offset)
+#else
+#define GET_PIXHL(offset) \
+    a = vec_vsx_ld((offset)-is6tap-1, src); \
+    pixh = vec_perm(a, a, perm_inner); \
+    pixl = vec_perm(a, a, vec_add(perm_inner, vec_splat_u8(4)))
+
+#define GET_OUTER(offset) outer = vec_perm(a, a, perm_outer)
+#endif

 #define FILTER_H(dstv, off) \
-    a = vec_ld((off)-is6tap-1, src); \
-    b = vec_ld((off)-is6tap-1+15, src); \
-\
-    pixh  = vec_perm(a, b, permh##off); \
-    pixl  = vec_perm(a, b, perml##off); \
+    GET_PIXHL(off); \
     filth = vec_msum(filter_inner, pixh, c64); \
     filtl = vec_msum(filter_inner, pixl, c64); \
 \
     if (is6tap) { \
-        outer = vec_perm(a, b, perm_6tap##off); \
+        GET_OUTER(off); \
         filth = vec_msum(filter_outerh, outer, filth); \
         filtl = vec_msum(filter_outerl, outer, filtl); \
     } \
@@ -84,9 +97,12 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                  int h, int mx, int w, int is6tap)
 {
     LOAD_H_SUBPEL_FILTER(mx-1);
-    vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
+#if HAVE_BIGENDIAN
+    vec_u8 align_vec0, align_vec8, permh0, permh8;
     vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
-    vec_u8 a, b, pixh, pixl, outer;
+    vec_u8 b;
+#endif
+    vec_u8 filt, a, pixh, pixl, outer;
     vec_s16 f16h, f16l;
     vec_s32 filth, filtl;

@@ -97,6 +113,7 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
     vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
     vec_u16 c7 = vec_splat_u16(7);

+#if HAVE_BIGENDIAN
     align_vec0 = vec_lvsl( -is6tap-1, src);
     align_vec8 = vec_lvsl(8-is6tap-1, src);

@@ -107,6 +124,7 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
     perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
     perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
     perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);
+#endif

     while (h --> 0) {
         FILTER_H(f16h, 0);
@@ -164,6 +182,12 @@ static const vec_u8 v_subpel_filters[7] =
     dstv = vec_adds(dstv, c64); \
     dstv = vec_sra(dstv, c7)

+#if HAVE_BIGENDIAN
+#define LOAD_HL(off, s, perm) load_with_perm_vec(off, s, perm)
+#else
+#define LOAD_HL(off, s, perm) vec_mergeh(vec_vsx_ld(off,s), vec_vsx_ld(off+8,s))
+#endif
+
 static av_always_inline
 void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                  uint8_t *src, ptrdiff_t src_stride,
@@ -175,6 +199,7 @@ void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
     vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
     vec_u16 c7 = vec_splat_u16(7);

+#if HAVE_BIGENDIAN
     // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
     // so combine this permute with the alignment permute vector
     align_vech = vec_lvsl(0, src);
@@ -183,22 +208,23 @@ void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
         perm_vec = vec_mergeh(align_vech, align_vecl);
     else
         perm_vec = vec_mergeh(align_vech, align_vech);
+#endif

     if (is6tap)
-        s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
-    s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
-    s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
-    s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
+        s0 = LOAD_HL(-2*src_stride, src, perm_vec);
+    s1 = LOAD_HL(-1*src_stride, src, perm_vec);
+    s2 = LOAD_HL( 0*src_stride, src, perm_vec);
+    s3 = LOAD_HL( 1*src_stride, src, perm_vec);
     if (is6tap)
-        s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);
+        s4 = LOAD_HL( 2*src_stride, src, perm_vec);

     src += (2+is6tap)*src_stride;

     while (h --> 0) {
         if (is6tap)
-            s5 = load_with_perm_vec(0, src, perm_vec);
+            s5 = LOAD_HL(0, src, perm_vec);
         else
-            s4 = load_with_perm_vec(0, src, perm_vec);
+            s4 = LOAD_HL(0, src, perm_vec);

         FILTER_V(f16h, vec_mule);

@@ -272,39 +298,25 @@ EPEL_HV(4, 4,4)

 static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
 {
-    register vector unsigned char pixelsv1, pixelsv2;
-    register vector unsigned char pixelsv1B, pixelsv2B;
-    register vector unsigned char pixelsv1C, pixelsv2C;
-    register vector unsigned char pixelsv1D, pixelsv2D;
-
-    register vector unsigned char perm = vec_lvsl(0, src);
+    register vector unsigned char perm;
     int i;
     register ptrdiff_t dstride2 = dstride << 1, sstride2 = sstride << 1;
     register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
     register ptrdiff_t dstride4 = dstride << 2, sstride4 = sstride << 2;

+#if HAVE_BIGENDIAN
+    perm = vec_lvsl(0, src);
+#endif
     // hand-unrolling the loop by 4 gains about 15%
     // mininum execution time goes from 74 to 60 cycles
     // it's faster than -funroll-loops, but using
     // -funroll-loops w/ this is bad - 74 cycles again.
     // all this is on a 7450, tuning for the 7450
     for (i = 0; i < h; i += 4) {
-        pixelsv1 = vec_ld( 0, src);
-        pixelsv2 = vec_ld(15, src);
-        pixelsv1B = vec_ld(sstride, src);
-        pixelsv2B = vec_ld(15 + sstride, src);
-        pixelsv1C = vec_ld(sstride2, src);
-        pixelsv2C = vec_ld(15 + sstride2, src);
-        pixelsv1D = vec_ld(sstride3, src);
-        pixelsv2D = vec_ld(15 + sstride3, src);
-        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
-               0, (unsigned char*)dst);
-        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
-               dstride, (unsigned char*)dst);
-        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
-               dstride2, (unsigned char*)dst);
-        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
-               dstride3, (unsigned char*)dst);
+        vec_st(load_with_perm_vec(0, src, perm), 0, dst);
+        vec_st(load_with_perm_vec(sstride, src, perm), dstride, dst);
+        vec_st(load_with_perm_vec(sstride2, src, perm), dstride2, dst);
+        vec_st(load_with_perm_vec(sstride3, src, perm), dstride3, dst);
         src += sstride4;
         dst += dstride4;
     }
@@ -304,7 +304,7 @@ static int encode_slice_plane(AVCodecContext *avctx, int mb_count,
     }

     blocks_per_slice = mb_count << (2 - chroma);
-    init_put_bits(&pb, buf, buf_size << 3);
+    init_put_bits(&pb, buf, buf_size);

     encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
     encode_ac_coeffs(avctx, &pb, blocks, blocks_per_slice, qmat);
@@ -1058,7 +1058,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
             slice_hdr = pkt->data + (slice_hdr - start);
             tmp = pkt->data + (tmp - start);
         }
-        init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
+        init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)));
         ret = encode_slice(avctx, pic, &pb, sizes, x, y, q,
                            mbs_per_slice);
         if (ret < 0)
@@ -120,12 +120,13 @@ static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
     int filled = 0;
     int orig_height;

-    if(!refdata)
-        refdata= dst;
-
-    /* copy prev frame */
-    for(i = 0; i < height; i++)
-        memcpy(dst + (i * stride), refdata + (i * stride), width);
+    if (refdata) {
+        /* copy prev frame */
+        for (i = 0; i < height; i++)
+            memcpy(dst + (i * stride), refdata + (i * stride), width);
+    } else {
+        refdata = dst;
+    }

     orig_height = height;
     height--;
@@ -966,6 +966,8 @@ static av_cold int roq_encode_init(AVCodecContext *avctx)

     av_lfg_init(&enc->randctx, 1);

+    enc->avctx = avctx;
+
     enc->framesSinceKeyframe = 0;
     if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
         av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
@@ -28,7 +28,7 @@
 #include "mpegvideo.h"
 #include "put_bits.h"

-void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
+int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
 {
     int full_frame= 0;

@@ -48,12 +48,17 @@ void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
     /* if multiple packets per frame are sent, the position at which
        to display the macroblocks is coded here */
     if(!full_frame){
+        if (s->mb_width * s->mb_height >= (1U << 12)) {
+            avpriv_report_missing_feature(s, "Encoding frames with 4096 or more macroblocks");
+            return AVERROR(ENOSYS);
+        }
         put_bits(&s->pb, 6, 0); /* mb_x */
         put_bits(&s->pb, 6, 0); /* mb_y */
         put_bits(&s->pb, 12, s->mb_width * s->mb_height);
     }

     put_bits(&s->pb, 3, 0); /* ignored */
+    return 0;
 }

 FF_MPV_GENERIC_CLASS(rv10)
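The added guard matches the 12-bit field written just below it: `put_bits(&s->pb, 12, ...)` can only represent 0..4095, so frames with 4096 or more macroblocks cannot be signalled in this header. A small sketch of the constraint (the dimensions are illustrative only):

```c
#include <stdio.h>

int main(void)
{
    /* Example dimensions in 16x16 macroblocks */
    int mb_width  = 1920 / 16;          /* 120 */
    int mb_height = 1152 / 16;          /*  72 */
    int mbs = mb_width * mb_height;     /* 8640 */

    /* A 12-bit field holds at most 4095 */
    printf("macroblocks: %d, fits in 12 bits: %s\n",
           mbs, mbs < (1 << 12) ? "yes" : "no");
    return 0;
}
```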
@@ -82,7 +82,7 @@ static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
         return ret;

     o = avpkt->data;
-    init_put_bits(&pb, o, buf_size * 8);
+    init_put_bits(&pb, o, buf_size);
     put_bits(&pb, 16, buf_size - AES3_HEADER_LEN);
     put_bits(&pb, 2, (avctx->channels - 2) >> 1); // number of channels
     put_bits(&pb, 8, 0); // channel ID
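All three init_put_bits() hunks in this set (Nellymoser, ProRes, S302M) drop a multiplication by eight: the third argument is the buffer size in bytes, so pre-multiplying overstated the writable space. A minimal sketch of the intended usage; put_bits.h is a private libavcodec header, so this is meant as a reading aid rather than a standalone program:

```c
/* Illustrative only: builds inside the FFmpeg tree, not against installed headers. */
#include "libavcodec/put_bits.h"

static int write_header(uint8_t *buf, int buf_size_bytes)
{
    PutBitContext pb;

    /* size is passed in bytes; the writer tracks bit positions internally */
    init_put_bits(&pb, buf, buf_size_bytes);

    put_bits(&pb, 16, 0x1234);   /* example 16-bit field */
    put_bits(&pb, 2,  1);        /* example  2-bit field */

    flush_put_bits(&pb);
    return put_bits_count(&pb) >> 3;   /* bytes actually written */
}
```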
@@ -152,7 +152,7 @@ static int decode_q_branch(SnowContext *s, int level, int x, int y){
     int l = left->color[0];
     int cb= left->color[1];
     int cr= left->color[2];
-    int ref = 0;
+    unsigned ref = 0;
     int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
     int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
     int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));
@@ -839,13 +839,6 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
                 s->bpp = -1;
             }
         }
-        if (s->bpp > 64U) {
-            av_log(s->avctx, AV_LOG_ERROR,
-                   "This format is not supported (bpp=%d, %d components)\n",
-                   s->bpp, count);
-            s->bpp = 0;
-            return AVERROR_INVALIDDATA;
-        }
         break;
     case TIFF_SAMPLES_PER_PIXEL:
         if (count != 1) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
end:
|
end:
|
||||||
|
if (s->bpp > 64U) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR,
|
||||||
|
"This format is not supported (bpp=%d, %d components)\n",
|
||||||
|
s->bpp, count);
|
||||||
|
s->bpp = 0;
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
bytestream2_seek(&s->gb, start, SEEK_SET);
|
bytestream2_seek(&s->gb, start, SEEK_SET);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -66,6 +66,9 @@
|
|||||||
#include "compat/os2threads.h"
|
#include "compat/os2threads.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#include "libavutil/ffversion.h"
|
||||||
|
const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
|
||||||
|
|
||||||
#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
|
#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
|
||||||
static int default_lockmgr_cb(void **arg, enum AVLockOp op)
|
static int default_lockmgr_cb(void **arg, enum AVLockOp op)
|
||||||
{
|
{
|
||||||
@@ -243,7 +246,7 @@ int ff_set_sar(AVCodecContext *avctx, AVRational sar)
     int ret = av_image_check_sar(avctx->width, avctx->height, sar);

     if (ret < 0) {
-        av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
+        av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %d/%d\n",
                sar.num, sar.den);
         avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
         return ret;
@@ -371,7 +374,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
     case AV_PIX_FMT_YUVJ411P:
     case AV_PIX_FMT_UYYVYY411:
         w_align = 32;
-        h_align = 8;
+        h_align = 16 * 2;
         break;
     case AV_PIX_FMT_YUV410P:
         if (s->codec_id == AV_CODEC_ID_SVQ1) {
|
@ -215,6 +215,8 @@ static void restore_median(uint8_t *src, int step, int stride,
|
|||||||
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
||||||
slice_start;
|
slice_start;
|
||||||
|
|
||||||
|
if (!slice_height)
|
||||||
|
continue;
|
||||||
bsrc = src + slice_start * stride;
|
bsrc = src + slice_start * stride;
|
||||||
|
|
||||||
// first line - left neighbour prediction
|
// first line - left neighbour prediction
|
||||||
@ -270,6 +272,8 @@ static void restore_median_il(uint8_t *src, int step, int stride,
|
|||||||
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
||||||
slice_start;
|
slice_start;
|
||||||
slice_height >>= 1;
|
slice_height >>= 1;
|
||||||
|
if (!slice_height)
|
||||||
|
continue;
|
||||||
|
|
||||||
bsrc = src + slice_start * stride;
|
bsrc = src + slice_start * stride;
|
||||||
|
|
||||||
|
@ -339,6 +339,9 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
|
|||||||
ofs += slen;
|
ofs += slen;
|
||||||
bytestream2_skip(&gb, len);
|
bytestream2_skip(&gb, len);
|
||||||
} else {
|
} else {
|
||||||
|
if (ofs + len > frame_width ||
|
||||||
|
bytestream2_get_bytes_left(&gb) < len)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
bytestream2_get_buffer(&gb, &dp[ofs], len);
|
bytestream2_get_buffer(&gb, &dp[ofs], len);
|
||||||
ofs += len;
|
ofs += len;
|
||||||
}
|
}
|
||||||
|
@@ -176,19 +176,25 @@ int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
     s->top_border  = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
     s->thread_data = av_mallocz(MAX_THREADS * sizeof(VP8ThreadData));

+    if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
+        !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
+        free_buffers(s);
+        return AVERROR(ENOMEM);
+    }
+
     for (i = 0; i < MAX_THREADS; i++) {
         s->thread_data[i].filter_strength =
             av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
+        if (!s->thread_data[i].filter_strength) {
+            free_buffers(s);
+            return AVERROR(ENOMEM);
+        }
 #if HAVE_THREADS
         pthread_mutex_init(&s->thread_data[i].lock, NULL);
         pthread_cond_init(&s->thread_data[i].cond, NULL);
 #endif
     }

-    if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
-        (!s->intra4x4_pred_mode_top && !s->mb_layout))
-        return AVERROR(ENOMEM);
-
     s->macroblocks = s->macroblocks_base + 1;

     return 0;
@@ -279,7 +279,8 @@ static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)

     // retain segmentation map if it doesn't update
     if (s->segmentation.enabled && !s->segmentation.update_map &&
-        !s->intraonly && !s->keyframe && !s->errorres) {
+        !s->intraonly && !s->keyframe && !s->errorres &&
+        ctx->active_thread_type != FF_THREAD_FRAME) {
         memcpy(f->segmentation_map, s->frames[LAST_FRAME].segmentation_map, sz);
     }

@@ -1351,9 +1352,18 @@ static void decode_mode(AVCodecContext *ctx)

         if (!s->last_uses_2pass)
             ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0);
-        for (y = 0; y < h4; y++)
+        for (y = 0; y < h4; y++) {
+            int idx_base = (y + row) * 8 * s->sb_cols + col;
             for (x = 0; x < w4; x++)
-                pred = FFMIN(pred, refsegmap[(y + row) * 8 * s->sb_cols + x + col]);
+                pred = FFMIN(pred, refsegmap[idx_base + x]);
+            if (!s->segmentation.update_map && ctx->active_thread_type == FF_THREAD_FRAME) {
+                // FIXME maybe retain reference to previous frame as
+                // segmap reference instead of copying the whole map
+                // into a new buffer
+                memcpy(&s->frames[CUR_FRAME].segmentation_map[idx_base],
+                       &refsegmap[idx_base], w4);
+            }
+        }
         av_assert1(pred < 8);
         b->seg_id = pred;
     } else {
@@ -2496,7 +2506,7 @@ static void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, ptrdiff_t uv_off)
             for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d,
                                    ptr_r += 4 * uvstep1d, n += step) {
                 int mode = b->uvmode;
-                uint8_t *a = &a_buf[16];
+                uint8_t *a = &a_buf[32];
                 int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];

                 mode = check_intra_mode(s, mode, &a, ptr_r,
@@ -3748,7 +3758,7 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
             if ((res = av_frame_ref(frame, s->refs[ref].f)) < 0)
                 return res;
             *got_frame = 1;
-            return 0;
+            return pkt->size;
         }
         data += res;
         size -= res;
@@ -3972,7 +3982,7 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
         *got_frame = 1;
     }

-    return 0;
+    return pkt->size;
 }

 static void vp9_decode_flush(AVCodecContext *ctx)
@@ -77,6 +77,8 @@ static int parse(AVCodecParserContext *ctx,
             idx += a; \
             if (sz > size) { \
                 s->n_frames = 0; \
+                *out_size = 0; \
+                *out_data = data; \
                 av_log(avctx, AV_LOG_ERROR, \
                        "Superframe packet size too big: %u > %d\n", \
                        sz, size); \
@@ -694,6 +694,11 @@ static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
                 length = offset + get_bits(&s->gb, extra_bits) + 1;
             }
             prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
+            if (prefix_code > 39) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "distance prefix code too large: %d\n", prefix_code);
+                return AVERROR_INVALIDDATA;
+            }
             if (prefix_code < 4) {
                 distance = prefix_code + 1;
             } else {
@@ -1099,7 +1104,7 @@ static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
                                      unsigned int data_size, int is_alpha_chunk)
 {
     WebPContext *s = avctx->priv_data;
-    int w, h, ret, i;
+    int w, h, ret, i, used;

     if (!is_alpha_chunk) {
         s->lossless = 1;
@@ -1149,8 +1154,16 @@ static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
     /* parse transformations */
     s->nb_transforms = 0;
     s->reduced_width = 0;
+    used = 0;
     while (get_bits1(&s->gb)) {
         enum TransformType transform = get_bits(&s->gb, 2);
+        if (used & (1 << transform)) {
+            av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
+                   transform);
+            ret = AVERROR_INVALIDDATA;
+            goto free_and_return;
+        }
+        used |= (1 << transform);
         s->transforms[s->nb_transforms++] = transform;
         switch (transform) {
         case PREDICTOR_TRANSFORM:
@@ -26,12 +26,6 @@ SECTION_TEXT
 ; int ff_scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3,
 ;                                     int order, int mul)
 cglobal scalarproduct_and_madd_int16, 4,4,8, v1, v2, v3, order, mul
-%if mmsize == 16
-    test orderq, 8
-    jnz scalarproduct_and_madd_int16_fallback
-%else
-scalarproduct_and_madd_int16_fallback
-%endif
     shl orderq, 1
     movd    m7, mulm
 %if mmsize == 16
@@ -123,8 +117,6 @@ align 16
 ;                                     int order, int mul)
 INIT_XMM ssse3
 cglobal scalarproduct_and_madd_int16, 4,5,10, v1, v2, v3, order, mul
-    test orderq, 8
-    jnz scalarproduct_and_madd_int16_fallback
     shl orderq, 1
     movd    m7, mulm
     pshuflw m7, m7, 0
@@ -31,17 +31,41 @@ int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);

+#if HAVE_YASM
+static int32_t scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
+                                                 const int16_t *v3,
+                                                 int order, int mul)
+{
+    if (order & 8)
+        return ff_scalarproduct_and_madd_int16_mmxext(v1, v2, v3, order, mul);
+    else
+        return ff_scalarproduct_and_madd_int16_sse2(v1, v2, v3, order, mul);
+}
+
+static int32_t scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
+                                                  const int16_t *v3,
+                                                  int order, int mul)
+{
+    if (order & 8)
+        return ff_scalarproduct_and_madd_int16_mmxext(v1, v2, v3, order, mul);
+    else
+        return ff_scalarproduct_and_madd_int16_ssse3(v1, v2, v3, order, mul);
+}
+#endif
+
 av_cold void ff_llauddsp_init_x86(LLAudDSPContext *c)
 {
+#if HAVE_YASM
     int cpu_flags = av_get_cpu_flags();

     if (EXTERNAL_MMXEXT(cpu_flags))
         c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

     if (EXTERNAL_SSE2(cpu_flags))
-        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
+        c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_sse2;

     if (EXTERNAL_SSSE3(cpu_flags) &&
         !(cpu_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
-        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
+        c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_ssse3;
+#endif
 }
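The removed asm branch (`test orderq, 8` / `jnz ...fallback`) is replaced by C wrappers: when `order` is a multiple of 8 but not of 16, the SSE2/SSSE3 entry points (which appear to consume 16 coefficients per iteration) are skipped and the MMXEXT version is used instead. A generic sketch of this dispatch-by-width pattern, with hypothetical function names rather than the real FFmpeg symbols:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical kernels standing in for the asm entry points. */
static int32_t kernel_wide(const int16_t *v, int order)   { return  order; /* pretend: 16 at a time */ }
static int32_t kernel_narrow(const int16_t *v, int order) { return -order; /* pretend:  8 at a time */ }

/* Dispatcher mirroring the hunk: orders that are not a multiple of 16
 * fall back to the narrower routine. */
static int32_t dispatch(const int16_t *v, int order)
{
    if (order & 8)
        return kernel_narrow(v, order);
    return kernel_wide(v, order);
}

int main(void)
{
    int16_t v[24] = {0};
    printf("%d %d\n", dispatch(v, 16), dispatch(v, 24));
    return 0;
}
```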
@@ -148,8 +148,8 @@ static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
         FIRMUL   (ff_mlp_firorder_6, 0x14   )
         FIRMUL   (ff_mlp_firorder_5, 0x10   )
         FIRMUL   (ff_mlp_firorder_4, 0x0c   )
-        FIRMULREG(ff_mlp_firorder_3, 0x08,10)
-        FIRMULREG(ff_mlp_firorder_2, 0x04, 9)
+        FIRMUL   (ff_mlp_firorder_3, 0x08   )
+        FIRMUL   (ff_mlp_firorder_2, 0x04   )
         FIRMULREG(ff_mlp_firorder_1, 0x00, 8)
         LABEL_MANGLE(ff_mlp_firorder_0)":\n\t"
         "jmp *%6 \n\t"
@@ -178,8 +178,6 @@ static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
         : /* 4*/"r"((x86_reg)mask), /* 5*/"r"(firjump),
           /* 6*/"r"(iirjump) , /* 7*/"c"(filter_shift)
         , /* 8*/"r"((int64_t)coeff[0])
-        , /* 9*/"r"((int64_t)coeff[1])
-        , /*10*/"r"((int64_t)coeff[2])
         : "rax", "rdx", "rsi"
 #else /* ARCH_X86_32 */
         /* 3*/"+m"(blocksize)
@@ -24,6 +24,8 @@
  * X-Face common data and utilities definition.
  */

+#include "libavutil/avassert.h"
+
 #include "xface.h"

 void ff_big_add(BigInt *b, uint8_t a)
@@ -43,6 +45,7 @@ void ff_big_add(BigInt *b, uint8_t a)
         c >>= XFACE_BITSPERWORD;
     }
     if (i == b->nb_words && c) {
+        av_assert0(b->nb_words < XFACE_MAX_WORDS);
         b->nb_words++;
         *w = c & XFACE_WORDMASK;
     }
@@ -98,6 +101,7 @@ void ff_big_mul(BigInt *b, uint8_t a)
         return;
     if (a == 0) {
         /* treat this as a == WORDCARRY and just shift everything left a WORD */
+        av_assert0(b->nb_words < XFACE_MAX_WORDS);
         i = b->nb_words++;
         w = b->words + i;
         while (i--) {
@@ -116,6 +120,7 @@ void ff_big_mul(BigInt *b, uint8_t a)
         c >>= XFACE_BITSPERWORD;
     }
     if (c) {
+        av_assert0(b->nb_words < XFACE_MAX_WORDS);
         b->nb_words++;
         *w = c & XFACE_WORDMASK;
     }
@@ -41,17 +41,17 @@
 /*
  * Image is encoded as a big integer, using characters from '~' to
  * '!', for a total of 94 symbols. In order to express
- * 48x48*2=8*XFACE_MAX_WORDS=4608
- * bits, we need a total of 704 digits, as given by:
- * ceil(lg_94(2^4608)) = 704
+ * 48x48 pixels with the worst case encoding 666 symbols should
+ * be sufficient.
  */
-#define XFACE_MAX_DIGITS 704
+#define XFACE_MAX_DIGITS 666

 #define XFACE_BITSPERWORD 8
 #define XFACE_WORDCARRY (1 << XFACE_BITSPERWORD)
 #define XFACE_WORDMASK (XFACE_WORDCARRY - 1)

-#define XFACE_MAX_WORDS ((XFACE_PIXELS * 2 + XFACE_BITSPERWORD - 1) / XFACE_BITSPERWORD)
+// This must be larger or equal to log256(94^XFACE_MAX_DIGITS)
+#define XFACE_MAX_WORDS 546

 /* Portable, very large unsigned integer arithmetic is needed.
  * Implementation uses arrays of WORDs. */
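The new constants are consistent with each other: 666 base-94 digits carry 666·log2(94) ≈ 4365 bits, and XFACE_MAX_WORDS must be at least log256(94^666) ≈ 545.7, hence 546. A quick check of that arithmetic:

```c
#include <math.h>
#include <stdio.h>

int main(void)
{
    double digits = 666;
    double bits   = digits * log2(94.0);   /* information content of the ASCII form */
    double words  = bits / 8.0;            /* 8-bit words needed to hold it */

    printf("bits  = %.1f\n", bits);                                /* ~4365.4 */
    printf("words = %.1f -> need %d\n", words, (int)ceil(words));  /* 546 */
    return 0;                              /* compile with -lm */
}
```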
@@ -410,11 +410,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     int hi_ver, lo_ver, ret;

     /* parse header */
+    if (len < 1)
+        return AVERROR_INVALIDDATA;
     c->flags = buf[0];
     buf++; len--;
     if (c->flags & ZMBV_KEYFRAME) {
         void *decode_intra = NULL;
         c->decode_intra= NULL;

+        if (len < 6)
+            return AVERROR_INVALIDDATA;
         hi_ver = buf[0];
         lo_ver = buf[1];
         c->comp = buf[2];
@@ -23,6 +23,9 @@
 #include "avdevice.h"
 #include "config.h"

+#include "libavutil/ffversion.h"
+const char av_device_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
+
 #define E AV_OPT_FLAG_ENCODING_PARAM
 #define D AV_OPT_FLAG_DECODING_PARAM
 #define A AV_OPT_FLAG_AUDIO_PARAM
@@ -131,9 +134,9 @@ int avdevice_app_to_dev_control_message(struct AVFormatContext *s, enum AVAppToD
 int avdevice_dev_to_app_control_message(struct AVFormatContext *s, enum AVDevToAppMessageType type,
                                         void *data, size_t data_size)
 {
-    if (!s->control_message_cb)
+    if (!av_format_get_control_message_cb(s))
         return AVERROR(ENOSYS);
-    return s->control_message_cb(s, type, data, data_size);
+    return av_format_get_control_message_cb(s)(s, type, data, data_size);
 }

 int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
@@ -350,7 +350,7 @@ static int iec61883_read_header(AVFormatContext *context)
     if (!dv->max_packets)
         dv->max_packets = 100;

-    if (dv->type == IEC61883_HDV) {
+    if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {

         /* Init HDV receive */

@@ -444,7 +444,7 @@ static int iec61883_close(AVFormatContext *context)
     pthread_mutex_destroy(&dv->mutex);
 #endif

-    if (dv->type == IEC61883_HDV) {
+    if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {
         iec61883_mpeg2_recv_stop(dv->iec61883_mpeg2);
         iec61883_mpeg2_close(dv->iec61883_mpeg2);
         avpriv_mpegts_parse_close(dv->mpeg_demux);
@@ -496,6 +496,8 @@ static av_cold int init(AVFilterContext *ctx)
         snprintf(name, sizeof(name), "input%d", i);
         pad.type = AVMEDIA_TYPE_AUDIO;
         pad.name = av_strdup(name);
+        if (!pad.name)
+            return AVERROR(ENOMEM);
         pad.filter_frame = filter_frame;

         ff_insert_inpad(ctx, i, &pad);
@@ -214,6 +214,8 @@ static av_cold int join_init(AVFilterContext *ctx)
         snprintf(name, sizeof(name), "input%d", i);
         pad.type = AVMEDIA_TYPE_AUDIO;
         pad.name = av_strdup(name);
+        if (!pad.name)
+            return AVERROR(ENOMEM);
         pad.filter_frame = filter_frame;

         pad.needs_fifo = 1;
@@ -37,6 +37,9 @@
 #include "formats.h"
 #include "internal.h"

+#include "libavutil/ffversion.h"
+const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
+
 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);

 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
@@ -52,6 +52,8 @@ static av_cold int split_init(AVFilterContext *ctx)
         snprintf(name, sizeof(name), "output%d", i);
         pad.type = ctx->filter->inputs[0].type;
         pad.name = av_strdup(name);
+        if (!pad.name)
+            return AVERROR(ENOMEM);

         ff_insert_outpad(ctx, i, &pad);
     }
@@ -289,6 +289,8 @@ static av_cold int movie_common_init(AVFilterContext *ctx)
         snprintf(name, sizeof(name), "out%d", i);
         pad.type = movie->st[i].st->codec->codec_type;
         pad.name = av_strdup(name);
+        if (!pad.name)
+            return AVERROR(ENOMEM);
         pad.config_props = movie_config_output_props;
         pad.request_frame = movie_request_frame;
         ff_insert_outpad(ctx, i, &pad);
@@ -534,7 +534,7 @@ static int config_input(AVFilterLink *inlink)
     /* each slice will need to (pre & re)process the top and bottom block of
      * the previous one in in addition to its processing area. This is because
      * each pixel is averaged by all the surrounding blocks */
-    slice_h = (int)ceilf(s->pr_height / s->nb_threads) + (s->bsize - 1) * 2;
+    slice_h = (int)ceilf(s->pr_height / (float)s->nb_threads) + (s->bsize - 1) * 2;
     for (i = 0; i < s->nb_threads; i++) {
         s->slices[i] = av_malloc_array(linesize, slice_h * sizeof(*s->slices[i]));
         if (!s->slices[i])
@@ -82,6 +82,7 @@ static int config_output(AVFilterLink *outlink)
     int width  = ctx->inputs[LEFT]->w;
     int height = ctx->inputs[LEFT]->h;
     AVRational time_base = ctx->inputs[LEFT]->time_base;
+    AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;

     // check size and fps match on the other input
     if (width != ctx->inputs[RIGHT]->w ||
@@ -93,11 +94,18 @@ static int config_output(AVFilterLink *outlink)
         return AVERROR_INVALIDDATA;
     } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
         av_log(ctx, AV_LOG_ERROR,
-               "Left and right framerates differ (%d/%d vs %d/%d).\n",
+               "Left and right time bases differ (%d/%d vs %d/%d).\n",
                time_base.num, time_base.den,
                ctx->inputs[RIGHT]->time_base.num,
                ctx->inputs[RIGHT]->time_base.den);
         return AVERROR_INVALIDDATA;
+    } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
+        av_log(ctx, AV_LOG_ERROR,
+               "Left and right framerates differ (%d/%d vs %d/%d).\n",
+               frame_rate.num, frame_rate.den,
+               ctx->inputs[RIGHT]->frame_rate.num,
+               ctx->inputs[RIGHT]->frame_rate.den);
+        return AVERROR_INVALIDDATA;
     }

     s->pix_desc = av_pix_fmt_desc_get(outlink->format);
@@ -108,6 +116,8 @@ static int config_output(AVFilterLink *outlink)
     switch (s->format) {
     case AV_STEREO3D_FRAMESEQUENCE:
         time_base.den *= 2;
+        frame_rate.num *= 2;
+
         s->double_pts = AV_NOPTS_VALUE;
         break;
     case AV_STEREO3D_COLUMNS:
@@ -126,6 +136,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->w = width;
     outlink->h = height;
     outlink->time_base = time_base;
+    outlink->frame_rate= frame_rate;

     return 0;
 }
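For AV_STEREO3D_FRAMESEQUENCE the filter emits two output frames per input pair, so alongside doubling time_base.den it now also doubles frame_rate.num and advertises the result on the output link. A small sketch of the rational bookkeeping, using libavutil's public AVRational with illustrative values:

```c
#include <stdio.h>
#include "libavutil/rational.h"

int main(void)
{
    AVRational frame_rate = { 25, 1 };   /* 25 fps per view, illustrative */
    AVRational time_base  = { 1, 25 };

    /* frame-sequential packing: twice as many output frames */
    frame_rate.num *= 2;                 /* 50/1 */
    time_base.den  *= 2;                 /* 1/50 */

    printf("out fps = %d/%d, out time_base = %d/%d\n",
           frame_rate.num, frame_rate.den, time_base.num, time_base.den);
    return 0;
}
```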