Merge branch 'release/0.7' into oldabi

* release/0.7: (290 commits)
  nuv: Fix combination of size changes and LZO compression.
  av_lzo1x_decode: properly handle negative buffer length.
  Do not call parse_keyframes_index with NULL stream.
  update versions for 0.7 branch
  Version numbers for 0.8.6
  snow: emu edge support Fixes Ticket592
  imc: validate channel count
  imc: check for ff_fft_init() failure (cherry picked from commit 95fee70d67)
  libgsmdec: check output buffer size before decoding (cherry picked from commit b03761b130)
  configure: fix arch x86_32
  mp3enc: avoid truncating id3v1 tags by one byte
  asfdec: Check packet_replic_size earlier
  cin audio: validate the channel count
  binkaudio: add some buffer overread checks.
  atrac1: validate number of channels (cherry picked from commit bff5b2c1ca)
  atrac1: check output buffer size before decoding (cherry picked from commit 33684b9c12)
  vp3: fix oob read for negative tokens and memleaks on error. (cherry picked from commit 8370e426e4)
  apedec: set s->currentframeblocks after validating nblocks
  apedec: use unsigned int for 'nblocks' and make sure that it's within int range
  apedec: check for data buffer realloc failure (cherry picked from commit 11ca8b2d74)
  ...

Conflicts:
	Changelog
	Makefile
	RELEASE
	configure
	libavcodec/error_resilience.c
	libavcodec/mpegvideo.c
	libavformat/matroskaenc.c
	tests/ref/lavf/mxf

Merged-by: Michael Niedermayer <michaelni@gmx.at>
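A large share of the cherry-picked commits above (libgsmdec, binkaudio, atrac1, imc, and the 4xm hunks further down this page) add the same defensive step: check how much input or output space remains before consuming it. The following is only an illustrative C sketch of that pattern with hypothetical names (Reader, read_u8); it is not taken from any of the decoders touched by this merge.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reader state: the real decoders keep equivalent pointer
 * pairs (e.g. bytestream / bytestream_end in the 4xm hunks below). */
typedef struct Reader {
    const uint8_t *cur;
    const uint8_t *end;
} Reader;

/* Return the next byte, or -1 when fewer than one byte remains.
 * Checking "end - cur" before every read is the pattern these
 * overread fixes introduce. */
static int read_u8(Reader *r)
{
    if (r->end - r->cur < 1)
        return -1;              /* would read past the buffer: refuse */
    return *r->cur++;
}

int main(void)
{
    static const uint8_t pkt[] = { 0x10, 0x20 };
    Reader r = { pkt, pkt + sizeof(pkt) };
    int v;

    while ((v = read_u8(&r)) >= 0)
        printf("byte 0x%02x\n", (unsigned)v);
    puts("stopped cleanly at the end of the buffer");
    return 0;
}

The point is simply that the reader refuses to advance past end, so a malformed packet cannot trigger an out-of-bounds read.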
Changelog (798 lines changed)
@@ -1,798 +0,0 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version next:

<<<<<<< HEAD
- openal input device added
- boxblur filter added
- BWF muxer
- Flash Screen Video 2 decoder
||||||| merged common ancestors
version 0.7:

- many many things we forgot because we rather write code than changelogs
- libmpcodecs video filter support (3 times as many filters than before)
- mpeg2 aspect ratio dection fixed
- libxvid aspect pickiness fixed
- Frame multithreaded decoding
- E-AC-3 audio encoder
- ac3enc: add channel coupling support
- floating-point sample format support to the ac3, eac3, dca, aac, and vorbis decoders.
- H264/MPEG frame-level multi-threading
- All av_metadata_* functions renamed to av_dict_* and moved to libavutil
- 4:4:4 H.264 decoding support
- 10-bit H.264 optimizations for x86
- lut, lutrgb, and lutyuv filters added
- buffersink libavfilter sink added
- Bump libswscale for recently reported ABI break

=======
version 0.7.1:

- added various additional FOURCC codec identifiers
- H.264 4:4:4 fixes
- build system and compilation fixes
- Doxygen and general documentation corrections and improvements
- fixed segfault in ffprobe
- behavioral fix in av_open_input_stream()
- Licensing clarification for LGPL'ed vf_gradfun
- bugfixes while seeking in multithreaded decoding
- support newer versions of OpenCV
- ffmpeg: fix operation with --disable-avfilter
- fixed integer underflow in matroska decoder

version 0.7:

- many many things we forgot because we rather write code than changelogs
- libmpcodecs video filter support (3 times as many filters than before)
- mpeg2 aspect ratio dection fixed
- libxvid aspect pickiness fixed
- Frame multithreaded decoding
- E-AC-3 audio encoder
- ac3enc: add channel coupling support
- floating-point sample format support for (E-)AC-3, DCA, AAC, Vorbis decoders
- H.264/MPEG frame-level multithreading
- av_metadata_* functions renamed to av_dict_* and moved to libavutil
- 4:4:4 H.264 decoding support
- 10-bit H.264 optimizations for x86
- lut, lutrgb, and lutyuv filters added
- buffersink libavfilter sink added
- bump libswscale for recently reported ABI break

>>>>>>> release/0.7

<<<<<<< HEAD
||||||| merged common ancestors
version 0.7_beta2:

- VP8 frame-multithreading
- NEON optimizations for VP8
- Lots of deprecated API cruft removed
- fft and imdct optimizations for AVX (Sandy Bridge) processors
- showinfo filter added
- DPX image encoder
- SMPTE 302M AES3 audio decoder
- Apple Core Audio Format muxer
- 9bit and 10bit per sample support in the H.264 decoder
- 9bit and 10bit FFV1 encoding / decoding
- split filter added
- select filter added
- sdl output device added
=======
version 0.7_beta2:

- VP8 frame-level multithreading
- NEON optimizations for VP8
- removed a lot of deprecated API cruft
- FFT and IMDCT optimizations for AVX (Sandy Bridge) processors
- showinfo filter added
- DPX image encoder
- SMPTE 302M AES3 audio decoder
- Apple Core Audio Format muxer
- 9bit and 10bit per sample support in the H.264 decoder
- 9bit and 10bit FFV1 encoding / decoding
- split filter added
- select filter added
- sdl output device added
>>>>>>> release/0.7

version 0.8:
|
|
||||||
|
|
||||||
|
|
||||||
- many many things we forgot because we rather write code than changelogs
|
|
||||||
- WebM support in Matroska de/muxer
|
|
||||||
- low overhead Ogg muxing
|
|
||||||
- MMS-TCP support
|
|
||||||
- VP8 de/encoding via libvpx
|
|
||||||
- Demuxer for On2's IVF format
|
|
||||||
- Pictor/PC Paint decoder
|
|
||||||
- HE-AAC v2 decoder
|
|
||||||
- libfaad2 wrapper removed
|
|
||||||
- DTS-ES extension (XCh) decoding support
|
|
||||||
- native VP8 decoder
|
|
||||||
- RTSP tunneling over HTTP
|
|
||||||
- RTP depacketization of SVQ3
|
|
||||||
- -strict inofficial replaced by -strict unofficial
|
|
||||||
- ffplay -exitonkeydown and -exitonmousedown options added
|
|
||||||
- native GSM / GSM MS decoder
|
|
||||||
- RTP depacketization of QDM2
|
|
||||||
- ANSI/ASCII art playback system
|
|
||||||
- Lego Mindstorms RSO de/muxer
|
|
||||||
- libavcore added (and subsequently removed)
|
|
||||||
- SubRip subtitle file muxer and demuxer
|
|
||||||
- Chinese AVS encoding via libxavs
|
|
||||||
- ffprobe -show_packets option added
|
|
||||||
- RTP packetization of Theora and Vorbis
|
|
||||||
- RTP depacketization of MP4A-LATM
|
|
||||||
- RTP packetization and depacketization of VP8
|
|
||||||
- hflip filter
|
|
||||||
- Apple HTTP Live Streaming demuxer
|
|
||||||
- a64 codec
|
|
||||||
- MMS-HTTP support
|
|
||||||
- G.722 ADPCM audio encoder/decoder
|
|
||||||
- R10k video decoder
|
|
||||||
- ocv_smooth filter
|
|
||||||
- frei0r wrapper filter
|
|
||||||
- change crop filter syntax to width:height:x:y
|
|
||||||
- make the crop filter accept parametric expressions
|
|
||||||
- make ffprobe accept AVFormatContext options
|
|
||||||
- yadif filter
|
|
||||||
- blackframe filter
|
|
||||||
- Demuxer for Leitch/Harris' VR native stream format (LXF)
|
|
||||||
- RTP depacketization of the X-QT QuickTime format
|
|
||||||
- SAP (Session Announcement Protocol, RFC 2974) muxer and demuxer
|
|
||||||
- cropdetect filter
|
|
||||||
- ffmpeg -crop* options removed
|
|
||||||
- transpose filter added
|
|
||||||
- ffmpeg -force_key_frames option added
|
|
||||||
- demuxer for receiving raw rtp:// URLs without an SDP description
|
|
||||||
- single stream LATM/LOAS decoder
|
|
||||||
- setpts filter added
|
|
||||||
- Win64 support for optimized x86 assembly functions
|
|
||||||
- MJPEG/AVI1 to JPEG/JFIF bitstream filter
|
|
||||||
- ASS subtitle encoder and decoder
|
|
||||||
- IEC 61937 encapsulation for E-AC-3, TrueHD, DTS-HD (for HDMI passthrough)
|
|
||||||
- overlay filter added
|
|
||||||
- rename aspect filter to setdar, and pixelaspect to setsar
|
|
||||||
- IEC 61937 demuxer
|
|
||||||
- Mobotix .mxg demuxer
|
|
||||||
- frei0r source added
|
|
||||||
- hqdn3d filter added
|
|
||||||
- RTP depacketization of QCELP
|
|
||||||
- FLAC parser added
|
|
||||||
- gradfun filter added
|
|
||||||
- AMR-WB decoder
|
|
||||||
- replace the ocv_smooth filter with a more generic ocv filter
|
|
||||||
- Windows Televison (WTV) demuxer
|
|
||||||
- FFmpeg metadata format muxer and demuxer
|
|
||||||
- SubRip (srt) subtitle encoder and decoder
|
|
||||||
- floating-point AC-3 encoder added
|
|
||||||
- Lagarith decoder
|
|
||||||
- ffmpeg -copytb option added
|
|
||||||
- IVF muxer added
|
|
||||||
- Wing Commander IV movies decoder added
|
|
||||||
- movie source added
|
|
||||||
- Bink version 'b' audio and video decoder
|
|
||||||
- Bitmap Brothers JV playback system
|
|
||||||
- Apple HTTP Live Streaming protocol handler
|
|
||||||
- sndio support for playback and record
|
|
||||||
- Linux framebuffer input device added
|
|
||||||
- Chronomaster DFA decoder
|
|
||||||
- DPX image encoder
|
|
||||||
- MicroDVD subtitle file muxer and demuxer
|
|
||||||
- Playstation Portable PMP format demuxer
|
|
||||||
- fieldorder video filter added
|
|
||||||
- AAC encoding via libvo-aacenc
|
|
||||||
- AMR-WB encoding via libvo-amrwbenc
|
|
||||||
- xWMA demuxer
|
|
||||||
- Mobotix MxPEG decoder
|
|
||||||
- VP8 frame-multithreading
|
|
||||||
- NEON optimizations for VP8
|
|
||||||
- Lots of deprecated API cruft removed
|
|
||||||
- fft and imdct optimizations for AVX (Sandy Bridge) processors
|
|
||||||
- showinfo filter added
|
|
||||||
- SMPTE 302M AES3 audio decoder
|
|
||||||
- Apple Core Audio Format muxer
|
|
||||||
- 9bit and 10bit per sample support in the H.264 decoder
|
|
||||||
- 9bit and 10bit FFV1 encoding / decoding
|
|
||||||
- split filter added
|
|
||||||
- select filter added
|
|
||||||
- sdl output device added
|
|
||||||
- libmpcodecs video filter support (3 times as many filters than before)
|
|
||||||
- mpeg2 aspect ratio dection fixed
|
|
||||||
- libxvid aspect pickiness fixed
|
|
||||||
- Frame multithreaded decoding
|
|
||||||
- E-AC-3 audio encoder
|
|
||||||
- ac3enc: add channel coupling support
|
|
||||||
- floating-point sample format support to the ac3, eac3, dca, aac, and vorbis decoders.
|
|
||||||
- H264/MPEG frame-level multi-threading
|
|
||||||
- All av_metadata_* functions renamed to av_dict_* and moved to libavutil
|
|
||||||
- 4:4:4 H.264 decoding support
|
|
||||||
- 10-bit H.264 optimizations for x86
|
|
||||||
- lut, lutrgb, and lutyuv filters added
|
|
||||||
- buffersink libavfilter sink added
|
|
||||||
- Bump libswscale for recently reported ABI break
|
|
||||||
|
|
||||||
|
|
||||||
version 0.7:
|
|
||||||
|
|
||||||
- all the changes for 0.8, but keeping API/ABI compatibility with the 0.6 release
|
|
||||||
|
|
||||||
|
|
||||||
version 0.6:
|
|
||||||
|
|
||||||
- PB-frame decoding for H.263
|
|
||||||
- deprecated vhook subsystem removed
|
|
||||||
- deprecated old scaler removed
|
|
||||||
- VQF demuxer
|
|
||||||
- Alpha channel scaler
|
|
||||||
- PCX encoder
|
|
||||||
- RTP packetization of H.263
|
|
||||||
- RTP packetization of AMR
|
|
||||||
- RTP depacketization of Vorbis
|
|
||||||
- CorePNG decoding support
|
|
||||||
- Cook multichannel decoding support
|
|
||||||
- introduced avlanguage helpers in libavformat
|
|
||||||
- 8088flex TMV demuxer and decoder
|
|
||||||
- per-stream language-tags extraction in asfdec
|
|
||||||
- V210 decoder and encoder
|
|
||||||
- remaining GPL parts in AC-3 decoder converted to LGPL
|
|
||||||
- QCP demuxer
|
|
||||||
- SoX native format muxer and demuxer
|
|
||||||
- AMR-NB decoding/encoding, AMR-WB decoding via OpenCORE libraries
|
|
||||||
- DPX image decoder
|
|
||||||
- Electronic Arts Madcow decoder
|
|
||||||
- DivX (XSUB) subtitle encoder
|
|
||||||
- nonfree libamr support for AMR-NB/WB decoding/encoding removed
|
|
||||||
- experimental AAC encoder
|
|
||||||
- RTP depacketization of ASF and RTSP from WMS servers
|
|
||||||
- RTMP support in libavformat
|
|
||||||
- noX handling for OPT_BOOL X options
|
|
||||||
- Wave64 demuxer
|
|
||||||
- IEC-61937 compatible Muxer
|
|
||||||
- TwinVQ decoder
|
|
||||||
- Bluray (PGS) subtitle decoder
|
|
||||||
- LPCM support in MPEG-TS (HDMV RID as found on Blu-ray disks)
|
|
||||||
- WMA Pro decoder
|
|
||||||
- Core Audio Format demuxer
|
|
||||||
- Atrac1 decoder
|
|
||||||
- MD STUDIO audio demuxer
|
|
||||||
- RF64 support in WAV demuxer
|
|
||||||
- MPEG-4 Audio Lossless Coding (ALS) decoder
|
|
||||||
- -formats option split into -formats, -codecs, -bsfs, and -protocols
|
|
||||||
- IV8 demuxer
|
|
||||||
- CDG demuxer and decoder
|
|
||||||
- R210 decoder
|
|
||||||
- Auravision Aura 1 and 2 decoders
|
|
||||||
- Deluxe Paint Animation playback system
|
|
||||||
- SIPR decoder
|
|
||||||
- Adobe Filmstrip muxer and demuxer
|
|
||||||
- RTP depacketization of H.263
|
|
||||||
- Bink demuxer and audio/video decoders
|
|
||||||
- enable symbol versioning by default for linkers that support it
|
|
||||||
- IFF PBM/ILBM bitmap decoder
|
|
||||||
- concat protocol
|
|
||||||
- Indeo 5 decoder
|
|
||||||
- RTP depacketization of AMR
|
|
||||||
- WMA Voice decoder
|
|
||||||
- ffprobe tool
|
|
||||||
- AMR-NB decoder
|
|
||||||
- RTSP muxer
|
|
||||||
- HE-AAC v1 decoder
|
|
||||||
- Kega Game Video (KGV1) decoder
|
|
||||||
- VorbisComment writing for FLAC, Ogg FLAC and Ogg Speex files
|
|
||||||
- RTP depacketization of Theora
|
|
||||||
- HTTP Digest authentication
|
|
||||||
- RTMP/RTMPT/RTMPS/RTMPE/RTMPTE protocol support via librtmp
|
|
||||||
- Psygnosis YOP demuxer and video decoder
|
|
||||||
- spectral extension support in the E-AC-3 decoder
|
|
||||||
- unsharp video filter
|
|
||||||
- RTP hinting in the mov/3gp/mp4 muxer
|
|
||||||
- Dirac in Ogg demuxing
|
|
||||||
- seek to keyframes in Ogg
|
|
||||||
- 4:2:2 and 4:4:4 Theora decoding
|
|
||||||
- 35% faster VP3/Theora decoding
|
|
||||||
- faster AAC decoding
|
|
||||||
- faster H.264 decoding
|
|
||||||
- RealAudio 1.0 (14.4K) encoder
|
|
||||||
|
|
||||||
|
|
||||||
version 0.5:
|
|
||||||
|
|
||||||
- DV50 AKA DVCPRO50 encoder, decoder, muxer and demuxer
|
|
||||||
- TechSmith Camtasia (TSCC) video decoder
|
|
||||||
- IBM Ultimotion (ULTI) video decoder
|
|
||||||
- Sierra Online audio file demuxer and decoder
|
|
||||||
- Apple QuickDraw (qdrw) video decoder
|
|
||||||
- Creative ADPCM audio decoder (16 bits as well as 8 bits schemes)
|
|
||||||
- Electronic Arts Multimedia (WVE/UV2/etc.) file demuxer
|
|
||||||
- Miro VideoXL (VIXL) video decoder
|
|
||||||
- H.261 video encoder
|
|
||||||
- QPEG video decoder
|
|
||||||
- Nullsoft Video (NSV) file demuxer
|
|
||||||
- Shorten audio decoder
|
|
||||||
- LOCO video decoder
|
|
||||||
- Apple Lossless Audio Codec (ALAC) decoder
|
|
||||||
- Winnov WNV1 video decoder
|
|
||||||
- Autodesk Animator Studio Codec (AASC) decoder
|
|
||||||
- Indeo 2 video decoder
|
|
||||||
- Fraps FPS1 video decoder
|
|
||||||
- Snow video encoder/decoder
|
|
||||||
- Sonic audio encoder/decoder
|
|
||||||
- Vorbis audio decoder
|
|
||||||
- Macromedia ADPCM decoder
|
|
||||||
- Duck TrueMotion 2 video decoder
|
|
||||||
- support for decoding FLX and DTA extensions in FLIC files
|
|
||||||
- H.264 custom quantization matrices support
|
|
||||||
- ffserver fixed, it should now be usable again
|
|
||||||
- QDM2 audio decoder
|
|
||||||
- Real Cooker audio decoder
|
|
||||||
- TrueSpeech audio decoder
|
|
||||||
- WMA2 audio decoder fixed, now all files should play correctly
|
|
||||||
- RealAudio 14.4 and 28.8 decoders fixed
|
|
||||||
- JPEG-LS decoder
|
|
||||||
- build system improvements
|
|
||||||
- tabs and trailing whitespace removed from the codebase
|
|
||||||
- CamStudio video decoder
|
|
||||||
- AIFF/AIFF-C audio format, encoding and decoding
|
|
||||||
- ADTS AAC file reading and writing
|
|
||||||
- Creative VOC file reading and writing
|
|
||||||
- American Laser Games multimedia (*.mm) playback system
|
|
||||||
- Zip Motion Blocks Video decoder
|
|
||||||
- improved Theora/VP3 decoder
|
|
||||||
- True Audio (TTA) decoder
|
|
||||||
- AVS demuxer and video decoder
|
|
||||||
- JPEG-LS encoder
|
|
||||||
- Smacker demuxer and decoder
|
|
||||||
- NuppelVideo/MythTV demuxer and RTjpeg decoder
|
|
||||||
- KMVC decoder
|
|
||||||
- MPEG-2 intra VLC support
|
|
||||||
- MPEG-2 4:2:2 encoder
|
|
||||||
- Flash Screen Video decoder
|
|
||||||
- GXF demuxer
|
|
||||||
- Chinese AVS decoder
|
|
||||||
- GXF muxer
|
|
||||||
- MXF demuxer
|
|
||||||
- VC-1/WMV3/WMV9 video decoder
|
|
||||||
- MacIntel support
|
|
||||||
- AVISynth support
|
|
||||||
- VMware video decoder
|
|
||||||
- VP5 video decoder
|
|
||||||
- VP6 video decoder
|
|
||||||
- WavPack lossless audio decoder
|
|
||||||
- Targa (.TGA) picture decoder
|
|
||||||
- Vorbis audio encoder
|
|
||||||
- Delphine Software .cin demuxer/audio and video decoder
|
|
||||||
- Tiertex .seq demuxer/video decoder
|
|
||||||
- MTV demuxer
|
|
||||||
- TIFF picture encoder and decoder
|
|
||||||
- GIF picture decoder
|
|
||||||
- Intel Music Coder decoder
|
|
||||||
- Zip Motion Blocks Video encoder
|
|
||||||
- Musepack decoder
|
|
||||||
- Flash Screen Video encoder
|
|
||||||
- Theora encoding via libtheora
|
|
||||||
- BMP encoder
|
|
||||||
- WMA encoder
|
|
||||||
- GSM-MS encoder and decoder
|
|
||||||
- DCA decoder
|
|
||||||
- DXA demuxer and decoder
|
|
||||||
- DNxHD decoder
|
|
||||||
- Gamecube movie (.THP) playback system
|
|
||||||
- Blackfin optimizations
|
|
||||||
- Interplay C93 demuxer and video decoder
|
|
||||||
- Bethsoft VID demuxer and video decoder
|
|
||||||
- CRYO APC demuxer
|
|
||||||
- Atrac3 decoder
|
|
||||||
- V.Flash PTX decoder
|
|
||||||
- RoQ muxer, RoQ audio encoder
|
|
||||||
- Renderware TXD demuxer and decoder
|
|
||||||
- extern C declarations for C++ removed from headers
|
|
||||||
- sws_flags command line option
|
|
||||||
- codebook generator
|
|
||||||
- RoQ video encoder
|
|
||||||
- QTRLE encoder
|
|
||||||
- OS/2 support removed and restored again
|
|
||||||
- AC-3 decoder
|
|
||||||
- NUT muxer
|
|
||||||
- additional SPARC (VIS) optimizations
|
|
||||||
- Matroska muxer
|
|
||||||
- slice-based parallel H.264 decoding
|
|
||||||
- Monkey's Audio demuxer and decoder
|
|
||||||
- AMV audio and video decoder
|
|
||||||
- DNxHD encoder
|
|
||||||
- H.264 PAFF decoding
|
|
||||||
- Nellymoser ASAO decoder
|
|
||||||
- Beam Software SIFF demuxer and decoder
|
|
||||||
- libvorbis Vorbis decoding removed in favor of native decoder
|
|
||||||
- IntraX8 (J-Frame) subdecoder for WMV2 and VC-1
|
|
||||||
- Ogg (Theora, Vorbis and FLAC) muxer
|
|
||||||
- The "device" muxers and demuxers are now in a new libavdevice library
|
|
||||||
- PC Paintbrush PCX decoder
|
|
||||||
- Sun Rasterfile decoder
|
|
||||||
- TechnoTrend PVA demuxer
|
|
||||||
- Linux Media Labs MPEG-4 (LMLM4) demuxer
|
|
||||||
- AVM2 (Flash 9) SWF muxer
|
|
||||||
- QT variant of IMA ADPCM encoder
|
|
||||||
- VFW grabber
|
|
||||||
- iPod/iPhone compatible mp4 muxer
|
|
||||||
- Mimic decoder
|
|
||||||
- MSN TCP Webcam stream demuxer
|
|
||||||
- RL2 demuxer / decoder
|
|
||||||
- IFF demuxer
|
|
||||||
- 8SVX audio decoder
|
|
||||||
- non-recursive Makefiles
|
|
||||||
- BFI demuxer
|
|
||||||
- MAXIS EA XA (.xa) demuxer / decoder
|
|
||||||
- BFI video decoder
|
|
||||||
- OMA demuxer
|
|
||||||
- MLP/TrueHD decoder
|
|
||||||
- Electronic Arts CMV decoder
|
|
||||||
- Motion Pixels Video decoder
|
|
||||||
- Motion Pixels MVI demuxer
|
|
||||||
- removed animated GIF decoder/demuxer
|
|
||||||
- D-Cinema audio muxer
|
|
||||||
- Electronic Arts TGV decoder
|
|
||||||
- Apple Lossless Audio Codec (ALAC) encoder
|
|
||||||
- AAC decoder
|
|
||||||
- floating point PCM encoder/decoder
|
|
||||||
- MXF muxer
|
|
||||||
- DV100 AKA DVCPRO HD decoder and demuxer
|
|
||||||
- E-AC-3 support added to AC-3 decoder
|
|
||||||
- Nellymoser ASAO encoder
|
|
||||||
- ASS and SSA demuxer and muxer
|
|
||||||
- liba52 wrapper removed
|
|
||||||
- SVQ3 watermark decoding support
|
|
||||||
- Speex decoding via libspeex
|
|
||||||
- Electronic Arts TGQ decoder
|
|
||||||
- RV40 decoder
|
|
||||||
- QCELP / PureVoice decoder
|
|
||||||
- RV30 decoder
|
|
||||||
- hybrid WavPack support
|
|
||||||
- R3D REDCODE demuxer
|
|
||||||
- ALSA support for playback and record
|
|
||||||
- Electronic Arts TQI decoder
|
|
||||||
- OpenJPEG based JPEG 2000 decoder
|
|
||||||
- NC (NC4600) camera file demuxer
|
|
||||||
- Gopher client support
|
|
||||||
- MXF D-10 muxer
|
|
||||||
- generic metadata API
|
|
||||||
- flash ScreenVideo2 encoder
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.9-pre1:
|
|
||||||
|
|
||||||
- DV encoder, DV muxer
|
|
||||||
- Microsoft RLE video decoder
|
|
||||||
- Microsoft Video-1 decoder
|
|
||||||
- Apple Animation (RLE) decoder
|
|
||||||
- Apple Graphics (SMC) decoder
|
|
||||||
- Apple Video (RPZA) decoder
|
|
||||||
- Cinepak decoder
|
|
||||||
- Sega FILM (CPK) file demuxer
|
|
||||||
- Westwood multimedia support (VQA & AUD files)
|
|
||||||
- Id Quake II CIN playback support
|
|
||||||
- 8BPS video decoder
|
|
||||||
- FLIC playback support
|
|
||||||
- RealVideo 2.0 (RV20) decoder
|
|
||||||
- Duck TrueMotion v1 (DUCK) video decoder
|
|
||||||
- Sierra VMD demuxer and video decoder
|
|
||||||
- MSZH and ZLIB decoder support
|
|
||||||
- SVQ1 video encoder
|
|
||||||
- AMR-WB support
|
|
||||||
- PPC optimizations
|
|
||||||
- rate distortion optimal cbp support
|
|
||||||
- rate distorted optimal ac prediction for MPEG-4
|
|
||||||
- rate distorted optimal lambda->qp support
|
|
||||||
- AAC encoding with libfaac
|
|
||||||
- Sunplus JPEG codec (SP5X) support
|
|
||||||
- use Lagrange multipler instead of QP for ratecontrol
|
|
||||||
- Theora/VP3 decoding support
|
|
||||||
- XA and ADX ADPCM codecs
|
|
||||||
- export MPEG-2 active display area / pan scan
|
|
||||||
- Add support for configuring with IBM XLC
|
|
||||||
- floating point AAN DCT
|
|
||||||
- initial support for zygo video (not complete)
|
|
||||||
- RGB ffv1 support
|
|
||||||
- new audio/video parser API
|
|
||||||
- av_log() system
|
|
||||||
- av_read_frame() and av_seek_frame() support
|
|
||||||
- missing last frame fixes
|
|
||||||
- seek by mouse in ffplay
|
|
||||||
- noise reduction of DCT coefficients
|
|
||||||
- H.263 OBMC & 4MV support
|
|
||||||
- H.263 alternative inter vlc support
|
|
||||||
- H.263 loop filter
|
|
||||||
- H.263 slice structured mode
|
|
||||||
- interlaced DCT support for MPEG-2 encoding
|
|
||||||
- stuffing to stay above min_bitrate
|
|
||||||
- MB type & QP visualization
|
|
||||||
- frame stepping for ffplay
|
|
||||||
- interlaced motion estimation
|
|
||||||
- alternate scantable support
|
|
||||||
- SVCD scan offset support
|
|
||||||
- closed GOP support
|
|
||||||
- SSE2 FDCT
|
|
||||||
- quantizer noise shaping
|
|
||||||
- G.726 ADPCM audio codec
|
|
||||||
- MS ADPCM encoding
|
|
||||||
- multithreaded/SMP motion estimation
|
|
||||||
- multithreaded/SMP encoding for MPEG-1/MPEG-2/MPEG-4/H.263
|
|
||||||
- multithreaded/SMP decoding for MPEG-2
|
|
||||||
- FLAC decoder
|
|
||||||
- Metrowerks CodeWarrior suppport
|
|
||||||
- H.263+ custom pcf support
|
|
||||||
- nicer output for 'ffmpeg -formats'
|
|
||||||
- Matroska demuxer
|
|
||||||
- SGI image format, encoding and decoding
|
|
||||||
- H.264 loop filter support
|
|
||||||
- H.264 CABAC support
|
|
||||||
- nicer looking arrows for the motion vector visualization
|
|
||||||
- improved VCD support
|
|
||||||
- audio timestamp drift compensation
|
|
||||||
- MPEG-2 YUV 422/444 support
|
|
||||||
- polyphase kaiser windowed sinc and blackman nuttall windowed sinc audio resample
|
|
||||||
- better image scaling
|
|
||||||
- H.261 support
|
|
||||||
- correctly interleave packets during encoding
|
|
||||||
- VIS optimized motion compensation
|
|
||||||
- intra_dc_precision>0 encoding support
|
|
||||||
- support reuse of motion vectors/MB types/field select values of the source video
|
|
||||||
- more accurate deblock filter
|
|
||||||
- padding support
|
|
||||||
- many optimizations and bugfixes
|
|
||||||
- FunCom ISS audio file demuxer and according ADPCM decoding
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.8:
|
|
||||||
|
|
||||||
- MPEG-2 video encoding (Michael)
|
|
||||||
- Id RoQ playback subsystem (Mike Melanson and Tim Ferguson)
|
|
||||||
- Wing Commander III Movie (.mve) file playback subsystem (Mike Melanson
|
|
||||||
and Mario Brito)
|
|
||||||
- Xan DPCM audio decoder (Mario Brito)
|
|
||||||
- Interplay MVE playback subsystem (Mike Melanson)
|
|
||||||
- Duck DK3 and DK4 ADPCM audio decoders (Mike Melanson)
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.7:
|
|
||||||
|
|
||||||
- RealAudio 1.0 (14_4) and 2.0 (28_8) native decoders. Author unknown, code from mplayerhq
|
|
||||||
(originally from public domain player for Amiga at http://www.honeypot.net/audio)
|
|
||||||
- current version now also compiles with older GCC (Fabrice)
|
|
||||||
- 4X multimedia playback system including 4xm file demuxer (Mike
|
|
||||||
Melanson), and 4X video and audio codecs (Michael)
|
|
||||||
- Creative YUV (CYUV) decoder (Mike Melanson)
|
|
||||||
- FFV1 codec (our very simple lossless intra only codec, compresses much better
|
|
||||||
than HuffYUV) (Michael)
|
|
||||||
- ASV1 (Asus), H.264, Intel indeo3 codecs have been added (various)
|
|
||||||
- tiny PNG encoder and decoder, tiny GIF decoder, PAM decoder (PPM with
|
|
||||||
alpha support), JPEG YUV colorspace support. (Fabrice Bellard)
|
|
||||||
- ffplay has been replaced with a newer version which uses SDL (optionally)
|
|
||||||
for multiplatform support (Fabrice)
|
|
||||||
- Sorenson Version 3 codec (SVQ3) support has been added (decoding only) - donated
|
|
||||||
by anonymous
|
|
||||||
- AMR format has been added (Johannes Carlsson)
|
|
||||||
- 3GP support has been added (Johannes Carlsson)
|
|
||||||
- VP3 codec has been added (Mike Melanson)
|
|
||||||
- more MPEG-1/2 fixes
|
|
||||||
- better multiplatform support, MS Visual Studio fixes (various)
|
|
||||||
- AltiVec optimizations (Magnus Damn and others)
|
|
||||||
- SH4 processor support has been added (BERO)
|
|
||||||
- new public interfaces (avcodec_get_pix_fmt) (Roman Shaposhnick)
|
|
||||||
- VOB streaming support (Brian Foley)
|
|
||||||
- better MP3 autodetection (Andriy Rysin)
|
|
||||||
- qpel encoding (Michael)
|
|
||||||
- 4mv+b frames encoding finally fixed (Michael)
|
|
||||||
- chroma ME (Michael)
|
|
||||||
- 5 comparison functions for ME (Michael)
|
|
||||||
- B-frame encoding speedup (Michael)
|
|
||||||
- WMV2 codec (unfinished - Michael)
|
|
||||||
- user specified diamond size for EPZS (Michael)
|
|
||||||
- Playstation STR playback subsystem, still experimental (Mike and Michael)
|
|
||||||
- ASV2 codec (Michael)
|
|
||||||
- CLJR decoder (Alex)
|
|
||||||
|
|
||||||
.. And lots more new enhancements and fixes.
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.6:
|
|
||||||
|
|
||||||
- completely new integer only MPEG audio layer 1/2/3 decoder rewritten
|
|
||||||
from scratch
|
|
||||||
- Recoded DCT and motion vector search with gcc (no longer depends on nasm)
|
|
||||||
- fix quantization bug in AC3 encoder
|
|
||||||
- added PCM codecs and format. Corrected WAV/AVI/ASF PCM issues
|
|
||||||
- added prototype ffplay program
|
|
||||||
- added GOB header parsing on H.263/H.263+ decoder (Juanjo)
|
|
||||||
- bug fix on MCBPC tables of H.263 (Juanjo)
|
|
||||||
- bug fix on DC coefficients of H.263 (Juanjo)
|
|
||||||
- added Advanced Prediction Mode on H.263/H.263+ decoder (Juanjo)
|
|
||||||
- now we can decode H.263 streams found in QuickTime files (Juanjo)
|
|
||||||
- now we can decode H.263 streams found in VIVO v1 files(Juanjo)
|
|
||||||
- preliminary RTP "friendly" mode for H.263/H.263+ coding. (Juanjo)
|
|
||||||
- added GOB header for H.263/H.263+ coding on RTP mode (Juanjo)
|
|
||||||
- now H.263 picture size is returned on the first decoded frame (Juanjo)
|
|
||||||
- added first regression tests
|
|
||||||
- added MPEG-2 TS demuxer
|
|
||||||
- new demux API for libav
|
|
||||||
- more accurate and faster IDCT (Michael)
|
|
||||||
- faster and entropy-controlled motion search (Michael)
|
|
||||||
- two pass video encoding (Michael)
|
|
||||||
- new video rate control (Michael)
|
|
||||||
- added MSMPEG4V1, MSMPEGV2 and WMV1 support (Michael)
|
|
||||||
- great performance improvement of video encoders and decoders (Michael)
|
|
||||||
- new and faster bit readers and vlc parsers (Michael)
|
|
||||||
- high quality encoding mode: tries all macroblock/VLC types (Michael)
|
|
||||||
- added DV video decoder
|
|
||||||
- preliminary RTP/RTSP support in ffserver and libavformat
|
|
||||||
- H.263+ AIC decoding/encoding support (Juanjo)
|
|
||||||
- VCD MPEG-PS mode (Juanjo)
|
|
||||||
- PSNR stuff (Juanjo)
|
|
||||||
- simple stats output (Juanjo)
|
|
||||||
- 16-bit and 15-bit RGB/BGR/GBR support (Bisqwit)
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.5:
|
|
||||||
|
|
||||||
- some header fixes (Zdenek Kabelac <kabi at informatics.muni.cz>)
|
|
||||||
- many MMX optimizations (Nick Kurshev <nickols_k at mail.ru>)
|
|
||||||
- added configure system (actually a small shell script)
|
|
||||||
- added MPEG audio layer 1/2/3 decoding using LGPL'ed mpglib by
|
|
||||||
Michael Hipp (temporary solution - waiting for integer only
|
|
||||||
decoder)
|
|
||||||
- fixed VIDIOCSYNC interrupt
|
|
||||||
- added Intel H.263 decoding support ('I263' AVI fourCC)
|
|
||||||
- added Real Video 1.0 decoding (needs further testing)
|
|
||||||
- simplified image formats again. Added PGM format (=grey
|
|
||||||
pgm). Renamed old PGM to PGMYUV.
|
|
||||||
- fixed msmpeg4 slice issues (tell me if you still find problems)
|
|
||||||
- fixed OpenDivX bugs with newer versions (added VOL header decoding)
|
|
||||||
- added support for MPlayer interface
|
|
||||||
- added macroblock skip optimization
|
|
||||||
- added MJPEG decoder
|
|
||||||
- added mmx/mmxext IDCT from libmpeg2
|
|
||||||
- added pgmyuvpipe, ppm, and ppm_pipe formats (original patch by Celer
|
|
||||||
<celer at shell.scrypt.net>)
|
|
||||||
- added pixel format conversion layer (e.g. for MJPEG or PPM)
|
|
||||||
- added deinterlacing option
|
|
||||||
- MPEG-1/2 fixes
|
|
||||||
- MPEG-4 vol header fixes (Jonathan Marsden <snmjbm at pacbell.net>)
|
|
||||||
- ARM optimizations (Lionel Ulmer <lionel.ulmer at free.fr>).
|
|
||||||
- Windows porting of file converter
|
|
||||||
- added MJPEG raw format (input/ouput)
|
|
||||||
- added JPEG image format support (input/output)
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.4:
|
|
||||||
|
|
||||||
- fixed some std header definitions (Bjorn Lindgren
|
|
||||||
<bjorn.e.lindgren at telia.com>).
|
|
||||||
- added MPEG demuxer (MPEG-1 and 2 compatible).
|
|
||||||
- added ASF demuxer
|
|
||||||
- added prototype RM demuxer
|
|
||||||
- added AC3 decoding (done with libac3 by Aaron Holtzman)
|
|
||||||
- added decoding codec parameter guessing (.e.g. for MPEG, because the
|
|
||||||
header does not include them)
|
|
||||||
- fixed header generation in MPEG-1, AVI and ASF muxer: wmplayer can now
|
|
||||||
play them (only tested video)
|
|
||||||
- fixed H.263 white bug
|
|
||||||
- fixed phase rounding in img resample filter
|
|
||||||
- add MMX code for polyphase img resample filter
|
|
||||||
- added CPU autodetection
|
|
||||||
- added generic title/author/copyright/comment string handling (ASF and RM
|
|
||||||
use them)
|
|
||||||
- added SWF demux to extract MP3 track (not usable yet because no MP3
|
|
||||||
decoder)
|
|
||||||
- added fractional frame rate support
|
|
||||||
- codecs are no longer searched by read_header() (should fix ffserver
|
|
||||||
segfault)
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.3:
|
|
||||||
|
|
||||||
- BGR24 patch (initial patch by Jeroen Vreeken <pe1rxq at amsat.org>)
|
|
||||||
- fixed raw yuv output
|
|
||||||
- added motion rounding support in MPEG-4
|
|
||||||
- fixed motion bug rounding in MSMPEG4
|
|
||||||
- added B-frame handling in video core
|
|
||||||
- added full MPEG-1 decoding support
|
|
||||||
- added partial (frame only) MPEG-2 support
|
|
||||||
- changed the FOURCC code for H.263 to "U263" to be able to see the
|
|
||||||
+AVI/H.263 file with the UB Video H.263+ decoder. MPlayer works with
|
|
||||||
this +codec ;) (JuanJo).
|
|
||||||
- Halfpel motion estimation after MB type selection (JuanJo)
|
|
||||||
- added pgm and .Y.U.V output format
|
|
||||||
- suppressed 'img:' protocol. Simply use: /tmp/test%d.[pgm|Y] as input or
|
|
||||||
output.
|
|
||||||
- added pgmpipe I/O format (original patch from Martin Aumueller
|
|
||||||
<lists at reserv.at>, but changed completely since we use a format
|
|
||||||
instead of a protocol)
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.2:
|
|
||||||
|
|
||||||
- added H.263/MPEG-4/MSMPEG4 decoding support. MPEG-4 decoding support
|
|
||||||
(for OpenDivX) is almost complete: 8x8 MVs and rounding are
|
|
||||||
missing. MSMPEG4 support is complete.
|
|
||||||
- added prototype MPEG-1 decoder. Only I- and P-frames handled yet (it
|
|
||||||
can decode ffmpeg MPEGs :-)).
|
|
||||||
- added libavcodec API documentation (see apiexample.c).
|
|
||||||
- fixed image polyphase bug (the bottom of some images could be
|
|
||||||
greenish)
|
|
||||||
- added support for non clipped motion vectors (decoding only)
|
|
||||||
and image sizes non-multiple of 16
|
|
||||||
- added support for AC prediction (decoding only)
|
|
||||||
- added file overwrite confirmation (can be disabled with -y)
|
|
||||||
- added custom size picture to H.263 using H.263+ (Juanjo)
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.1:
|
|
||||||
|
|
||||||
- added MSMPEG4 (aka DivX) compatible encoder. Changed default codec
|
|
||||||
of AVI and ASF to DIV3.
|
|
||||||
- added -me option to set motion estimation method
|
|
||||||
(default=log). suppressed redundant -hq option.
|
|
||||||
- added options -acodec and -vcodec to force a given codec (useful for
|
|
||||||
AVI for example)
|
|
||||||
- fixed -an option
|
|
||||||
- improved dct_quantize speed
|
|
||||||
- factorized some motion estimation code
|
|
||||||
|
|
||||||
|
|
||||||
version 0.4.0:
|
|
||||||
|
|
||||||
- removing grab code from ffserver and moved it to ffmpeg. Added
|
|
||||||
multistream support to ffmpeg.
|
|
||||||
- added timeshifting support for live feeds (option ?date=xxx in the
|
|
||||||
URL)
|
|
||||||
- added high quality image resize code with polyphase filter (need
|
|
||||||
mmx/see optimization). Enable multiple image size support in ffserver.
|
|
||||||
- added multi live feed support in ffserver
|
|
||||||
- suppressed master feature from ffserver (it should be done with an
|
|
||||||
external program which opens the .ffm url and writes it to another
|
|
||||||
ffserver)
|
|
||||||
- added preliminary support for video stream parsing (WAV and AVI half
|
|
||||||
done). Added proper support for audio/video file conversion in
|
|
||||||
ffmpeg.
|
|
||||||
- added preliminary support for video file sending from ffserver
|
|
||||||
- redesigning I/O subsystem: now using URL based input and output
|
|
||||||
(see avio.h)
|
|
||||||
- added WAV format support
|
|
||||||
- added "tty user interface" to ffmpeg to stop grabbing gracefully
|
|
||||||
- added MMX/SSE optimizations to SAD (Sums of Absolutes Differences)
|
|
||||||
(Juan J. Sierralta P. a.k.a. "Juanjo" <juanjo at atmlab.utfsm.cl>)
|
|
||||||
- added MMX DCT from mpeg2_movie 1.5 (Juanjo)
|
|
||||||
- added new motion estimation algorithms, log and phods (Juanjo)
|
|
||||||
- changed directories: libav for format handling, libavcodec for
|
|
||||||
codecs
|
|
||||||
|
|
||||||
|
|
||||||
version 0.3.4:
|
|
||||||
|
|
||||||
- added stereo in MPEG audio encoder
|
|
||||||
|
|
||||||
|
|
||||||
version 0.3.3:
|
|
||||||
|
|
||||||
- added 'high quality' mode which use motion vectors. It can be used in
|
|
||||||
real time at low resolution.
|
|
||||||
- fixed rounding problems which caused quality problems at high
|
|
||||||
bitrates and large GOP size
|
|
||||||
|
|
||||||
|
|
||||||
version 0.3.2: small fixes
|
|
||||||
|
|
||||||
- ASF fixes
|
|
||||||
- put_seek bug fix
|
|
||||||
|
|
||||||
|
|
||||||
version 0.3.1: added avi/divx support
|
|
||||||
|
|
||||||
- added AVI support
|
|
||||||
- added MPEG-4 codec compatible with OpenDivX. It is based on the H.263 codec
|
|
||||||
- added sound for flash format (not tested)
|
|
||||||
|
|
||||||
|
|
||||||
version 0.3: initial public release
|
|
Doxyfile (2 lines changed)
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER = 0.7.5
+PROJECT_NUMBER = 0.7.7
 
 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
 # base path where the generated documentation will be put.
LICENSE (6 lines changed)
@@ -41,6 +41,6 @@ is incompatible with the LGPL v2.1 and the GPL v2, but not with version 3 of
 those licenses. So to combine the OpenCORE libraries with FFmpeg, the license
 version needs to be upgraded by passing --enable-version3 to configure.
 
-The nonfree external library libfaac can be hooked up in FFmpeg. You need to
-pass --enable-nonfree to configure to enable it. Employ this option with care
-as FFmpeg then becomes nonfree and unredistributable.
+The nonfree external libraries libfaac and libaacplus can be hooked up in FFmpeg.
+You need to pass --enable-nonfree to configure to enable it. Employ this option
+with care as FFmpeg then becomes nonfree and unredistributable.
@@ -19,7 +19,7 @@ ffmpeg:
   ffmpeg.c                              Michael Niedermayer
 
 ffplay:
-  ffplay.c                              Michael Niedermayer
+  ffplay.c                              Marton Balint
 
 ffprobe:
   ffprobe.c                             Stefano Sabatini
configure (21 lines changed)
@@ -162,6 +162,7 @@ External library support:
   --enable-bzlib           enable bzlib [autodetect]
   --enable-libcelt         enable CELT/Opus decoding via libcelt [no]
   --enable-frei0r          enable frei0r video filtering
+  --enable-libaacplus      enable AAC+ encoding via libaacplus [no]
   --enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
   --enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
   --enable-libopencv       enable video filtering via libopencv [no]
@@ -929,6 +930,8 @@ CONFIG_LIST="
    h264pred
    hardcoded_tables
    huffman
+   libaacplus
+   libcdio
    libcelt
    libdc1394
    libdirac
@@ -1406,6 +1409,7 @@ vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
 h264_parser_select="golomb h264dsp h264pred"
 
 # external libraries
+libaacplus_encoder_deps="libaacplus"
 libcelt_decoder_deps="libcelt"
 libdirac_decoder_deps="libdirac !libschroedinger"
 libdirac_encoder_deps="libdirac"
@@ -1539,11 +1543,14 @@ test_deps(){
         dep=${v%=*}
         tests=${v#*=}
         for name in ${tests}; do
-            eval ${name}_test_deps="'${dep}$suf1 ${dep}$suf2'"
+            append ${name}_test_deps ${dep}$suf1 ${dep}$suf2
         done
     done
 }
 
+mxf_d10_test_deps="avfilter"
+seek_lavf_mxf_d10_test_deps="mxf_d10_test"
+
 test_deps _encoder _decoder \
     adpcm_g726=g726 \
     adpcm_ima_qt \
@@ -2191,7 +2198,7 @@ case "$arch" in
         arch="sparc"
         subarch="sparc64"
     ;;
-    i[3-6]86|i86pc|BePC|x86pc|x86_64|amd64)
+    i[3-6]86|i86pc|BePC|x86pc|x86_64|x86_32|amd64)
         arch="x86"
     ;;
 esac
@@ -2582,6 +2589,7 @@ die_license_disabled gpl libxavs
 die_license_disabled gpl libxvid
 die_license_disabled gpl x11grab
 
+die_license_disabled nonfree libaacplus
 die_license_disabled nonfree libfaac
 
 die_license_disabled version3 libopencore_amrnb
@@ -2900,6 +2908,7 @@ check_mathfunc truncf
 enabled avisynth   && require2 vfw32 "windows.h vfw.h" AVIFileInit -lavifil32
 enabled libcelt    && require libcelt celt/celt.h celt_decode -lcelt0
 enabled frei0r     && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; }
+enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
 enabled libdc1394  && require_pkg_config libdc1394-2 dc1394/dc1394.h dc1394_new
 enabled libdirac   && require_pkg_config dirac \
     "libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
@@ -3065,6 +3074,10 @@ else
 fi
 check_cflags -fno-math-errno
 check_cflags -fno-signed-zeros
+check_cc -mno-red-zone <<EOF && noredzone_flags="-mno-red-zone"
+int x;
+EOF
+
 
 if enabled icc; then
     # Just warnings, no remarks
@@ -3143,7 +3156,7 @@ check_deps $CONFIG_LIST \
 
 enabled asm || { arch=c; disable $ARCH_LIST $ARCH_EXT_LIST; }
 
-if test $target_os == "haiku"; then
+if test $target_os = "haiku"; then
     disable memalign
     disable posix_memalign
 fi
@@ -3217,6 +3230,7 @@ echo "frei0r enabled ${frei0r-no}"
 echo "libdc1394 support         ${libdc1394-no}"
 echo "libdirac enabled          ${libdirac-no}"
 echo "libfaac enabled           ${libfaac-no}"
+echo "libaacplus enabled        ${libaacplus-no}"
 echo "libgsm enabled            ${libgsm-no}"
 echo "libmp3lame enabled        ${libmp3lame-no}"
 echo "libnut enabled            ${libnut-no}"
@@ -3343,6 +3357,7 @@ SLIB_INSTALL_LINKS=${SLIB_INSTALL_LINKS}
 SLIB_INSTALL_EXTRA_LIB=${SLIB_INSTALL_EXTRA_LIB}
 SLIB_INSTALL_EXTRA_SHLIB=${SLIB_INSTALL_EXTRA_SHLIB}
 SAMPLES:=${samples:-\$(FATE_SAMPLES)}
+NOREDZONE_FLAGS=$noredzone_flags
 EOF
 
 get_version(){
@@ -1769,7 +1769,7 @@ It accepts the following parameters:
 
 Negative values for the amount will blur the input video, while positive
 values will sharpen. All parameters are optional and default to the
-equivalent of the string '5:5:1.0:0:0:0.0'.
+equivalent of the string '5:5:1.0:5:5:0.0'.
 
 @table @option
 
@@ -1787,11 +1787,11 @@ and 5.0, default value is 1.0.
 
 @item chroma_msize_x
 Set the chroma matrix horizontal size. It can be an integer between 3
-and 13, default value is 0.
+and 13, default value is 5.
 
 @item chroma_msize_y
 Set the chroma matrix vertical size. It can be an integer between 3
-and 13, default value is 0.
+and 13, default value is 5.
 
 @item luma_amount
 Set the chroma effect strength. It can be a float number between -2.0
@@ -543,6 +543,8 @@ following image formats are supported:
 @multitable @columnfractions .4 .1 .1 .4
 @item Name @tab Encoding @tab Decoding @tab Comments
 @item 8SVX audio @tab @tab X
+@item AAC+ @tab E @tab X
+@tab encoding supported through external library libaacplus
 @item AAC @tab E @tab X
 @tab encoding supported through external library libfaac and libvo-aacenc
 @item AC-3 @tab IX @tab X
@@ -1088,7 +1090,7 @@ These library packages are only available from
 @uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
 
 @example
-yasm, libSDL-devel, libdirac-devel, libfaac-devel, libgsm-devel,
+yasm, libSDL-devel, libdirac-devel, libfaac-devel, libaacplus-devel, libgsm-devel,
 libmp3lame-devel, libschroedinger1.0-devel, speex-devel, libtheora-devel,
 libxvidcore-devel
 @end example
ffmpeg.c (4 lines changed)
@@ -2411,9 +2411,9 @@ static int transcode(AVFormatContext **output_files,
             }
         }
         if(codec->codec_type == AVMEDIA_TYPE_VIDEO){
-            /* maximum video buffer size is 6-bytes per pixel, plus DPX header size */
+            /* maximum video buffer size is 6-bytes per pixel, plus DPX header size (1664)*/
             int size= codec->width * codec->height;
-            bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 1664);
+            bit_buffer_size= FFMAX(bit_buffer_size, 7*size + 10000);
         }
     }
 
@@ -133,7 +133,9 @@ typedef struct FourXContext{
     GetBitContext pre_gb;          ///< ac/dc prefix
     GetBitContext gb;
     const uint8_t *bytestream;
+    const uint8_t *bytestream_end;
     const uint16_t *wordstream;
+    const uint16_t *wordstream_end;
     int mv[256];
     VLC pre_vlc;
     int last_dc;
@@ -277,7 +279,7 @@ static void init_mv(FourXContext *f){
 }
 #endif
 
-static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stride, int scale, int      dc){
+static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stride, int scale, unsigned dc){
     int i;
     dc*= 0x10001;
 
@@ -328,6 +330,10 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
     assert(code>=0 && code<=6);
 
     if(code == 0){
+        if (f->bytestream_end - f->bytestream < 1){
+            av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
+            return;
+        }
         src += f->mv[ *f->bytestream++ ];
         if(start > src || src > end){
             av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
@@ -345,15 +351,31 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
     }else if(code == 3 && f->version<2){
         mcdc(dst, src, log2w, h, stride, 1, 0);
     }else if(code == 4){
+        if (f->bytestream_end - f->bytestream < 1){
+            av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
+            return;
+        }
         src += f->mv[ *f->bytestream++ ];
         if(start > src || src > end){
             av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
             return;
         }
+        if (f->wordstream_end - f->wordstream < 1){
+            av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
+            return;
+        }
         mcdc(dst, src, log2w, h, stride, 1, av_le2ne16(*f->wordstream++));
     }else if(code == 5){
+        if (f->wordstream_end - f->wordstream < 1){
+            av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
+            return;
+        }
         mcdc(dst, src, log2w, h, stride, 0, av_le2ne16(*f->wordstream++));
     }else if(code == 6){
+        if (f->wordstream_end - f->wordstream < 2){
+            av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
+            return;
+        }
         if(log2w){
             dst[0] = av_le2ne16(*f->wordstream++);
             dst[1] = av_le2ne16(*f->wordstream++);
|
|||||||
|
|
||||||
if(f->version>1){
|
if(f->version>1){
|
||||||
extra=20;
|
extra=20;
|
||||||
|
if (length < extra)
|
||||||
|
return -1;
|
||||||
bitstream_size= AV_RL32(buf+8);
|
bitstream_size= AV_RL32(buf+8);
|
||||||
wordstream_size= AV_RL32(buf+12);
|
wordstream_size= AV_RL32(buf+12);
|
||||||
bytestream_size= AV_RL32(buf+16);
|
bytestream_size= AV_RL32(buf+16);
|
||||||
@@ -385,11 +409,10 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
         bytestream_size= FFMAX(length - bitstream_size - wordstream_size, 0);
     }
 
-    if(bitstream_size+ bytestream_size+ wordstream_size + extra != length
-       || bitstream_size > (1<<26)
-       || bytestream_size > (1<<26)
-       || wordstream_size > (1<<26)
-       ){
+    if (bitstream_size > length ||
+        bytestream_size > length - bitstream_size ||
+        wordstream_size > length - bytestream_size - bitstream_size ||
+        extra > length - bytestream_size - bitstream_size - wordstream_size){
         av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size,
                bitstream_size+ bytestream_size+ wordstream_size - length);
         return -1;
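The rewritten check above replaces a summed-size equality test with step-wise comparisons against the space still left in the packet. The motivation is unsigned wrap-around: adding several attacker-controlled sizes can overflow and still compare equal to length. A minimal stand-alone C sketch of the idea, with a hypothetical helper name (sizes_fit) rather than the decoder's actual code:

#include <stdio.h>

/* Return 0 if three untrusted section sizes fit inside 'length' bytes,
 * -1 otherwise.  Comparing each size against the remaining space cannot
 * wrap, whereas summing the sizes first could overflow and slip past a
 * "sum != length" test.  Assumes the usual 32-bit unsigned int. */
static int sizes_fit(unsigned length, unsigned a, unsigned b, unsigned c)
{
    if (a > length)             return -1;
    if (b > length - a)         return -1;
    if (c > length - a - b)     return -1;
    return 0;
}

int main(void)
{
    /* Two huge sizes whose 32-bit sum wraps to a small value. */
    unsigned a = 0xFFFFFFF0u, b = 0x20u, c = 0;
    printf("naive sum wraps to %u\n", a + b + c);           /* prints 16 */
    printf("sizes_fit returns  %d\n", sizes_fit(16, a, b, c)); /* -1: rejected */
    return 0;
}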
@@ -399,10 +422,13 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
     if (!f->bitstream_buffer)
         return AVERROR(ENOMEM);
     f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra), bitstream_size/4);
+    memset((uint8_t*)f->bitstream_buffer + bitstream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
     init_get_bits(&f->gb, f->bitstream_buffer, 8*bitstream_size);
 
     f->wordstream= (const uint16_t*)(buf + extra + bitstream_size);
+    f->wordstream_end= f->wordstream + wordstream_size/2;
     f->bytestream= buf + extra + bitstream_size + wordstream_size;
+    f->bytestream_end = f->bytestream + bytestream_size;
 
     init_mv(f);
 
@@ -531,7 +557,7 @@ static int decode_i_mb(FourXContext *f){
     return 0;
 }
 
-static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const buf){
+static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const buf, int buf_size){
     int frequency[512];
     uint8_t flag[512];
     int up[512];
@@ -539,6 +565,7 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
     int bits_tab[257];
     int start, end;
     const uint8_t *ptr= buf;
+    const uint8_t *ptr_end = buf + buf_size;
     int j;
 
     memset(frequency, 0, sizeof(frequency));
@@ -549,6 +576,8 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
     for(;;){
         int i;
 
+        if (start <= end && ptr_end - ptr < end - start + 1 + 1)
+            return NULL;
         for(i=start; i<=end; i++){
             frequency[i]= *ptr++;
         }
@@ -601,9 +630,10 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
         len_tab[j]= len;
     }
 
-    init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257,
+    if (init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257,
              len_tab , 1, 1,
-             bits_tab, 4, 4, 0);
+             bits_tab, 4, 4, 0))
+        return NULL;
 
     return ptr;
 }
@@ -621,10 +651,13 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
     const int height= f->avctx->height;
     uint16_t *dst= (uint16_t*)f->current_picture.data[0];
     const int stride= f->current_picture.linesize[0]>>1;
+    const uint8_t *buf_end = buf + length;
 
     for(y=0; y<height; y+=16){
         for(x=0; x<width; x+=16){
             unsigned int color[4], bits;
+            if (buf_end - buf < 8)
+                return -1;
             memset(color, 0, sizeof(color));
             //warning following is purely guessed ...
             color[0]= bytestream_get_le16(&buf);
@@ -658,18 +691,23 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
|
|||||||
uint16_t *dst= (uint16_t*)f->current_picture.data[0];
|
uint16_t *dst= (uint16_t*)f->current_picture.data[0];
|
||||||
const int stride= f->current_picture.linesize[0]>>1;
|
const int stride= f->current_picture.linesize[0]>>1;
|
||||||
const unsigned int bitstream_size= AV_RL32(buf);
|
const unsigned int bitstream_size= AV_RL32(buf);
|
||||||
const int token_count av_unused = AV_RL32(buf + bitstream_size + 8);
|
unsigned int prestream_size;
|
||||||
unsigned int prestream_size= 4*AV_RL32(buf + bitstream_size + 4);
|
const uint8_t *prestream;
|
||||||
const uint8_t *prestream= buf + bitstream_size + 12;
|
|
||||||
|
|
||||||
if(prestream_size + bitstream_size + 12 != length
|
if (bitstream_size > (1<<26) || length < bitstream_size + 12)
|
||||||
|| bitstream_size > (1<<26)
|
return -1;
|
||||||
|| prestream_size > (1<<26)){
|
prestream_size = 4*AV_RL32(buf + bitstream_size + 4);
|
||||||
|
prestream = buf + bitstream_size + 12;
|
||||||
|
|
||||||
|
if (prestream_size > (1<<26) ||
|
||||||
|
prestream_size != length - (bitstream_size + 12)){
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length);
|
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
prestream= read_huffman_tables(f, prestream);
|
prestream= read_huffman_tables(f, prestream, buf + length - prestream);
|
||||||
|
if (!prestream)
|
||||||
|
return -1;
|
||||||
|
|
||||||
init_get_bits(&f->gb, buf + 4, 8*bitstream_size);
|
init_get_bits(&f->gb, buf + 4, 8*bitstream_size);
|
||||||
|
|
||||||
@@ -679,6 +717,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
|
|||||||
if (!f->bitstream_buffer)
|
if (!f->bitstream_buffer)
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream, prestream_size/4);
|
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream, prestream_size/4);
|
||||||
|
memset((uint8_t*)f->bitstream_buffer + prestream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||||
init_get_bits(&f->pre_gb, f->bitstream_buffer, 8*prestream_size);
|
init_get_bits(&f->pre_gb, f->bitstream_buffer, 8*prestream_size);
|
||||||
|
|
||||||
f->last_dc= 0*128*8*8;
|
f->last_dc= 0*128*8*8;
|
||||||
@@ -710,6 +749,8 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
AVFrame *p, temp;
|
AVFrame *p, temp;
|
||||||
int i, frame_4cc, frame_size;
|
int i, frame_4cc, frame_size;
|
||||||
|
|
||||||
|
if (buf_size < 12)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
frame_4cc= AV_RL32(buf);
|
frame_4cc= AV_RL32(buf);
|
||||||
if(buf_size != AV_RL32(buf+4)+8 || buf_size < 20){
|
if(buf_size != AV_RL32(buf+4)+8 || buf_size < 20){
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n", buf_size, AV_RL32(buf+4));
|
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n", buf_size, AV_RL32(buf+4));
|
||||||
@@ -722,6 +763,11 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
const int whole_size= AV_RL32(buf+16);
|
const int whole_size= AV_RL32(buf+16);
|
||||||
CFrameBuffer *cfrm;
|
CFrameBuffer *cfrm;
|
||||||
|
|
||||||
|
if (data_size < 0 || whole_size < 0){
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "sizes invalid\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
for(i=0; i<CFRAME_BUFFER_COUNT; i++){
|
for(i=0; i<CFRAME_BUFFER_COUNT; i++){
|
||||||
if(f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number)
|
if(f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number)
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n", f->cfrm[i].id);
|
av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n", f->cfrm[i].id);
|
||||||
@@ -738,6 +784,8 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
}
|
}
|
||||||
cfrm= &f->cfrm[i];
|
cfrm= &f->cfrm[i];
|
||||||
|
|
||||||
|
if (data_size > UINT_MAX - cfrm->size - FF_INPUT_BUFFER_PADDING_SIZE)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
cfrm->data= av_fast_realloc(cfrm->data, &cfrm->allocated_size, cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
|
cfrm->data= av_fast_realloc(cfrm->data, &cfrm->allocated_size, cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||||
if(!cfrm->data){ //explicit check needed as memcpy below might not catch a NULL
|
if(!cfrm->data){ //explicit check needed as memcpy below might not catch a NULL
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
|
av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
|
||||||
@@ -781,12 +829,16 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
|
|
||||||
if(frame_4cc == AV_RL32("ifr2")){
|
if(frame_4cc == AV_RL32("ifr2")){
|
||||||
p->pict_type= AV_PICTURE_TYPE_I;
|
p->pict_type= AV_PICTURE_TYPE_I;
|
||||||
if(decode_i2_frame(f, buf-4, frame_size) < 0)
|
if(decode_i2_frame(f, buf-4, frame_size+4) < 0){
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
|
||||||
return -1;
|
return -1;
|
||||||
|
}
|
||||||
}else if(frame_4cc == AV_RL32("ifrm")){
|
}else if(frame_4cc == AV_RL32("ifrm")){
|
||||||
p->pict_type= AV_PICTURE_TYPE_I;
|
p->pict_type= AV_PICTURE_TYPE_I;
|
||||||
if(decode_i_frame(f, buf, frame_size) < 0)
|
if(decode_i_frame(f, buf, frame_size) < 0){
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n");
|
||||||
return -1;
|
return -1;
|
||||||
|
}
|
||||||
}else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){
|
}else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){
|
||||||
if(!f->last_picture.data[0]){
|
if(!f->last_picture.data[0]){
|
||||||
f->last_picture.reference= 1;
|
f->last_picture.reference= 1;
|
||||||
@@ -797,8 +849,10 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
}
|
}
|
||||||
|
|
||||||
p->pict_type= AV_PICTURE_TYPE_P;
|
p->pict_type= AV_PICTURE_TYPE_P;
|
||||||
if(decode_p_frame(f, buf, frame_size) < 0)
|
if(decode_p_frame(f, buf, frame_size) < 0){
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
|
||||||
return -1;
|
return -1;
|
||||||
|
}
|
||||||
}else if(frame_4cc == AV_RL32("snd_")){
|
}else if(frame_4cc == AV_RL32("snd_")){
|
||||||
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", buf_size);
|
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", buf_size);
|
||||||
}else{
|
}else{
|
||||||
@@ -831,6 +885,10 @@ static av_cold int decode_init(AVCodecContext *avctx){
|
|||||||
av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
|
av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
if((avctx->width % 16) || (avctx->height % 16)) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "unsupported width/height\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
avcodec_get_frame_defaults(&f->current_picture);
|
avcodec_get_frame_defaults(&f->current_picture);
|
||||||
avcodec_get_frame_defaults(&f->last_picture);
|
avcodec_get_frame_defaults(&f->last_picture);
|
||||||
|
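The length check rewritten in the first hunk above avoids integer overflow: summing several untrusted sizes before comparing against length can wrap around, while comparing each size against the remaining length cannot. A minimal sketch of that idiom, with illustrative names rather than code from this commit:

    #include <stdint.h>

    /* Returns 1 if three untrusted chunk sizes fit inside len, 0 otherwise. */
    static int sizes_fit(uint32_t a, uint32_t b, uint32_t c, uint32_t len)
    {
        /* a + b + c > len may wrap; peel the remaining length off step by step instead. */
        if (a > len)         return 0;
        if (b > len - a)     return 0;
        if (c > len - a - b) return 0;
        return 1;
    }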
@@ -570,6 +570,7 @@ OBJS-$(CONFIG_WEBM_MUXER) += xiph.o mpeg4audio.o \
 OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o

 # external codec libraries
+OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
 OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
 OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
 OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
@@ -1126,7 +1126,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
                     GET_VLC(code, re, gb, vlc_tab, 8, 2);
                     cb_idx = cb_vector_idx[code];
                     nnz = cb_idx >> 8 & 15;
-                    bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
+                    bits = nnz ? GET_CACHE(re, gb) : 0;
                     LAST_SKIP_BITS(re, gb, nnz);
                     cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
                 } while (len -= 4);
@@ -1166,7 +1166,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
                     GET_VLC(code, re, gb, vlc_tab, 8, 2);
                     cb_idx = cb_vector_idx[code];
                     nnz = cb_idx >> 8 & 15;
-                    sign = SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12);
+                    sign = nnz ? SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12) : 0;
                     LAST_SKIP_BITS(re, gb, nnz);
                     cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
                 } while (len -= 2);
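The two decode_spectrum_and_dequant() hunks above guard against nnz being zero because a shift such as x << (32-nnz) would then shift a 32-bit value by its full width, which is undefined behaviour in C. A small stand-alone illustration of the same hazard, using an assumed helper name rather than the decoder's bitstream macros:

    #include <stdint.h>

    /* Return the top n bits of a 32-bit cache; n == 0 must not reach the shift. */
    static uint32_t top_bits(uint32_t cache, unsigned n)
    {
        /* cache >> (32 - n) is undefined for n == 0; guard it explicitly. */
        return n ? cache >> (32 - n) : 0;
    }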
@@ -33,6 +33,7 @@
 #include "fft.h"
 #include "aacps.h"
 #include "libavutil/libm.h"
+#include "libavutil/avassert.h"

 #include <stdint.h>
 #include <float.h>
@@ -1457,6 +1458,7 @@ static void sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
             uint16_t *table = ch_data->bs_freq_res[e + 1] ? sbr->f_tablehigh : sbr->f_tablelow;
             int k;

+            av_assert0(sbr->kx[1] <= table[0]);
             for (i = 0; i < ilim; i++)
                 for (m = table[i]; m < table[i + 1]; m++)
                     sbr->e_origmapped[e][m - sbr->kx[1]] = ch_data->env_facs[e+1][i];
@@ -1335,10 +1335,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
             buf_size -= 128;
         }
         break;
-    case CODEC_ID_ADPCM_IMA_EA_EACS:
+    case CODEC_ID_ADPCM_IMA_EA_EACS: {
+        unsigned header_size = 4 + (8<<st);
         samples_in_chunk = bytestream_get_le32(&src) >> (1-st);

-        if (samples_in_chunk > buf_size-4-(8<<st)) {
+        if (buf_size < header_size || samples_in_chunk > buf_size - header_size) {
             src += buf_size - 4;
             break;
         }
@@ -1353,6 +1354,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
                 *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
         }
         break;
+    }
     case CODEC_ID_ADPCM_IMA_EA_SEAD:
         for (; src < buf+buf_size; src++) {
             *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
@@ -370,6 +370,7 @@ void avcodec_register_all(void)
     REGISTER_ENCDEC (XSUB, xsub);

     /* external libraries */
+    REGISTER_ENCODER (LIBAACPLUS, libaacplus);
     REGISTER_DECODER (LIBCELT, libcelt);
     REGISTER_ENCDEC (LIBDIRAC, libdirac);
     REGISTER_ENCODER (LIBFAAC, libfaac);
@@ -79,6 +79,8 @@ static inline int op(uint8_t **dst, const uint8_t *dst_end,
         int striplen = FFMIN(count, remaining);
         if (buf) {
             striplen = FFMIN(striplen, buf_end - *buf);
+            if (*buf >= buf_end)
+                goto exhausted;
             memcpy(*dst, *buf, striplen);
             *buf += striplen;
         } else if (pixel >= 0)
@@ -163,6 +163,18 @@ typedef struct APEContext {

 // TODO: dsputilize

+static av_cold int ape_decode_close(AVCodecContext * avctx)
+{
+    APEContext *s = avctx->priv_data;
+    int i;
+
+    for (i = 0; i < APE_FILTER_LEVELS; i++)
+        av_freep(&s->filterbuf[i]);
+
+    av_freep(&s->data);
+    return 0;
+}
+
 static av_cold int ape_decode_init(AVCodecContext * avctx)
 {
     APEContext *s = avctx->priv_data;
@@ -195,25 +207,18 @@ static av_cold int ape_decode_init(AVCodecContext * avctx)
     for (i = 0; i < APE_FILTER_LEVELS; i++) {
         if (!ape_filter_orders[s->fset][i])
             break;
-        s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4);
+        FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
+                         (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
+                         filter_alloc_fail);
     }

     dsputil_init(&s->dsp, avctx);
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
     return 0;
-}
-
-static av_cold int ape_decode_close(AVCodecContext * avctx)
-{
-    APEContext *s = avctx->priv_data;
-    int i;
-
-    for (i = 0; i < APE_FILTER_LEVELS; i++)
-        av_freep(&s->filterbuf[i]);
-
-    av_freep(&s->data);
-    return 0;
+filter_alloc_fail:
+    ape_decode_close(avctx);
+    return AVERROR(ENOMEM);
 }

 /**
@@ -797,7 +802,7 @@ static int ape_decode_frame(AVCodecContext * avctx,
     int buf_size = avpkt->size;
     APEContext *s = avctx->priv_data;
     int16_t *samples = data;
-    int nblocks;
+    uint32_t nblocks;
     int i, n;
     int blockstodecode;
     int bytes_used;
@@ -814,12 +819,15 @@ static int ape_decode_frame(AVCodecContext * avctx,
     }

     if(!s->samples){
-        s->data = av_realloc(s->data, (buf_size + 3) & ~3);
+        void *tmp_data = av_realloc(s->data, (buf_size + 3) & ~3);
+        if (!tmp_data)
+            return AVERROR(ENOMEM);
+        s->data = tmp_data;
         s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2);
         s->ptr = s->last_ptr = s->data;
         s->data_end = s->data + buf_size;

-        nblocks = s->samples = bytestream_get_be32(&s->ptr);
+        nblocks = bytestream_get_be32(&s->ptr);
         n = bytestream_get_be32(&s->ptr);
         if(n < 0 || n > 3){
             av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
@@ -828,12 +836,13 @@ static int ape_decode_frame(AVCodecContext * avctx,
         }
         s->ptr += n;

-        s->currentframeblocks = nblocks;
         buf += 4;
-        if (s->samples <= 0) {
+        if (!nblocks || nblocks > INT_MAX) {
+            av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
             *data_size = 0;
-            return buf_size;
+            return AVERROR_INVALIDDATA;
         }
+        s->currentframeblocks = s->samples = nblocks;

         memset(s->decoded0, 0, sizeof(s->decoded0));
         memset(s->decoded1, 0, sizeof(s->decoded1));
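The ape_decode_frame() change above keeps the old pointer alive until the reallocation is known to have succeeded; assigning the result of a realloc directly to the only copy of the pointer leaks the buffer on failure. A generic sketch of the pattern using plain libc rather than the codec's own helpers:

    #include <stdlib.h>

    /* Grow *buf to new_size; on failure the original buffer is left intact. */
    static int grow_buffer(unsigned char **buf, size_t new_size)
    {
        unsigned char *tmp = realloc(*buf, new_size);
        if (!tmp)
            return -1;   /* *buf is still valid and still owned by the caller */
        *buf = tmp;
        return 0;
    }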
@@ -276,7 +276,7 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AT1Ctx *q = avctx->priv_data;
-    int ch, ret, i;
+    int ch, ret, i, out_size;
     GetBitContext gb;
     float* samples = data;

@@ -286,6 +286,13 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
         return -1;
     }

+    out_size = q->channels * AT1_SU_SAMPLES *
+               av_get_bytes_per_sample(avctx->sample_fmt);
+    if (*data_size < out_size) {
+        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+        return AVERROR(EINVAL);
+    }
+
     for (ch = 0; ch < q->channels; ch++) {
         AT1SUCtx* su = &q->SUs[ch];

@@ -318,7 +325,7 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
         }
     }

-    *data_size = q->channels * AT1_SU_SAMPLES * sizeof(*samples);
+    *data_size = out_size;
     return avctx->block_align;
 }

@@ -329,6 +336,11 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)

     avctx->sample_fmt = AV_SAMPLE_FMT_FLT;

+    if (avctx->channels < 1 || avctx->channels > AT1_MAX_CHANNELS) {
+        av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n",
+               avctx->channels);
+        return AVERROR(EINVAL);
+    }
     q->channels = avctx->channels;

     /* Init the mdct transforms */
@@ -47,6 +47,7 @@ avs_decode_frame(AVCodecContext * avctx,
                  void *data, int *data_size, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
+    const uint8_t *buf_end = avpkt->data + avpkt->size;
     int buf_size = avpkt->size;
     AvsContext *const avs = avctx->priv_data;
     AVFrame *picture = data;
@@ -69,6 +70,8 @@ avs_decode_frame(AVCodecContext * avctx,
     out = avs->picture.data[0];
     stride = avs->picture.linesize[0];

+    if (buf_end - buf < 4)
+        return AVERROR_INVALIDDATA;
     sub_type = buf[0];
     type = buf[1];
     buf += 4;
@@ -79,6 +82,8 @@ avs_decode_frame(AVCodecContext * avctx,

         first = AV_RL16(buf);
         last = first + AV_RL16(buf + 2);
+        if (first >= 256 || last > 256 || buf_end - buf < 4 + 4 + 3 * (last - first))
+            return AVERROR_INVALIDDATA;
         buf += 4;
         for (i=first; i<last; i++, buf+=3)
             pal[i] = (buf[0] << 18) | (buf[1] << 10) | (buf[2] << 2);
@@ -114,9 +119,13 @@ avs_decode_frame(AVCodecContext * avctx,
         return -1;
     }

+    if (buf_end - buf < 256 * vect_w * vect_h)
+        return AVERROR_INVALIDDATA;
     table = buf + (256 * vect_w * vect_h);
     if (sub_type != AVS_I_FRAME) {
         int map_size = ((318 / vect_w + 7) / 8) * (198 / vect_h);
+        if (buf_end - table < map_size)
+            return AVERROR_INVALIDDATA;
         init_get_bits(&change_map, table, map_size * 8);
         table += map_size;
     }
@@ -124,6 +133,8 @@ avs_decode_frame(AVCodecContext * avctx,
     for (y=0; y<198; y+=vect_h) {
         for (x=0; x<318; x+=vect_w) {
             if (sub_type == AVS_I_FRAME || get_bits1(&change_map)) {
+                if (buf_end - table < 1)
+                    return AVERROR_INVALIDDATA;
                 vect = &buf[*table++ * (vect_w * vect_h)];
                 for (j=0; j<vect_w; j++) {
                     out[(y + 0) * stride + x + j] = vect[(0 * vect_w) + j];
@@ -247,7 +247,7 @@ static void read_tree(GetBitContext *gb, Tree *tree)
             tree->syms[i] = get_bits(gb, 4);
             tmp1[tree->syms[i]] = 1;
         }
-        for (i = 0; i < 16; i++)
+        for (i = 0; i < 16 && len < 16 - 1; i++)
             if (!tmp1[i])
                 tree->syms[++len] = i;
     } else {
@@ -344,14 +344,14 @@ static int read_motion_values(AVCodecContext *avctx, GetBitContext *gb, Bundle *
         memset(b->cur_dec, v, t);
         b->cur_dec += t;
     } else {
-        do {
+        while (b->cur_dec < dec_end) {
             v = GET_HUFF(gb, b->tree);
             if (v) {
                 sign = -get_bits1(gb);
                 v = (v ^ sign) - sign;
             }
             *b->cur_dec++ = v;
-        } while (b->cur_dec < dec_end);
+        }
     }
     return 0;
 }
@@ -375,7 +375,7 @@ static int read_block_types(AVCodecContext *avctx, GetBitContext *gb, Bundle *b)
         memset(b->cur_dec, v, t);
         b->cur_dec += t;
     } else {
-        do {
+        while (b->cur_dec < dec_end) {
             v = GET_HUFF(gb, b->tree);
             if (v < 12) {
                 last = v;
@@ -383,10 +383,12 @@ static int read_block_types(AVCodecContext *avctx, GetBitContext *gb, Bundle *b)
             } else {
                 int run = bink_rlelens[v - 12];

+                if (dec_end - b->cur_dec < run)
+                    return -1;
                 memset(b->cur_dec, last, run);
                 b->cur_dec += run;
             }
-        } while (b->cur_dec < dec_end);
+        }
     }
     return 0;
 }
@@ -457,6 +459,7 @@ static int read_dcs(AVCodecContext *avctx, GetBitContext *gb, Bundle *b,
 {
     int i, j, len, len2, bsize, sign, v, v2;
     int16_t *dst = (int16_t*)b->cur_dec;
+    int16_t *dst_end =( int16_t*)b->data_end;

     CHECK_READ_VAL(gb, b, len);
     v = get_bits(gb, start_bits - has_sign);
@@ -464,10 +467,14 @@ static int read_dcs(AVCodecContext *avctx, GetBitContext *gb, Bundle *b,
         sign = -get_bits1(gb);
         v = (v ^ sign) - sign;
     }
+    if (dst_end - dst < 1)
+        return -1;
     *dst++ = v;
     len--;
     for (i = 0; i < len; i += 8) {
         len2 = FFMIN(len - i, 8);
+        if (dst_end - dst < len2)
+            return -1;
         bsize = get_bits(gb, 4);
         if (bsize) {
             for (j = 0; j < len2; j++) {
@@ -535,6 +542,8 @@ static int binkb_read_bundle(BinkContext *c, GetBitContext *gb, int bundle_num)
     int i, len;

     CHECK_READ_VAL(gb, b, len);
+    if (b->data_end - b->cur_dec < len * (1 + (bits > 8)))
+        return -1;
     if (bits <= 8) {
         if (!issigned) {
             for (i = 0; i < len; i++)
@@ -966,8 +975,9 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
     for (i = 0; i < BINK_NB_SRC; i++)
         read_bundle(gb, c, i);

-    ref_start = c->last.data[plane_idx];
-    ref_end = c->last.data[plane_idx]
+    ref_start = c->last.data[plane_idx] ? c->last.data[plane_idx]
+                                        : c->pic.data[plane_idx];
+    ref_end = ref_start
               + (bw - 1 + c->last.linesize[plane_idx] * (bh - 1)) * 8;

     for (i = 0; i < 64; i++)
@@ -996,7 +1006,8 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
         if (by == bh)
             break;
         dst = c->pic.data[plane_idx] + 8*by*stride;
-        prev = c->last.data[plane_idx] + 8*by*stride;
+        prev = (c->last.data[plane_idx] ? c->last.data[plane_idx]
+                                        : c->pic.data[plane_idx]) + 8*by*stride;
         for (bx = 0; bx < bw; bx++, dst += 8, prev += 8) {
             blk = get_value(c, BINK_SRC_BLOCK_TYPES);
             // 16x16 block type on odd line means part of the already decoded block, so skip it
@@ -152,11 +152,18 @@ static const uint8_t rle_length_tab[16] = {
     2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 64
 };

+#define GET_BITS_SAFE(out, nbits) do { \
+    if (get_bits_left(gb) < nbits) \
+        return AVERROR_INVALIDDATA; \
+    out = get_bits(gb, nbits); \
+} while (0)
+
 /**
  * Decode Bink Audio block
  * @param[out] out Output buffer (must contain s->block_size elements)
+ * @return 0 on success, negative error code on failure
  */
-static void decode_block(BinkAudioContext *s, short *out, int use_dct)
+static int decode_block(BinkAudioContext *s, short *out, int use_dct)
 {
     int ch, i, j, k;
     float q, quant[25];
@@ -169,13 +176,19 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct)
     for (ch = 0; ch < s->channels; ch++) {
         FFTSample *coeffs = s->coeffs_ptr[ch];
         if (s->version_b) {
+            if (get_bits_left(gb) < 64)
+                return AVERROR_INVALIDDATA;
             coeffs[0] = av_int2flt(get_bits(gb, 32)) * s->root;
             coeffs[1] = av_int2flt(get_bits(gb, 32)) * s->root;
         } else {
+            if (get_bits_left(gb) < 58)
+                return AVERROR_INVALIDDATA;
             coeffs[0] = get_float(gb) * s->root;
             coeffs[1] = get_float(gb) * s->root;
         }

+        if (get_bits_left(gb) < s->num_bands * 8)
+            return AVERROR_INVALIDDATA;
         for (i = 0; i < s->num_bands; i++) {
             /* constant is result of 0.066399999/log10(M_E) */
             int value = get_bits(gb, 8);
@@ -190,15 +203,20 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct)
         while (i < s->frame_len) {
             if (s->version_b) {
                 j = i + 16;
-            } else if (get_bits1(gb)) {
-                j = i + rle_length_tab[get_bits(gb, 4)] * 8;
             } else {
-                j = i + 8;
+                int v;
+                GET_BITS_SAFE(v, 1);
+                if (v) {
+                    GET_BITS_SAFE(v, 4);
+                    j = i + rle_length_tab[v] * 8;
+                } else {
+                    j = i + 8;
+                }
             }

             j = FFMIN(j, s->frame_len);

-            width = get_bits(gb, 4);
+            GET_BITS_SAFE(width, 4);
             if (width == 0) {
                 memset(coeffs + i, 0, (j - i) * sizeof(*coeffs));
                 i = j;
@@ -208,9 +226,11 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct)
             while (i < j) {
                 if (s->bands[k] == i)
                     q = quant[k++];
-                coeff = get_bits(gb, width);
+                GET_BITS_SAFE(coeff, width);
                 if (coeff) {
-                    if (get_bits1(gb))
+                    int v;
+                    GET_BITS_SAFE(v, 1);
+                    if (v)
                         coeffs[i] = -q * coeff;
                     else
                         coeffs[i] = q * coeff;
@@ -246,6 +266,8 @@ static void decode_block(BinkAudioContext *s, short *out, int use_dct)
                       s->overlap_len * s->channels * sizeof(*out));

     s->first = 0;
+
+    return 0;
 }

 static av_cold int decode_end(AVCodecContext *avctx)
@@ -277,12 +299,17 @@ static int decode_frame(AVCodecContext *avctx,
     int reported_size;
     GetBitContext *gb = &s->gb;

+    if (buf_size < 4) {
+        av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+        return AVERROR_INVALIDDATA;
+    }
+
     init_get_bits(gb, buf, buf_size * 8);

     reported_size = get_bits_long(gb, 32);
-    while (get_bits_count(gb) / 8 < buf_size &&
-           samples + s->block_size <= samples_end) {
-        decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT);
+    while (samples + s->block_size <= samples_end) {
+        if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT))
+            break;
         samples += s->block_size;
         get_bits_align32(gb);
     }
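The Bink Audio hunks above wrap every bitstream read in a check on how many bits are left, so a truncated packet makes decode_block() fail cleanly instead of reading past the buffer. A stand-alone sketch of the same shape with a made-up reader type (this is not the lavc GetBitContext API):

    #include <stddef.h>

    struct bit_reader { const unsigned char *buf; size_t size_bits, pos_bits; };

    /* Read n bits MSB-first, failing instead of running past the packet. */
    static int read_bits_checked(struct bit_reader *r, unsigned n, unsigned *out)
    {
        unsigned v = 0, i;
        if (n > r->size_bits - r->pos_bits)
            return -1;              /* truncated input: abort, as decode_block() now does */
        for (i = 0; i < n; i++, r->pos_bits++)
            v = (v << 1) | ((r->buf[r->pos_bits >> 3] >> (7 - (r->pos_bits & 7))) & 1);
        *out = v;
        return 0;
    }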
@@ -105,8 +105,8 @@ static int alloc_table(VLC *vlc, int size, int use_static)
         if(use_static)
             abort(); //cant do anything, init_vlc() is used with too little memory
         vlc->table_allocated += (1 << vlc->bits);
-        vlc->table = av_realloc(vlc->table,
-                                sizeof(VLC_TYPE) * 2 * vlc->table_allocated);
+        vlc->table = av_realloc_f(vlc->table,
+                                  vlc->table_allocated, sizeof(VLC_TYPE) * 2);
         if (!vlc->table)
             return -1;
     }
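av_realloc_f, used in the hunk above, takes an element count and an element size so the multiplication can be checked for overflow inside the allocator instead of at every call site. A generic version of that idea, purely illustrative and not the libavutil implementation:

    #include <stdlib.h>
    #include <stdint.h>

    /* realloc nmemb*size bytes, refusing requests whose byte count would overflow. */
    static void *realloc_array(void *ptr, size_t nmemb, size_t size)
    {
        void *ret = NULL;
        if (size && nmemb && nmemb <= SIZE_MAX / size)
            ret = realloc(ptr, nmemb * size);
        if (!ret)
            free(ptr);   /* free-on-failure, so "p = realloc_array(p, n, sz)" cannot leak or dangle */
        return ret;
    }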
@@ -898,15 +898,17 @@ static void qmf_32_subbands(DCAContext * s, int chans,
     else /* Perfect reconstruction */
         prCoeff = fir_32bands_perfect;

+    for (i = sb_act; i < 32; i++)
+        s->raXin[i] = 0.0;
+
     /* Reconstructed channel sample index */
     for (subindex = 0; subindex < 8; subindex++) {
         /* Load in one sample from each subband and clear inactive subbands */
         for (i = 0; i < sb_act; i++){
-            uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ ((i-1)&2)<<30;
+            unsigned sign = (i - 1) & 2;
+            uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ sign << 30;
             AV_WN32A(&s->raXin[i], v);
         }
-        for (; i < 32; i++)
-            s->raXin[i] = 0.0;

         s->synth.synth_filter_float(&s->imdct,
                                     s->subband_fir_hist[chans], &s->hist_index[chans],
@@ -217,7 +217,11 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
     bitmap_frame_size = buf_size - 4;

     /* handle palette */
+    if (bitmap_frame_size < palette_colors_count * (3 + (palette_type != 0)))
+        return AVERROR_INVALIDDATA;
     if (palette_type == 0) {
+        if (palette_colors_count > 256)
+            return AVERROR_INVALIDDATA;
         for (i = 0; i < palette_colors_count; ++i) {
             cin->palette[i] = bytestream_get_le24(&buf);
             bitmap_frame_size -= 3;
@@ -306,6 +310,11 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
     CinAudioContext *cin = avctx->priv_data;

     cin->avctx = avctx;
+    if (avctx->channels != 1) {
+        av_log_ask_for_sample(avctx, "Number of channels is not supported\n");
+        return AVERROR_PATCHWELCOME;
+    }
+
     cin->initial_decode_frame = 1;
     cin->delta = 0;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
@@ -248,11 +248,13 @@ static const DVprofile dv_profiles[] = {
 const DVprofile* ff_dv_frame_profile(const DVprofile *sys,
                                      const uint8_t* frame, unsigned buf_size)
 {
-   int i;
+   int i, dsf, stype;

-   int dsf = (frame[3] & 0x80) >> 7;
+   if(buf_size < DV_PROFILE_BYTES)
+       return NULL;

-   int stype = frame[80*5 + 48 + 3] & 0x1f;
+   dsf = (frame[3] & 0x80) >> 7;
+   stype = frame[80*5 + 48 + 3] & 0x1f;

    /* 576i50 25Mbps 4:1:1 is a special case */
    if (dsf == 1 && stype == 0 && frame[4] & 0x07 /* the APT field */) {
@@ -56,7 +56,7 @@ static void cmv_decode_intra(CmvContext * s, const uint8_t *buf, const uint8_t *
     unsigned char *dst = s->frame.data[0];
     int i;

-    for (i=0; i < s->avctx->height && buf+s->avctx->width<=buf_end; i++) {
+    for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) {
         memcpy(dst, buf, s->avctx->width);
         dst += s->frame.linesize[0];
         buf += s->avctx->width;
@@ -88,7 +88,7 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *

     i = 0;
     for(y=0; y<s->avctx->height/4; y++)
-        for(x=0; x<s->avctx->width/4 && buf+i<buf_end; x++) {
+        for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) {
             if (buf[i]==0xFF) {
                 unsigned char *dst = s->frame.data[0] + (y*4)*s->frame.linesize[0] + x*4;
                 if (raw+16<buf_end && *raw==0xFF) { /* intra */
@@ -110,9 +110,10 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
             }else{ /* inter using last frame as reference */
                 int xoffset = (buf[i] & 0xF) - 7;
                 int yoffset = ((buf[i] >> 4)) - 7;
-                cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
-                            s->last_frame.data[0], s->last_frame.linesize[0],
-                            x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
+                if (s->last_frame.data[0])
+                    cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
+                                s->last_frame.data[0], s->last_frame.linesize[0],
+                                x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
             }
             i++;
         }
@@ -122,7 +123,7 @@ static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t
 {
     int pal_start, pal_count, i;

-    if(buf+16>=buf_end) {
+    if(buf_end - buf < 16) {
         av_log(s->avctx, AV_LOG_WARNING, "truncated header\n");
         return;
     }
@@ -139,7 +140,7 @@ static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t
     pal_count = AV_RL16(&buf[14]);

     buf += 16;
-    for (i=pal_start; i<pal_start+pal_count && i<AVPALETTE_COUNT && buf+2<buf_end; i++) {
+    for (i=pal_start; i<pal_start+pal_count && i<AVPALETTE_COUNT && buf_end - buf >= 3; i++) {
         s->palette[i] = AV_RB24(buf);
         buf += 3;
     }
@@ -157,6 +158,9 @@ static int cmv_decode_frame(AVCodecContext *avctx,
     CmvContext *s = avctx->priv_data;
     const uint8_t *buf_end = buf + buf_size;

+    if (buf_end - buf < EA_PREAMBLE_SIZE)
+        return AVERROR_INVALIDDATA;
+
     if (AV_RL32(buf)==MVIh_TAG||AV_RB32(buf)==MVIh_TAG) {
         cmv_process_header(s, buf+EA_PREAMBLE_SIZE, buf_end);
         return buf_size;
@@ -85,15 +85,21 @@ static inline void comp_block(MadContext *t, int mb_x, int mb_y,
 {
     MpegEncContext *s = &t->s;
     if (j < 4) {
+        unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
+        if (offset >= (s->height - 7) * t->last_frame.linesize[0] - 7)
+            return;
         comp(t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
              t->frame.linesize[0],
-             t->last_frame.data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x,
+             t->last_frame.data[0] + offset,
              t->last_frame.linesize[0], add);
     } else if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
         int index = j - 3;
+        unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2);
+        if (offset >= (s->height/2 - 7) * t->last_frame.linesize[index] - 7)
+            return;
         comp(t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x * 8,
              t->frame.linesize[index],
-             t->last_frame.data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2),
+             t->last_frame.data[index] + offset,
              t->last_frame.linesize[index], add);
     }
 }
@@ -205,7 +211,8 @@ static void decode_mb(MadContext *t, int inter)
         for (j=0; j<6; j++) {
             if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map
                 int add = 2*decode_motion(&s->gb);
-                comp_block(t, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
+                if (t->last_frame.data[0])
+                    comp_block(t, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
             } else {
                 s->dsp.clear_block(t->block);
                 decode_block_intra(t, t->block);
@@ -266,6 +273,8 @@ static int decode_frame(AVCodecContext *avctx,
         avcodec_set_dimensions(avctx, s->width, s->height);
         if (t->frame.data[0])
             avctx->release_buffer(avctx, &t->frame);
+        if (t->last_frame.data[0])
+            avctx->release_buffer(avctx, &t->last_frame);
     }

     t->frame.reference = 1;
@@ -280,6 +289,7 @@ static int decode_frame(AVCodecContext *avctx,
     if (!t->bitstream_buf)
         return AVERROR(ENOMEM);
     bswap16_buf(t->bitstream_buf, (const uint16_t*)buf, (buf_end-buf)/2);
+    memset((uint8_t*)t->bitstream_buf + (buf_end-buf), 0, FF_INPUT_BUFFER_PADDING_SIZE);
     init_get_bits(&s->gb, t->bitstream_buf, 8*(buf_end-buf));

     for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
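The comp_block() change above computes the motion-compensated source position as an unsigned offset and validates it against the reference frame before the pointer is formed, which also rejects displacements that would otherwise wrap around. A reduced sketch of the idea with illustrative names and an assumed minimum frame height of 8:

    #include <stdint.h>
    #include <stddef.h>

    /* Return a checked pointer to an 8x8 source block in the previous frame,
     * or NULL if the motion vector would push it outside the frame. */
    static const uint8_t *ref_block(const uint8_t *prev, size_t linesize, size_t height,
                                    int x, int y, int mv_x, int mv_y)
    {
        size_t offset;
        if (height < 8 || y + mv_y < 0 || x + mv_x < 0)
            return NULL;
        offset = (size_t)(y + mv_y) * linesize + (size_t)(x + mv_x);
        if (offset > (height - 8) * linesize - 8)
            return NULL;
        return prev + offset;
    }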
@@ -74,7 +74,7 @@ static int unpack(const uint8_t *src, const uint8_t *src_end, unsigned char *dst
     else
         src += 2;

-    if (src+3>src_end)
+    if (src_end - src < 3)
         return -1;
     size = AV_RB24(src);
     src += 3;
@@ -138,7 +138,7 @@ static int unpack(const uint8_t *src, const uint8_t *src_end, unsigned char *dst
  * @return 0 on success, -1 on critical buffer underflow
  */
 static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *buf_end){
-    unsigned char *frame0_end = s->last_frame.data[0] + s->avctx->width*s->last_frame.linesize[0];
+    unsigned last_frame_size = s->avctx->height*s->last_frame.linesize[0];
     int num_mvs;
     int num_blocks_raw;
     int num_blocks_packed;
@@ -148,7 +148,7 @@ static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *b
     int mvbits;
     const unsigned char *blocks_raw;

-    if(buf+12>buf_end)
+    if(buf_end - buf < 12)
         return -1;

     num_mvs = AV_RL16(&buf[0]);
@@ -171,7 +171,7 @@ static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *b
     /* read motion vectors */
     mvbits = (num_mvs*2*10+31) & ~31;

-    if (buf+(mvbits>>3)+16*num_blocks_raw+8*num_blocks_packed>buf_end)
+    if (buf_end - buf < (mvbits>>3)+16*num_blocks_raw+8*num_blocks_packed)
         return -1;

     init_get_bits(&gb, buf, mvbits);
@@ -207,12 +207,14 @@ static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *b
             int src_stride;

             if (vector < num_mvs) {
-                src = s->last_frame.data[0] +
+                unsigned offset =
                       (y*4 + s->mv_codebook[vector][1])*s->last_frame.linesize[0] +
                       x*4 + s->mv_codebook[vector][0];
+
                 src_stride = s->last_frame.linesize[0];
-                if (src+3*src_stride+3>=frame0_end)
+                if (offset >= last_frame_size - (3*src_stride+3))
                     continue;
+                src = s->last_frame.data[0] + offset;
             }else{
                 int offset = vector - num_mvs;
                 if (offset<num_blocks_raw)
@@ -252,12 +254,15 @@ static int tgv_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end = buf + buf_size;
     int chunk_type;

+    if (buf_end - buf < EA_PREAMBLE_SIZE)
+        return AVERROR_INVALIDDATA;
+
     chunk_type = AV_RL32(&buf[0]);
     buf += EA_PREAMBLE_SIZE;

     if (chunk_type==kVGT_TAG) {
         int pal_count, i;
-        if(buf+12>buf_end) {
+        if(buf_end - buf < 12) {
             av_log(avctx, AV_LOG_WARNING, "truncated header\n");
             return -1;
         }
@@ -272,7 +277,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,

         pal_count = AV_RL16(&buf[6]);
         buf += 12;
-        for(i=0; i<pal_count && i<AVPALETTE_COUNT && buf+2<buf_end; i++) {
+        for(i=0; i<pal_count && i<AVPALETTE_COUNT && buf_end - buf >= 3; i++) {
             s->palette[i] = AV_RB24(buf);
             buf += 3;
         }
@@ -660,7 +660,8 @@ static int is_intra_more_likely(MpegEncContext *s){

     if(s->codec_id == CODEC_ID_H264){
         H264Context *h= (void*)s;
-        if (h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
+
+        if (h->list_count <= 0 || h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
             return 1;
     }

@@ -226,9 +226,11 @@ static int get_metadata_size(const uint8_t *buf, int buf_size)

     buf += 4;
     do {
+        if (buf_end - buf < 4)
+            return 0;
         ff_flac_parse_block_header(buf, &metadata_last, NULL, &metadata_size);
         buf += 4;
-        if (buf + metadata_size > buf_end) {
+        if (buf_end - buf < metadata_size) {
             /* need more data in order to read the complete header */
             return 0;
         }
@@ -131,7 +131,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
FlicDecodeContext *s = avctx->priv_data;
|
FlicDecodeContext *s = avctx->priv_data;
|
||||||
|
|
||||||
int stream_ptr = 0;
|
int stream_ptr = 0;
|
||||||
int stream_ptr_after_color_chunk;
|
|
||||||
int pixel_ptr;
|
int pixel_ptr;
|
||||||
int palette_ptr;
|
int palette_ptr;
|
||||||
unsigned char palette_idx1;
|
unsigned char palette_idx1;
|
||||||
@@ -171,7 +170,11 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
pixels = s->frame.data[0];
|
pixels = s->frame.data[0];
|
||||||
pixel_limit = s->avctx->height * s->frame.linesize[0];
|
pixel_limit = s->avctx->height * s->frame.linesize[0];
|
||||||
|
|
||||||
|
if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE))
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
frame_size = AV_RL32(&buf[stream_ptr]);
|
frame_size = AV_RL32(&buf[stream_ptr]);
|
||||||
|
if (frame_size > buf_size)
|
||||||
|
frame_size = buf_size;
|
||||||
stream_ptr += 6; /* skip the magic number */
|
stream_ptr += 6; /* skip the magic number */
|
||||||
num_chunks = AV_RL16(&buf[stream_ptr]);
|
num_chunks = AV_RL16(&buf[stream_ptr]);
|
||||||
stream_ptr += 10; /* skip padding */
|
stream_ptr += 10; /* skip padding */
|
||||||
@@ -179,13 +182,16 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
frame_size -= 16;
|
frame_size -= 16;
|
||||||
|
|
||||||
/* iterate through the chunks */
|
/* iterate through the chunks */
|
||||||
while ((frame_size > 0) && (num_chunks > 0)) {
|
while ((frame_size >= 6) && (num_chunks > 0)) {
|
||||||
|
int stream_ptr_after_chunk;
|
||||||
chunk_size = AV_RL32(&buf[stream_ptr]);
|
chunk_size = AV_RL32(&buf[stream_ptr]);
|
||||||
if (chunk_size > frame_size) {
|
if (chunk_size > frame_size) {
|
||||||
av_log(avctx, AV_LOG_WARNING,
|
av_log(avctx, AV_LOG_WARNING,
|
||||||
"Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
|
"Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
|
||||||
chunk_size = frame_size;
|
chunk_size = frame_size;
|
||||||
}
|
}
|
||||||
|
stream_ptr_after_chunk = stream_ptr + chunk_size;
|
||||||
|
|
||||||
stream_ptr += 4;
|
stream_ptr += 4;
|
||||||
chunk_type = AV_RL16(&buf[stream_ptr]);
|
chunk_type = AV_RL16(&buf[stream_ptr]);
|
||||||
stream_ptr += 2;
|
stream_ptr += 2;
|
||||||
@@ -193,8 +199,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
switch (chunk_type) {
|
switch (chunk_type) {
|
||||||
case FLI_256_COLOR:
|
case FLI_256_COLOR:
|
||||||
case FLI_COLOR:
|
case FLI_COLOR:
|
||||||
stream_ptr_after_color_chunk = stream_ptr + chunk_size - 6;
|
|
||||||
|
|
||||||
/* check special case: If this file is from the Magic Carpet
|
/* check special case: If this file is from the Magic Carpet
|
||||||
* game and uses 6-bit colors even though it reports 256-color
|
* game and uses 6-bit colors even though it reports 256-color
|
||||||
* chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
|
* chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
|
||||||
@@ -218,6 +222,9 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
if (color_changes == 0)
|
if (color_changes == 0)
|
||||||
color_changes = 256;
|
color_changes = 256;
|
||||||
|
|
||||||
|
if (stream_ptr + color_changes * 3 > stream_ptr_after_chunk)
|
||||||
|
break;
|
||||||
|
|
||||||
for (j = 0; j < color_changes; j++) {
|
for (j = 0; j < color_changes; j++) {
|
||||||
unsigned int entry;
|
unsigned int entry;
|
||||||
|
|
||||||
@@ -234,13 +241,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
s->palette[palette_ptr++] = entry;
|
s->palette[palette_ptr++] = entry;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* color chunks sometimes have weird 16-bit alignment issues;
|
|
||||||
* therefore, take the hardline approach and set the stream_ptr
|
|
||||||
* to the value calculated w.r.t. the size specified by the color
|
|
||||||
* chunk header */
|
|
||||||
stream_ptr = stream_ptr_after_color_chunk;
|
|
||||||
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case FLI_DELTA:
|
case FLI_DELTA:
|
||||||
@@ -248,6 +248,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
             compressed_lines = AV_RL16(&buf[stream_ptr]);
             stream_ptr += 2;
             while (compressed_lines > 0) {
+                if (stream_ptr + 2 > stream_ptr_after_chunk)
+                    break;
                 line_packets = AV_RL16(&buf[stream_ptr]);
                 stream_ptr += 2;
                 if ((line_packets & 0xC000) == 0xC000) {
@@ -267,6 +269,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                     CHECK_PIXEL_PTR(0);
                     pixel_countdown = s->avctx->width;
                     for (i = 0; i < line_packets; i++) {
+                        if (stream_ptr + 2 > stream_ptr_after_chunk)
+                            break;
                         /* account for the skip bytes */
                         pixel_skip = buf[stream_ptr++];
                         pixel_ptr += pixel_skip;
@@ -283,6 +287,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                             }
                         } else {
                             CHECK_PIXEL_PTR(byte_run * 2);
+                            if (stream_ptr + byte_run * 2 > stream_ptr_after_chunk)
+                                break;
                             for (j = 0; j < byte_run * 2; j++, pixel_countdown--) {
                                 palette_idx1 = buf[stream_ptr++];
                                 pixels[pixel_ptr++] = palette_idx1;
@@ -309,6 +315,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                 CHECK_PIXEL_PTR(0);
                 pixel_countdown = s->avctx->width;
                 line_packets = buf[stream_ptr++];
+                if (stream_ptr + 2 * line_packets > stream_ptr_after_chunk)
+                    break;
                 if (line_packets > 0) {
                     for (i = 0; i < line_packets; i++) {
                         /* account for the skip bytes */
@@ -318,6 +326,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                         byte_run = (signed char)(buf[stream_ptr++]);
                         if (byte_run > 0) {
                             CHECK_PIXEL_PTR(byte_run);
+                            if (stream_ptr + byte_run > stream_ptr_after_chunk)
+                                break;
                             for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                 palette_idx1 = buf[stream_ptr++];
                                 pixels[pixel_ptr++] = palette_idx1;
@@ -355,6 +365,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
             stream_ptr++;
             pixel_countdown = s->avctx->width;
             while (pixel_countdown > 0) {
+                if (stream_ptr + 1 > stream_ptr_after_chunk)
+                    break;
                 byte_run = (signed char)(buf[stream_ptr++]);
                 if (byte_run > 0) {
                     palette_idx1 = buf[stream_ptr++];
@@ -369,6 +381,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                 } else { /* copy bytes if byte_run < 0 */
                     byte_run = -byte_run;
                     CHECK_PIXEL_PTR(byte_run);
+                    if (stream_ptr + byte_run > stream_ptr_after_chunk)
+                        break;
                     for (j = 0; j < byte_run; j++) {
                         palette_idx1 = buf[stream_ptr++];
                         pixels[pixel_ptr++] = palette_idx1;
@@ -386,10 +400,9 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
 
         case FLI_COPY:
             /* copy the chunk (uncompressed frame) */
-            if (chunk_size - 6 > s->avctx->width * s->avctx->height) {
+            if (chunk_size - 6 != s->avctx->width * s->avctx->height) {
                 av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
-                       "bigger than image, skipping chunk\n", chunk_size - 6);
-                stream_ptr += chunk_size - 6;
+                       "has incorrect size, skipping chunk\n", chunk_size - 6);
             } else {
                 for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
                      y_ptr += s->frame.linesize[0]) {
@@ -402,7 +415,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
 
         case FLI_MINI:
             /* some sort of a thumbnail? disregard this chunk... */
-            stream_ptr += chunk_size - 6;
             break;
 
         default:
@@ -410,6 +422,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
             break;
         }
 
+        stream_ptr = stream_ptr_after_chunk;
+
         frame_size -= chunk_size;
         num_chunks--;
     }
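The FLIC hunks above all follow one idiom: before consuming N bytes from the
current chunk, verify that stream_ptr + N does not run past the precomputed end
of the chunk, and unconditionally advance to stream_ptr_after_chunk once the
chunk is done. A minimal, standalone sketch of that pattern with hypothetical
names (not the decoder itself):

    /* sketch: bounded chunk consumption, mirroring the checks added above */
    static int read_run(const uint8_t *buf, int stream_ptr,
                        int stream_ptr_after_chunk, int run)
    {
        if (stream_ptr + run > stream_ptr_after_chunk)
            return -1;               /* truncated chunk: stop decoding it */
        /* ... consume 'run' bytes starting at buf[stream_ptr] ... */
        return stream_ptr + run;     /* new read position */
    }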
@@ -75,6 +75,20 @@ static inline int get_ue_golomb(GetBitContext *gb){
     }
 }
 
+/**
+ * Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
+ */
+static inline unsigned get_ue_golomb_long(GetBitContext *gb)
+{
+    unsigned buf, log;
+
+    buf = show_bits_long(gb, 32);
+    log = 31 - av_log2(buf);
+    skip_bits_long(gb, log);
+
+    return get_bits_long(gb, log + 1) - 1;
+}
+
 /**
  * read unsigned exp golomb code, constraint to a max of 31.
  * the return value is undefined if the stored value exceeds 31.
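get_ue_golomb_long() reads an unsigned Exp-Golomb code whose value may be far
larger than what the table-assisted get_ue_golomb() is designed for; the HRD
fields patched further down in this merge (bit_rate_value_minus1,
cpb_size_value_minus1) need that wider range. For reference, the mapping it
decodes is the standard ue(v) one:

    /* ue(v) Exp-Golomb examples: bitstring -> decoded value
     *   1        -> 0
     *   010      -> 1
     *   011      -> 2
     *   00100    -> 3
     *   0001000  -> 7
     * i.e. 'log' leading zero bits, then log+1 bits whose value minus 1
     * is the result -- exactly what the new helper computes above. */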
@@ -97,12 +97,9 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){
     }
 
     return 0;
-} //FIXME cleanup like ff_h264_check_intra_pred_mode
+} //FIXME cleanup like check_intra_pred_mode
 
-/**
- * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
- */
-int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
+static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
     MpegEncContext * const s = &h->s;
     static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
     static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
@@ -122,7 +119,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
 
     if((h->left_samples_available&0x8080) != 0x8080){
         mode= left[ mode ];
-        if(h->left_samples_available&0x8080){ //mad cow disease mode, aka MBAFF + constrained_intra_pred
+        if(is_chroma && (h->left_samples_available&0x8080)){ //mad cow disease mode, aka MBAFF + constrained_intra_pred
             mode= ALZHEIMER_DC_L0T_PRED8x8 + (!(h->left_samples_available&0x8000)) + 2*(mode == DC_128_PRED8x8);
         }
         if(mode<0){
@@ -134,6 +131,23 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
     return mode;
 }
 
+/**
+ * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode)
+{
+    return check_intra_pred_mode(h, mode, 0);
+}
+
+/**
+ * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode)
+{
+    return check_intra_pred_mode(h, mode, 1);
+}
+
+
 const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
     int i, si, di;
     uint8_t *dst;
@@ -978,8 +992,12 @@ static av_cold void common_init(H264Context *h){
     s->height = s->avctx->height;
     s->codec_id= s->avctx->codec->id;
 
-    ff_h264dsp_init(&h->h264dsp, 8);
-    ff_h264_pred_init(&h->hpc, s->codec_id, 8);
+    s->avctx->bits_per_raw_sample = 8;
+
+    ff_h264dsp_init(&h->h264dsp,
+                    s->avctx->bits_per_raw_sample);
+    ff_h264_pred_init(&h->hpc, s->codec_id,
+                      s->avctx->bits_per_raw_sample);
 
     h->dequant_coeff_pps= -1;
     s->unrestricted_mv=1;
@@ -991,17 +1009,20 @@ static av_cold void common_init(H264Context *h){
     memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
 }
 
-int ff_h264_decode_extradata(H264Context *h)
+int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
 {
     AVCodecContext *avctx = h->s.avctx;
 
-    if(avctx->extradata[0] == 1){
+    if(!buf || size <= 0)
+        return -1;
+
+    if(buf[0] == 1){
         int i, cnt, nalsize;
-        unsigned char *p = avctx->extradata;
+        const unsigned char *p = buf;
 
         h->is_avc = 1;
 
-        if(avctx->extradata_size < 7) {
+        if(size < 7) {
             av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
             return -1;
         }
@@ -1013,6 +1034,8 @@ int ff_h264_decode_extradata(H264Context *h)
         p += 6;
         for (i = 0; i < cnt; i++) {
             nalsize = AV_RB16(p) + 2;
+            if(nalsize > size - (p-buf))
+                return -1;
             if(decode_nal_units(h, p, nalsize) < 0) {
                 av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i);
                 return -1;
@@ -1023,6 +1046,8 @@ int ff_h264_decode_extradata(H264Context *h)
         cnt = *(p++); // Number of pps
         for (i = 0; i < cnt; i++) {
             nalsize = AV_RB16(p) + 2;
+            if(nalsize > size - (p-buf))
+                return -1;
             if (decode_nal_units(h, p, nalsize) < 0) {
                 av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i);
                 return -1;
@@ -1030,10 +1055,10 @@ int ff_h264_decode_extradata(H264Context *h)
             p += nalsize;
         }
         // Now store right nal length size, that will be use to parse all other nals
-        h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
+        h->nal_length_size = (buf[4] & 0x03) + 1;
     } else {
         h->is_avc = 0;
-        if(decode_nal_units(h, avctx->extradata, avctx->extradata_size) < 0)
+        if(decode_nal_units(h, buf, size) < 0)
             return -1;
     }
     return 0;
@@ -1077,7 +1102,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
     }
 
     if(avctx->extradata_size > 0 && avctx->extradata &&
-       ff_h264_decode_extradata(h))
+       ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size))
         return -1;
 
     if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){
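The ff_h264_decode_extradata() change does two things: the extradata buffer and
its size are now passed in explicitly (the parser below reuses the function),
and every length field read from an avcC header is checked against the bytes
actually remaining. A rough sketch of the layout being walked and the bound the
added checks enforce (informative summary, not new decoder code):

    /* avcC extradata, abridged:
     *   buf[0]     == 1          (configurationVersion)
     *   buf[4] & 3 -> NAL length size minus 1
     *   then a count of SPS NALs, each prefixed by a 16-bit size,
     *   then a count of PPS NALs with the same framing.
     * For each NAL: nalsize = AV_RB16(p) + 2; it is only safe to recurse into
     * decode_nal_units() when
     *   nalsize <= size - (p - buf)
     * i.e. the advertised NAL does not extend past the end of the extradata. */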
@@ -2566,6 +2591,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
         free_tables(h, 0);
         flush_dpb(s->avctx);
         MPV_common_end(s);
+        h->list_count = 0;
     }
     if (!s->context_initialized) {
         if (h != h0) {
@@ -2826,6 +2852,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
     h->ref_count[1]= h->pps.ref_count[1];
 
     if(h->slice_type_nos != AV_PICTURE_TYPE_I){
+        unsigned max= (16<<(s->picture_structure != PICT_FRAME))-1;
         if(h->slice_type_nos == AV_PICTURE_TYPE_B){
             h->direct_spatial_mv_pred= get_bits1(&s->gb);
         }
@@ -2836,25 +2863,27 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
             if(h->slice_type_nos==AV_PICTURE_TYPE_B)
                 h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
 
-            if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
-                av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
-                h->ref_count[0]= h->ref_count[1]= 1;
-                return -1;
-            }
+        }
+        if(h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
+            av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
+            h->ref_count[0]= h->ref_count[1]= 1;
+            return -1;
         }
         if(h->slice_type_nos == AV_PICTURE_TYPE_B)
             h->list_count= 2;
         else
            h->list_count= 1;
     }else
-        h->list_count= 0;
+        h->ref_count[1]= h->ref_count[0]= h->list_count= 0;
 
     if(!default_ref_list_done){
         ff_h264_fill_default_ref_list(h);
     }
 
-    if(h->slice_type_nos!=AV_PICTURE_TYPE_I && ff_h264_decode_ref_pic_list_reordering(h) < 0)
+    if(h->slice_type_nos!=AV_PICTURE_TYPE_I && ff_h264_decode_ref_pic_list_reordering(h) < 0) {
+        h->ref_count[1]= h->ref_count[0]= 0;
         return -1;
+    }
 
     if(h->slice_type_nos!=AV_PICTURE_TYPE_I){
         s->last_picture_ptr= &h->ref_list[0][0];
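The reference-overflow check now derives its limit from the picture structure
instead of hard-coding 32: a field picture may use up to 32 references per
list, a frame picture only 16. A small illustration of the bound being applied
(values per the H.264 spec; the code line is only a restatement of the hunk):

    /* max references per list:
     *   frame coding: 16 -> max = (16 << 0) - 1 = 15
     *   field coding: 32 -> max = (16 << 1) - 1 = 31
     * so the test  ref_count[x] - 1 > max  rejects out-of-range counts for
     * either picture structure before the lists are filled. */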
@@ -3630,9 +3659,13 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
             switch (hx->nal_unit_type) {
             case NAL_SPS:
             case NAL_PPS:
+                nals_needed = nal_index;
+                break;
             case NAL_IDR_SLICE:
             case NAL_SLICE:
-                nals_needed = nal_index;
+                init_get_bits(&hx->s.gb, ptr, bit_length);
+                if(!get_ue_golomb(&hx->s.gb))
+                    nals_needed = nal_index;
             }
             continue;
         }
@@ -109,7 +109,7 @@
  */
 #define DELAYED_PIC_REF 4
 
-#define QP_MAX_NUM (51 + 2*6)           // The maximum supported qp
+#define QP_MAX_NUM (51 + 4*6)           // The maximum supported qp
 
 /* NAL unit types */
 enum {
@@ -235,7 +235,7 @@ typedef struct PPS{
     int transform_8x8_mode;         ///< transform_8x8_mode_flag
     uint8_t scaling_matrix4[6][16];
     uint8_t scaling_matrix8[6][64];
-    uint8_t chroma_qp_table[2][64];  ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
+    uint8_t chroma_qp_table[2][QP_MAX_NUM+1];  ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
     int chroma_qp_diff;
 }PPS;
 
@@ -587,7 +587,7 @@ typedef struct H264Context{
 }H264Context;
 
 
-extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1]; ///< One chroma qp table for each supported bit depth (8, 9, 10).
+extern const uint8_t ff_h264_chroma_qp[5][QP_MAX_NUM+1]; ///< One chroma qp table for each possible bit depth (8-12).
 
 /**
  * Decode SEI
@@ -661,11 +661,16 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h);
 /**
  * Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
  */
-int ff_h264_check_intra_pred_mode(H264Context *h, int mode);
+int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode);
+
+/**
+ * Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode);
 
 void ff_h264_hl_decode_mb(H264Context *h);
 int ff_h264_frame_start(H264Context *h);
-int ff_h264_decode_extradata(H264Context *h);
+int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size);
 av_cold int ff_h264_decode_init(AVCodecContext *avctx);
 av_cold int ff_h264_decode_end(AVCodecContext *avctx);
 av_cold void ff_h264_decode_init_vlc(void);
@@ -2002,14 +2002,14 @@ decode_intra_mb:
             write_back_intra_pred_mode(h);
             if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1;
         } else {
-            h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode );
+            h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode( h, h->intra16x16_pred_mode );
             if( h->intra16x16_pred_mode < 0 ) return -1;
         }
         if(decode_chroma){
             h->chroma_pred_mode_table[mb_xy] =
             pred_mode = decode_cabac_mb_chroma_pre_mode( h );
 
-            pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode );
+            pred_mode= ff_h264_check_intra_chroma_pred_mode( h, pred_mode );
             if( pred_mode < 0 ) return -1;
             h->chroma_pred_mode= pred_mode;
         } else {
@@ -735,12 +735,12 @@ decode_intra_mb:
             if( ff_h264_check_intra4x4_pred_mode(h) < 0)
                 return -1;
         }else{
-            h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode);
+            h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode(h, h->intra16x16_pred_mode);
             if(h->intra16x16_pred_mode < 0)
                 return -1;
         }
         if(decode_chroma){
-            pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb));
+            pred_mode= ff_h264_check_intra_chroma_pred_mode(h, get_ue_golomb_31(&s->gb));
             if(pred_mode < 0)
                 return -1;
             h->chroma_pred_mode= pred_mode;
@@ -89,7 +89,8 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
             for(j=start; j<end; j++){
                 if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
                     int cur_ref= mbafi ? (j-16)^field : j;
-                    map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
+                    if(ref1->mbaff)
+                        map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
                     if(rfield == field || !interl)
                         map[list][old_ref] = cur_ref;
                     break;
@@ -251,7 +251,7 @@ static int h264_parse(AVCodecParserContext *s,
         h->got_first = 1;
         if (avctx->extradata_size) {
             h->s.avctx = avctx;
-            ff_h264_decode_extradata(h);
+            ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
         }
     }
 
@@ -70,7 +70,7 @@ static const AVRational pixel_aspect[17]={
     QP(37,d), QP(37,d), QP(37,d), QP(38,d), QP(38,d), QP(38,d),\
     QP(39,d), QP(39,d), QP(39,d), QP(39,d)
 
-const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1] = {
+const uint8_t ff_h264_chroma_qp[5][QP_MAX_NUM+1] = {
     {
         CHROMA_QP_TABLE_END(8)
     },
@@ -83,6 +83,19 @@ const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1] = {
         6, 7, 8, 9, 10, 11,
         CHROMA_QP_TABLE_END(10)
     },
+    {
+        0, 1, 2, 3, 4, 5,
+        6, 7, 8, 9, 10, 11,
+       12,13,14,15, 16, 17,
+        CHROMA_QP_TABLE_END(11)
+    },
+    {
+        0, 1, 2, 3, 4, 5,
+        6, 7, 8, 9, 10, 11,
+       12,13,14,15, 16, 17,
+       18,19,20,21, 22, 23,
+        CHROMA_QP_TABLE_END(12)
+    },
 };
 
 static const uint8_t default_scaling4[2][16]={
@@ -130,8 +143,8 @@ static inline int decode_hrd_parameters(H264Context *h, SPS *sps){
     get_bits(&s->gb, 4); /* bit_rate_scale */
     get_bits(&s->gb, 4); /* cpb_size_scale */
     for(i=0; i<cpb_count; i++){
-        get_ue_golomb(&s->gb); /* bit_rate_value_minus1 */
-        get_ue_golomb(&s->gb); /* cpb_size_value_minus1 */
+        get_ue_golomb_long(&s->gb); /* bit_rate_value_minus1 */
+        get_ue_golomb_long(&s->gb); /* cpb_size_value_minus1 */
         get_bits1(&s->gb); /* cbr_flag */
     }
     sps->initial_cpb_removal_delay_length = get_bits(&s->gb, 5) + 1;
@@ -333,6 +346,11 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
         sps->residual_color_transform_flag = get_bits1(&s->gb);
         sps->bit_depth_luma   = get_ue_golomb(&s->gb) + 8;
         sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
+        if (sps->bit_depth_luma > 12U || sps->bit_depth_chroma > 12U) {
+            av_log(h->s.avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
+                   sps->bit_depth_luma, sps->bit_depth_chroma);
+            goto fail;
+        }
        sps->transform_bypass = get_bits1(&s->gb);
        decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8);
     }else{
@@ -365,7 +383,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
     }
 
     sps->ref_frame_count= get_ue_golomb_31(&s->gb);
-    if(sps->ref_frame_count > MAX_PICTURE_COUNT-2 || sps->ref_frame_count >= 32U){
+    if(sps->ref_frame_count > MAX_PICTURE_COUNT-2 || sps->ref_frame_count > 16U){
         av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n");
         goto fail;
     }
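The h264_ps hunks extend high-bit-depth support from 10 to 12 bits and make the
SPS parser reject depths it cannot handle before they are ever used as a table
index (the chroma QP tables above are indexed by bit_depth - 8). Each extra bit
of depth adds 6 to the usable QP range, which is why QP_MAX_NUM grew from
51 + 2*6 to 51 + 4*6 and two more tables appear. An informative summary of the
relationship, per the H.264 spec:

    /* bit depth   QpBdOffset = 6*(bitdepth-8)   max QP = 51 + QpBdOffset
     *     8               0                         51
     *     9               6                         57
     *    10              12                         63
     *    11              18                         69
     *    12              24                         75
     * ff_h264_chroma_qp[bit_depth - 8][qp] maps a luma QP to the chroma QP
     * for that depth, so bit depths above 12 must be rejected up front. */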
@@ -301,7 +301,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
 
 void ff_h264_fill_mbaff_ref_list(H264Context *h){
     int list, i, j;
-    for(list=0; list<2; list++){ //FIXME try list_count
+    for(list=0; list<h->list_count; list++){
         for(i=0; i<h->ref_count[list]; i++){
             Picture *frame = &h->ref_list[list][i];
             Picture *field = &h->ref_list[list][16+2*i];
@@ -40,7 +40,7 @@
 #undef BIT_DEPTH
 
 static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int lt= src[-1-1*stride];
+    const unsigned lt = src[-1-1*stride];
     LOAD_TOP_EDGE
     LOAD_TOP_RIGHT_EDGE
     uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
@@ -55,7 +55,7 @@ static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int st
 }
 
 static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int lt= src[-1-1*stride];
+    const unsigned lt = src[-1-1*stride];
     LOAD_LEFT_EDGE
 
     AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
@@ -292,7 +292,7 @@ static void pred16x16_tm_vp8_c(uint8_t *src, int stride){
 
 static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
     int i;
-    int dc0;
+    unsigned dc0;
 
     dc0=0;
     for(i=0;i<8; i++)
@@ -307,7 +307,7 @@ static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
 
 static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
     int i;
-    int dc0;
+    unsigned dc0;
 
     dc0=0;
     for(i=0;i<8; i++)
@@ -322,7 +322,7 @@ static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
 
 static void pred8x8_dc_rv40_c(uint8_t *src, int stride){
     int i;
-    int dc0=0;
+    unsigned dc0 = 0;
 
     for(i=0;i<4; i++){
         dc0+= src[-1+i*stride] + src[i-stride];
@@ -121,28 +121,28 @@ static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright, int _s
 
 
 #define LOAD_TOP_RIGHT_EDGE\
-    const int av_unused t4= topright[0];\
-    const int av_unused t5= topright[1];\
-    const int av_unused t6= topright[2];\
-    const int av_unused t7= topright[3];\
+    const unsigned av_unused t4 = topright[0];\
+    const unsigned av_unused t5 = topright[1];\
+    const unsigned av_unused t6 = topright[2];\
+    const unsigned av_unused t7 = topright[3];\
 
 #define LOAD_DOWN_LEFT_EDGE\
-    const int av_unused l4= src[-1+4*stride];\
-    const int av_unused l5= src[-1+5*stride];\
-    const int av_unused l6= src[-1+6*stride];\
-    const int av_unused l7= src[-1+7*stride];\
+    const unsigned av_unused l4 = src[-1+4*stride];\
+    const unsigned av_unused l5 = src[-1+5*stride];\
+    const unsigned av_unused l6 = src[-1+6*stride];\
+    const unsigned av_unused l7 = src[-1+7*stride];\
 
 #define LOAD_LEFT_EDGE\
-    const int av_unused l0= src[-1+0*stride];\
-    const int av_unused l1= src[-1+1*stride];\
-    const int av_unused l2= src[-1+2*stride];\
-    const int av_unused l3= src[-1+3*stride];\
+    const unsigned av_unused l0 = src[-1+0*stride];\
+    const unsigned av_unused l1 = src[-1+1*stride];\
+    const unsigned av_unused l2 = src[-1+2*stride];\
+    const unsigned av_unused l3 = src[-1+3*stride];\
 
 #define LOAD_TOP_EDGE\
-    const int av_unused t0= src[ 0-1*stride];\
-    const int av_unused t1= src[ 1-1*stride];\
-    const int av_unused t2= src[ 2-1*stride];\
-    const int av_unused t3= src[ 3-1*stride];\
+    const unsigned av_unused t0 = src[ 0-1*stride];\
+    const unsigned av_unused t1 = src[ 1-1*stride];\
+    const unsigned av_unused t2 = src[ 2-1*stride];\
+    const unsigned av_unused t3 = src[ 3-1*stride];\
 
 static void FUNCC(pred4x4_down_right)(uint8_t *_src, const uint8_t *topright, int _stride){
     pixel *src = (pixel*)_src;
@@ -104,10 +104,15 @@ static VLC_TYPE vlc_tables[VLC_TABLES_SIZE][2];
 
 static av_cold int imc_decode_init(AVCodecContext * avctx)
 {
-    int i, j;
+    int i, j, ret;
     IMCContext *q = avctx->priv_data;
     double r1, r2;
 
+    if (avctx->channels != 1) {
+        av_log_ask_for_sample(avctx, "Number of channels is not supported\n");
+        return AVERROR_PATCHWELCOME;
+    }
+
     q->decoder_reset = 1;
 
     for(i = 0; i < BANDS; i++)
@@ -156,7 +161,10 @@ static av_cold int imc_decode_init(AVCodecContext * avctx)
     }
     q->one_div_log2 = 1/log(2);
 
-    ff_fft_init(&q->fft, 7, 1);
+    if ((ret = ff_fft_init(&q->fft, 7, 1))) {
+        av_log(avctx, AV_LOG_INFO, "FFT init failed\n");
+        return ret;
+    }
     dsputil_init(&q->dsp, avctx);
     avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
@@ -344,6 +344,16 @@ int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width,
         }
     }
 
+    switch (pix_fmt) {
+    case PIX_FMT_RGB8:
+    case PIX_FMT_BGR8:
+    case PIX_FMT_RGB4_BYTE:
+    case PIX_FMT_BGR4_BYTE:
+    case PIX_FMT_GRAY8:
+        // do not include palette for these pseudo-paletted formats
+        return size;
+    }
+
     if (desc->flags & PIX_FMT_PAL)
         memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
 
@@ -321,6 +321,8 @@ int ff_j2k_dwt_init(DWTContext *s, uint16_t border[2][2], int decomp_levels, int
     int i, j, lev = decomp_levels, maxlen,
         b[2][2];
 
+    if (decomp_levels >= FF_DWT_MAX_DECLVLS)
+        return AVERROR_INVALIDDATA;
     s->ndeclevels = decomp_levels;
     s->type = type;
 
@@ -961,18 +961,20 @@ static int decode_codestream(J2kDecoderContext *s)
 
 static int jp2_find_codestream(J2kDecoderContext *s)
 {
-    int32_t atom_size;
+    uint32_t atom_size;
     int found_codestream = 0, search_range = 10;
 
     // skip jpeg2k signature atom
     s->buf += 12;
 
-    while(!found_codestream && search_range) {
+    while(!found_codestream && search_range && s->buf_end - s->buf >= 8) {
         atom_size = AV_RB32(s->buf);
         if(AV_RB32(s->buf + 4) == JP2_CODESTREAM) {
             found_codestream = 1;
             s->buf += 8;
         } else {
+            if (s->buf_end - s->buf < atom_size)
+                return 0;
             s->buf += atom_size;
             search_range--;
         }
@@ -1005,7 +1007,8 @@ static int decode_frame(AVCodecContext *avctx,
         return AVERROR(EINVAL);
 
     // check if the image is in jp2 format
-    if((AV_RB32(s->buf) == 12) && (AV_RB32(s->buf + 4) == JP2_SIG_TYPE) &&
+    if(s->buf_end - s->buf >= 12 &&
+       (AV_RB32(s->buf) == 12) && (AV_RB32(s->buf + 4) == JP2_SIG_TYPE) &&
        (AV_RB32(s->buf + 8) == JP2_SIG_VALUE)) {
         if(!jp2_find_codestream(s)) {
             av_log(avctx, AV_LOG_ERROR, "couldn't find jpeg2k codestream atom\n");
libavcodec/libaacplus.c (new file, 136 lines)
@@ -0,0 +1,136 @@
+/*
+ * Interface to libaacplus for aac+ (sbr+ps) encoding
+ * Copyright (c) 2010 tipok <piratfm@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Interface to libaacplus for aac+ (sbr+ps) encoding.
+ */
+
+#include "avcodec.h"
+#include <aacplus.h>
+
+typedef struct aacPlusAudioContext {
+    aacplusEncHandle aacplus_handle;
+} aacPlusAudioContext;
+
+static av_cold int aacPlus_encode_init(AVCodecContext *avctx)
+{
+    aacPlusAudioContext *s = avctx->priv_data;
+    aacplusEncConfiguration *aacplus_cfg;
+    unsigned long samples_input, max_bytes_output;
+
+    /* number of channels */
+    if (avctx->channels < 1 || avctx->channels > 2) {
+        av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels);
+        return -1;
+    }
+
+    s->aacplus_handle = aacplusEncOpen(avctx->sample_rate,
+                                       avctx->channels,
+                                       &samples_input, &max_bytes_output);
+    if(!s->aacplus_handle) {
+        av_log(avctx, AV_LOG_ERROR, "can't open encoder\n");
+        return -1;
+    }
+
+    /* check aacplus version */
+    aacplus_cfg = aacplusEncGetCurrentConfiguration(s->aacplus_handle);
+
+    /* put the options in the configuration struct */
+    if(avctx->profile != FF_PROFILE_AAC_LOW && avctx->profile != FF_PROFILE_UNKNOWN) {
+        av_log(avctx, AV_LOG_ERROR, "invalid AAC profile: %d, only LC supported\n", avctx->profile);
+        aacplusEncClose(s->aacplus_handle);
+        return -1;
+    }
+
+    aacplus_cfg->bitRate = avctx->bit_rate;
+    aacplus_cfg->bandWidth = avctx->cutoff;
+    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+        aacplus_cfg->outputFormat = 0; //raw aac
+    }
+    aacplus_cfg->inputFormat = AACPLUS_INPUT_16BIT;
+    if (!aacplusEncSetConfiguration(s->aacplus_handle, aacplus_cfg)) {
+        av_log(avctx, AV_LOG_ERROR, "libaacplus doesn't support this output format!\n");
+        return -1;
+    }
+
+    avctx->frame_size = samples_input / avctx->channels;
+
+    avctx->coded_frame= avcodec_alloc_frame();
+    avctx->coded_frame->key_frame= 1;
+
+    /* Set decoder specific info */
+    avctx->extradata_size = 0;
+    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+
+        unsigned char *buffer = NULL;
+        unsigned long decoder_specific_info_size;
+
+        if (aacplusEncGetDecoderSpecificInfo(s->aacplus_handle, &buffer,
+                                             &decoder_specific_info_size) == 1) {
+            avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE);
+            avctx->extradata_size = decoder_specific_info_size;
+            memcpy(avctx->extradata, buffer, avctx->extradata_size);
+        }
+#undef free
+        free(buffer);
+#define free please_use_av_free
+    }
+    return 0;
+}
+
+static int aacPlus_encode_frame(AVCodecContext *avctx,
+                                unsigned char *frame, int buf_size, void *data)
+{
+    aacPlusAudioContext *s = avctx->priv_data;
+    int bytes_written;
+
+    bytes_written = aacplusEncEncode(s->aacplus_handle,
+                                     data,
+                                     avctx->frame_size * avctx->channels,
+                                     frame,
+                                     buf_size);
+
+    return bytes_written;
+}
+
+static av_cold int aacPlus_encode_close(AVCodecContext *avctx)
+{
+    aacPlusAudioContext *s = avctx->priv_data;
+
+    av_freep(&avctx->coded_frame);
+    av_freep(&avctx->extradata);
+
+    aacplusEncClose(s->aacplus_handle);
+    return 0;
+}
+
+AVCodec ff_libaacplus_encoder = {
+    "libaacplus",
+    AVMEDIA_TYPE_AUDIO,
+    CODEC_ID_AAC,
+    sizeof(aacPlusAudioContext),
+    aacPlus_encode_init,
+    aacPlus_encode_frame,
+    aacPlus_encode_close,
+    .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE},
+    .long_name = NULL_IF_CONFIG_SMALL("libaacplus AAC+ (Advanced Audio Codec with SBR+PS)"),
+};
@@ -139,18 +139,25 @@ static int libgsm_decode_frame(AVCodecContext *avctx,
                                AVPacket *avpkt) {
     uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
+    int out_size = avctx->frame_size * av_get_bytes_per_sample(avctx->sample_fmt);
+
+    if (*data_size < out_size) {
+        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+        return AVERROR(EINVAL);
+    }
 
     *data_size = 0; /* In case of error */
     if(buf_size < avctx->block_align) return -1;
     switch(avctx->codec_id) {
     case CODEC_ID_GSM:
         if(gsm_decode(avctx->priv_data,buf,data)) return -1;
-        *data_size = GSM_FRAME_SIZE*sizeof(int16_t);
         break;
     case CODEC_ID_GSM_MS:
         if(gsm_decode(avctx->priv_data,buf,data) ||
            gsm_decode(avctx->priv_data,buf+33,((int16_t*)data)+GSM_FRAME_SIZE)) return -1;
-        *data_size = GSM_FRAME_SIZE*sizeof(int16_t)*2;
     }
 
+    *data_size = out_size;
     return avctx->block_align;
 }
 
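This is the same defensive pattern several audio decoders get in this merge
(libgsm here, mpc7/mpc8 and mpegaudio further down): compute the number of
bytes the decoder is about to write and refuse to decode if the caller's output
buffer is smaller, instead of writing first and setting *data_size afterwards.
A generic sketch of the check, with illustrative names rather than any one
decoder's code:

    /* sketch: pre-flight output-buffer check inside an audio decode_frame() */
    int out_size = nb_samples * channels * bytes_per_sample;
    if (*data_size < out_size) {
        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
        return AVERROR(EINVAL);
    }
    /* ... decode into data ... */
    *data_size = out_size;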
@@ -481,8 +481,8 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
             break;
         case VPX_CODEC_STATS_PKT: {
             struct vpx_fixed_buf *stats = &ctx->twopass_stats;
-            stats->buf = av_realloc(stats->buf,
+            stats->buf = av_realloc_f(stats->buf, 1,
                                       stats->sz + pkt->data.twopass_stats.sz);
             if (!stats->buf) {
                 av_log(avctx, AV_LOG_ERROR, "Stat buffer realloc failed\n");
                 return AVERROR(ENOMEM);
@@ -70,9 +70,14 @@ static int encode_nals(AVCodecContext *ctx, uint8_t *buf, int size,
 
     /* Write the SEI as part of the first frame. */
     if (x4->sei_size > 0 && nnal > 0) {
+        if (x4->sei_size > size) {
+            av_log(ctx, AV_LOG_ERROR, "Error: nal buffer is too small\n");
+            return -1;
+        }
         memcpy(p, x4->sei, x4->sei_size);
         p += x4->sei_size;
         x4->sei_size = 0;
+        // why is x4->sei not freed?
     }
 
     for (i = 0; i < nnal; i++){
@@ -83,6 +88,11 @@ static int encode_nals(AVCodecContext *ctx, uint8_t *buf, int size,
             memcpy(x4->sei, nals[i].p_payload, nals[i].i_payload);
             continue;
         }
+        if (nals[i].i_payload > (size - (p - buf))) {
+            // return only complete nals which fit in buf
+            av_log(ctx, AV_LOG_ERROR, "Error: nal buffer is too small\n");
+            break;
+        }
         memcpy(p, nals[i].p_payload, nals[i].i_payload);
         p += nals[i].i_payload;
     }
@@ -91,13 +101,14 @@ static int encode_nals(AVCodecContext *ctx, uint8_t *buf, int size,
 }
 
 static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
-                      int bufsize, void *data)
+                      int orig_bufsize, void *data)
 {
     X264Context *x4 = ctx->priv_data;
     AVFrame *frame = data;
     x264_nal_t *nal;
     int nnal, i;
     x264_picture_t pic_out;
+    int bufsize;
 
     x264_picture_init( &x4->pic );
     x4->pic.img.i_csp = X264_CSP_I420;
@@ -128,6 +139,7 @@ static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
     }
 
     do {
+        bufsize = orig_bufsize;
         if (x264_encoder_encode(x4->enc, &nal, &nnal, frame? &x4->pic: NULL, &pic_out) < 0)
             return -1;
 
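Two related fixes in the x264 wrapper: encode_nals() now verifies that the SEI
and each NAL actually fit in the caller's buffer before copying, and
X264_frame() keeps the caller's original size in orig_bufsize so bufsize can be
reset on every pass of the retry loop, since each attempt consumes it. The loop
shape, reduced to its essentials (a sketch only, not the wrapper itself):

    /* sketch: retrying encode loop that must not reuse a shrunken size */
    int bufsize;
    do {
        bufsize = orig_bufsize;   /* reset before each attempt */
        /* ... x264_encoder_encode(...);
         *     bufsize = encode_nals(ctx, buf, bufsize, nal, nnal, 0); ... */
    } while (0 /* encoder still has delayed frames to flush */);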
@@ -950,7 +950,12 @@ static int output_data_internal(MLPDecodeContext *m, unsigned int substr,
     int32_t *data_32 = (int32_t*) data;
     int16_t *data_16 = (int16_t*) data;
 
-    if (*data_size < (s->max_channel + 1) * s->blockpos * (is32 ? 4 : 2))
+    if (m->avctx->channels != s->max_matrix_channel + 1) {
+        av_log(m->avctx, AV_LOG_ERROR, "channel count mismatch\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    if (*data_size < m->avctx->channels * s->blockpos * (is32 ? 4 : 2))
         return -1;
 
     for (i = 0; i < s->blockpos; i++) {
@@ -1016,7 +1016,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
     /* intra / predictive decision */
     pix = c->src[0][0];
     sum = s->dsp.pix_sum(pix, s->linesize);
-    varc = s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500;
+    varc = s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500;
 
     pic->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
     pic->mb_var [s->mb_stride * mb_y + mb_x] = (varc+128)>>8;
@@ -1178,7 +1178,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
     if((c->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
         intra_score= varc - 500;
     }else{
-        int mean= (sum+128)>>8;
+        unsigned mean = (sum+128)>>8;
         mean*= 0x01010101;
 
         for(i=0; i<16; i++){
@@ -52,14 +52,16 @@ typedef struct MotionPixelsContext {
 static av_cold int mp_decode_init(AVCodecContext *avctx)
 {
     MotionPixelsContext *mp = avctx->priv_data;
+    int w4 = (avctx->width + 3) & ~3;
+    int h4 = (avctx->height + 3) & ~3;
 
     motionpixels_tableinit();
     mp->avctx = avctx;
     dsputil_init(&mp->dsp, avctx);
-    mp->changes_map = av_mallocz(avctx->width * avctx->height);
+    mp->changes_map = av_mallocz(avctx->width * h4);
     mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
     mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
-    mp->hpt = av_mallocz(avctx->height * avctx->width / 16 * sizeof(YuvPixel));
+    mp->hpt = av_mallocz(h4 * w4 / 16 * sizeof(YuvPixel));
     avctx->pix_fmt = PIX_FMT_RGB555;
     avcodec_get_frame_defaults(&mp->frame);
     return 0;
@@ -253,6 +255,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
     mp->dsp.bswap_buf((uint32_t *)mp->bswapbuf, (const uint32_t *)buf, buf_size / 4);
     if (buf_size & 3)
         memcpy(mp->bswapbuf + (buf_size & ~3), buf + (buf_size & ~3), buf_size & 3);
+    memset(mp->bswapbuf + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
     init_get_bits(&gb, mp->bswapbuf, buf_size * 8);
 
     memset(mp->changes_map, 0, avctx->width * avctx->height);
@@ -279,6 +282,8 @@ static int mp_decode_frame(AVCodecContext *avctx,
     if (sz == 0)
         goto end;
 
+    if (mp->max_codes_bits <= 0)
+        goto end;
     if (init_vlc(&mp->vlc, mp->max_codes_bits, mp->codes_count, &mp->codes[0].size, sizeof(HuffCode), 1, &mp->codes[0].code, sizeof(HuffCode), 4, 0))
         goto end;
     mp_decode_frame_helper(mp, &gb);
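The Motion Pixels init change rounds both dimensions up to multiples of 4
before sizing changes_map and hpt, so the decoder's 4x4 block walk cannot index
past the end of those buffers on odd-sized videos, and the frame path now
zeroes the FF_INPUT_BUFFER_PADDING_SIZE tail of the byte-swapped buffer before
bit reading. The rounding itself is the usual align-up idiom:

    /* sketch: round a dimension up to the next multiple of 4 */
    int w4 = (width  + 3) & ~3;   /* e.g. 101 -> 104, 104 stays 104 */
    int h4 = (height + 3) & ~3;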
@@ -197,12 +197,19 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
     int i, ch;
     int mb = -1;
     Band *bands = c->bands;
-    int off;
+    int off, out_size;
     int bits_used, bits_avail;
 
     memset(bands, 0, sizeof(bands));
     if(buf_size <= 4){
         av_log(avctx, AV_LOG_ERROR, "Too small buffer passed (%i bytes)\n", buf_size);
+        return AVERROR(EINVAL);
+    }
+
+    out_size = (buf[1] ? c->lastframelen : MPC_FRAME_SIZE) * 4;
+    if (*data_size < out_size) {
+        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+        return AVERROR(EINVAL);
     }
 
     bits = av_malloc(((buf_size - 1) & ~3) + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -277,7 +284,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
         *data_size = 0;
         return buf_size;
     }
-    *data_size = (buf[1] ? c->lastframelen : MPC_FRAME_SIZE) * 4;
+    *data_size = out_size;
 
     return buf_size;
 }
@@ -127,6 +127,8 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
 
     skip_bits(&gb, 3);//sample rate
     c->maxbands = get_bits(&gb, 5) + 1;
+    if (c->maxbands >= BANDS)
+        return AVERROR_INVALIDDATA;
     channels = get_bits(&gb, 4) + 1;
     if (channels > 2) {
         av_log_missing_feature(avctx, "Multichannel MPC SV8", 1);
@@ -241,10 +243,16 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
     GetBitContext gb2, *gb = &gb2;
     int i, j, k, ch, cnt, res, t;
     Band *bands = c->bands;
-    int off;
+    int off, out_size;
     int maxband, keyframe;
     int last[2];
 
+    out_size = MPC_FRAME_SIZE * 2 * avctx->channels;
+    if (*data_size < out_size) {
+        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+        return AVERROR(EINVAL);
+    }
+
     keyframe = c->cur_frame == 0;
 
     if(keyframe){
@@ -260,6 +268,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
         maxband = c->last_max_band + get_vlc2(gb, band_vlc.table, MPC8_BANDS_BITS, 2);
         if(maxband > 32) maxband -= 33;
     }
+    if(maxband > c->maxbands)
+        return AVERROR_INVALIDDATA;
     c->last_max_band = maxband;
 
     /* read subband indexes */
@@ -400,7 +410,7 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
     c->last_bits_used = get_bits_count(gb);
     if(c->cur_frame >= c->frames)
         c->cur_frame = 0;
-    *data_size = MPC_FRAME_SIZE * 2 * avctx->channels;
+    *data_size = out_size;
 
     return c->cur_frame ? c->last_bits_used >> 3 : buf_size;
 }
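Beyond the output-size checks, the Musepack SV8 decoder now validates values it
later uses as array bounds: the maxbands parsed from the stream header must stay
below the static BANDS table size, and each frame's maxband may not exceed the
header's maxbands. The shape of the check is the familiar parse-validate-use
one (sketch with the same names, not the decoder's full context):

    /* sketch: reject a parsed index before it becomes a loop/array bound */
    int maxbands = get_bits(gb, 5) + 1;   /* 1..32 straight from the bitstream */
    if (maxbands >= BANDS)                /* BANDS is the static table size */
        return AVERROR_INVALIDDATA;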
@@ -1801,8 +1801,8 @@ static int decode_frame(AVCodecContext * avctx,
     avctx->bit_rate = s->bit_rate;
     avctx->sub_id = s->layer;
 
-    if(*data_size < 1152*avctx->channels*sizeof(OUT_INT))
-        return -1;
+    if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
+        return AVERROR(EINVAL);
     *data_size = 0;
 
     if(s->frame_size<=0 || s->frame_size > buf_size){
@@ -1870,6 +1870,9 @@ static int decode_frame_adu(AVCodecContext * avctx,
     avctx->bit_rate = s->bit_rate;
     avctx->sub_id = s->layer;
 
+    if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
+        return AVERROR(EINVAL);
+
     s->frame_size = len;
 
     if (avctx->parse_only) {
@@ -369,8 +369,8 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
     int i;
 
     // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
-    FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
-    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
+    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21*2;
 
     //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
@@ -2321,12 +2321,16 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
 
         edge_h= FFMIN(h, s->v_edge_pos - y);
 
-        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize  , s->linesize,
-                          s->h_edge_pos        , edge_h        , EDGE_WIDTH        , EDGE_WIDTH        , sides);
-        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
-                          s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
-        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
-                          s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
+        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
+                          s->linesize,   s->h_edge_pos,         edge_h,
+                          EDGE_WIDTH,         EDGE_WIDTH,         sides);
+        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
+                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
+                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
+        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
+                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
+                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
     }
 
     h= FFMIN(h, s->avctx->height - y);
@@ -124,7 +124,7 @@ typedef struct Picture{
     int pic_id;             /**< h264 pic_num (short -> no wrap version of pic_num,
                                  pic_num & max_pic_num; long -> long_pic_num) */
     int long_ref;           ///< 1->long term reference 0->short term reference
-    int ref_poc[2][2][16];  ///< h264 POCs of the frames used as reference (FIXME need per slice)
+    int ref_poc[2][2][32];  ///< h264 POCs of the frames/fields used as reference (FIXME need per slice)
     int ref_count[2][2];    ///< number of entries in ref_poc (FIXME need per slice)
     int mbaff;              ///< h264 1 -> MBAFF frame 0-> not MBAFF
     int field_picture;      ///< whether or not the picture was encoded in seperate fields
@@ -413,9 +413,10 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     if ((s->codec_id == CODEC_ID_MPEG4 || s->codec_id == CODEC_ID_H263 ||
          s->codec_id == CODEC_ID_H263P) &&
         (avctx->sample_aspect_ratio.num > 255 || avctx->sample_aspect_ratio.den > 255)) {
-        av_log(avctx, AV_LOG_ERROR, "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
+        av_log(avctx, AV_LOG_WARNING, "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
-        return -1;
+        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
+                  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
     }

     if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
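For readers without libavutil at hand: the call above asks av_reduce to find the closest fraction whose numerator and denominator both fit in 255, instead of rejecting the stream. The standalone sketch below is a rough approximation only, dividing by the GCD and then scaling; the real av_reduce computes a proper best rational approximation under the bound.

#include <stdio.h>

static long gcd_long(long a, long b)
{
    while (b) { long t = a % b; a = b; b = t; }
    return a;
}

/* Crudely clamp num/den so both fit in [1, max]; a stand-in for av_reduce(),
 * good enough to see what the encoder init above now achieves. */
static void reduce_clamp(long *num, long *den, long max)
{
    long g = gcd_long(*num, *den);
    if (g) { *num /= g; *den /= g; }
    while (*num > max || *den > max) {   /* scale down, losing some precision */
        *num = (*num + 1) / 2;
        *den = (*den + 1) / 2;
    }
    if (!*num) *num = 1;
    if (!*den) *den = 1;
}

int main(void)
{
    long num = 4096, den = 3072;         /* 4:3 expressed with large terms */
    reduce_clamp(&num, &den, 255);
    printf("reduced aspect: %ld/%ld\n", num, den);   /* prints 4/3 */
    return 0;
}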
@@ -2008,7 +2009,7 @@ static int mb_var_thread(AVCodecContext *c, void *arg){
                 int varc;
                 int sum = s->dsp.pix_sum(pix, s->linesize);

-                varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
+                varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;

                 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
                 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
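Moving the cast inside the parentheses is not cosmetic: for a bright 16x16 block the pixel sum can reach 255*256 = 65280, and squaring that in signed int overflows INT_MAX, which is undefined behaviour; casting one operand first makes the whole multiplication unsigned, where the result still fits in 32 bits. A standalone illustration with that worst-case value:

#include <stdio.h>

int main(void)
{
    int sum = 65280;    /* largest possible pix_sum of a 16x16 block */

    /* (unsigned)sum * sum: the multiplication is performed in unsigned
     * arithmetic, so 65280*65280 = 4261478400 is representable in a
     * 32-bit unsigned and well defined. */
    unsigned ok = (unsigned)sum * sum;

    /* (unsigned)(sum * sum) would square in *signed* int first, overflowing
     * before the cast applies, which is undefined behaviour in C. */

    printf("sum^2 = %u, (sum^2)>>8 = %u\n", ok, ok >> 8);
    return 0;
}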
@@ -20,6 +20,7 @@
  */
 #include <stdio.h>
 #include <stdlib.h>
+#include <limits.h>

 #include "libavutil/bswap.h"
 #include "libavutil/lzo.h"

@@ -112,19 +113,23 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height, int qualit
     if (quality >= 0)
         get_quant_quality(c, quality);
     if (width != c->width || height != c->height) {
-        if (av_image_check_size(height, width, 0, avctx) < 0)
-            return 0;
+        // also reserve space for a possible additional header
+        int buf_size = 24 + height * width * 3 / 2 + AV_LZO_OUTPUT_PADDING;
+        if (av_image_check_size(height, width, 0, avctx) < 0 ||
+            buf_size > INT_MAX/8)
+            return -1;
         avctx->width = c->width = width;
         avctx->height = c->height = height;
-        av_fast_malloc(&c->decomp_buf, &c->decomp_size, c->height * c->width * 3 / 2);
+        av_fast_malloc(&c->decomp_buf, &c->decomp_size, buf_size);
         if (!c->decomp_buf) {
             av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
-            return 0;
+            return AVERROR(ENOMEM);
         }
         rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
+        return 1;
     } else if (quality != c->quality)
         rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
-    return 1;
+    return 0;
 }

 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,

@@ -135,6 +140,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     AVFrame *picture = data;
     int orig_size = buf_size;
     int keyframe;
+    int size_change = 0;
     int result;
     enum {NUV_UNCOMPRESSED = '0', NUV_RTJPEG = '1',
           NUV_RTJPEG_IN_LZO = '2', NUV_LZO = '3',

@@ -172,18 +178,19 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     default:
         keyframe = 1; break;
     }
+retry:
     // skip rest of the frameheader.
     buf = &buf[12];
     buf_size -= 12;
     if (comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO) {
-        int outlen = c->decomp_size, inlen = buf_size;
+        int outlen = c->decomp_size - AV_LZO_OUTPUT_PADDING, inlen = buf_size;
         if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen))
             av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
         buf = c->decomp_buf;
-        buf_size = c->decomp_size;
+        buf_size = c->decomp_size - AV_LZO_OUTPUT_PADDING;
     }
     if (c->codec_frameheader) {
-        int w, h, q;
+        int w, h, q, res;
         if (buf_size < 12) {
             av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n");
             return -1;

@@ -191,13 +198,20 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         w = AV_RL16(&buf[6]);
         h = AV_RL16(&buf[8]);
         q = buf[10];
-        if (!codec_reinit(avctx, w, h, q))
-            return -1;
+        res = codec_reinit(avctx, w, h, q);
+        if (res < 0)
+            return res;
+        if (res) {
+            buf = avpkt->data;
+            buf_size = avpkt->size;
+            size_change = 1;
+            goto retry;
+        }
         buf = &buf[12];
         buf_size -= 12;
     }

-    if (keyframe && c->pic.data[0])
+    if ((size_change || keyframe) && c->pic.data[0])
         avctx->release_buffer(avctx, &c->pic);
     c->pic.reference = 3;
     c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |

@@ -259,7 +273,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
     if (avctx->extradata_size)
         get_quant(avctx, c, avctx->extradata, avctx->extradata_size);
     dsputil_init(&c->dsp, avctx);
-    if (!codec_reinit(avctx, avctx->width, avctx->height, -1))
+    if (codec_reinit(avctx, avctx->width, avctx->height, -1) < 0)
         return 1;
     return 0;
 }
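The codec_reinit() change above combines three safety ideas: account for a possible extra frame header, reserve the LZO overread padding, and refuse sizes that get anywhere near INT_MAX. A standalone sketch of just the sizing arithmetic, using a placeholder padding constant rather than the real AV_LZO_OUTPUT_PADDING from libavutil/lzo.h:

#include <limits.h>
#include <stdio.h>

#define LZO_OUTPUT_PADDING 8   /* stand-in for AV_LZO_OUTPUT_PADDING */

/* YUV420 frame (3/2 bytes per pixel) plus a possible additional header and
 * the LZO padding, rejected when the product would get close to INT_MAX. */
static int nuv_buffer_size(int width, int height)
{
    long long size = 24LL + (long long)height * width * 3 / 2
                          + LZO_OUTPUT_PADDING;
    if (size > INT_MAX / 8)
        return -1;              /* caller treats this as an error */
    return (int)size;
}

int main(void)
{
    printf("640x480 -> %d bytes\n", nuv_buffer_size(640, 480));
    printf("absurd size -> %d\n", nuv_buffer_size(2000000, 2000000));
    return 0;
}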
@@ -44,10 +44,13 @@ X(\name):
 L(\name):
 .endm

-.macro movrel rd, sym
+.macro movrel rd, sym, gp
     ld   \rd, \sym@got(r2)
 .endm

+.macro get_got rd
+.endm
+
 #else /* ARCH_PPC64 */

 #define PTR .int

@@ -65,19 +68,25 @@ X(\name):
 \name:
 .endm

-.macro movrel rd, sym
+.macro movrel rd, sym, gp
 #if CONFIG_PIC
-    bcl   20, 31, lab_pic_\@
-lab_pic_\@:
-    mflr  \rd
-    addis \rd, \rd, (\sym - lab_pic_\@)@ha
-    addi  \rd, \rd, (\sym - lab_pic_\@)@l
+    lwz   \rd, \sym@got(\gp)
 #else
     lis   \rd, \sym@ha
     la    \rd, \sym@l(\rd)
 #endif
 .endm

+.macro get_got rd
+#if CONFIG_PIC
+    bcl   20, 31, .Lgot\@
+.Lgot\@:
+    mflr  \rd
+    addis \rd, \rd, _GLOBAL_OFFSET_TABLE_ - .Lgot\@@ha
+    addi  \rd, \rd, _GLOBAL_OFFSET_TABLE_ - .Lgot\@@l
+#endif
+.endm
+
 #endif /* ARCH_PPC64 */

 #if HAVE_IBM_ASM

@@ -353,6 +353,7 @@ extfunc ff_fft_calc\interleave\()_altivec
     mflr  r0
     stp   r0, 2*PS(r1)
     stpu  r1, -(160+16*PS)(r1)
+    get_got r11
     addi  r6, r1, 16*PS
     stvm  r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
     mfvrsave r0

@@ -360,14 +361,14 @@ extfunc ff_fft_calc\interleave\()_altivec
     li    r6, 0xfffffffc
     mtvrsave r6

-    movrel r6, fft_data
+    movrel r6, fft_data, r11
     lvm   r6, v14, v15, v16, v17, v18, v19, v20, v21
     lvm   r6, v22, v23, v24, v25, v26, v27, v28, v29

     li    r9, 16
-    movrel r12, X(ff_cos_tabs)
+    movrel r12, X(ff_cos_tabs), r11

-    movrel r6, fft_dispatch_tab\interleave\()_altivec
+    movrel r6, fft_dispatch_tab\interleave\()_altivec, r11
     lwz   r3, 0(r3)
     subi  r3, r3, 2
     slwi  r3, r3, 2+ARCH_PPC64
@@ -332,6 +332,9 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
         dst->height  = src->height;
         dst->pix_fmt = src->pix_fmt;

+        dst->coded_width  = src->coded_width;
+        dst->coded_height = src->coded_height;
+
         dst->has_b_frames = src->has_b_frames;
         dst->idct_algo    = src->idct_algo;
         dst->slice_count  = src->slice_count;
@@ -39,12 +39,15 @@ static av_cold int ptx_init(AVCodecContext *avctx) {
 static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                             AVPacket *avpkt) {
     const uint8_t *buf = avpkt->data;
+    const uint8_t *buf_end = avpkt->data + avpkt->size;
     PTXContext * const s = avctx->priv_data;
     AVFrame *picture = data;
     AVFrame * const p = &s->picture;
     unsigned int offset, w, h, y, stride, bytes_per_pixel;
     uint8_t *ptr;

+    if (buf_end - buf < 14)
+        return AVERROR_INVALIDDATA;
     offset = AV_RL16(buf);
     w      = AV_RL16(buf+8);
     h      = AV_RL16(buf+10);

@@ -57,6 +60,9 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,

     avctx->pix_fmt = PIX_FMT_RGB555;

+    if (buf_end - buf < offset)
+        return AVERROR_INVALIDDATA;
     if (offset != 0x2c)
         av_log_ask_for_sample(avctx, "offset != 0x2c\n");

@@ -80,6 +86,8 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     stride = p->linesize[0];

     for (y=0; y<h; y++) {
+        if (buf_end - buf < w * bytes_per_pixel)
+            break;
 #if HAVE_BIGENDIAN
         unsigned int x;
         for (x=0; x<w*bytes_per_pixel; x+=bytes_per_pixel)
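The image decoders touched here all adopt the same calling convention: keep an explicit end pointer for the packet and test "is there room?" before every read, so a truncated file can never cause an overread. A minimal self-contained reader in that style; the field layout is invented for illustration and is not the PTX header format.

#include <stdint.h>
#include <stdio.h>

/* Tiny bounds-checked parser: every access is preceded by an explicit
 * check against an end pointer, exactly like the checks added above. */
static int parse_header(const uint8_t *buf, size_t size,
                        unsigned *w, unsigned *h)
{
    const uint8_t *buf_end = buf + size;

    if (buf_end - buf < 4)          /* need two 16-bit fields */
        return -1;
    *w = buf[0] | (buf[1] << 8);    /* little-endian width  */
    *h = buf[2] | (buf[3] << 8);    /* little-endian height */
    return 0;
}

int main(void)
{
    const uint8_t pkt[] = { 0x80, 0x02, 0xe0, 0x01 };   /* 640 x 480 */
    unsigned w, h;
    if (!parse_header(pkt, sizeof(pkt), &w, &h))
        printf("%ux%u\n", w, h);
    if (parse_header(pkt, 2, &w, &h) < 0)
        printf("truncated packet rejected\n");
    return 0;
}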
@@ -78,7 +78,8 @@ static inline int put_bits_count(PutBitContext *s)
 static inline void flush_put_bits(PutBitContext *s)
 {
 #ifndef BITSTREAM_WRITER_LE
-    s->bit_buf<<= s->bit_left;
+    if (s->bit_left < 32)
+        s->bit_buf<<= s->bit_left;
 #endif
     while (s->bit_left < 32) {
         /* XXX: should test end of buffer */
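The guard matters because when nothing has been written yet, bit_left is 32, and shifting a 32-bit value by 32 is undefined behaviour in C (on x86 the shift count is masked, so it typically degrades to a shift by 0 and leaves stale bits). A standalone demonstration with a simplified 32-bit writer state, not the real PutBitContext:

#include <stdint.h>
#include <stdio.h>

struct bitbuf {
    uint32_t bit_buf;   /* bits collected so far, MSB-justified on flush */
    int      bit_left;  /* free bits remaining (32 means nothing pending) */
};

/* Align pending bits to the top of the word before writing them out.
 * The guard mirrors the fix above: never shift by bit_left == 32. */
static uint32_t flush_bits(struct bitbuf *s)
{
    if (s->bit_left < 32)           /* shifting a uint32_t by 32 is UB */
        s->bit_buf <<= s->bit_left;
    return s->bit_buf;
}

int main(void)
{
    struct bitbuf full  = { 0x0000ABCD, 16 };  /* 16 bits pending */
    struct bitbuf empty = { 0,          32 };  /* nothing pending */
    printf("%08x\n", (unsigned)flush_bits(&full));   /* abcd0000 */
    printf("%08x\n", (unsigned)flush_bits(&empty));  /* 00000000 */
    return 0;
}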
@@ -738,11 +738,17 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     int buf_size = avpkt->size;
     QCELPContext *q = avctx->priv_data;
     float *outbuffer = data;
-    int i;
+    int i, out_size;
     float quantized_lspf[10], lpc[10];
     float gain[16];
     float *formant_mem;

+    out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt);
+    if (*data_size < out_size) {
+        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+        return AVERROR(EINVAL);
+    }
+
     if((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q)
     {
         warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");

@@ -837,7 +843,7 @@ erasure:
     memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf));
     q->prev_bitrate = q->bitrate;

-    *data_size = 160 * sizeof(*outbuffer);
+    *data_size = out_size;

     return buf_size;
 }
@@ -1354,6 +1354,8 @@ static void qdm2_fft_decode_tones (QDM2Context *q, int duration, GetBitContext *
             return;

         local_int_14 = (offset >> local_int_8);
+        if (local_int_14 >= FF_ARRAY_ELEMS(fft_level_index_table))
+            return;

         if (q->nb_channels > 1) {
             channel = get_bits1(gb);

@@ -1798,6 +1800,8 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)

     avctx->channels = s->nb_channels = s->channels = AV_RB32(extradata);
     extradata += 4;
+    if (s->channels > MPA_MAX_CHANNELS)
+        return AVERROR_INVALIDDATA;

     avctx->sample_rate = AV_RB32(extradata);
     extradata += 4;

@@ -1819,6 +1823,8 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
     // something like max decodable tones
     s->group_order = av_log2(s->group_size) + 1;
     s->frame_size = s->group_size / 16; // 16 iterations per super block
+    if (s->frame_size > FF_ARRAY_ELEMS(s->output_buffer) / 2)
+        return AVERROR_INVALIDDATA;

     s->sub_sampling = s->fft_order - 7;
     s->frequency_range = 255 / (1 << (2 - s->sub_sampling));

@@ -1953,13 +1959,20 @@ static int qdm2_decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     QDM2Context *s = avctx->priv_data;
     int16_t *out = data;
-    int i;
+    int i, out_size;

     if(!buf)
         return 0;
     if(buf_size < s->checksum_size)
         return -1;

+    out_size = 16 * s->channels * s->frame_size *
+               av_get_bytes_per_sample(avctx->sample_fmt);
+    if (*data_size < out_size) {
+        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+        return AVERROR(EINVAL);
+    }
+
     av_log(avctx, AV_LOG_DEBUG, "decode(%d): %p[%d] -> %p[%d]\n",
            buf_size, buf, s->checksum_size, data, *data_size);

@@ -1969,7 +1982,7 @@ static int qdm2_decode_frame(AVCodecContext *avctx,
         out += s->channels * s->frame_size;
     }

-    *data_size = (uint8_t*)out - (uint8_t*)data;
+    *data_size = out_size;

     return s->checksum_size;
 }
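The two qdm2 init checks and the fft_level_index_table guard share one idea: any index or size derived from the bitstream must be validated against the capacity of the table or buffer it will address, and FF_ARRAY_ELEMS (sizeof(a)/sizeof(a[0])) keeps that bound in sync with the declaration. A small standalone version of the check, with a made-up table:

#include <stdio.h>

#define ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))   /* like FF_ARRAY_ELEMS */

static const int level_table[] = { 0, 2, 5, 9, 14, 20, 27, 35 };

/* Return the table entry for an index decoded from an untrusted stream,
 * or -1 when the index would read past the end of the table. */
static int lookup_level(unsigned index)
{
    if (index >= ARRAY_ELEMS(level_table))
        return -1;
    return level_table[index];
}

int main(void)
{
    printf("%d\n", lookup_level(3));    /* 9 */
    printf("%d\n", lookup_level(200));  /* -1: rejected */
    return 0;
}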
@@ -37,6 +37,7 @@ static int decode_frame(AVCodecContext *avctx,
                         AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
+    const uint8_t *buf_end = avpkt->data + avpkt->size;
     int buf_size = avpkt->size;
     QdrawContext * const a = avctx->priv_data;
     AVFrame * const p= (AVFrame*)&a->pic;

@@ -59,6 +60,8 @@ static int decode_frame(AVCodecContext *avctx,

     outdata = a->pic.data[0];

+    if (buf_end - buf < 0x68 + 4)
+        return AVERROR_INVALIDDATA;
     buf += 0x68; /* jump to palette */
     colors = AV_RB32(buf);
     buf += 4;

@@ -67,6 +70,8 @@ static int decode_frame(AVCodecContext *avctx,
         av_log(avctx, AV_LOG_ERROR, "Error color count - %i(0x%X)\n", colors, colors);
         return -1;
     }
+    if (buf_end - buf < (colors + 1) * 8)
+        return AVERROR_INVALIDDATA;

     pal = (uint32_t*)p->data[1];
     for (i = 0; i <= colors; i++) {

@@ -89,6 +94,8 @@ static int decode_frame(AVCodecContext *avctx,
     }
     p->palette_has_changed = 1;

+    if (buf_end - buf < 18)
+        return AVERROR_INVALIDDATA;
     buf += 18; /* skip unneeded data */
     for (i = 0; i < avctx->height; i++) {
         int size, left, code, pix;

@@ -100,6 +107,9 @@ static int decode_frame(AVCodecContext *avctx,
         out = outdata;
         size = AV_RB16(buf); /* size of packed line */
         buf += 2;
+        if (buf_end - buf < size)
+            return AVERROR_INVALIDDATA;
+
         left = size;
         next = buf + size;
         while (left > 0) {

@@ -115,6 +125,8 @@ static int decode_frame(AVCodecContext *avctx,
             } else { /* copy */
                 if ((out + code) > (outdata + a->pic.linesize[0]))
                     break;
+                if (buf_end - buf < code + 1)
+                    return AVERROR_INVALIDDATA;
                 memcpy(out, buf, code + 1);
                 out += code + 1;
                 buf += code + 1;
@@ -126,6 +126,7 @@ static inline void qtrle_decode_2n4bpp(QtrleContext *s, int stream_ptr,
     while (lines_to_change--) {
         CHECK_STREAM_PTR(2);
         pixel_ptr = row_ptr + (num_pixels * (s->buf[stream_ptr++] - 1));
+        CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */

         while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
             if (rle_code == 0) {

@@ -182,6 +183,7 @@ static void qtrle_decode_8bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     while (lines_to_change--) {
         CHECK_STREAM_PTR(2);
         pixel_ptr = row_ptr + (4 * (s->buf[stream_ptr++] - 1));
+        CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */

         while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
             if (rle_code == 0) {

@@ -235,6 +237,7 @@ static void qtrle_decode_16bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     while (lines_to_change--) {
         CHECK_STREAM_PTR(2);
         pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 2;
+        CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */

         while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
             if (rle_code == 0) {

@@ -284,6 +287,7 @@ static void qtrle_decode_24bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     while (lines_to_change--) {
         CHECK_STREAM_PTR(2);
         pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 3;
+        CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */

         while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
             if (rle_code == 0) {

@@ -335,6 +339,7 @@ static void qtrle_decode_32bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     while (lines_to_change--) {
         CHECK_STREAM_PTR(2);
         pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 4;
+        CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */

         while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
             if (rle_code == 0) {

@@ -463,6 +468,8 @@ static int qtrle_decode_frame(AVCodecContext *avctx,
         stream_ptr += 4;
         height = AV_RB16(&s->buf[stream_ptr]);
         stream_ptr += 4;
+        if (height > s->avctx->height - start_line)
+            goto done;
     } else {
         start_line = 0;
         height = s->avctx->height;
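CHECK_PIXEL_PTR(0) is added right after the skip byte is consumed because the stream appears to store the skip as value+1; a corrupted zero byte makes s->buf[stream_ptr++] - 1 equal to -1 and pushes pixel_ptr before the start of the row. A standalone sketch of that computation and the guard; the helper below is my own, not the macro from qtrle.c.

#include <stdio.h>

/* Reject any line start that falls outside the picture buffer.  qtrle's
 * CHECK_PIXEL_PTR performs the same test against the real frame size. */
static int start_of_line(int row_ptr, int skip_byte, int pixel_size,
                         int buffer_size)
{
    int pixel_ptr = row_ptr + (skip_byte - 1) * pixel_size;
    if (pixel_ptr < 0 || pixel_ptr >= buffer_size)
        return -1;                       /* corrupted skip value */
    return pixel_ptr;
}

int main(void)
{
    printf("%d\n", start_of_line(1024, 5, 4, 1 << 20));  /* 1040 */
    printf("%d\n", start_of_line(0, 0, 4, 1 << 20));     /* -1: skip byte 0 */
    return 0;
}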
@@ -337,9 +337,9 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
     if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
         output_bak = output;

-        if (!s->buffer_size[1] || s->buffer_size[1] < lenout) {
+        if (!s->buffer_size[1] || s->buffer_size[1] < 2*lenout) {
             av_free(s->buffer[1]);
-            s->buffer_size[1] = lenout;
+            s->buffer_size[1] = 2*lenout;
             s->buffer[1] = av_malloc(s->buffer_size[1]);
             if (!s->buffer[1]) {
                 av_log(s->resample_context, AV_LOG_ERROR, "Could not allocate buffer\n");
@@ -207,8 +207,10 @@ AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_size,
     memcpy(&c->filter_bank[c->filter_length*phase_count+1], c->filter_bank, (c->filter_length-1)*sizeof(FELEM));
     c->filter_bank[c->filter_length*phase_count]= c->filter_bank[c->filter_length - 1];

-    c->src_incr= out_rate;
-    c->ideal_dst_incr= c->dst_incr= in_rate * phase_count;
+    if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2))
+        goto error;
+    c->ideal_dst_incr= c->dst_incr;

     c->index= -phase_count*((c->filter_length-1)/2);

     return c;

@@ -246,10 +248,9 @@ int av_resample(AVResampleContext *c, short *dst, short *src, int *consumed, int
             dst[dst_index] = src[index2>>32];
             index2 += incr;
         }
-        frac += dst_index * dst_incr_frac;
         index += dst_index * dst_incr;
-        index += frac / c->src_incr;
-        frac %= c->src_incr;
+        index += (frac + dst_index * (int64_t)dst_incr_frac) / c->src_incr;
+        frac   = (frac + dst_index * (int64_t)dst_incr_frac) % c->src_incr;
     }else{
         for(dst_index=0; dst_index < dst_size; dst_index++){
             FELEM *filter= c->filter_bank + c->filter_length*(index & c->phase_mask);
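The resampler change folds dst_index * dst_incr_frac into the division in 64-bit arithmetic: for long outputs that product can exceed INT_MAX, so adding it to frac as int first could overflow before the division and modulo. A reduced sketch of the promoted arithmetic, with made-up magnitudes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int frac          = 12345;
    int dst_index     = 300000;   /* samples produced in this call */
    int dst_incr_frac = 20000;    /* fractional step per output sample */
    int src_incr      = 44100;

    /* 300000 * 20000 = 6e9 overflows 32-bit int, so promote to int64_t
     * before combining with frac, exactly as the patched code does. */
    int64_t total = frac + dst_index * (int64_t)dst_incr_frac;

    long long index_advance = total / src_incr;
    int       new_frac      = (int)(total % src_incr);

    printf("advance=%lld new_frac=%d\n", index_advance, new_frac);
    return 0;
}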
@@ -113,6 +113,7 @@ static int allocate_buffers(ShortenContext *s)
 {
     int i, chan;
     int *coeffs;
+    void *tmp_ptr;

     for (chan=0; chan<s->channels; chan++) {
         if(FFMAX(1, s->nmean) >= UINT_MAX/sizeof(int32_t)){

@@ -124,9 +125,15 @@ static int allocate_buffers(ShortenContext *s)
             return -1;
         }

-        s->offset[chan] = av_realloc(s->offset[chan], sizeof(int32_t)*FFMAX(1, s->nmean));
+        tmp_ptr = av_realloc(s->offset[chan], sizeof(int32_t)*FFMAX(1, s->nmean));
+        if (!tmp_ptr)
+            return AVERROR(ENOMEM);
+        s->offset[chan] = tmp_ptr;

-        s->decoded[chan] = av_realloc(s->decoded[chan], sizeof(int32_t)*(s->blocksize + s->nwrap));
+        tmp_ptr = av_realloc(s->decoded[chan], sizeof(int32_t)*(s->blocksize + s->nwrap));
+        if (!tmp_ptr)
+            return AVERROR(ENOMEM);
+        s->decoded[chan] = tmp_ptr;
         for (i=0; i<s->nwrap; i++)
             s->decoded[chan][i] = 0;
         s->decoded[chan] += s->nwrap;

@@ -155,7 +162,7 @@ static void fix_bitshift(ShortenContext *s, int32_t *buffer)

     if (s->bitshift != 0)
         for (i = 0; i < s->blocksize; i++)
-            buffer[s->nwrap + i] <<= s->bitshift;
+            buffer[i] <<= s->bitshift;
 }


@@ -284,8 +291,15 @@ static int shorten_decode_frame(AVCodecContext *avctx,
     int i, input_buf_size = 0;
     int16_t *samples = data;
     if(s->max_framesize == 0){
+        void *tmp_ptr;
         s->max_framesize= 1024; // should hopefully be enough for the first header
-        s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->max_framesize);
+        tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
+                                  s->max_framesize);
+        if (!tmp_ptr) {
+            av_log(avctx, AV_LOG_ERROR, "error allocating bitstream buffer\n");
+            return AVERROR(ENOMEM);
+        }
+        s->bitstream = tmp_ptr;
     }

     if(1 && s->max_framesize){//FIXME truncated

@@ -467,6 +481,12 @@ static int shorten_decode_frame(AVCodecContext *avctx,

                     s->cur_chan++;
                     if (s->cur_chan == s->channels) {
+                        int out_size = s->blocksize * s->channels *
+                                       av_get_bytes_per_sample(avctx->sample_fmt);
+                        if (*data_size < out_size) {
+                            av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+                            return AVERROR(EINVAL);
+                        }
                         samples = interleave_buffer(samples, s->channels, s->blocksize, s->decoded);
                         s->cur_chan = 0;
                         goto frame_done;

@@ -482,9 +502,15 @@ static int shorten_decode_frame(AVCodecContext *avctx,
                 case FN_BITSHIFT:
                     s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE);
                     break;
-                case FN_BLOCKSIZE:
-                    s->blocksize = get_uint(s, av_log2(s->blocksize));
+                case FN_BLOCKSIZE: {
+                    int blocksize = get_uint(s, av_log2(s->blocksize));
+                    if (blocksize > s->blocksize) {
+                        av_log(avctx, AV_LOG_ERROR, "Increasing block size is not supported\n");
+                        return AVERROR_PATCHWELCOME;
+                    }
+                    s->blocksize = blocksize;
                     break;
+                }
                 case FN_QUIT:
                     *data_size = 0;
                     return buf_size;
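The shorten reallocation changes all fix the classic p = realloc(p, n) anti-pattern: if the reallocation fails, the old pointer is overwritten with NULL and the original block leaks. The portable C equivalent of the temp-pointer fix applied above:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow *buf without losing the old block on failure; the same dance the
 * patch applies to av_realloc() and av_fast_realloc(). */
static int grow_buffer(int32_t **buf, size_t new_count)
{
    void *tmp = realloc(*buf, new_count * sizeof(**buf));
    if (!tmp)
        return -1;        /* *buf is still valid and still owned by us */
    *buf = tmp;
    return 0;
}

int main(void)
{
    int32_t *samples = NULL;
    if (grow_buffer(&samples, 1024) == 0) {
        memset(samples, 0, 1024 * sizeof(*samples));
        printf("allocated %zu bytes\n", 1024 * sizeof(*samples));
    }
    free(samples);
    return 0;
}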
@@ -194,14 +194,16 @@ static void decode_parameters(SiprParameters* parms, GetBitContext *pgb,
 {
     int i, j;

-    parms->ma_pred_switch = get_bits(pgb, p->ma_predictor_bits);
+    if (p->ma_predictor_bits)
+        parms->ma_pred_switch = get_bits(pgb, p->ma_predictor_bits);

     for (i = 0; i < 5; i++)
         parms->vq_indexes[i] = get_bits(pgb, p->vq_indexes_bits[i]);

     for (i = 0; i < p->subframe_count; i++) {
         parms->pitch_delay[i] = get_bits(pgb, p->pitch_delay_bits[i]);
-        parms->gp_index[i] = get_bits(pgb, p->gp_index_bits);
+        if (p->gp_index_bits)
+            parms->gp_index[i] = get_bits(pgb, p->gp_index_bits);

         for (j = 0; j < p->number_of_fc_indexes; j++)
             parms->fc_indexes[i][j] = get_bits(pgb, p->fc_index_bits[j]);

@@ -509,7 +511,7 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
     GetBitContext gb;
     float *data = datap;
     int subframe_size = ctx->mode == MODE_16k ? L_SUBFR_16k : SUBFR_SIZE;
-    int i;
+    int i, out_size;

     ctx->avctx = avctx;
     if (avpkt->size < (mode_par->bits_per_frame >> 3)) {

@@ -520,7 +522,11 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
         *data_size = 0;
         return -1;
     }
-    if (*data_size < subframe_size * mode_par->subframe_count * sizeof(float)) {
+
+    out_size = mode_par->frames_per_packet * subframe_size *
+               mode_par->subframe_count *
+               av_get_bytes_per_sample(avctx->sample_fmt);
+    if (*data_size < out_size) {
         av_log(avctx, AV_LOG_ERROR,
                "Error processing packet: output buffer (%d) too small\n",
                *data_size);

@@ -542,8 +548,7 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
         data += subframe_size * mode_par->subframe_count;
     }

-    *data_size = mode_par->frames_per_packet * subframe_size *
-                 mode_par->subframe_count * sizeof(float);
+    *data_size = out_size;

     return mode_par->bits_per_frame >> 3;
 }
@@ -560,6 +560,10 @@ static av_cold int decode_end(AVCodecContext *avctx)

 static av_cold int smka_decode_init(AVCodecContext *avctx)
 {
+    if (avctx->channels < 1 || avctx->channels > 2) {
+        av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
+        return AVERROR(EINVAL);
+    }
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
     avctx->sample_fmt = avctx->bits_per_coded_sample == 8 ? AV_SAMPLE_FMT_U8 : AV_SAMPLE_FMT_S16;
     return 0;

@@ -583,6 +587,11 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     int bits, stereo;
     int pred[2] = {0, 0};

+    if (buf_size <= 4) {
+        av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
+        return AVERROR(EINVAL);
+    }
+
     unp_size = AV_RL32(buf);

     init_get_bits(&gb, buf + 4, (buf_size - 4) * 8);

@@ -598,6 +607,14 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
         return -1;
     }
+    if (stereo ^ (avctx->channels != 1)) {
+        av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
+        return AVERROR(EINVAL);
+    }
+    if (bits && avctx->sample_fmt == AV_SAMPLE_FMT_U8) {
+        av_log(avctx, AV_LOG_ERROR, "sample format mismatch\n");
+        return AVERROR(EINVAL);
+    }

     memset(vlc, 0, sizeof(VLC) * 4);
     memset(h, 0, sizeof(HuffContext) * 4);
@@ -1606,8 +1606,6 @@ static void dwt_quantize(SnowContext *s, Plane *p, DWTELEM *buffer, int width, i
 static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *frame){
     int p,x,y;

-    assert(!(s->avctx->flags & CODEC_FLAG_EMU_EDGE));
-
     for(p=0; p<3; p++){
         int is_chroma= !!p;
         int w= s->avctx->width  >>is_chroma;

@@ -1664,7 +1662,7 @@ static int frame_start(SnowContext *s){
    int w= s->avctx->width; //FIXME round up to x16 ?
    int h= s->avctx->height;

-    if(s->current_picture.data[0]){
+    if(s->current_picture.data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)){
        s->dsp.draw_edges(s->current_picture.data[0],
                          s->current_picture.linesize[0], w, h,
                          EDGE_WIDTH, EDGE_WIDTH, EDGE_TOP | EDGE_BOTTOM);
@@ -46,6 +46,7 @@ static av_cold int sunrast_init(AVCodecContext *avctx) {
 static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
                                 int *data_size, AVPacket *avpkt) {
     const uint8_t *buf = avpkt->data;
+    const uint8_t *buf_end = avpkt->data + avpkt->size;
     SUNRASTContext * const s = avctx->priv_data;
     AVFrame *picture = data;
     AVFrame * const p = &s->picture;

@@ -53,6 +54,9 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
     uint8_t *ptr;
     const uint8_t *bufstart = buf;

+    if (avpkt->size < 32)
+        return AVERROR_INVALIDDATA;
+
     if (AV_RB32(buf) != 0x59a66a95) {
         av_log(avctx, AV_LOG_ERROR, "this is not sunras encoded data\n");
         return -1;

@@ -64,13 +68,14 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
     type      = AV_RB32(buf+20);
     maptype   = AV_RB32(buf+24);
     maplength = AV_RB32(buf+28);
+    buf      += 32;

-    if (type == RT_FORMAT_TIFF || type == RT_FORMAT_IFF) {
-        av_log(avctx, AV_LOG_ERROR, "unsupported (compression) type\n");
+    if (type < RT_OLD || type > RT_FORMAT_IFF) {
+        av_log(avctx, AV_LOG_ERROR, "invalid (compression) type\n");
         return -1;
     }
-    if (type > RT_FORMAT_IFF) {
-        av_log(avctx, AV_LOG_ERROR, "invalid (compression) type\n");
+    if (av_image_check_size(w, h, 0, avctx)) {
+        av_log(avctx, AV_LOG_ERROR, "invalid image size\n");
         return -1;
     }
     if (maptype & ~1) {

@@ -78,7 +83,10 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
         return -1;
     }

-    buf += 32;
+    if (type == RT_FORMAT_TIFF || type == RT_FORMAT_IFF) {
+        av_log(avctx, AV_LOG_ERROR, "unsupported (compression) type\n");
+        return -1;
+    }

     switch (depth) {
         case 1:

@@ -98,8 +106,6 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
     if (p->data[0])
         avctx->release_buffer(avctx, p);

-    if (av_image_check_size(w, h, 0, avctx))
-        return -1;
     if (w != avctx->width || h != avctx->height)
         avcodec_set_dimensions(avctx, w, h);
     if (avctx->get_buffer(avctx, p) < 0) {

@@ -109,6 +115,9 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,

     p->pict_type = AV_PICTURE_TYPE_I;

+    if (buf_end - buf < maplength)
+        return AVERROR_INVALIDDATA;
+
     if (depth != 8 && maplength) {
         av_log(avctx, AV_LOG_WARNING, "useless colormap found or file is corrupted, trying to recover\n");

@@ -143,8 +152,11 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
         uint8_t *end = ptr + h*stride;

         x = 0;
-        while (ptr != end) {
+        while (ptr != end && buf < buf_end) {
             run = 1;
+            if (buf_end - buf < 1)
+                return AVERROR_INVALIDDATA;
+
             if ((value = *buf++) == 0x80) {
                 run = *buf++ + 1;
                 if (run != 1)

@@ -163,6 +175,8 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
         }
     } else {
         for (y=0; y<h; y++) {
+            if (buf_end - buf < len)
+                break;
             memcpy(ptr, buf, len);
             ptr += stride;
             buf += alen;
@@ -614,7 +614,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
         dir = i_mb_type_info[mb_type - 8].pred_mode;
         dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

-        if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){
+        if ((h->intra16x16_pred_mode = ff_h264_check_intra16x16_pred_mode(h, dir)) == -1){
             av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
             return -1;
         }

@@ -713,7 +713,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
     s->current_picture.f.mb_type[mb_xy] = mb_type;

     if (IS_INTRA(mb_type)) {
-        h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
+        h->chroma_pred_mode = ff_h264_check_intra_chroma_pred_mode(h, DC_PRED8x8);
     }

     return 0;
@@ -35,15 +35,19 @@ typedef struct SeqVideoContext {
 } SeqVideoContext;


-static const unsigned char *seq_unpack_rle_block(const unsigned char *src, unsigned char *dst, int dst_size)
+static const unsigned char *seq_unpack_rle_block(const unsigned char *src,
+                                                 const unsigned char *src_end,
+                                                 unsigned char *dst, int dst_size)
 {
     int i, len, sz;
     GetBitContext gb;
     int code_table[64];

-    /* get the rle codes (at most 64 bytes) */
-    init_get_bits(&gb, src, 64 * 8);
+    /* get the rle codes */
+    init_get_bits(&gb, src, (src_end - src) * 8);
     for (i = 0, sz = 0; i < 64 && sz < dst_size; i++) {
+        if (get_bits_left(&gb) < 4)
+            return NULL;
         code_table[i] = get_sbits(&gb, 4);
         sz += FFABS(code_table[i]);
     }

@@ -54,8 +58,12 @@ static const unsigned char *seq_unpack_rle_block(const unsigned char *src, unsig
         len = code_table[i];
         if (len < 0) {
             len = -len;
+            if (src_end - src < 1)
+                return NULL;
             memset(dst, *src++, FFMIN(len, dst_size));
         } else {
+            if (src_end - src < len)
+                return NULL;
             memcpy(dst, src, FFMIN(len, dst_size));
             src += len;
         }

@@ -65,25 +73,30 @@ static const unsigned char *seq_unpack_rle_block(const unsigned char *src, unsig
     return src;
 }

-static const unsigned char *seq_decode_op1(SeqVideoContext *seq, const unsigned char *src, unsigned char *dst)
+static const unsigned char *seq_decode_op1(SeqVideoContext *seq,
+                                           const unsigned char *src,
+                                           const unsigned char *src_end,
+                                           unsigned char *dst)
 {
     const unsigned char *color_table;
     int b, i, len, bits;
     GetBitContext gb;
     unsigned char block[8 * 8];

+    if (src_end - src < 1)
+        return NULL;
     len = *src++;
     if (len & 0x80) {
         switch (len & 3) {
         case 1:
-            src = seq_unpack_rle_block(src, block, sizeof(block));
+            src = seq_unpack_rle_block(src, src_end, block, sizeof(block));
             for (b = 0; b < 8; b++) {
                 memcpy(dst, &block[b * 8], 8);
                 dst += seq->frame.linesize[0];
             }
             break;
         case 2:
-            src = seq_unpack_rle_block(src, block, sizeof(block));
+            src = seq_unpack_rle_block(src, src_end, block, sizeof(block));
             for (i = 0; i < 8; i++) {
                 for (b = 0; b < 8; b++)
                     dst[b * seq->frame.linesize[0]] = block[i * 8 + b];

@@ -92,9 +105,13 @@ static const unsigned char *seq_decode_op1(SeqVideoContext *seq, const unsigned
             break;
         }
     } else {
+        if (len <= 0)
+            return NULL;
+        bits = ff_log2_tab[len - 1] + 1;
+        if (src_end - src < len + 8 * bits)
+            return NULL;
         color_table = src;
         src += len;
-        bits = ff_log2_tab[len - 1] + 1;
         init_get_bits(&gb, src, bits * 8 * 8); src += bits * 8;
         for (b = 0; b < 8; b++) {
             for (i = 0; i < 8; i++)

@@ -106,10 +123,16 @@ static const unsigned char *seq_decode_op1(SeqVideoContext *seq, const unsigned
     return src;
 }

-static const unsigned char *seq_decode_op2(SeqVideoContext *seq, const unsigned char *src, unsigned char *dst)
+static const unsigned char *seq_decode_op2(SeqVideoContext *seq,
+                                           const unsigned char *src,
+                                           const unsigned char *src_end,
+                                           unsigned char *dst)
 {
     int i;

+    if (src_end - src < 8 * 8)
+        return NULL;
+
     for (i = 0; i < 8; i++) {
         memcpy(dst, src, 8);
         src += 8;

@@ -119,11 +142,16 @@ static const unsigned char *seq_decode_op2(SeqVideoContext *seq, const unsigned
     return src;
 }

-static const unsigned char *seq_decode_op3(SeqVideoContext *seq, const unsigned char *src, unsigned char *dst)
+static const unsigned char *seq_decode_op3(SeqVideoContext *seq,
+                                           const unsigned char *src,
+                                           const unsigned char *src_end,
+                                           unsigned char *dst)
 {
     int pos, offset;

     do {
+        if (src_end - src < 2)
+            return NULL;
         pos = *src++;
         offset = ((pos >> 3) & 7) * seq->frame.linesize[0] + (pos & 7);
         dst[offset] = *src++;

@@ -132,8 +160,9 @@ static const unsigned char *seq_decode_op3(SeqVideoContext *seq, const unsigned
     return src;
 }

-static void seqvideo_decode(SeqVideoContext *seq, const unsigned char *data, int data_size)
+static int seqvideo_decode(SeqVideoContext *seq, const unsigned char *data, int data_size)
 {
+    const unsigned char *data_end = data + data_size;
     GetBitContext gb;
     int flags, i, j, x, y, op;
     unsigned char c[3];

@@ -144,6 +173,8 @@ static void seqvideo_decode(SeqVideoContext *seq, const unsigned char *data, int

     if (flags & 1) {
         palette = (uint32_t *)seq->frame.data[1];
+        if (data_end - data < 256 * 3)
+            return AVERROR_INVALIDDATA;
         for (i = 0; i < 256; i++) {
             for (j = 0; j < 3; j++, data++)
                 c[j] = (*data << 2) | (*data >> 4);

@@ -153,6 +184,8 @@ static void seqvideo_decode(SeqVideoContext *seq, const unsigned char *data, int
     }

     if (flags & 2) {
+        if (data_end - data < 128)
+            return AVERROR_INVALIDDATA;
         init_get_bits(&gb, data, 128 * 8); data += 128;
         for (y = 0; y < 128; y += 8)
             for (x = 0; x < 256; x += 8) {

@@ -160,17 +193,20 @@ static void seqvideo_decode(SeqVideoContext *seq, const unsigned char *data, int
                 op = get_bits(&gb, 2);
                 switch (op) {
                 case 1:
-                    data = seq_decode_op1(seq, data, dst);
+                    data = seq_decode_op1(seq, data, data_end, dst);
                     break;
                 case 2:
-                    data = seq_decode_op2(seq, data, dst);
+                    data = seq_decode_op2(seq, data, data_end, dst);
                     break;
                 case 3:
-                    data = seq_decode_op3(seq, data, dst);
+                    data = seq_decode_op3(seq, data, data_end, dst);
                     break;
                 }
+                if (!data)
+                    return AVERROR_INVALIDDATA;
             }
     }
+    return 0;
 }

 static av_cold int seqvideo_decode_init(AVCodecContext *avctx)

@@ -202,7 +238,8 @@ static int seqvideo_decode_frame(AVCodecContext *avctx,
         return -1;
     }

-    seqvideo_decode(seq, buf, buf_size);
+    if (seqvideo_decode(seq, buf, buf_size))
+        return AVERROR_INVALIDDATA;

     *data_size = sizeof(AVFrame);
     *(AVFrame *)data = seq->frame;
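The rework above threads an explicit src_end through every helper and uses a NULL return as the single error signal, so the caller only needs one check after the switch instead of validating inside each opcode. A condensed sketch of that calling convention; the helper below is illustrative, not one of the seq_decode_op* functions.

#include <stdio.h>
#include <string.h>

/* Copy an 8-byte block out of the stream, or return NULL if it would
 * read past src_end -- the same contract the patched helpers follow. */
static const unsigned char *copy_block(const unsigned char *src,
                                       const unsigned char *src_end,
                                       unsigned char *dst)
{
    if (src_end - src < 8)
        return NULL;
    memcpy(dst, src, 8);
    return src + 8;
}

int main(void)
{
    unsigned char stream[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    unsigned char out[8];
    const unsigned char *p = stream, *end = stream + sizeof(stream);

    p = copy_block(p, end, out);        /* ok: 8 of 10 bytes consumed */
    if (p)
        p = copy_block(p, end, out);    /* fails: only 2 bytes left */
    if (!p)
        printf("truncated stream detected\n");
    return 0;
}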
@@ -172,6 +172,8 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
 }
 switch(s->compr){
 case TIFF_RAW:
+if (ssrc + size - src < width)
+return AVERROR_INVALIDDATA;
 if (!s->fill_order) {
 memcpy(dst, src, width);
 } else {
@@ -279,6 +281,8 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
 uint32_t *pal;
 const uint8_t *rp, *gp, *bp;

+if (end_buf - buf < 12)
+return -1;
 tag = tget_short(&buf, s->le);
 type = tget_short(&buf, s->le);
 count = tget_long(&buf, s->le);
@@ -338,7 +342,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
 case TIFF_SHORT:
 case TIFF_LONG:
 s->bpp = 0;
-for(i = 0; i < count; i++) s->bpp += tget(&buf, type, s->le);
+for(i = 0; i < count && buf < end_buf; i++) s->bpp += tget(&buf, type, s->le);
 break;
 default:
 s->bpp = -1;
@@ -452,6 +456,8 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
 case TIFF_PAL:
 pal = (uint32_t *) s->palette;
 off = type_sizes[type];
+if (count / 3 > 256 || end_buf - buf < count / 3 * off * 3)
+return -1;
 rp = buf;
 gp = buf + count / 3 * off;
 bp = buf + count / 3 * off * 2;
@@ -495,12 +501,16 @@ static int decode_frame(AVCodecContext *avctx,
 AVFrame *picture = data;
 AVFrame * const p= (AVFrame*)&s->picture;
 const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
-int id, le, off, ret;
+unsigned off;
+int id, le, ret;
 int i, j, entries;
-int stride, soff, ssize;
+int stride;
+unsigned soff, ssize;
 uint8_t *dst;

 //parse image header
+if (end_buf - buf < 8)
+return AVERROR_INVALIDDATA;
 id = AV_RL16(buf); buf += 2;
 if(id == 0x4949) le = 1;
 else if(id == 0x4D4D) le = 0;
@@ -520,9 +530,9 @@ static int decode_frame(AVCodecContext *avctx,
 }
 /* parse image file directory */
 off = tget_long(&buf, le);
-if(orig_buf + off + 14 >= end_buf){
+if (off >= UINT_MAX - 14 || end_buf - orig_buf < off + 14) {
 av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
-return -1;
+return AVERROR_INVALIDDATA;
 }
 buf = orig_buf + off;
 entries = tget_short(&buf, le);
@@ -546,23 +556,23 @@ static int decode_frame(AVCodecContext *avctx,
 stride = p->linesize[0];
 dst = p->data[0];
 for(i = 0; i < s->height; i += s->rps){
-if(s->stripsizes)
+if(s->stripsizes) {
+if (s->stripsizes >= end_buf)
+return AVERROR_INVALIDDATA;
 ssize = tget(&s->stripsizes, s->sstype, s->le);
-else
+} else
 ssize = s->stripsize;

-if (ssize > buf_size) {
-av_log(avctx, AV_LOG_ERROR, "Buffer size is smaller than strip size\n");
-return -1;
-}
-
 if(s->stripdata){
+if (s->stripdata >= end_buf)
+return AVERROR_INVALIDDATA;
 soff = tget(&s->stripdata, s->sot, s->le);
 }else
 soff = s->stripoff;
-if (soff < 0) {
-av_log(avctx, AV_LOG_ERROR, "Invalid stripoff: %d\n", soff);
-return AVERROR(EINVAL);
+if (soff > buf_size || ssize > buf_size - soff) {
+av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
+return -1;
 }
 if(tiff_unpack_strip(s, dst, stride, orig_buf + soff, ssize, FFMIN(s->rps, s->height - i)) < 0)
 break;
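The hunks above all apply the same defensive idiom: compare the number of bytes left (a pointer difference against the buffer end) with the length that is about to be read, instead of forming pointer-plus-length expressions that can wrap around. A minimal standalone sketch of that idiom, with hypothetical names that are not part of this commit:

#include <stdint.h>
#include <stddef.h>

/* Returns 1 if at least 'need' bytes remain between ptr and end.
 * Written as a pointer difference so an attacker-controlled 'need'
 * cannot overflow a ptr + need computation. */
static int bytes_left_ok(const uint8_t *ptr, const uint8_t *end, size_t need)
{
    return end >= ptr && (size_t)(end - ptr) >= need;
}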
@@ -43,6 +43,7 @@ static const uint8_t type_sizes2[6] = {
 };

 typedef struct TiffEncoderContext {
+AVClass *avclass;
 AVCodecContext *avctx;
 AVFrame picture;

@@ -217,6 +218,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
 uint8_t *yuv_line = NULL;
 int shift_h, shift_v;

+s->avctx = avctx;
 s->buf_start = buf;
 s->buf = &ptr;
 s->buf_size = buf_size;
@@ -56,6 +56,11 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx)
 {
 // TSContext *c = avctx->priv_data;

+if (avctx->channels != 1) {
+av_log_ask_for_sample(avctx, "Unsupported channel count: %d\n", avctx->channels);
+return AVERROR(EINVAL);
+}
+
 avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 return 0;
 }
@@ -822,7 +822,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
 const ModeTab *mtab = tctx->mtab;
 float *out = data;
 enum FrameType ftype;
-int window_type;
+int window_type, out_size;
 static const enum FrameType wtype_to_ftype_table[] = {
 FT_LONG, FT_LONG, FT_SHORT, FT_LONG,
 FT_MEDIUM, FT_LONG, FT_LONG, FT_MEDIUM, FT_MEDIUM
@@ -835,6 +835,13 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
 return buf_size;
 }

+out_size = mtab->size * avctx->channels *
+av_get_bytes_per_sample(avctx->sample_fmt);
+if (*data_size < out_size) {
+av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
+return AVERROR(EINVAL);
+}
+
 init_get_bits(&gb, buf, buf_size * 8);
 skip_bits(&gb, get_bits(&gb, 8));
 window_type = get_bits(&gb, WINDOW_TYPE_BITS);
@@ -857,7 +864,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
 return buf_size;
 }

-*data_size = mtab->size*avctx->channels*4;
+*data_size = out_size;

 return buf_size;
 }
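This and the later vorbisdec change follow the same pattern: compute the byte size of the decoded audio up front and reject the packet if the caller's buffer (*data_size on entry) cannot hold it, only then writing samples and reporting the real output size. A hedged sketch of that check as a helper, with hypothetical naming, not part of this commit:

#include "libavcodec/avcodec.h"
#include "libavutil/samplefmt.h"

/* Hypothetical helper mirroring the pre-decode output-size check the
 * twinvq and vorbis decoders now perform. */
static int check_audio_out_size(AVCodecContext *avctx, int *data_size,
                                int nb_samples)
{
    int out_size = nb_samples * avctx->channels *
                   av_get_bytes_per_sample(avctx->sample_fmt);
    if (*data_size < out_size) {
        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
        return AVERROR(EINVAL);
    }
    *data_size = out_size;
    return 0;
}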
@@ -23,6 +23,7 @@

 #include "libavutil/intreadwrite.h"
 #include "libavutil/imgutils.h"
+#include "bytestream.h"
 #include "avcodec.h"
 #include "s3tc.h"

@@ -42,6 +43,7 @@ static av_cold int txd_init(AVCodecContext *avctx) {
 static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 AVPacket *avpkt) {
 const uint8_t *buf = avpkt->data;
+const uint8_t *buf_end = avpkt->data + avpkt->size;
 TXDContext * const s = avctx->priv_data;
 AVFrame *picture = data;
 AVFrame * const p = &s->picture;
@@ -52,6 +54,8 @@ static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 const uint32_t *palette = (const uint32_t *)(cur + 88);
 uint32_t *pal;

+if (buf_end - cur < 92)
+return AVERROR_INVALIDDATA;
 version = AV_RL32(cur);
 d3d_format = AV_RL32(cur+76);
 w = AV_RL16(cur+80);
@@ -69,6 +73,8 @@ static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,

 if (depth == 8) {
 avctx->pix_fmt = PIX_FMT_PAL8;
+if (buf_end - cur < 1024)
+return AVERROR_INVALIDDATA;
 cur += 1024;
 } else if (depth == 16 || depth == 32)
 avctx->pix_fmt = PIX_FMT_RGB32;
@@ -100,6 +106,8 @@ static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 v = AV_RB32(palette+y);
 pal[y] = (v>>8) + (v<<24);
 }
+if (buf_end - cur < w * h)
+return AVERROR_INVALIDDATA;
 for (y=0; y<h; y++) {
 memcpy(ptr, cur, w);
 ptr += stride;
@@ -110,9 +118,13 @@ static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 case 0:
 if (!flags&1) goto unsupported;
 case FF_S3TC_DXT1:
+if (buf_end - cur < (w/4) * (h/4) * 8)
+return AVERROR_INVALIDDATA;
 ff_decode_dxt1(cur, ptr, w, h, stride);
 break;
 case FF_S3TC_DXT3:
+if (buf_end - cur < (w/4) * (h/4) * 16)
+return AVERROR_INVALIDDATA;
 ff_decode_dxt3(cur, ptr, w, h, stride);
 break;
 default:
@@ -122,6 +134,8 @@ static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 switch (d3d_format) {
 case 0x15:
 case 0x16:
+if (buf_end - cur < h * w * 4)
+return AVERROR_INVALIDDATA;
 for (y=0; y<h; y++) {
 memcpy(ptr, cur, w*4);
 ptr += stride;
@@ -133,8 +147,12 @@ static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 }
 }

-for (; mipmap_count > 1; mipmap_count--)
-cur += AV_RL32(cur) + 4;
+for (; mipmap_count > 1 && buf_end - cur >= 4; mipmap_count--) {
+uint32_t length = bytestream_get_le32(&cur);
+if (buf_end - cur < length)
+break;
+cur += length;
+}

 *picture = s->picture;
 *data_size = sizeof(AVPicture);
@@ -851,6 +851,11 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa

 avctx->pkt = avpkt;

+if (!avpkt->data && avpkt->size) {
+av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
+return AVERROR(EINVAL);
+}
+
 if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){
 //FIXME remove the check below _after_ ensuring that all audio check that the available space is enough
 if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
@@ -72,9 +72,11 @@ typedef struct VmdVideoContext {
 #define QUEUE_SIZE 0x1000
 #define QUEUE_MASK 0x0FFF

-static void lz_unpack(const unsigned char *src, unsigned char *dest, int dest_len)
+static void lz_unpack(const unsigned char *src, int src_len,
+unsigned char *dest, int dest_len)
 {
 const unsigned char *s;
+const unsigned char *s_end;
 unsigned char *d;
 unsigned char *d_end;
 unsigned char queue[QUEUE_SIZE];
@@ -87,8 +89,12 @@ static void lz_unpack(const unsigned char *src, unsigned char *dest, int dest_le
 unsigned int i, j;

 s = src;
+s_end = src + src_len;
 d = dest;
 d_end = d + dest_len;
+
+if (s_end - s < 8)
+return;
 dataleft = AV_RL32(s);
 s += 4;
 memset(queue, 0x20, QUEUE_SIZE);
@@ -101,10 +107,10 @@ static void lz_unpack(const unsigned char *src, unsigned char *dest, int dest_le
 speclen = 100; /* no speclen */
 }

-while (dataleft > 0) {
+while (s_end - s > 0 && dataleft > 0) {
 tag = *s++;
 if ((tag == 0xFF) && (dataleft > 8)) {
-if (d + 8 > d_end)
+if (d_end - d < 8 || s_end - s < 8)
 return;
 for (i = 0; i < 8; i++) {
 queue[qpos++] = *d++ = *s++;
@@ -116,18 +122,23 @@ static void lz_unpack(const unsigned char *src, unsigned char *dest, int dest_le
 if (dataleft == 0)
 break;
 if (tag & 0x01) {
-if (d + 1 > d_end)
+if (d_end - d < 1 || s_end - s < 1)
 return;
 queue[qpos++] = *d++ = *s++;
 qpos &= QUEUE_MASK;
 dataleft--;
 } else {
+if (s_end - s < 2)
+return;
 chainofs = *s++;
 chainofs |= ((*s & 0xF0) << 4);
 chainlen = (*s++ & 0x0F) + 3;
-if (chainlen == speclen)
+if (chainlen == speclen) {
+if (s_end - s < 1)
+return;
 chainlen = *s++ + 0xF + 3;
-if (d + chainlen > d_end)
+}
+if (d_end - d < chainlen)
 return;
 for (j = 0; j < chainlen; j++) {
 *d = queue[chainofs++ & QUEUE_MASK];
@@ -142,32 +153,39 @@ static void lz_unpack(const unsigned char *src, unsigned char *dest, int dest_le
 }
 }

-static int rle_unpack(const unsigned char *src, unsigned char *dest,
-int src_len, int dest_len)
+static int rle_unpack(const unsigned char *src, int src_len, int src_count,
+unsigned char *dest, int dest_len)
 {
 const unsigned char *ps;
+const unsigned char *ps_end;
 unsigned char *pd;
 int i, l;
 unsigned char *dest_end = dest + dest_len;

 ps = src;
+ps_end = src + src_len;
 pd = dest;
-if (src_len & 1)
+if (src_count & 1) {
+if (ps_end - ps < 1)
+return 0;
 *pd++ = *ps++;
+}

-src_len >>= 1;
+src_count >>= 1;
 i = 0;
 do {
+if (ps_end - ps < 1)
+break;
 l = *ps++;
 if (l & 0x80) {
 l = (l & 0x7F) * 2;
-if (pd + l > dest_end)
+if (dest_end - pd < l || ps_end - ps < l)
 return ps - src;
 memcpy(pd, ps, l);
 ps += l;
 pd += l;
 } else {
-if (pd + i > dest_end)
+if (dest_end - pd < i || ps_end - ps < 2)
 return ps - src;
 for (i = 0; i < l; i++) {
 *pd++ = ps[0];
@@ -176,7 +194,7 @@ static int rle_unpack(const unsigned char *src, unsigned char *dest,
 ps += 2;
 }
 i += l;
-} while (i < src_len);
+} while (i < src_count);

 return ps - src;
 }
@@ -189,8 +207,10 @@ static void vmd_decode(VmdVideoContext *s)

 /* point to the start of the encoded data */
 const unsigned char *p = s->buf + 16;
+const unsigned char *p_end = s->buf + s->size;

 const unsigned char *pb;
+const unsigned char *pb_end;
 unsigned char meth;
 unsigned char *dp; /* pointer to current frame */
 unsigned char *pp; /* pointer to previous frame */
@@ -204,6 +224,16 @@ static void vmd_decode(VmdVideoContext *s)
 frame_y = AV_RL16(&s->buf[8]);
 frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
 frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;
+if (frame_x < 0 || frame_width < 0 ||
+frame_x >= s->avctx->width ||
+frame_width > s->avctx->width ||
+frame_x + frame_width > s->avctx->width)
+return;
+if (frame_y < 0 || frame_height < 0 ||
+frame_y >= s->avctx->height ||
+frame_height > s->avctx->height ||
+frame_y + frame_height > s->avctx->height)
+return;

 if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
 (frame_x || frame_y)) {
@@ -216,8 +246,9 @@ static void vmd_decode(VmdVideoContext *s)

 /* if only a certain region will be updated, copy the entire previous
 * frame before the decode */
-if (frame_x || frame_y || (frame_width != s->avctx->width) ||
-(frame_height != s->avctx->height)) {
+if (s->prev_frame.data[0] &&
+(frame_x || frame_y || (frame_width != s->avctx->width) ||
+(frame_height != s->avctx->height))) {

 memcpy(s->frame.data[0], s->prev_frame.data[0],
 s->avctx->height * s->frame.linesize[0]);
@@ -225,6 +256,8 @@ static void vmd_decode(VmdVideoContext *s)

 /* check if there is a new palette */
 if (s->buf[15] & 0x02) {
+if (p_end - p < 2 + 3 * PALETTE_COUNT)
+return;
 p += 2;
 palette32 = (unsigned int *)s->palette;
 for (i = 0; i < PALETTE_COUNT; i++) {
@@ -233,16 +266,17 @@ static void vmd_decode(VmdVideoContext *s)
 b = *p++ * 4;
 palette32[i] = (r << 16) | (g << 8) | (b);
 }
-s->size -= (256 * 3 + 2);
 }
-if (s->size >= 0) {
+if (p < p_end) {
 /* originally UnpackFrame in VAG's code */
 pb = p;
+pb_end = p_end;
 meth = *pb++;
 if (meth & 0x80) {
-lz_unpack(pb, s->unpack_buffer, s->unpack_buffer_size);
+lz_unpack(pb, p_end - pb, s->unpack_buffer, s->unpack_buffer_size);
 meth &= 0x7F;
 pb = s->unpack_buffer;
+pb_end = s->unpack_buffer + s->unpack_buffer_size;
 }

 dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
@@ -252,17 +286,19 @@ static void vmd_decode(VmdVideoContext *s)
 for (i = 0; i < frame_height; i++) {
 ofs = 0;
 do {
+if (pb_end - pb < 1)
+return;
 len = *pb++;
 if (len & 0x80) {
 len = (len & 0x7F) + 1;
-if (ofs + len > frame_width)
+if (ofs + len > frame_width || pb_end - pb < len)
 return;
 memcpy(&dp[ofs], pb, len);
 pb += len;
 ofs += len;
 } else {
 /* interframe pixel copy */
-if (ofs + len + 1 > frame_width)
+if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
 return;
 memcpy(&dp[ofs], &pp[ofs], len + 1);
 ofs += len + 1;
@@ -280,6 +316,8 @@ static void vmd_decode(VmdVideoContext *s)

 case 2:
 for (i = 0; i < frame_height; i++) {
+if (pb_end -pb < frame_width)
+return;
 memcpy(dp, pb, frame_width);
 pb += frame_width;
 dp += s->frame.linesize[0];
@@ -291,18 +329,25 @@ static void vmd_decode(VmdVideoContext *s)
 for (i = 0; i < frame_height; i++) {
 ofs = 0;
 do {
+if (pb_end - pb < 1)
+return;
 len = *pb++;
 if (len & 0x80) {
 len = (len & 0x7F) + 1;
+if (pb_end - pb < 1)
+return;
 if (*pb++ == 0xFF)
-len = rle_unpack(pb, &dp[ofs], len, frame_width - ofs);
+len = rle_unpack(pb, pb_end - pb, len, &dp[ofs], frame_width - ofs);
-else
+else {
+if (pb_end - pb < len)
+return;
 memcpy(&dp[ofs], pb, len);
+}
 pb += len;
 ofs += len;
 } else {
 /* interframe pixel copy */
-if (ofs + len + 1 > frame_width)
+if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
 return;
 memcpy(&dp[ofs], &pp[ofs], len + 1);
 ofs += len + 1;
@@ -523,7 +568,10 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,

 silent_chunks = 0;
 if (block_type == BLOCK_TYPE_INITIAL) {
-uint32_t flags = AV_RB32(buf);
+uint32_t flags;
+if (buf_size < 4)
+return -1;
+flags = AV_RB32(buf);
 silent_chunks = av_popcount(flags);
 buf += 4;
 buf_size -= 4;
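The vmdav changes above keep repeating one copy pattern: validate both the remaining input and the remaining output before every memcpy, and bail out of the decode loop instead of writing past either buffer. A minimal sketch of that pattern as a helper, under assumed (hypothetical) names, not part of this commit:

#include <string.h>
#include <stdint.h>

/* Copies len bytes only when both source and destination have room,
 * advancing the cursors on success. */
static int bounded_copy(uint8_t **dst, const uint8_t *dst_end,
                        const uint8_t **src, const uint8_t *src_end, int len)
{
    if (len < 0 || dst_end - *dst < len || src_end - *src < len)
        return -1;
    memcpy(*dst, *src, len);
    *dst += len;
    *src += len;
    return 0;
}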
@@ -1611,7 +1611,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
 vorbis_context *vc = avccontext->priv_data ;
 GetBitContext *gb = &(vc->gb);
 const float *channel_ptrs[255];
-int i, len;
+int i, len, out_size;

 if (!buf_size)
 return 0;
@@ -1636,6 +1636,13 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
 av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
 get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);

+out_size = len * vc->audio_channels *
+av_get_bytes_per_sample(avccontext->sample_fmt);
+if (*data_size < out_size) {
+av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n");
+return AVERROR(EINVAL);
+}
+
 if (vc->audio_channels > 8) {
 for (i = 0; i < vc->audio_channels; i++)
 channel_ptrs[i] = vc->channel_floors + i * len;
@@ -1651,8 +1658,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
 vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len,
 vc->audio_channels);

-*data_size = len * vc->audio_channels *
-av_get_bytes_per_sample(avccontext->sample_fmt);
+*data_size = out_size;

 return buf_size ;
 }
@@ -45,6 +45,7 @@
 #define FRAGMENT_PIXELS 8

 static av_cold int vp3_decode_end(AVCodecContext *avctx);
+static void vp3_decode_flush(AVCodecContext *avctx);

 //FIXME split things out into their own arrays
 typedef struct Vp3Fragment {
@@ -890,7 +891,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
 /* decode a VLC into a token */
 token = get_vlc2(gb, vlc_table, 11, 3);
 /* use the token to get a zero run, a coefficient, and an eob run */
-if (token <= 6) {
+if ((unsigned) token <= 6U) {
 eob_run = eob_run_base[token];
 if (eob_run_get_bits[token])
 eob_run += get_bits(gb, eob_run_get_bits[token]);
@@ -908,7 +909,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
 coeff_i += eob_run;
 eob_run = 0;
 }
-} else {
+} else if (token >= 0) {
 bits_to_get = coeff_get_bits[token];
 if (bits_to_get)
 bits_to_get = get_bits(gb, bits_to_get);
@@ -942,6 +943,10 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
 for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
 s->num_coded_frags[plane][i]--;
 coeff_i++;
+} else {
+av_log(s->avctx, AV_LOG_ERROR,
+"Invalid token %d\n", token);
+return -1;
 }
 }

@@ -991,6 +996,8 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
 /* unpack the Y plane DC coefficients */
 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
 0, residual_eob_run);
+if (residual_eob_run < 0)
+return residual_eob_run;

 /* reverse prediction of the Y-plane DC coefficients */
 reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
@@ -998,8 +1005,12 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
 /* unpack the C plane DC coefficients */
 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
 1, residual_eob_run);
+if (residual_eob_run < 0)
+return residual_eob_run;
 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
 2, residual_eob_run);
+if (residual_eob_run < 0)
+return residual_eob_run;

 /* reverse prediction of the C-plane DC coefficients */
 if (!(s->avctx->flags & CODEC_FLAG_GRAY))
@@ -1036,11 +1047,17 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
 for (i = 1; i <= 63; i++) {
 residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
 0, residual_eob_run);
+if (residual_eob_run < 0)
+return residual_eob_run;

 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
 1, residual_eob_run);
+if (residual_eob_run < 0)
+return residual_eob_run;
 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
 2, residual_eob_run);
+if (residual_eob_run < 0)
+return residual_eob_run;
 }

 return 0;
@@ -1777,10 +1794,15 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
 Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
 int qps_changed = 0, i, err;

+#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
+
 if (!s1->current_frame.data[0]
 ||s->width != s1->width
-||s->height!= s1->height)
+||s->height!= s1->height) {
+if (s != s1)
+copy_fields(s, s1, golden_frame, current_frame);
 return -1;
+}

 if (s != s1) {
 // init tables if the first frame hasn't been decoded
@@ -1796,8 +1818,6 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
 memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
 }

-#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
-
 // copy previous frame data
 copy_fields(s, s1, golden_frame, dsp);

@@ -1987,9 +2007,6 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx)
 Vp3DecodeContext *s = avctx->priv_data;
 int i;

-if (avctx->is_copy && !s->current_frame.data[0])
-return 0;
-
 av_free(s->superblock_coding);
 av_free(s->all_fragments);
 av_free(s->coded_fragment_list[0]);
@@ -2016,12 +2033,7 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx)
 free_vlc(&s->motion_vector_vlc);

 /* release all frames */
-if (s->golden_frame.data[0])
-ff_thread_release_buffer(avctx, &s->golden_frame);
-if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
-ff_thread_release_buffer(avctx, &s->last_frame);
-/* no need to release the current_frame since it will always be pointing
-* to the same frame as either the golden or last frame */
+vp3_decode_flush(avctx);

 return 0;
 }
@@ -2341,6 +2353,23 @@ static void vp3_decode_flush(AVCodecContext *avctx)
 ff_thread_release_buffer(avctx, &s->current_frame);
 }

+static int vp3_init_thread_copy(AVCodecContext *avctx)
+{
+Vp3DecodeContext *s = avctx->priv_data;
+
+s->superblock_coding = NULL;
+s->all_fragments = NULL;
+s->coded_fragment_list[0] = NULL;
+s->dct_tokens_base = NULL;
+s->superblock_fragments = NULL;
+s->macroblock_coding = NULL;
+s->motion_val[0] = NULL;
+s->motion_val[1] = NULL;
+s->edge_emu_buffer = NULL;
+
+return 0;
+}
+
 AVCodec ff_theora_decoder = {
 .name = "theora",
 .type = AVMEDIA_TYPE_VIDEO,
@@ -2352,6 +2381,7 @@ AVCodec ff_theora_decoder = {
 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
 .flush = vp3_decode_flush,
 .long_name = NULL_IF_CONFIG_SMALL("Theora"),
+.init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
 .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
 };
 #endif
@@ -2368,5 +2398,6 @@ AVCodec ff_vp3_decoder = {
 .flush = vp3_decode_flush,

 .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
+.init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
 .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
 };
@@ -467,6 +467,7 @@ static int vp56_size_changed(AVCodecContext *avctx)
 s->mb_height = (avctx->coded_height+15) / 16;

 if (s->mb_width > 1000 || s->mb_height > 1000) {
+avcodec_set_dimensions(avctx, 0, 0);
 av_log(avctx, AV_LOG_ERROR, "picture too big\n");
 return -1;
 }
@@ -521,8 +522,10 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 if (s->frames[i].data[0])
 avctx->release_buffer(avctx, &s->frames[i]);
 }
-if (is_alpha)
+if (is_alpha) {
+avcodec_set_dimensions(avctx, 0, 0);
 return -1;
+}
 }

 if (!is_alpha) {
@@ -139,8 +139,11 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
 if (coeff_offset) {
 buf += coeff_offset;
 buf_size -= coeff_offset;
-if (buf_size < 0)
+if (buf_size < 0) {
+if (s->framep[VP56_FRAME_CURRENT]->key_frame)
+avcodec_set_dimensions(s->avctx, 0, 0);
 return 0;
+}
 if (s->use_huffman) {
 s->parse_coeff = vp6_parse_coeff_huffman;
 init_get_bits(&s->gb, buf, buf_size<<3);
@@ -373,7 +376,7 @@ static void vp6_parse_coeff_huffman(VP56Context *s)
 if (b > 3) pt = 1;
 vlc_coeff = &s->dccv_vlc[pt];

-for (coeff_idx=0; coeff_idx<64; ) {
+for (coeff_idx = 0;;) {
 int run = 1;
 if (coeff_idx<2 && s->nb_null[coeff_idx][pt]) {
 s->nb_null[coeff_idx][pt]--;
@@ -410,6 +413,8 @@ static void vp6_parse_coeff_huffman(VP56Context *s)
 }
 }
 coeff_idx+=run;
+if (coeff_idx >= 64)
+break;
 cg = FFMIN(vp6_coeff_groups[coeff_idx], 3);
 vlc_coeff = &s->ract_vlc[pt][ct][cg];
 }
@@ -33,6 +33,19 @@
 # include "arm/vp8.h"
 #endif

+static void free_buffers(VP8Context *s)
+{
+av_freep(&s->macroblocks_base);
+av_freep(&s->filter_strength);
+av_freep(&s->intra4x4_pred_mode_top);
+av_freep(&s->top_nnz);
+av_freep(&s->edge_emu_buffer);
+av_freep(&s->top_border);
+av_freep(&s->segmentation_map);
+
+s->macroblocks = NULL;
+}
+
 static void vp8_decode_flush(AVCodecContext *avctx)
 {
 VP8Context *s = avctx->priv_data;
@@ -45,15 +58,7 @@ static void vp8_decode_flush(AVCodecContext *avctx)
 }
 memset(s->framep, 0, sizeof(s->framep));

-av_freep(&s->macroblocks_base);
-av_freep(&s->filter_strength);
-av_freep(&s->intra4x4_pred_mode_top);
-av_freep(&s->top_nnz);
-av_freep(&s->edge_emu_buffer);
-av_freep(&s->top_border);
-av_freep(&s->segmentation_map);
-
-s->macroblocks = NULL;
+free_buffers(s);
 }

 static int update_dimensions(VP8Context *s, int width, int height)
@@ -273,7 +278,7 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)

 if (!s->macroblocks_base || /* first frame */
 width != s->avctx->width || height != s->avctx->height) {
-if ((ret = update_dimensions(s, width, height) < 0))
+if ((ret = update_dimensions(s, width, height)) < 0)
 return ret;
 }

@@ -487,6 +492,7 @@ void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y)

 AV_ZERO32(&near_mv[0]);
 AV_ZERO32(&near_mv[1]);
+AV_ZERO32(&near_mv[2]);

 /* Process MB on top, left and top-left */
 #define MV_EDGE_CHECK(n)\
@@ -919,7 +925,8 @@ void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
 int mb_x, int mb_y)
 {
 AVCodecContext *avctx = s->avctx;
-int x, y, mode, nnz, tr;
+int x, y, mode, nnz;
+uint32_t tr;

 // for the first row, we need to run xchg_mb_border to init the top edge to 127
 // otherwise, skip it if we aren't going to deblock
@@ -948,7 +955,7 @@ void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
 // from the top macroblock
 if (!(!mb_y && avctx->flags & CODEC_FLAG_EMU_EDGE) &&
 mb_x == s->mb_width-1) {
-tr = tr_right[-1]*0x01010101;
+tr = tr_right[-1]*0x01010101u;
 tr_right = (uint8_t *)&tr;
 }

@@ -1749,6 +1756,11 @@ static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo
 {
 VP8Context *s = dst->priv_data, *s_src = src->priv_data;

+if (s->macroblocks_base &&
+(s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
+free_buffers(s);
+}
+
 s->prob[0] = s_src->prob[!s_src->update_probabilities];
 s->segmentation = s_src->segmentation;
 s->lf_delta = s_src->lf_delta;
@@ -138,6 +138,10 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
 /* load up the VQA parameters from the header */
 vqa_header = (unsigned char *)s->avctx->extradata;
 s->vqa_version = vqa_header[0];
+if (s->vqa_version < 1 || s->vqa_version > 3) {
+av_log(s->avctx, AV_LOG_ERROR, " VQA video: unsupported version %d\n", s->vqa_version);
+return -1;
+}
 s->width = AV_RL16(&vqa_header[6]);
 s->height = AV_RL16(&vqa_header[8]);
 if(av_image_check_size(s->width, s->height, 0, avctx)){
@@ -226,6 +230,8 @@ static void decode_format80(const unsigned char *src, int src_size,
 src_index += 2;
 av_dlog(NULL, "(1) copy %X bytes from absolute pos %X\n", count, src_pos);
 CHECK_COUNT();
+if (src_pos + count > dest_size)
+return;
 for (i = 0; i < count; i++)
 dest[dest_index + i] = dest[src_pos + i];
 dest_index += count;
@@ -248,6 +254,8 @@ static void decode_format80(const unsigned char *src, int src_size,
 src_index += 2;
 av_dlog(NULL, "(3) copy %X bytes from absolute pos %X\n", count, src_pos);
 CHECK_COUNT();
+if (src_pos + count > dest_size)
+return;
 for (i = 0; i < count; i++)
 dest[dest_index + i] = dest[src_pos + i];
 dest_index += count;
@@ -268,6 +276,8 @@ static void decode_format80(const unsigned char *src, int src_size,
 src_index += 2;
 av_dlog(NULL, "(5) copy %X bytes from relpos %X\n", count, src_pos);
 CHECK_COUNT();
+if (dest_index < src_pos)
+return;
 for (i = 0; i < count; i++)
 dest[dest_index + i] = dest[dest_index - src_pos + i];
 dest_index += count;
@@ -1173,6 +1173,15 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
 return samplecount * bpp;
 }

+static void wavpack_decode_flush(AVCodecContext *avctx)
+{
+WavpackContext *s = avctx->priv_data;
+int i;
+
+for (i = 0; i < s->fdec_num; i++)
+wv_reset_saved_context(s->fdec[i]);
+}
+
 static int wavpack_decode_frame(AVCodecContext *avctx,
 void *data, int *data_size,
 AVPacket *avpkt)
@@ -1205,11 +1214,14 @@ static int wavpack_decode_frame(AVCodecContext *avctx,
 if(frame_size < 0 || frame_size > buf_size){
 av_log(avctx, AV_LOG_ERROR, "Block %d has invalid size (size %d vs. %d bytes left)\n",
 s->block, frame_size, buf_size);
+wavpack_decode_flush(avctx);
 return -1;
 }
 if((samplecount = wavpack_decode_block(avctx, s->block, data,
-data_size, buf, frame_size)) < 0)
+data_size, buf, frame_size)) < 0) {
+wavpack_decode_flush(avctx);
 return -1;
+}
 s->block++;
 buf += frame_size; buf_size -= frame_size;
 }
@@ -309,10 +309,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
 s->samples_per_frame = 1 << ff_wma_get_frame_len_bits(avctx->sample_rate,
 3, s->decode_flags);

-/** init previous block len */
-for (i = 0; i < avctx->channels; i++)
-s->channel[i].prev_block_len = s->samples_per_frame;
-
 /** subframe info */
 log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
 s->max_num_subframes = 1 << log2_max_num_subframes;
@@ -332,6 +328,18 @@ static av_cold int decode_init(AVCodecContext *avctx)

 s->num_channels = avctx->channels;

+if (s->num_channels < 0) {
+av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n", s->num_channels);
+return AVERROR_INVALIDDATA;
+} else if (s->num_channels > WMAPRO_MAX_CHANNELS) {
+av_log_ask_for_sample(avctx, "unsupported number of channels\n");
+return AVERROR_PATCHWELCOME;
+}
+
+/** init previous block len */
+for (i = 0; i < s->num_channels; i++)
+s->channel[i].prev_block_len = s->samples_per_frame;
+
 /** extract lfe channel position */
 s->lfe_channel = -1;

@@ -343,14 +351,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
 }
 }

-if (s->num_channels < 0) {
-av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n", s->num_channels);
-return AVERROR_INVALIDDATA;
-} else if (s->num_channels > WMAPRO_MAX_CHANNELS) {
-av_log_ask_for_sample(avctx, "unsupported number of channels\n");
-return AVERROR_PATCHWELCOME;
-}
-
 INIT_VLC_STATIC(&sf_vlc, SCALEVLCBITS, HUFF_SCALE_SIZE,
 scale_huffbits, 1, 1,
 scale_huffcodes, 2, 2, 616);
@@ -1436,7 +1436,7 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
 init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
 }

-buflen = (s->num_saved_bits + len + 8) >> 3;
+buflen = (put_bits_count(&s->pb) + len + 8) >> 3;

 if (len <= 0 || buflen > MAX_FRAMESIZE) {
 av_log_ask_for_sample(s->avctx, "input buffer too small\n");
@@ -401,6 +401,10 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
 s->min_pitch_val = ((ctx->sample_rate << 8) / 400 + 50) >> 8;
 s->max_pitch_val = ((ctx->sample_rate << 8) * 37 / 2000 + 50) >> 8;
 pitch_range = s->max_pitch_val - s->min_pitch_val;
+if (pitch_range <= 0) {
+av_log(ctx, AV_LOG_ERROR, "Invalid pitch range; broken extradata?\n");
+return -1;
+}
 s->pitch_nbits = av_ceil_log2(pitch_range);
 s->last_pitch_val = 40;
 s->last_acb_type = ACB_TYPE_NONE;
@@ -422,6 +426,10 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
 s->block_conv_table[2] = (pitch_range * 44) >> 6;
 s->block_conv_table[3] = s->max_pitch_val - 1;
 s->block_delta_pitch_hrange = (pitch_range >> 3) & ~0xF;
+if (s->block_delta_pitch_hrange <= 0) {
+av_log(ctx, AV_LOG_ERROR, "Invalid delta pitch hrange; broken extradata?\n");
+return -1;
+}
 s->block_delta_pitch_nbits = 1 + av_ceil_log2(s->block_delta_pitch_hrange);
 s->block_pitch_range = s->block_conv_table[2] +
 s->block_conv_table[3] + 1 +
@@ -1077,7 +1085,7 @@ static void aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
 int excl_range = s->aw_pulse_range; // always 16 or 24
 uint16_t *use_mask_ptr = &use_mask[idx >> 4];
 int first_sh = 16 - (idx & 15);
-*use_mask_ptr++ &= 0xFFFF << first_sh;
+*use_mask_ptr++ &= 0xFFFFu << first_sh;
 excl_range -= first_sh;
 if (excl_range >= 16) {
 *use_mask_ptr++ = 0;
@@ -1880,6 +1888,8 @@ static void copy_bits(PutBitContext *pb,
 rmn_bits = rmn_bytes = get_bits_left(gb);
 if (rmn_bits < nbits)
 return;
+if (nbits > pb->size_in_bits - put_bits_count(pb))
+return;
 rmn_bits &= 7; rmn_bytes >>= 3;
 if ((rmn_bits = FFMIN(rmn_bits, nbits)) > 0)
 put_bits(pb, rmn_bits, get_bits(gb, rmn_bits));
@@ -37,13 +37,16 @@ static const int8_t ws_adpcm_4bit[] = {
|
|||||||
-9, -8, -6, -5, -4, -3, -2, -1,
|
-9, -8, -6, -5, -4, -3, -2, -1,
|
||||||
0, 1, 2, 3, 4, 5, 6, 8 };
|
0, 1, 2, 3, 4, 5, 6, 8 };
|
||||||
|
|
||||||
#define CLIP8(a) if(a>127)a=127;if(a<-128)a=-128;
|
|
||||||
|
|
||||||
static av_cold int ws_snd_decode_init(AVCodecContext * avctx)
|
static av_cold int ws_snd_decode_init(AVCodecContext * avctx)
|
||||||
{
|
{
|
||||||
// WSSNDContext *c = avctx->priv_data;
|
// WSSNDContext *c = avctx->priv_data;
|
||||||
|
|
||||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
if (avctx->channels != 1) {
|
||||||
|
av_log_ask_for_sample(avctx, "unsupported number of channels\n");
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
avctx->sample_fmt = AV_SAMPLE_FMT_U8;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -56,15 +59,19 @@ static int ws_snd_decode_frame(AVCodecContext *avctx,
|
|||||||
// WSSNDContext *c = avctx->priv_data;
|
// WSSNDContext *c = avctx->priv_data;
|
||||||
|
|
||||||
int in_size, out_size;
|
int in_size, out_size;
|
||||||
int sample = 0;
|
int sample = 128;
|
||||||
int i;
|
int i;
|
||||||
short *samples = data;
|
uint8_t *samples = data;
|
||||||
|
|
||||||
if (!buf_size)
|
if (!buf_size)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
if (buf_size < 4) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
out_size = AV_RL16(&buf[0]);
|
out_size = AV_RL16(&buf[0]);
|
||||||
*data_size = out_size * 2;
|
|
||||||
in_size = AV_RL16(&buf[2]);
|
in_size = AV_RL16(&buf[2]);
|
||||||
buf += 4;
|
buf += 4;
|
||||||
|
|
||||||
@@ -76,34 +83,54 @@ static int ws_snd_decode_frame(AVCodecContext *avctx,
|
|||||||
av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n");
|
av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (in_size == out_size) {
|
     if (in_size == out_size) {
         for (i = 0; i < out_size; i++)
-            *samples++ = (*buf++ - 0x80) << 8;
+            *samples++ = *buf++;
+        *data_size = out_size;
         return buf_size;
     }
 
-    while (out_size > 0) {
-        int code;
+    while (out_size > 0 && buf - avpkt->data < buf_size) {
+        int code, smp, size;
         uint8_t count;
         code = (*buf) >> 6;
         count = (*buf) & 0x3F;
         buf++;
 
+        /* make sure we don't write more than out_size samples */
+        switch (code) {
+        case 0: smp = 4; break;
+        case 1: smp = 2; break;
+        case 2: smp = (count & 0x20) ? 1 : count + 1; break;
+        default: smp = count + 1; break;
+        }
+        if (out_size < smp) {
+            out_size = 0;
+            break;
+        }
+
+        /* make sure we don't read past the input buffer */
+        size = ((code == 2 && (count & 0x20)) || code == 3) ? 0 : count + 1;
+        if ((buf - avpkt->data) + size > buf_size)
+            break;
+
         switch(code) {
         case 0: /* ADPCM 2-bit */
             for (count++; count > 0; count--) {
                 code = *buf++;
                 sample += ws_adpcm_2bit[code & 0x3];
-                CLIP8(sample);
-                *samples++ = sample << 8;
+                sample = av_clip_uint8(sample);
+                *samples++ = sample;
                 sample += ws_adpcm_2bit[(code >> 2) & 0x3];
-                CLIP8(sample);
-                *samples++ = sample << 8;
+                sample = av_clip_uint8(sample);
+                *samples++ = sample;
                 sample += ws_adpcm_2bit[(code >> 4) & 0x3];
-                CLIP8(sample);
-                *samples++ = sample << 8;
+                sample = av_clip_uint8(sample);
+                *samples++ = sample;
                 sample += ws_adpcm_2bit[(code >> 6) & 0x3];
-                CLIP8(sample);
-                *samples++ = sample << 8;
+                sample = av_clip_uint8(sample);
+                *samples++ = sample;
                 out_size -= 4;
             }
             break;
@@ -111,11 +138,11 @@ static int ws_snd_decode_frame(AVCodecContext *avctx,
             for (count++; count > 0; count--) {
                 code = *buf++;
                 sample += ws_adpcm_4bit[code & 0xF];
-                CLIP8(sample);
-                *samples++ = sample << 8;
+                sample = av_clip_uint8(sample);
+                *samples++ = sample;
                 sample += ws_adpcm_4bit[code >> 4];
-                CLIP8(sample);
-                *samples++ = sample << 8;
+                sample = av_clip_uint8(sample);
+                *samples++ = sample;
                 out_size -= 2;
             }
             break;
@@ -125,24 +152,27 @@ static int ws_snd_decode_frame(AVCodecContext *avctx,
                 t = count;
                 t <<= 3;
                 sample += t >> 3;
-                *samples++ = sample << 8;
+                sample = av_clip_uint8(sample);
+                *samples++ = sample;
                 out_size--;
             } else { /* copy */
                 for (count++; count > 0; count--) {
-                    *samples++ = (*buf++ - 0x80) << 8;
+                    *samples++ = *buf++;
                     out_size--;
                 }
-                sample = buf[-1] - 0x80;
+                sample = buf[-1];
             }
             break;
         default: /* run */
             for(count++; count > 0; count--) {
-                *samples++ = sample << 8;
+                *samples++ = sample;
                 out_size--;
             }
         }
     }
 
+    *data_size = samples - (uint8_t *)data;
 
     return buf_size;
 }
 
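The hunks above switch ws_snd_decode_frame (the Westwood SND1 decoder) to 8-bit output and, before decoding each block, work out how many samples (smp) the opcode will produce and how many input bytes (size) it will consume, bailing out if either budget would be exceeded. The following is a minimal standalone sketch of that check-then-decode pattern with hypothetical names; it is not the committed FFmpeg code.

    #include <stdint.h>
    #include <stddef.h>

    /* Clamp an accumulator to 0..255, the way av_clip_uint8() does. */
    static uint8_t clip_u8(int v)
    {
        if (v < 0)   return 0;
        if (v > 255) return 255;
        return (uint8_t)v;
    }

    /* Decode one block of `count` delta bytes only if it fits both the
     * remaining input and the remaining output. Returns samples written
     * or -1 if the block would overrun a buffer. */
    static int decode_block(const uint8_t *in, size_t in_left,
                            uint8_t *out, size_t out_left,
                            size_t count, int *sample)
    {
        if (count > in_left || count > out_left)
            return -1;                      /* refuse the block, stop early */
        for (size_t i = 0; i < count; i++) {
            *sample += (int8_t)in[i];       /* apply the signed delta */
            out[i]   = clip_u8(*sample);    /* store a clipped 8-bit sample */
        }
        return (int)count;
    }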
@@ -23,7 +23,7 @@
 #include "libavcodec/dsputil.h"
 #include "fft.h"
 
-DECLARE_ALIGNED(8, static const int, m1m1)[2] = { 1<<31, 1<<31 };
+DECLARE_ALIGNED(8, static const unsigned int, m1m1)[2] = { 1U<<31, 1U<<31 };
 
 #ifdef EMULATE_3DNOWEXT
 #define PSWAPD(s,d)\
@@ -70,7 +70,7 @@ void ff_imdct_half_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input
     in1 = input;
     in2 = input + n2 - 1;
 #ifdef EMULATE_3DNOWEXT
-    __asm__ volatile("movd %0, %%mm7" ::"r"(1<<31));
+    __asm__ volatile("movd %0, %%mm7" ::"r"(1U<<31));
 #endif
     for(k = 0; k < n4; k++) {
         // FIXME a single block is faster, but gcc 2.95 and 3.4.x on 32bit can't compile it
@@ -24,8 +24,8 @@
 #include "fft.h"
 #include "config.h"
 
-DECLARE_ASM_CONST(16, int, ff_m1m1m1m1)[4] =
-    { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
+DECLARE_ASM_CONST(16, unsigned int, ff_m1m1m1m1)[4] =
+    { 1U << 31, 1U << 31, 1U << 31, 1U << 31 };
 
 void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
 void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);
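Shifting 1 into the sign bit of a 32-bit signed int (`1 << 31`) is undefined behaviour in C, which is why the constants above are declared unsigned and built from `1U << 31`. A small, purely illustrative example of the safe form:

    #include <stdint.h>

    /* Sign-bit mask for IEEE-754 single precision: shifting an unsigned 1 is
     * well defined, whereas (1 << 31) on a 32-bit signed int is not. */
    static const uint32_t SIGN_MASK = 1U << 31;

    /* Flip the sign of a float, given its bit pattern, by XORing the sign bit. */
    static inline uint32_t flip_sign_bits(uint32_t bits)
    {
        return bits ^ SIGN_MASK;
    }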
@@ -114,7 +114,10 @@ static int xan_huffman_decode(unsigned char *dest, int dest_len,
     init_get_bits(&gb, ptr, ptr_len * 8);
 
     while ( val != 0x16 ) {
-        val = src[val - 0x17 + get_bits1(&gb) * byte];
+        unsigned idx = val - 0x17 + get_bits1(&gb) * byte;
+        if (idx >= 2 * byte)
+            return -1;
+        val = src[idx];
 
         if ( val < 0x16 ) {
             if (dest >= dest_end)
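The Huffman walk now computes the table index into a temporary, checks it against the table size (2 * byte entries) and only then dereferences it. A generic sketch of that idiom, with hypothetical names rather than the decoder's real ones:

    #include <stddef.h>
    #include <stdint.h>

    /* Follow one step of a table-driven decode, refusing out-of-range indices
     * that a corrupt bitstream could otherwise steer into foreign memory. */
    static int table_step(const uint8_t *table, size_t table_len,
                          unsigned idx, uint8_t *out_val)
    {
        if (idx >= table_len)
            return -1;              /* index points outside the table */
        *out_val = table[idx];
        return 0;
    }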
@@ -132,13 +135,16 @@ static int xan_huffman_decode(unsigned char *dest, int dest_len,
  *
  * @param dest destination buffer of dest_len, must be padded with at least 130 bytes
  */
-static void xan_unpack(unsigned char *dest, const unsigned char *src, int dest_len)
+static void xan_unpack(unsigned char *dest, int dest_len,
+                       const unsigned char *src, int src_len)
 {
     unsigned char opcode;
     int size;
+    unsigned char *dest_org = dest;
     unsigned char *dest_end = dest + dest_len;
+    const unsigned char *src_end = src + src_len;
 
-    while (dest < dest_end) {
+    while (dest < dest_end && src < src_end) {
         opcode = *src++;
 
         if (opcode < 0xe0) {
@@ -163,9 +169,11 @@ static void xan_unpack(unsigned char *dest, const unsigned char *src, int dest_l
 
             back = ((opcode & 0x10) << 12) + bytestream_get_be16(&src) + 1;
             size2 = ((opcode & 0x0c) << 6) + *src++ + 5;
-            if (size + size2 > dest_end - dest)
-                return;
         }
+        if (dest_end - dest < size + size2 ||
+            dest + size - dest_org < back ||
+            src_end - src < size)
+            return;
         memcpy(dest, src, size); dest += size; src += size;
         av_memcpy_backptr(dest, back, size2);
         dest += size2;
@@ -173,6 +181,8 @@ static void xan_unpack(unsigned char *dest, const unsigned char *src, int dest_l
             int finish = opcode >= 0xfc;
             size = finish ? opcode & 3 : ((opcode & 0x1f) << 2) + 4;
 
+            if (dest_end - dest < size || src_end - src < size)
+                return;
             memcpy(dest, src, size); dest += size; src += size;
             if (finish)
                 return;
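xan_unpack is an LZ77-style unpacker: it interleaves literal copies from src with back-references into what has already been written to dest. The added checks verify that (a) the literal fits the remaining input, (b) literal plus match fit the remaining output, and (c) the back-reference distance does not reach before the start of dest. Below is a minimal sketch of those three checks around one literal+match step; the helper and its names are hypothetical, and sizes are assumed to be already parsed from the opcode.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Copy `lit` literal bytes, then a `match`-byte back-reference at distance
     * `back`, verifying every bound first. Returns bytes written or -1. */
    static int lz_step(uint8_t *dest, const uint8_t *dest_org, const uint8_t *dest_end,
                       const uint8_t **src, const uint8_t *src_end,
                       size_t lit, size_t match, size_t back)
    {
        if (back == 0 ||
            (size_t)(src_end - *src) < lit ||            /* literal would overread src */
            (size_t)(dest_end - dest) < lit + match ||   /* output would overflow      */
            (size_t)(dest + lit - dest_org) < back)      /* reference before the start */
            return -1;

        memcpy(dest, *src, lit);
        *src += lit;
        dest += lit;

        const uint8_t *ref = dest - back;
        for (size_t i = 0; i < match; i++)   /* byte-wise copy: ranges may overlap */
            dest[i] = ref[i];
        return (int)(lit + match);
    }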
@@ -220,15 +230,23 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s,
     int width = s->avctx->width;
     unsigned char *palette_plane, *prev_palette_plane;
 
+    if ( y + motion_y < 0 || y + motion_y >= s->avctx->height ||
+         x + motion_x < 0 || x + motion_x >= s->avctx->width)
+        return;
+
     palette_plane = s->current_frame.data[0];
     prev_palette_plane = s->last_frame.data[0];
+    if (!prev_palette_plane)
+        prev_palette_plane = palette_plane;
     stride = s->current_frame.linesize[0];
     line_inc = stride - width;
     curframe_index = y * stride + x;
     curframe_x = x;
     prevframe_index = (y + motion_y) * stride + x + motion_x;
     prevframe_x = x + motion_x;
-    while(pixel_count && (curframe_index < s->frame_size)) {
+    while(pixel_count &&
+          curframe_index < s->frame_size &&
+          prevframe_index < s->frame_size) {
         int count = FFMIN3(pixel_count, width - curframe_x, width - prevframe_x);
 
         memcpy(palette_plane + curframe_index, prev_palette_plane + prevframe_index, count);
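The copy-pixel-run change rejects motion vectors that would address pixels outside the frame, falls back to the current plane when no previous frame exists yet, and bounds the copy loop on both the source and destination indices. A hedged sketch of the same guard, with invented names rather than the decoder's real structs:

    /* Return 1 if position (x,y) displaced by (mx,my) stays inside a
     * w x h frame, 0 otherwise. */
    static int motion_in_bounds(int x, int y, int mx, int my, int w, int h)
    {
        return y + my >= 0 && y + my < h &&
               x + mx >= 0 && x + mx < w;
    }

When the reference plane may be missing (first frame after a seek, for example), the same defensive style substitutes the current plane: `ref_plane = prev_plane ? prev_plane : cur_plane;`.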
@@ -262,6 +280,7 @@ static int xan_wc3_decode_frame(XanContext *s) {
     int x, y;
 
     unsigned char *opcode_buffer = s->buffer1;
+    unsigned char *opcode_buffer_end = s->buffer1 + s->buffer1_size;
     int opcode_buffer_size = s->buffer1_size;
     const unsigned char *imagedata_buffer = s->buffer2;
 
@@ -270,7 +289,7 @@ static int xan_wc3_decode_frame(XanContext *s) {
     const unsigned char *size_segment;
     const unsigned char *vector_segment;
     const unsigned char *imagedata_segment;
-    int huffman_offset, size_offset, vector_offset, imagedata_offset;
+    int huffman_offset, size_offset, vector_offset, imagedata_offset, imagedata_size;
 
     if (s->size < 8)
         return AVERROR_INVALIDDATA;
@@ -295,14 +314,18 @@ static int xan_wc3_decode_frame(XanContext *s) {
                            huffman_segment, s->size - huffman_offset) < 0)
         return AVERROR_INVALIDDATA;
 
-    if (imagedata_segment[0] == 2)
-        xan_unpack(s->buffer2, &imagedata_segment[1], s->buffer2_size);
-    else
+    if (imagedata_segment[0] == 2) {
+        xan_unpack(s->buffer2, s->buffer2_size,
+                   &imagedata_segment[1], s->size - imagedata_offset - 1);
+        imagedata_size = s->buffer2_size;
+    } else {
+        imagedata_size = s->size - imagedata_offset - 1;
         imagedata_buffer = &imagedata_segment[1];
+    }
 
     /* use the decoded data segments to build the frame */
     x = y = 0;
-    while (total_pixels) {
+    while (total_pixels && opcode_buffer < opcode_buffer_end) {
 
         opcode = *opcode_buffer++;
         size = 0;
@@ -351,6 +374,8 @@ static int xan_wc3_decode_frame(XanContext *s) {
             size_segment += 3;
             break;
         }
+        if (size > total_pixels)
+            break;
 
         if (opcode < 12) {
             flag ^= 1;
@@ -359,8 +384,11 @@ static int xan_wc3_decode_frame(XanContext *s) {
                 xan_wc3_copy_pixel_run(s, x, y, size, 0, 0);
             } else {
                 /* output a run of pixels from imagedata_buffer */
+                if (imagedata_size < size)
+                    break;
                 xan_wc3_output_pixel_run(s, imagedata_buffer, x, y, size);
                 imagedata_buffer += size;
+                imagedata_size -= size;
             }
         } else {
             /* run-based motion compensation from last frame */
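The frame-assembly loop now keeps imagedata_size alongside imagedata_buffer and decrements it as pixel runs are emitted, so a run can never read past the end of the image-data segment, and it also stops once the opcode buffer itself is exhausted. A small sketch of that "pointer plus remaining length" consumption pattern, with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Emit one run of `size` bytes from a segment tracked as (ptr, left).
     * Advances the cursor on success, or returns -1 if the segment is too short. */
    static int take_run(const uint8_t **ptr, size_t *left,
                        uint8_t *out, size_t size)
    {
        if (*left < size)
            return -1;          /* segment exhausted: abort the frame cleanly */
        memcpy(out, *ptr, size);
        *ptr  += size;
        *left -= size;
        return 0;
    }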
@@ -527,6 +555,9 @@ static int xan_decode_frame(AVCodecContext *avctx,
         }
         buf_size = buf_end - buf;
     }
+    if (s->palettes_count <= 0)
+        return AVERROR_INVALIDDATA;
+
     if ((ret = avctx->get_buffer(avctx, &s->current_frame))) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
@@ -129,7 +129,9 @@ static int xan_unpack(uint8_t *dest, const int dest_len,
             if (size + size2 > dest_end - dest)
                 break;
         }
-        if (src + size > src_end || dest + size + size2 > dest_end)
+        if (src + size > src_end ||
+            dest + size + size2 > dest_end ||
+            dest + size - orig_dest < back)
             return -1;
         bytestream_get_buffer(&src, dest, size);
         dest += size;
@@ -194,6 +196,8 @@ static int xan_decode_chroma(AVCodecContext *avctx, AVPacket *avpkt)
     if (mode) {
         for (j = 0; j < avctx->height >> 1; j++) {
             for (i = 0; i < avctx->width >> 1; i++) {
+                if (src_end - src < 1)
+                    return 0;
                 val = *src++;
                 if (val) {
                     val = AV_RL16(table + (val << 1));
@@ -202,8 +206,6 @@ static int xan_decode_chroma(AVCodecContext *avctx, AVPacket *avpkt)
                     U[i] = uval | (uval >> 5);
                     V[i] = vval | (vval >> 5);
                 }
-                if (src == src_end)
-                    return 0;
             }
             U += s->pic.linesize[1];
             V += s->pic.linesize[2];
@@ -214,6 +216,8 @@ static int xan_decode_chroma(AVCodecContext *avctx, AVPacket *avpkt)
 
         for (j = 0; j < avctx->height >> 2; j++) {
             for (i = 0; i < avctx->width >> 1; i += 2) {
+                if (src_end - src < 1)
+                    return 0;
                 val = *src++;
                 if (val) {
                     val = AV_RL16(table + (val << 1));
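In xan_decode_chroma the end-of-input test is moved before the read of each chroma byte (and applied to both sub-sampling modes), instead of being checked only after a value has already been consumed. A sketch of the before-read guard, with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>

    /* Read one byte only if the cursor has not yet reached the end of input. */
    static int read_u8_checked(const uint8_t **src, const uint8_t *src_end,
                               uint8_t *out)
    {
        if (src_end - *src < 1)
            return -1;          /* input exhausted: caller keeps what it has */
        *out = *(*src)++;
        return 0;
    }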
@@ -302,6 +306,9 @@ static int xan_decode_frame_type0(AVCodecContext *avctx, AVPacket *avpkt)
                                   corr_end - corr_off);
             if (dec_size < 0)
                 dec_size = 0;
+            else
+                dec_size = FFMIN(dec_size, s->buffer_size/2 - 1);
+
             for (i = 0; i < dec_size; i++)
                 s->y_buffer[i*2+1] = (s->y_buffer[i*2+1] + (s->scratch_buffer[i] << 1)) & 0x3F;
         }
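Here the size reported by the preceding unpack call is clamped to what the following loop's destination can actually hold (s->buffer_size/2 - 1) rather than trusted as-is. A generic sketch of the clamp-then-use idiom; the function and its names are illustrative only:

    /* Clamp a decoder-reported element count to what the destination can hold. */
    static int clamp_count(int reported, int capacity)
    {
        if (reported < 0)
            return 0;                      /* treat errors as "nothing decoded" */
        return reported < capacity ? reported : capacity;   /* FFMIN equivalent */
    }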
@@ -237,7 +237,7 @@ static int config_props(AVFilterLink *outlink)
         scale->isws[1] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
                                         outlink->w, outlink->h/2, outlink->format,
                                         scale->flags, NULL, NULL, NULL);
-        if (!scale->sws)
+        if (!scale->sws || !scale->isws[0] || !scale->isws[1])
             return AVERROR(EINVAL);
 
         if (inlink->sample_aspect_ratio.num){
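config_props sets up three libswscale contexts (the full-frame one and, judging by the h/2 arguments, two half-height ones for interlaced scaling) but previously tested only the first for allocation failure; the fix tests all three before any of them can be dereferenced. A trimmed illustration of the pattern, assuming the three pointers already hold the results of sws_getContext():

    #include <stdio.h>

    /* Fail configuration if any required scaling context failed to allocate. */
    static int check_contexts(void *sws, void *isws0, void *isws1)
    {
        if (!sws || !isws0 || !isws1) {
            fprintf(stderr, "could not create scaling context\n");
            return -1;          /* AVERROR(EINVAL) in the real filter */
        }
        return 0;
    }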
Some files were not shown because too many files have changed in this diff.