Compare commits
56 Commits
| SHA1 |
|---|
| 61f55565fb |
| b6b46db9e4 |
| 21d99be9dc |
| 7b6b9be861 |
| 374409eb1a |
| a352fedb24 |
| c92068430d |
| 274a5b7cdb |
| 95345e942c |
| 87757508ab |
| f66418afba |
| 7371b0ca6f |
| c5cbda5079 |
| 8893f7d815 |
| 7c772ccd27 |
| cf82c5cd5b |
| cb8577a4da |
| b55b34f862 |
| 609d299ed0 |
| 01a0612c70 |
| dcf1830a15 |
| 0a48a67e57 |
| 72ac64544f |
| b62c0c0bce |
| 00498a7e59 |
| 06107e9605 |
| ce993ce791 |
| c0b90d4088 |
| b7b61ff6a3 |
| 7a6e47b99d |
| f84c349b3b |
| 26f732e21d |
| 82b2dd5ee4 |
| e82ddde05a |
| 07f5da6128 |
| e845455225 |
| 3fedb3e65c |
| 1986380df2 |
| df3850db49 |
| 082b4f8348 |
| 788c313b50 |
| 779d7610c7 |
| 56629aa012 |
| 33651e3edf |
| d46aada5c2 |
| 66b1f210c0 |
| d4b98d475f |
| 8d8962ca3e |
| 329559ae50 |
| 0b3a88fe15 |
| 563fe360c3 |
| 73a502dd43 |
| ea189b77eb |
| 2ebd47841f |
| 9d7244c4c6 |
| 7aee089978 |

.gitignore (vendored, 4 changed lines)

@@ -7,10 +7,6 @@
*-example
*-test
*_g
*.def
*.dll
*.lib
*.exp
config.*
doc/*.1
doc/*.html

Changelog (new file, 720 added lines)

@@ -0,0 +1,720 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 0.7.1:

- added various additional FOURCC codec identifiers
- H.264 4:4:4 fixes
- build system and compilation fixes
- Doxygen and general documentation corrections and improvements
- fixed segfault in ffprobe
- behavioral fix in av_open_input_stream()
- Licensing clarification for LGPL'ed vf_gradfun
- bugfixes while seeking in multithreaded decoding
- support newer versions of OpenCV
- ffmpeg: fix operation with --disable-avfilter
- fixed integer underflow in matroska decoder

version 0.7:

- many many things we forgot because we rather write code than changelogs
- libmpcodecs video filter support (3 times as many filters than before)
- mpeg2 aspect ratio dection fixed
- libxvid aspect pickiness fixed
- Frame multithreaded decoding
- E-AC-3 audio encoder
- ac3enc: add channel coupling support
- floating-point sample format support for (E-)AC-3, DCA, AAC, Vorbis decoders
- H.264/MPEG frame-level multithreading
- av_metadata_* functions renamed to av_dict_* and moved to libavutil
- 4:4:4 H.264 decoding support
- 10-bit H.264 optimizations for x86
- lut, lutrgb, and lutyuv filters added
- buffersink libavfilter sink added
- bump libswscale for recently reported ABI break

version 0.7_beta2:

- VP8 frame-level multithreading
- NEON optimizations for VP8
- removed a lot of deprecated API cruft
- FFT and IMDCT optimizations for AVX (Sandy Bridge) processors
- showinfo filter added
- DPX image encoder
- SMPTE 302M AES3 audio decoder
- Apple Core Audio Format muxer
- 9bit and 10bit per sample support in the H.264 decoder
- 9bit and 10bit FFV1 encoding / decoding
- split filter added
- select filter added
- sdl output device added

version 0.7_beta1:

- WebM support in Matroska de/muxer
- low overhead Ogg muxing
- MMS-TCP support
- VP8 de/encoding via libvpx
- Demuxer for On2's IVF format
- Pictor/PC Paint decoder
- HE-AAC v2 decoder
- libfaad2 wrapper removed
- DTS-ES extension (XCh) decoding support
- native VP8 decoder
- RTSP tunneling over HTTP
- RTP depacketization of SVQ3
- -strict inofficial replaced by -strict unofficial
- ffplay -exitonkeydown and -exitonmousedown options added
- native GSM / GSM MS decoder
- RTP depacketization of QDM2
- ANSI/ASCII art playback system
- Lego Mindstorms RSO de/muxer
- libavcore added
- SubRip subtitle file muxer and demuxer
- Chinese AVS encoding via libxavs
- ffprobe -show_packets option added
- RTP packetization of Theora and Vorbis
- RTP depacketization of MP4A-LATM
- RTP packetization and depacketization of VP8
- hflip filter
- Apple HTTP Live Streaming demuxer
- a64 codec
- MMS-HTTP support
- G.722 ADPCM audio encoder/decoder
- R10k video decoder
- ocv_smooth filter
- frei0r wrapper filter
- change crop filter syntax to width:height:x:y
- make the crop filter accept parametric expressions
- make ffprobe accept AVFormatContext options
- yadif filter
- blackframe filter
- Demuxer for Leitch/Harris' VR native stream format (LXF)
- RTP depacketization of the X-QT QuickTime format
- SAP (Session Announcement Protocol, RFC 2974) muxer and demuxer
- cropdetect filter
- ffmpeg -crop* options removed
- transpose filter added
- ffmpeg -force_key_frames option added
- demuxer for receiving raw rtp:// URLs without an SDP description
- single stream LATM/LOAS decoder
- setpts filter added
- Win64 support for optimized x86 assembly functions
- MJPEG/AVI1 to JPEG/JFIF bitstream filter
- ASS subtitle encoder and decoder
- IEC 61937 encapsulation for E-AC-3, TrueHD, DTS-HD (for HDMI passthrough)
- overlay filter added
- rename aspect filter to setdar, and pixelaspect to setsar
- IEC 61937 demuxer
- Mobotix .mxg demuxer
- frei0r source added
- hqdn3d filter added
- RTP depacketization of QCELP
- FLAC parser added
- gradfun filter added
- AMR-WB decoder
- replace the ocv_smooth filter with a more generic ocv filter
- Windows Televison (WTV) demuxer
- FFmpeg metadata format muxer and demuxer
- SubRip (srt) subtitle encoder and decoder
- floating-point AC-3 encoder added
- Lagarith decoder
- ffmpeg -copytb option added
- IVF muxer added
- Wing Commander IV movies decoder added
- movie source added
- Bink version 'b' audio and video decoder
- Bitmap Brothers JV playback system
- Apple HTTP Live Streaming protocol handler
- sndio support for playback and record
- Linux framebuffer input device added
- Chronomaster DFA decoder
- DPX image encoder
- MicroDVD subtitle file muxer and demuxer
- Playstation Portable PMP format demuxer
- fieldorder video filter added
- AAC encoding via libvo-aacenc
- AMR-WB encoding via libvo-amrwbenc
- xWMA demuxer
- Mobotix MxPEG decoder

version 0.6:

- PB-frame decoding for H.263
- deprecated vhook subsystem removed
- deprecated old scaler removed
- VQF demuxer
- Alpha channel scaler
- PCX encoder
- RTP packetization of H.263
- RTP packetization of AMR
- RTP depacketization of Vorbis
- CorePNG decoding support
- Cook multichannel decoding support
- introduced avlanguage helpers in libavformat
- 8088flex TMV demuxer and decoder
- per-stream language-tags extraction in asfdec
- V210 decoder and encoder
- remaining GPL parts in AC-3 decoder converted to LGPL
- QCP demuxer
- SoX native format muxer and demuxer
- AMR-NB decoding/encoding, AMR-WB decoding via OpenCORE libraries
- DPX image decoder
- Electronic Arts Madcow decoder
- DivX (XSUB) subtitle encoder
- nonfree libamr support for AMR-NB/WB decoding/encoding removed
- experimental AAC encoder
- RTP depacketization of ASF and RTSP from WMS servers
- RTMP support in libavformat
- noX handling for OPT_BOOL X options
- Wave64 demuxer
- IEC-61937 compatible Muxer
- TwinVQ decoder
- Bluray (PGS) subtitle decoder
- LPCM support in MPEG-TS (HDMV RID as found on Blu-ray disks)
- WMA Pro decoder
- Core Audio Format demuxer
- Atrac1 decoder
- MD STUDIO audio demuxer
- RF64 support in WAV demuxer
- MPEG-4 Audio Lossless Coding (ALS) decoder
- -formats option split into -formats, -codecs, -bsfs, and -protocols
- IV8 demuxer
- CDG demuxer and decoder
- R210 decoder
- Auravision Aura 1 and 2 decoders
- Deluxe Paint Animation playback system
- SIPR decoder
- Adobe Filmstrip muxer and demuxer
- RTP depacketization of H.263
- Bink demuxer and audio/video decoders
- enable symbol versioning by default for linkers that support it
- IFF PBM/ILBM bitmap decoder
- concat protocol
- Indeo 5 decoder
- RTP depacketization of AMR
- WMA Voice decoder
- ffprobe tool
- AMR-NB decoder
- RTSP muxer
- HE-AAC v1 decoder
- Kega Game Video (KGV1) decoder
- VorbisComment writing for FLAC, Ogg FLAC and Ogg Speex files
- RTP depacketization of Theora
- HTTP Digest authentication
- RTMP/RTMPT/RTMPS/RTMPE/RTMPTE protocol support via librtmp
- Psygnosis YOP demuxer and video decoder
- spectral extension support in the E-AC-3 decoder
- unsharp video filter
- RTP hinting in the mov/3gp/mp4 muxer
- Dirac in Ogg demuxing
- seek to keyframes in Ogg
- 4:2:2 and 4:4:4 Theora decoding
- 35% faster VP3/Theora decoding
- faster AAC decoding
- faster H.264 decoding
- RealAudio 1.0 (14.4K) encoder

version 0.5:

- DV50 AKA DVCPRO50 encoder, decoder, muxer and demuxer
- TechSmith Camtasia (TSCC) video decoder
- IBM Ultimotion (ULTI) video decoder
- Sierra Online audio file demuxer and decoder
- Apple QuickDraw (qdrw) video decoder
- Creative ADPCM audio decoder (16 bits as well as 8 bits schemes)
- Electronic Arts Multimedia (WVE/UV2/etc.) file demuxer
- Miro VideoXL (VIXL) video decoder
- H.261 video encoder
- QPEG video decoder
- Nullsoft Video (NSV) file demuxer
- Shorten audio decoder
- LOCO video decoder
- Apple Lossless Audio Codec (ALAC) decoder
- Winnov WNV1 video decoder
- Autodesk Animator Studio Codec (AASC) decoder
- Indeo 2 video decoder
- Fraps FPS1 video decoder
- Snow video encoder/decoder
- Sonic audio encoder/decoder
- Vorbis audio decoder
- Macromedia ADPCM decoder
- Duck TrueMotion 2 video decoder
- support for decoding FLX and DTA extensions in FLIC files
- H.264 custom quantization matrices support
- ffserver fixed, it should now be usable again
- QDM2 audio decoder
- Real Cooker audio decoder
- TrueSpeech audio decoder
- WMA2 audio decoder fixed, now all files should play correctly
- RealAudio 14.4 and 28.8 decoders fixed
- JPEG-LS decoder
- build system improvements
- tabs and trailing whitespace removed from the codebase
- CamStudio video decoder
- AIFF/AIFF-C audio format, encoding and decoding
- ADTS AAC file reading and writing
- Creative VOC file reading and writing
- American Laser Games multimedia (*.mm) playback system
- Zip Motion Blocks Video decoder
- improved Theora/VP3 decoder
- True Audio (TTA) decoder
- AVS demuxer and video decoder
- JPEG-LS encoder
- Smacker demuxer and decoder
- NuppelVideo/MythTV demuxer and RTjpeg decoder
- KMVC decoder
- MPEG-2 intra VLC support
- MPEG-2 4:2:2 encoder
- Flash Screen Video decoder
- GXF demuxer
- Chinese AVS decoder
- GXF muxer
- MXF demuxer
- VC-1/WMV3/WMV9 video decoder
- MacIntel support
- AVISynth support
- VMware video decoder
- VP5 video decoder
- VP6 video decoder
- WavPack lossless audio decoder
- Targa (.TGA) picture decoder
- Vorbis audio encoder
- Delphine Software .cin demuxer/audio and video decoder
- Tiertex .seq demuxer/video decoder
- MTV demuxer
- TIFF picture encoder and decoder
- GIF picture decoder
- Intel Music Coder decoder
- Zip Motion Blocks Video encoder
- Musepack decoder
- Flash Screen Video encoder
- Theora encoding via libtheora
- BMP encoder
- WMA encoder
- GSM-MS encoder and decoder
- DCA decoder
- DXA demuxer and decoder
- DNxHD decoder
- Gamecube movie (.THP) playback system
- Blackfin optimizations
- Interplay C93 demuxer and video decoder
- Bethsoft VID demuxer and video decoder
- CRYO APC demuxer
- Atrac3 decoder
- V.Flash PTX decoder
- RoQ muxer, RoQ audio encoder
- Renderware TXD demuxer and decoder
- extern C declarations for C++ removed from headers
- sws_flags command line option
- codebook generator
- RoQ video encoder
- QTRLE encoder
- OS/2 support removed and restored again
- AC-3 decoder
- NUT muxer
- additional SPARC (VIS) optimizations
- Matroska muxer
- slice-based parallel H.264 decoding
- Monkey's Audio demuxer and decoder
- AMV audio and video decoder
- DNxHD encoder
- H.264 PAFF decoding
- Nellymoser ASAO decoder
- Beam Software SIFF demuxer and decoder
- libvorbis Vorbis decoding removed in favor of native decoder
- IntraX8 (J-Frame) subdecoder for WMV2 and VC-1
- Ogg (Theora, Vorbis and FLAC) muxer
- The "device" muxers and demuxers are now in a new libavdevice library
- PC Paintbrush PCX decoder
- Sun Rasterfile decoder
- TechnoTrend PVA demuxer
- Linux Media Labs MPEG-4 (LMLM4) demuxer
- AVM2 (Flash 9) SWF muxer
- QT variant of IMA ADPCM encoder
- VFW grabber
- iPod/iPhone compatible mp4 muxer
- Mimic decoder
- MSN TCP Webcam stream demuxer
- RL2 demuxer / decoder
- IFF demuxer
- 8SVX audio decoder
- non-recursive Makefiles
- BFI demuxer
- MAXIS EA XA (.xa) demuxer / decoder
- BFI video decoder
- OMA demuxer
- MLP/TrueHD decoder
- Electronic Arts CMV decoder
- Motion Pixels Video decoder
- Motion Pixels MVI demuxer
- removed animated GIF decoder/demuxer
- D-Cinema audio muxer
- Electronic Arts TGV decoder
- Apple Lossless Audio Codec (ALAC) encoder
- AAC decoder
- floating point PCM encoder/decoder
- MXF muxer
- DV100 AKA DVCPRO HD decoder and demuxer
- E-AC-3 support added to AC-3 decoder
- Nellymoser ASAO encoder
- ASS and SSA demuxer and muxer
- liba52 wrapper removed
- SVQ3 watermark decoding support
- Speex decoding via libspeex
- Electronic Arts TGQ decoder
- RV40 decoder
- QCELP / PureVoice decoder
- RV30 decoder
- hybrid WavPack support
- R3D REDCODE demuxer
- ALSA support for playback and record
- Electronic Arts TQI decoder
- OpenJPEG based JPEG 2000 decoder
- NC (NC4600) camera file demuxer
- Gopher client support
- MXF D-10 muxer
- generic metadata API
- flash ScreenVideo2 encoder

version 0.4.9-pre1:

- DV encoder, DV muxer
- Microsoft RLE video decoder
- Microsoft Video-1 decoder
- Apple Animation (RLE) decoder
- Apple Graphics (SMC) decoder
- Apple Video (RPZA) decoder
- Cinepak decoder
- Sega FILM (CPK) file demuxer
- Westwood multimedia support (VQA & AUD files)
- Id Quake II CIN playback support
- 8BPS video decoder
- FLIC playback support
- RealVideo 2.0 (RV20) decoder
- Duck TrueMotion v1 (DUCK) video decoder
- Sierra VMD demuxer and video decoder
- MSZH and ZLIB decoder support
- SVQ1 video encoder
- AMR-WB support
- PPC optimizations
- rate distortion optimal cbp support
- rate distorted optimal ac prediction for MPEG-4
- rate distorted optimal lambda->qp support
- AAC encoding with libfaac
- Sunplus JPEG codec (SP5X) support
- use Lagrange multipler instead of QP for ratecontrol
- Theora/VP3 decoding support
- XA and ADX ADPCM codecs
- export MPEG-2 active display area / pan scan
- Add support for configuring with IBM XLC
- floating point AAN DCT
- initial support for zygo video (not complete)
- RGB ffv1 support
- new audio/video parser API
- av_log() system
- av_read_frame() and av_seek_frame() support
- missing last frame fixes
- seek by mouse in ffplay
- noise reduction of DCT coefficients
- H.263 OBMC & 4MV support
- H.263 alternative inter vlc support
- H.263 loop filter
- H.263 slice structured mode
- interlaced DCT support for MPEG-2 encoding
- stuffing to stay above min_bitrate
- MB type & QP visualization
- frame stepping for ffplay
- interlaced motion estimation
- alternate scantable support
- SVCD scan offset support
- closed GOP support
- SSE2 FDCT
- quantizer noise shaping
- G.726 ADPCM audio codec
- MS ADPCM encoding
- multithreaded/SMP motion estimation
- multithreaded/SMP encoding for MPEG-1/MPEG-2/MPEG-4/H.263
- multithreaded/SMP decoding for MPEG-2
- FLAC decoder
- Metrowerks CodeWarrior suppport
- H.263+ custom pcf support
- nicer output for 'ffmpeg -formats'
- Matroska demuxer
- SGI image format, encoding and decoding
- H.264 loop filter support
- H.264 CABAC support
- nicer looking arrows for the motion vector visualization
- improved VCD support
- audio timestamp drift compensation
- MPEG-2 YUV 422/444 support
- polyphase kaiser windowed sinc and blackman nuttall windowed sinc audio resample
- better image scaling
- H.261 support
- correctly interleave packets during encoding
- VIS optimized motion compensation
- intra_dc_precision>0 encoding support
- support reuse of motion vectors/MB types/field select values of the source video
- more accurate deblock filter
- padding support
- many optimizations and bugfixes
- FunCom ISS audio file demuxer and according ADPCM decoding

version 0.4.8:

- MPEG-2 video encoding (Michael)
- Id RoQ playback subsystem (Mike Melanson and Tim Ferguson)
- Wing Commander III Movie (.mve) file playback subsystem (Mike Melanson
  and Mario Brito)
- Xan DPCM audio decoder (Mario Brito)
- Interplay MVE playback subsystem (Mike Melanson)
- Duck DK3 and DK4 ADPCM audio decoders (Mike Melanson)

version 0.4.7:

- RealAudio 1.0 (14_4) and 2.0 (28_8) native decoders. Author unknown, code from mplayerhq
  (originally from public domain player for Amiga at http://www.honeypot.net/audio)
- current version now also compiles with older GCC (Fabrice)
- 4X multimedia playback system including 4xm file demuxer (Mike
  Melanson), and 4X video and audio codecs (Michael)
- Creative YUV (CYUV) decoder (Mike Melanson)
- FFV1 codec (our very simple lossless intra only codec, compresses much better
  than HuffYUV) (Michael)
- ASV1 (Asus), H.264, Intel indeo3 codecs have been added (various)
- tiny PNG encoder and decoder, tiny GIF decoder, PAM decoder (PPM with
  alpha support), JPEG YUV colorspace support. (Fabrice Bellard)
- ffplay has been replaced with a newer version which uses SDL (optionally)
  for multiplatform support (Fabrice)
- Sorenson Version 3 codec (SVQ3) support has been added (decoding only) - donated
  by anonymous
- AMR format has been added (Johannes Carlsson)
- 3GP support has been added (Johannes Carlsson)
- VP3 codec has been added (Mike Melanson)
- more MPEG-1/2 fixes
- better multiplatform support, MS Visual Studio fixes (various)
- AltiVec optimizations (Magnus Damn and others)
- SH4 processor support has been added (BERO)
- new public interfaces (avcodec_get_pix_fmt) (Roman Shaposhnick)
- VOB streaming support (Brian Foley)
- better MP3 autodetection (Andriy Rysin)
- qpel encoding (Michael)
- 4mv+b frames encoding finally fixed (Michael)
- chroma ME (Michael)
- 5 comparison functions for ME (Michael)
- B-frame encoding speedup (Michael)
- WMV2 codec (unfinished - Michael)
- user specified diamond size for EPZS (Michael)
- Playstation STR playback subsystem, still experimental (Mike and Michael)
- ASV2 codec (Michael)
- CLJR decoder (Alex)

.. And lots more new enhancements and fixes.

version 0.4.6:

- completely new integer only MPEG audio layer 1/2/3 decoder rewritten
  from scratch
- Recoded DCT and motion vector search with gcc (no longer depends on nasm)
- fix quantization bug in AC3 encoder
- added PCM codecs and format. Corrected WAV/AVI/ASF PCM issues
- added prototype ffplay program
- added GOB header parsing on H.263/H.263+ decoder (Juanjo)
- bug fix on MCBPC tables of H.263 (Juanjo)
- bug fix on DC coefficients of H.263 (Juanjo)
- added Advanced Prediction Mode on H.263/H.263+ decoder (Juanjo)
- now we can decode H.263 streams found in QuickTime files (Juanjo)
- now we can decode H.263 streams found in VIVO v1 files(Juanjo)
- preliminary RTP "friendly" mode for H.263/H.263+ coding. (Juanjo)
- added GOB header for H.263/H.263+ coding on RTP mode (Juanjo)
- now H.263 picture size is returned on the first decoded frame (Juanjo)
- added first regression tests
- added MPEG-2 TS demuxer
- new demux API for libav
- more accurate and faster IDCT (Michael)
- faster and entropy-controlled motion search (Michael)
- two pass video encoding (Michael)
- new video rate control (Michael)
- added MSMPEG4V1, MSMPEGV2 and WMV1 support (Michael)
- great performance improvement of video encoders and decoders (Michael)
- new and faster bit readers and vlc parsers (Michael)
- high quality encoding mode: tries all macroblock/VLC types (Michael)
- added DV video decoder
- preliminary RTP/RTSP support in ffserver and libavformat
- H.263+ AIC decoding/encoding support (Juanjo)
- VCD MPEG-PS mode (Juanjo)
- PSNR stuff (Juanjo)
- simple stats output (Juanjo)
- 16-bit and 15-bit RGB/BGR/GBR support (Bisqwit)

version 0.4.5:

- some header fixes (Zdenek Kabelac <kabi at informatics.muni.cz>)
- many MMX optimizations (Nick Kurshev <nickols_k at mail.ru>)
- added configure system (actually a small shell script)
- added MPEG audio layer 1/2/3 decoding using LGPL'ed mpglib by
  Michael Hipp (temporary solution - waiting for integer only
  decoder)
- fixed VIDIOCSYNC interrupt
- added Intel H.263 decoding support ('I263' AVI fourCC)
- added Real Video 1.0 decoding (needs further testing)
- simplified image formats again. Added PGM format (=grey
  pgm). Renamed old PGM to PGMYUV.
- fixed msmpeg4 slice issues (tell me if you still find problems)
- fixed OpenDivX bugs with newer versions (added VOL header decoding)
- added support for MPlayer interface
- added macroblock skip optimization
- added MJPEG decoder
- added mmx/mmxext IDCT from libmpeg2
- added pgmyuvpipe, ppm, and ppm_pipe formats (original patch by Celer
  <celer at shell.scrypt.net>)
- added pixel format conversion layer (e.g. for MJPEG or PPM)
- added deinterlacing option
- MPEG-1/2 fixes
- MPEG-4 vol header fixes (Jonathan Marsden <snmjbm at pacbell.net>)
- ARM optimizations (Lionel Ulmer <lionel.ulmer at free.fr>).
- Windows porting of file converter
- added MJPEG raw format (input/ouput)
- added JPEG image format support (input/output)

version 0.4.4:

- fixed some std header definitions (Bjorn Lindgren
  <bjorn.e.lindgren at telia.com>).
- added MPEG demuxer (MPEG-1 and 2 compatible).
- added ASF demuxer
- added prototype RM demuxer
- added AC3 decoding (done with libac3 by Aaron Holtzman)
- added decoding codec parameter guessing (.e.g. for MPEG, because the
  header does not include them)
- fixed header generation in MPEG-1, AVI and ASF muxer: wmplayer can now
  play them (only tested video)
- fixed H.263 white bug
- fixed phase rounding in img resample filter
- add MMX code for polyphase img resample filter
- added CPU autodetection
- added generic title/author/copyright/comment string handling (ASF and RM
  use them)
- added SWF demux to extract MP3 track (not usable yet because no MP3
  decoder)
- added fractional frame rate support
- codecs are no longer searched by read_header() (should fix ffserver
  segfault)

version 0.4.3:

- BGR24 patch (initial patch by Jeroen Vreeken <pe1rxq at amsat.org>)
- fixed raw yuv output
- added motion rounding support in MPEG-4
- fixed motion bug rounding in MSMPEG4
- added B-frame handling in video core
- added full MPEG-1 decoding support
- added partial (frame only) MPEG-2 support
- changed the FOURCC code for H.263 to "U263" to be able to see the
  +AVI/H.263 file with the UB Video H.263+ decoder. MPlayer works with
  this +codec ;) (JuanJo).
- Halfpel motion estimation after MB type selection (JuanJo)
- added pgm and .Y.U.V output format
- suppressed 'img:' protocol. Simply use: /tmp/test%d.[pgm|Y] as input or
  output.
- added pgmpipe I/O format (original patch from Martin Aumueller
  <lists at reserv.at>, but changed completely since we use a format
  instead of a protocol)

version 0.4.2:

- added H.263/MPEG-4/MSMPEG4 decoding support. MPEG-4 decoding support
  (for OpenDivX) is almost complete: 8x8 MVs and rounding are
  missing. MSMPEG4 support is complete.
- added prototype MPEG-1 decoder. Only I- and P-frames handled yet (it
  can decode ffmpeg MPEGs :-)).
- added libavcodec API documentation (see apiexample.c).
- fixed image polyphase bug (the bottom of some images could be
  greenish)
- added support for non clipped motion vectors (decoding only)
  and image sizes non-multiple of 16
- added support for AC prediction (decoding only)
- added file overwrite confirmation (can be disabled with -y)
- added custom size picture to H.263 using H.263+ (Juanjo)

version 0.4.1:

- added MSMPEG4 (aka DivX) compatible encoder. Changed default codec
  of AVI and ASF to DIV3.
- added -me option to set motion estimation method
  (default=log). suppressed redundant -hq option.
- added options -acodec and -vcodec to force a given codec (useful for
  AVI for example)
- fixed -an option
- improved dct_quantize speed
- factorized some motion estimation code

version 0.4.0:

- removing grab code from ffserver and moved it to ffmpeg. Added
  multistream support to ffmpeg.
- added timeshifting support for live feeds (option ?date=xxx in the
  URL)
- added high quality image resize code with polyphase filter (need
  mmx/see optimization). Enable multiple image size support in ffserver.
- added multi live feed support in ffserver
- suppressed master feature from ffserver (it should be done with an
  external program which opens the .ffm url and writes it to another
  ffserver)
- added preliminary support for video stream parsing (WAV and AVI half
  done). Added proper support for audio/video file conversion in
  ffmpeg.
- added preliminary support for video file sending from ffserver
- redesigning I/O subsystem: now using URL based input and output
  (see avio.h)
- added WAV format support
- added "tty user interface" to ffmpeg to stop grabbing gracefully
- added MMX/SSE optimizations to SAD (Sums of Absolutes Differences)
  (Juan J. Sierralta P. a.k.a. "Juanjo" <juanjo at atmlab.utfsm.cl>)
- added MMX DCT from mpeg2_movie 1.5 (Juanjo)
- added new motion estimation algorithms, log and phods (Juanjo)
- changed directories: libav for format handling, libavcodec for
  codecs

version 0.3.4:

- added stereo in MPEG audio encoder

version 0.3.3:

- added 'high quality' mode which use motion vectors. It can be used in
  real time at low resolution.
- fixed rounding problems which caused quality problems at high
  bitrates and large GOP size

version 0.3.2: small fixes

- ASF fixes
- put_seek bug fix

version 0.3.1: added avi/divx support

- added AVI support
- added MPEG-4 codec compatible with OpenDivX. It is based on the H.263 codec
- added sound for flash format (not tested)

version 0.3: initial public release

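The 0.7 changelog entry above notes that the av_metadata_* functions were renamed to av_dict_* and moved to libavutil. The sketch below is a minimal illustration of that dictionary API, not code from this repository; the key and value strings are made up for the example.

```c
#include <stdio.h>
#include <libavutil/dict.h>

/* Minimal sketch of the av_dict_* API that replaced av_metadata_*.
 * Keys and values here are arbitrary examples. */
int main(void)
{
    AVDictionary *meta = NULL;
    AVDictionaryEntry *e = NULL;

    av_dict_set(&meta, "title", "example stream", 0);  /* add or replace an entry */
    av_dict_set(&meta, "artist", "unknown", 0);

    /* iterate over every entry; AV_DICT_IGNORE_SUFFIX matches all keys */
    while ((e = av_dict_get(meta, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", e->key, e->value);

    av_dict_free(&meta);
    return 0;
}
```
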
Doxyfile (2 changed lines)

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.8.12
PROJECT_NUMBER = 0.7.4

# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.

LICENSE (6 changed lines)

@@ -41,6 +41,6 @@ is incompatible with the LGPL v2.1 and the GPL v2, but not with version 3 of
those licenses. So to combine the OpenCORE libraries with FFmpeg, the license
version needs to be upgraded by passing --enable-version3 to configure.

The nonfree external libraries libfaac and libaacplus can be hooked up in FFmpeg.
You need to pass --enable-nonfree to configure to enable it. Employ this option
with care as FFmpeg then becomes nonfree and unredistributable.
The nonfree external library libfaac can be hooked up in FFmpeg. You need to
pass --enable-nonfree to configure to enable it. Employ this option with care
as FFmpeg then becomes nonfree and unredistributable.

@@ -19,7 +19,7 @@ ffmpeg:
ffmpeg.c Michael Niedermayer

ffplay:
ffplay.c Marton Balint
ffplay.c Michael Niedermayer

ffprobe:
ffprobe.c Stefano Sabatini

@@ -373,7 +373,6 @@ Ben Littler 3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
Diego Biurrun 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
Justin Ruggles 3136 ECC0 C10D 6C04 5F43 CA29 FCBE CD2A 3787 1EBF
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE

Makefile (7 changed lines)

@@ -258,12 +258,9 @@ FATE_SEEK = $(SEEK_TESTS:seek_%=fate-seek-%)
FATE = $(FATE_ACODEC) \
$(FATE_VCODEC) \
$(FATE_LAVF) \
$(FATE_LAVFI) \
$(FATE_SEEK) \

FATE-$(CONFIG_AVFILTER) += $(FATE_LAVFI)

FATE += $(FATE-yes)

$(filter-out %-aref,$(FATE_ACODEC)): $(AREF)
$(filter-out %-vref,$(FATE_VCODEC)): $(VREF)
$(FATE_LAVF): $(REFS)

@@ -285,7 +282,7 @@ fate-lavfi: $(FATE_LAVFI)
fate-seek: $(FATE_SEEK)

ifdef SAMPLES
FATE += $(FATE_TESTS) $(FATE_TESTS-yes)
FATE += $(FATE_TESTS)
fate-rsync:
rsync -vaLW rsync://fate-suite.libav.org/fate-suite/ $(SAMPLES)
else

configure (vendored, 26 changed lines)

@@ -162,7 +162,6 @@ External library support:
--enable-bzlib enable bzlib [autodetect]
--enable-libcelt enable CELT/Opus decoding via libcelt [no]
--enable-frei0r enable frei0r video filtering
--enable-libaacplus enable AAC+ encoding via libaacplus [no]
--enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
--enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
--enable-libopencv enable video filtering via libopencv [no]

@@ -178,7 +177,7 @@ External library support:
--enable-libopenjpeg enable JPEG 2000 decoding via OpenJPEG [no]
--enable-librtmp enable RTMP[E] support via librtmp [no]
--enable-libschroedinger enable Dirac support via libschroedinger [no]
--enable-libspeex enable Speex encoding and decoding via libspeex [no]
--enable-libspeex enable Speex decoding via libspeex [no]
--enable-libtheora enable Theora encoding via libtheora [no]
--enable-libvo-aacenc enable AAC encoding via libvo-aacenc [no]
--enable-libvo-amrwbenc enable AMR-WB encoding via libvo-amrwbenc [no]

@@ -928,8 +927,6 @@ CONFIG_LIST="
h264pred
hardcoded_tables
huffman
libaacplus
libcdio
libcelt
libdc1394
libdirac

@@ -1404,7 +1401,6 @@ vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
h264_parser_select="golomb h264dsp h264pred"

# external libraries
libaacplus_encoder_deps="libaacplus"
libcelt_decoder_deps="libcelt"
libdirac_decoder_deps="libdirac !libschroedinger"
libdirac_encoder_deps="libdirac"

@@ -1421,7 +1417,6 @@ libopenjpeg_decoder_deps="libopenjpeg"
libschroedinger_decoder_deps="libschroedinger"
libschroedinger_encoder_deps="libschroedinger"
libspeex_decoder_deps="libspeex"
libspeex_encoder_deps="libspeex"
libtheora_encoder_deps="libtheora"
libvo_aacenc_encoder_deps="libvo_aacenc"
libvo_amrwbenc_encoder_deps="libvo_amrwbenc"

@@ -1536,7 +1531,7 @@ test_deps(){
dep=${v%=*}
tests=${v#*=}
for name in ${tests}; do
append ${name}_test_deps ${dep}$suf1 ${dep}$suf2
eval ${name}_test_deps="'${dep}$suf1 ${dep}$suf2'"
done
done
}

@@ -1546,9 +1541,6 @@ set_ne_test_deps(){
eval ${1}_le_test_deps="!bigendian"
}

mxf_d10_test_deps="avfilter"
seek_lavf_mxf_d10_test_deps="mxf_d10_test"

test_deps _encoder _decoder \
adpcm_g726=g726 \
adpcm_ima_qt \

@@ -1611,7 +1603,7 @@ test_deps _muxer _demuxer \
mmf \
mov \
pcm_mulaw=mulaw \
mxf="mxf mxf_d10" \
mxf \
nut \
ogg \
rawvideo=pixfmt \

@@ -2203,7 +2195,7 @@ case "$arch" in
arch="sparc"
subarch="sparc64"
;;
i[3-6]86|i86pc|BePC|x86pc|x86_64|x86_32|amd64)
i[3-6]86|i86pc|BePC|x86pc|x86_64|amd64)
arch="x86"
;;
esac

@@ -2591,7 +2583,6 @@ die_license_disabled gpl libxavs
die_license_disabled gpl libxvid
die_license_disabled gpl x11grab

die_license_disabled nonfree libaacplus
die_license_disabled nonfree libfaac

die_license_disabled version3 libopencore_amrnb

@@ -2924,7 +2915,6 @@ check_mathfunc truncf
enabled avisynth && require2 vfw32 "windows.h vfw.h" AVIFileInit -lavifil32
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0
enabled frei0r && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; }
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
enabled libdc1394 && require_pkg_config libdc1394-2 dc1394/dc1394.h dc1394_new
enabled libdirac && require_pkg_config dirac \
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \

@@ -3082,10 +3072,6 @@ else
fi
check_cflags -fno-math-errno
check_cflags -fno-signed-zeros
check_cc -mno-red-zone <<EOF && noredzone_flags="-mno-red-zone"
int x;
EOF


if enabled icc; then
# Just warnings, no remarks

@@ -3164,7 +3150,7 @@ check_deps $CONFIG_LIST \

enabled asm || { arch=c; disable $ARCH_LIST $ARCH_EXT_LIST; }

if test $target_os = "haiku"; then
if test $target_os == "haiku"; then
disable memalign
disable posix_memalign
fi

@@ -3236,7 +3222,6 @@ echo "frei0r enabled ${frei0r-no}"
echo "libdc1394 support ${libdc1394-no}"
echo "libdirac enabled ${libdirac-no}"
echo "libfaac enabled ${libfaac-no}"
echo "libaacplus enabled ${libaacplus-no}"
echo "libgsm enabled ${libgsm-no}"
echo "libmp3lame enabled ${libmp3lame-no}"
echo "libnut enabled ${libnut-no}"

@@ -3397,7 +3382,6 @@ SLIB_EXTRA_CMD=${SLIB_EXTRA_CMD}
SLIB_INSTALL_EXTRA_CMD=${SLIB_INSTALL_EXTRA_CMD}
SLIB_UNINSTALL_EXTRA_CMD=${SLIB_UNINSTALL_EXTRA_CMD}
SAMPLES:=${samples:-\$(FATE_SAMPLES)}
NOREDZONE_FLAGS=$noredzone_flags
EOF

get_version(){

@@ -13,7 +13,6 @@ libavutil: 2011-04-18

API changes, most recent first:


2011-06-19 - xxxxxxx - lavfi 2.23.0 - avfilter.h
  Add layout negotiation fields and helper functions.

@@ -44,12 +43,6 @@ API changes, most recent first:
2011-06-12 - xxxxxxx - lavfi 2.16.0 - avfilter_graph_parse()
  Change avfilter_graph_parse() signature.

2011-07-10 - xxxxxxx - lavf 53.3.0
  Add avformat_find_stream_info(), deprecate av_find_stream_info().

2011-07-10 - xxxxxxx - lavc 53.6.0
  Add avcodec_open2(), deprecate avcodec_open().

2011-06-xx - xxxxxxx - lavf 53.2.0 - avformat.h
  Add avformat_open_input and avformat_write_header().
  Deprecate av_open_input_stream, av_open_input_file,

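The API-changes entries above introduce avformat_open_input(), avformat_find_stream_info() and avcodec_open2() as replacements for the deprecated av_open_input_file(), av_find_stream_info() and avcodec_open(). The following is a hedged sketch of the newer calling sequence in the lavf 53.x / lavc 53.x era named above; the assumption that the file has at least one stream and the error-handling style are illustrative, not taken from this tree.

```c
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Sketch: open an input with the APIs added in lavf 53.2/53.3 and lavc 53.6,
 * replacing av_open_input_file(), av_find_stream_info() and avcodec_open().
 * Assumes the input has at least one stream. */
static int open_first_stream(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVCodec *dec;
    int ret;

    av_register_all();

    if ((ret = avformat_open_input(&ic, filename, NULL, NULL)) < 0)
        return ret;                              /* replaces av_open_input_file() */

    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        goto end;                                /* replaces av_find_stream_info() */

    dec = avcodec_find_decoder(ic->streams[0]->codec->codec_id);
    if (!dec) {
        ret = AVERROR(EINVAL);
        goto end;
    }
    ret = avcodec_open2(ic->streams[0]->codec, dec, NULL);  /* replaces avcodec_open() */

end:
    av_close_input_file(ic);                     /* era-appropriate close call */
    return ret;
}
```
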
@@ -2,7 +2,7 @@ Release Notes
=============

* 0.8 "Love" June, 2011
* 0.7 "Peace" June, 2011 (identical to 0.8 but using 0.6 ABI/API)
* 0.7.1 "Peace" June, 2011 (identical to 0.8 but using 0.6 ABI/API)


General notes

@@ -479,7 +479,7 @@ int main(int argc, char **argv)

/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
if (avio_open(&oc->pb, filename, AVIO_WRONLY) < 0) {
fprintf(stderr, "Could not open '%s'\n", filename);
exit(1);
}

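Both avio_open() lines in the hunk above open the muxer's output file; AVIO_FLAG_WRITE is the flag spelling on one side of this comparison and AVIO_WRONLY on the other. Below is a minimal sketch of the write-side pattern, assuming the AVIO_FLAG_WRITE spelling and a hypothetical "output.mkv" name; the avformat_write_header() call is the newer API, while older trees pair av_set_parameters() with av_write_header() instead.

```c
#include <libavformat/avformat.h>

/* Sketch: open an AVIOContext for a muxer whose format needs a real file,
 * write the container header, and clean up on failure. "output.mkv" is a
 * hypothetical name and error paths are abbreviated. */
static int open_and_write_header(AVFormatContext *oc)
{
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, "output.mkv", AVIO_FLAG_WRITE) < 0)
            return -1;                 /* could not open the output */
    }

    if (avformat_write_header(oc, NULL) < 0) {
        if (!(oc->oformat->flags & AVFMT_NOFILE))
            avio_close(oc->pb);        /* undo the open on header failure */
        return -1;
    }
    return 0;
}
```
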
@@ -447,11 +447,6 @@ encompassing your FFmpeg includes using @code{extern "C"}.

See @url{http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3}

@section I'm using libavutil from within my C++ application but the compiler complains about 'UINT64_C' was not declared in this scope

Libav is a pure C project using C99 math features, in order to enable C++
to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS

@section I have a file in memory / a API different from *open/*read/ libc how do I use it with libavformat?

You have to implement a URLProtocol, see @file{libavformat/file.c} in

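The FAQ entry above answers the in-memory question with a URLProtocol. As an alternative sketch, a custom AVIOContext built with avio_alloc_context() can serve the same purpose, assuming that avio API is available in the tree being built against; the buffer size, names and omitted error checks below are all illustrative.

```c
#include <string.h>
#include <libavformat/avformat.h>

/* Sketch: feed an in-memory buffer to libavformat through a custom
 * AVIOContext instead of a URLProtocol. Error checks omitted for brevity. */
struct mem_reader {
    const uint8_t *data;
    size_t size, pos;
};

static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    struct mem_reader *r = opaque;
    size_t left = r->size - r->pos;
    if (buf_size > (int)left)
        buf_size = (int)left;          /* 0 signals end of data */
    memcpy(buf, r->data + r->pos, buf_size);
    r->pos += buf_size;
    return buf_size;
}

static int open_from_memory(const uint8_t *data, size_t size, AVFormatContext **out)
{
    struct mem_reader *r = av_mallocz(sizeof(*r));
    unsigned char *iobuf  = av_malloc(4096);
    AVFormatContext *ic   = avformat_alloc_context();

    r->data = data;
    r->size = size;
    ic->pb = avio_alloc_context(iobuf, 4096, 0 /* read-only */, r,
                                mem_read, NULL, NULL);   /* custom I/O */
    *out = ic;
    return avformat_open_input(out, "in-memory", NULL, NULL);
}
```
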
@@ -713,39 +713,8 @@ ffmpeg -i in.ogg -map_metadata 0:0,s0 out.mp3
Copy chapters from @var{infile} to @var{outfile}. If no chapter mapping is specified,
then chapters are copied from the first input file with at least one chapter to all
output files. Use a negative file index to disable any chapter copying.
@item -debug @var{category}
@item -debug
Print specific debug info.
@var{category} is a number or a string containing one of the following values:
@table @samp
@item bitstream
@item buffers
picture buffer allocations
@item bugs
@item dct_coeff
@item er
error recognition
@item mb_type
macroblock (MB) type
@item mmco
memory management control operations (H.264)
@item mv
motion vector
@item pict
picture info
@item pts
@item qp
per-block quantization parameter (QP)
@item rc
rate control
@item skip
@item startcode
@item thread_ops
threading operations
@item vis_mb_type
visualize block types
@item vis_qp
visualize quantization parameter (QP), lower QP are tinted greener
@end table
@item -benchmark
Show benchmarking information at the end of an encode.
Shows CPU time used and maximum memory consumption.

@@ -1683,7 +1683,7 @@ It accepts the following parameters:

Negative values for the amount will blur the input video, while positive
values will sharpen. All parameters are optional and default to the
equivalent of the string '5:5:1.0:5:5:0.0'.
equivalent of the string '5:5:1.0:0:0:0.0'.

@table @option

@@ -1701,11 +1701,11 @@ and 5.0, default value is 1.0.

@item chroma_msize_x
Set the chroma matrix horizontal size. It can be an integer between 3
and 13, default value is 5.
and 13, default value is 0.

@item chroma_msize_y
Set the chroma matrix vertical size. It can be an integer between 3
and 13, default value is 5.
and 13, default value is 0.

@item luma_amount
Set the chroma effect strength. It can be a float number between -2.0

@@ -1760,9 +1760,9 @@ interlaced video, accepts one of the following values:

@table @option
@item 0
assume top field first
@item 1
assume bottom field first
@item 1
assume top field first
@item -1
enable automatic detection
@end table

@@ -542,8 +542,6 @@ following image formats are supported:

@multitable @columnfractions .4 .1 .1 .4
@item Name @tab Encoding @tab Decoding @tab Comments
@item 8SVX audio @tab @tab X
@item AAC+ @tab E @tab X
@tab encoding supported through external library libaacplus
@item AAC @tab E @tab X
@tab encoding supported through external library libfaac and libvo-aacenc
@item AC-3 @tab IX @tab X

@@ -1062,7 +1060,7 @@ These library packages are only available from Cygwin Ports
(@url{http://sourceware.org/cygwinports/}) :

@example
yasm, libSDL-devel, libdirac-devel, libfaac-devel, libaacplus-devel, libgsm-devel,
yasm, libSDL-devel, libdirac-devel, libfaac-devel, libgsm-devel,
libmp3lame-devel, libschroedinger1.0-devel, speex-devel, libtheora-devel,
libxvidcore-devel
@end example

ffmpeg.c (19 changed lines)

@@ -31,7 +31,7 @@
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavcodec/opt.h"
#include "libavcodec/audioconvert.h"
#include "libavutil/audioconvert.h"
#include "libavutil/parseutils.h"

@@ -113,7 +113,9 @@ typedef struct AVChapterMap {
static const OptionDef options[];

#define MAX_FILES 100
#if !FF_API_MAX_STREAMS
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
#endif

static const char *last_asked_format = NULL;
static int64_t input_files_ts_offset[MAX_FILES];

@@ -343,7 +345,7 @@ typedef struct AVInputFile {
int eof_reached; /* true if eof reached */
int ist_index; /* index of first stream in ist_table */
int buffer_size; /* current total buffer size */
int nb_streams; /* nb streams we are aware of */
int nb_streams;
} AVInputFile;

#if HAVE_TERMIOS_H

@@ -712,7 +714,6 @@ static int read_ffserver_streams(AVFormatContext *s, const char *filename)
return err;
/* copy stream format */
s->nb_streams = 0;
s->streams = av_mallocz(sizeof(AVStream *) * ic->nb_streams);
for(i=0;i<ic->nb_streams;i++) {
AVStream *st;
AVCodec *codec;

@@ -2054,7 +2055,7 @@ static int transcode(AVFormatContext **output_files,
fi = stream_maps[i].sync_file_index;
si = stream_maps[i].sync_stream_index;
if (fi < 0 || fi > nb_input_files - 1 ||
si < 0 || si > input_files[fi].nb_streams - 1) {
si < 0 || si > input_files[fi].ctx->nb_streams - 1) {
fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
ret = AVERROR(EINVAL);
goto fail;

@@ -2379,9 +2380,9 @@ static int transcode(AVFormatContext **output_files,
}
}
if(codec->codec_type == AVMEDIA_TYPE_VIDEO){
/* maximum video buffer size is 6-bytes per pixel, plus DPX header size (1664)*/
/* maximum video buffer size is 6-bytes per pixel, plus DPX header size */
int size= codec->width * codec->height;
bit_buffer_size= FFMAX(bit_buffer_size, 7*size + 10000);
bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 1664);
}
}

@@ -3957,7 +3958,7 @@ static int opt_output_file(const char *opt, const char *filename)
/* check filename in case of an image number is expected */
if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
if (!av_filename_number_test(oc->filename)) {
print_error(oc->filename, AVERROR(EINVAL));
print_error(oc->filename, AVERROR_NUMEXPECTED);
ffmpeg_exit(1);
}
}

@@ -3968,7 +3969,7 @@ static int opt_output_file(const char *opt, const char *filename)
(strchr(filename, ':') == NULL ||
filename[1] == ':' ||
av_strstart(filename, "file:", NULL))) {
if (avio_check(filename, 0) == 0) {
if (url_exist(filename)) {
if (!using_stdin) {
fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
fflush(stderr);

@@ -3985,7 +3986,7 @@ static int opt_output_file(const char *opt, const char *filename)
}

/* open the file */
if ((err = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE)) < 0) {
if ((err = avio_open(&oc->pb, filename, AVIO_WRONLY)) < 0) {
print_error(filename, err);
ffmpeg_exit(1);
}

ffplay.c (9 changed lines)

@@ -35,7 +35,7 @@
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavutil/opt.h"
#include "libavcodec/opt.h"
#include "libavcodec/avfft.h"

#if CONFIG_AVFILTER

@@ -2135,12 +2135,7 @@ static int stream_component_open(VideoState *is, int stream_index)

avctx->workaround_bugs = workaround_bugs;
avctx->lowres = lowres;
if(avctx->lowres > codec->max_lowres){
av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
codec->max_lowres);
avctx->lowres= codec->max_lowres;
}
if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
avctx->idct_algo= idct;
if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
avctx->skip_frame= skip_frame;

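One side of the ffplay.c hunk above clamps the requested lowres value against the decoder's advertised maximum before enabling CODEC_FLAG_EMU_EDGE; the other applies it unchecked. A distilled sketch of that guard, with the surrounding ffplay specifics stripped away and an illustrative parameter name:

```c
#include <libavcodec/avcodec.h>

/* Sketch: apply a user-requested lowres factor but never exceed what the
 * decoder reports it can handle (codec->max_lowres), mirroring the guard
 * shown in the ffplay.c hunk above. */
static void apply_lowres(AVCodecContext *avctx, AVCodec *codec, int requested_lowres)
{
    avctx->lowres = requested_lowres;
    if (avctx->lowres > codec->max_lowres) {
        av_log(avctx, AV_LOG_WARNING,
               "The maximum value for lowres supported by the decoder is %d\n",
               codec->max_lowres);
        avctx->lowres = codec->max_lowres;   /* clamp to the decoder's limit */
    }
    if (avctx->lowres)
        avctx->flags |= CODEC_FLAG_EMU_EDGE; /* lowres needs edge emulation */
}
```
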
@@ -23,7 +23,7 @@

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/opt.h"
#include "libavcodec/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/dict.h"
#include "libavdevice/avdevice.h"

ffserver.c (43 changed lines)

@@ -39,7 +39,7 @@
#include "libavutil/dict.h"
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
#include "libavutil/opt.h"
#include "libavcodec/opt.h"
#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>

@@ -94,7 +94,9 @@ static const char *http_state[] = {
"RTSP_SEND_PACKET",
};

#if !FF_API_MAX_STREAMS
#define MAX_STREAMS 20
#endif

#define IOBUFFER_INIT_SIZE 8192

@@ -516,7 +518,6 @@ static int socket_open_listen(struct sockaddr_in *my_addr)
tmp = 1;
setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));

my_addr->sin_family = AF_INET;
if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) {
char bindmsg[32];
snprintf(bindmsg, sizeof(bindmsg), "bind(port %d)", ntohs(my_addr->sin_port));

@@ -2230,11 +2231,11 @@ static int http_prepare_data(HTTPContext *c)
av_dict_set(&c->fmt_ctx.metadata, "copyright", c->stream->copyright, 0);
av_dict_set(&c->fmt_ctx.metadata, "title" , c->stream->title , 0);

c->fmt_ctx.streams = av_mallocz(sizeof(AVStream *) * c->stream->nb_streams);

for(i=0;i<c->stream->nb_streams;i++) {
AVStream *st;
AVStream *src;
c->fmt_ctx.streams[i] = av_mallocz(sizeof(AVStream));
st = av_mallocz(sizeof(AVStream));
c->fmt_ctx.streams[i] = st;
/* if file or feed, then just take streams from FFStream struct */
if (!c->stream->feed ||
c->stream->feed == c->stream)

@@ -2242,9 +2243,9 @@ static int http_prepare_data(HTTPContext *c)
else
src = c->stream->feed->streams[c->stream->feed_streams[i]];

*(c->fmt_ctx.streams[i]) = *src;
c->fmt_ctx.streams[i]->priv_data = 0;
c->fmt_ctx.streams[i]->codec->frame_number = 0; /* XXX: should be done in
*st = *src;
st->priv_data = 0;
st->codec->frame_number = 0; /* XXX: should be done in
AVStream, not in codec */
}
/* set output format parameters */

@@ -2942,9 +2943,11 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
snprintf(avc->filename, 1024, "rtp://0.0.0.0");
}

#if !FF_API_MAX_STREAMS
if (avc->nb_streams >= INT_MAX/sizeof(*avc->streams) ||
!(avc->streams = av_malloc(avc->nb_streams * sizeof(*avc->streams))))
goto sdp_done;
#endif
if (avc->nb_streams >= INT_MAX/sizeof(*avs) ||
!(avs = av_malloc(avc->nb_streams * sizeof(*avs))))
goto sdp_done;

@@ -2957,8 +2960,10 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
av_sdp_create(&avc, 1, *pbuffer, 2048);

sdp_done:
#if !FF_API_MAX_STREAMS
av_free(avc->streams);
av_dict_free(&avc->metadata);
#endif
av_metadata_free(&avc->metadata);
av_free(avc);
av_free(avs);

@@ -3386,9 +3391,6 @@ static int rtp_new_av_stream(HTTPContext *c,
if (!st)
goto fail;
ctx->nb_streams = 1;
ctx->streams = av_mallocz(sizeof(AVStream *) * ctx->nb_streams);
if (!ctx->streams)
goto fail;
ctx->streams[0] = st;

if (!c->stream->feed ||

@@ -3422,7 +3424,7 @@ static int rtp_new_av_stream(HTTPContext *c,
"rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port));
}

if (url_open(&h, ctx->filename, AVIO_FLAG_WRITE) < 0)
if (url_open(&h, ctx->filename, AVIO_WRONLY) < 0)
goto fail;
c->rtp_handles[stream_index] = h;
max_packet_size = url_get_max_packet_size(h);

@@ -3675,7 +3677,7 @@ static void build_feed_streams(void)
for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
int fd;

if (avio_check(feed->feed_filename, AVIO_FLAG_READ) > 0) {
if (url_exist(feed->feed_filename)) {
/* See if it matches */
AVFormatContext *s = NULL;
int matches = 0;

@@ -3748,7 +3750,7 @@ static void build_feed_streams(void)
unlink(feed->feed_filename);
}
}
if (avio_check(feed->feed_filename, AVIO_FLAG_WRITE) <= 0) {
if (!url_exist(feed->feed_filename)) {
AVFormatContext s1 = {0}, *s = &s1;

if (feed->readonly) {

@@ -3758,15 +3760,20 @@ static void build_feed_streams(void)
}

/* only write the header of the ffm file */
if (avio_open(&s->pb, feed->feed_filename, AVIO_FLAG_WRITE) < 0) {
if (avio_open(&s->pb, feed->feed_filename, AVIO_WRONLY) < 0) {
http_log("Could not open output feed file '%s'\n",
feed->feed_filename);
exit(1);
}
s->oformat = feed->fmt;
s->nb_streams = feed->nb_streams;
s->streams = feed->streams;
if (avformat_write_header(s, NULL) < 0) {
for(i=0;i<s->nb_streams;i++) {
AVStream *st;
st = feed->streams[i];
s->streams[i] = st;
}
av_set_parameters(s, NULL);
if (av_write_header(s) < 0) {
http_log("Container doesn't supports the required parameters\n");
exit(1);
}

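The http_prepare_data() hunk above builds a private stream table for the outgoing context by allocating one AVStream per source stream, shallow-copying it, and resetting a couple of fields. A distilled sketch of that copy pattern; the dst/src names are illustrative and error handling is omitted.

```c
#include <libavformat/avformat.h>

/* Sketch: give an output AVFormatContext its own AVStream copies of another
 * set of streams, as ffserver's http_prepare_data() does in the hunk above.
 * NULL checks are omitted for brevity. */
static void copy_stream_table(AVFormatContext *dst, AVStream **src_streams, int nb)
{
    int i;

    dst->streams    = av_mallocz(sizeof(AVStream *) * nb);
    dst->nb_streams = nb;

    for (i = 0; i < nb; i++) {
        AVStream *st = av_mallocz(sizeof(AVStream));
        *st = *src_streams[i];          /* shallow copy of the source stream  */
        st->priv_data = 0;              /* the copy owns no demuxer state     */
        st->codec->frame_number = 0;    /* restart per-stream frame counting  */
        dst->streams[i] = st;
    }
}
```
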
101
libavcodec/4xm.c
101
libavcodec/4xm.c
@@ -133,9 +133,7 @@ typedef struct FourXContext{
GetBitContext pre_gb; ///< ac/dc prefix
GetBitContext gb;
const uint8_t *bytestream;
const uint8_t *bytestream_end;
const uint16_t *wordstream;
const uint16_t *wordstream_end;
int mv[256];
VLC pre_vlc;
int last_dc;
@@ -279,7 +277,7 @@ static void init_mv(FourXContext *f){
}
#endif

static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stride, int scale, unsigned dc){
static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stride, int scale, int dc){
int i;
dc*= 0x10001;

@@ -330,10 +328,6 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
assert(code>=0 && code<=6);

if(code == 0){
if (f->bytestream_end - f->bytestream < 1){
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return;
}
src += f->mv[ *f->bytestream++ ];
if(start > src || src > end){
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
@@ -351,31 +345,15 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
}else if(code == 3 && f->version<2){
mcdc(dst, src, log2w, h, stride, 1, 0);
}else if(code == 4){
if (f->bytestream_end - f->bytestream < 1){
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return;
}
src += f->mv[ *f->bytestream++ ];
if(start > src || src > end){
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return;
}
if (f->wordstream_end - f->wordstream < 1){
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return;
}
mcdc(dst, src, log2w, h, stride, 1, av_le2ne16(*f->wordstream++));
}else if(code == 5){
if (f->wordstream_end - f->wordstream < 1){
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return;
}
mcdc(dst, src, log2w, h, stride, 0, av_le2ne16(*f->wordstream++));
}else if(code == 6){
if (f->wordstream_end - f->wordstream < 2){
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return;
}
if(log2w){
dst[0] = av_le2ne16(*f->wordstream++);
dst[1] = av_le2ne16(*f->wordstream++);
@@ -397,8 +375,6 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){

if(f->version>1){
extra=20;
if (length < extra)
return -1;
bitstream_size= AV_RL32(buf+8);
wordstream_size= AV_RL32(buf+12);
bytestream_size= AV_RL32(buf+16);
@@ -409,10 +385,11 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
bytestream_size= FFMAX(length - bitstream_size - wordstream_size, 0);
}

if (bitstream_size > length ||
bytestream_size > length - bitstream_size ||
wordstream_size > length - bytestream_size - bitstream_size ||
extra > length - bytestream_size - bitstream_size - wordstream_size){
if(bitstream_size+ bytestream_size+ wordstream_size + extra != length
|| bitstream_size > (1<<26)
|| bytestream_size > (1<<26)
|| wordstream_size > (1<<26)
){
av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size,
bitstream_size+ bytestream_size+ wordstream_size - length);
return -1;
@@ -422,13 +399,10 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
if (!f->bitstream_buffer)
return AVERROR(ENOMEM);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra), bitstream_size/4);
memset((uint8_t*)f->bitstream_buffer + bitstream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&f->gb, f->bitstream_buffer, 8*bitstream_size);

f->wordstream= (const uint16_t*)(buf + extra + bitstream_size);
f->wordstream_end= f->wordstream + wordstream_size/2;
f->bytestream= buf + extra + bitstream_size + wordstream_size;
f->bytestream_end = f->bytestream + bytestream_size;

init_mv(f);

@@ -557,7 +531,7 @@ static int decode_i_mb(FourXContext *f){
return 0;
}

static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const buf, int buf_size){
static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const buf){
int frequency[512];
uint8_t flag[512];
int up[512];
@@ -565,7 +539,6 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
int bits_tab[257];
int start, end;
const uint8_t *ptr= buf;
const uint8_t *ptr_end = buf + buf_size;
int j;

memset(frequency, 0, sizeof(frequency));
@@ -576,8 +549,6 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
for(;;){
int i;

if (start <= end && ptr_end - ptr < end - start + 1 + 1)
return NULL;
for(i=start; i<=end; i++){
frequency[i]= *ptr++;
}
@@ -630,10 +601,9 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
len_tab[j]= len;
}

if (init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257,
len_tab , 1, 1,
bits_tab, 4, 4, 0))
return NULL;
init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257,
len_tab , 1, 1,
bits_tab, 4, 4, 0);

return ptr;
}
@@ -651,13 +621,10 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
const int height= f->avctx->height;
uint16_t *dst= (uint16_t*)f->current_picture.data[0];
const int stride= f->current_picture.linesize[0]>>1;
const uint8_t *buf_end = buf + length;

for(y=0; y<height; y+=16){
for(x=0; x<width; x+=16){
unsigned int color[4], bits;
if (buf_end - buf < 8)
return -1;
memset(color, 0, sizeof(color));
//warning following is purely guessed ...
color[0]= bytestream_get_le16(&buf);
@@ -691,26 +658,18 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
uint16_t *dst= (uint16_t*)f->current_picture.data[0];
const int stride= f->current_picture.linesize[0]>>1;
const unsigned int bitstream_size= AV_RL32(buf);
unsigned int prestream_size;
const uint8_t *prestream;
const int token_count av_unused = AV_RL32(buf + bitstream_size + 8);
unsigned int prestream_size= 4*AV_RL32(buf + bitstream_size + 4);
const uint8_t *prestream= buf + bitstream_size + 12;

if (length < bitstream_size + 12) {
av_log(f->avctx, AV_LOG_ERROR, "packet size too small\n");
return AVERROR_INVALIDDATA;
}

prestream_size = 4 * AV_RL32(buf + bitstream_size + 4);
prestream = buf + bitstream_size + 12;

if (prestream_size > (1<<26) ||
prestream_size != length - (bitstream_size + 12)){
if(prestream_size + bitstream_size + 12 != length
|| bitstream_size > (1<<26)
|| prestream_size > (1<<26)){
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length);
return -1;
}

prestream= read_huffman_tables(f, prestream, buf + length - prestream);
if (!prestream)
return -1;
prestream= read_huffman_tables(f, prestream);

init_get_bits(&f->gb, buf + 4, 8*bitstream_size);

@@ -720,7 +679,6 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
if (!f->bitstream_buffer)
return AVERROR(ENOMEM);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream, prestream_size/4);
memset((uint8_t*)f->bitstream_buffer + prestream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&f->pre_gb, f->bitstream_buffer, 8*prestream_size);

f->last_dc= 0*128*8*8;
@@ -752,8 +710,6 @@ static int decode_frame(AVCodecContext *avctx,
AVFrame *p, temp;
int i, frame_4cc, frame_size;

if (buf_size < 12)
return AVERROR_INVALIDDATA;
frame_4cc= AV_RL32(buf);
if(buf_size != AV_RL32(buf+4)+8 || buf_size < 20){
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n", buf_size, AV_RL32(buf+4));
@@ -766,11 +722,6 @@ static int decode_frame(AVCodecContext *avctx,
const int whole_size= AV_RL32(buf+16);
CFrameBuffer *cfrm;

if (data_size < 0 || whole_size < 0){
av_log(f->avctx, AV_LOG_ERROR, "sizes invalid\n");
return AVERROR_INVALIDDATA;
}

for(i=0; i<CFRAME_BUFFER_COUNT; i++){
if(f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number)
av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n", f->cfrm[i].id);
@@ -787,8 +738,6 @@ static int decode_frame(AVCodecContext *avctx,
}
cfrm= &f->cfrm[i];

if (data_size > UINT_MAX - cfrm->size - FF_INPUT_BUFFER_PADDING_SIZE)
return AVERROR_INVALIDDATA;
cfrm->data= av_fast_realloc(cfrm->data, &cfrm->allocated_size, cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
if(!cfrm->data){ //explicit check needed as memcpy below might not catch a NULL
av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
@@ -832,16 +781,12 @@ static int decode_frame(AVCodecContext *avctx,

if(frame_4cc == AV_RL32("ifr2")){
p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i2_frame(f, buf-4, frame_size+4) < 0){
av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
if(decode_i2_frame(f, buf-4, frame_size) < 0)
return -1;
}
}else if(frame_4cc == AV_RL32("ifrm")){
p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i_frame(f, buf, frame_size) < 0){
av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n");
if(decode_i_frame(f, buf, frame_size) < 0)
return -1;
}
}else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){
if(!f->last_picture.data[0]){
f->last_picture.reference= 1;
@@ -852,10 +797,8 @@ static int decode_frame(AVCodecContext *avctx,
}

p->pict_type= AV_PICTURE_TYPE_P;
if(decode_p_frame(f, buf, frame_size) < 0){
av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
if(decode_p_frame(f, buf, frame_size) < 0)
return -1;
}
}else if(frame_4cc == AV_RL32("snd_")){
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", buf_size);
}else{
@@ -888,10 +831,6 @@ static av_cold int decode_init(AVCodecContext *avctx){
av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
return 1;
}
if((avctx->width % 16) || (avctx->height % 16)) {
av_log(avctx, AV_LOG_ERROR, "unsupported width/height\n");
return AVERROR_INVALIDDATA;
}

avcodec_get_frame_defaults(&f->current_picture);
avcodec_get_frame_defaults(&f->last_picture);
@@ -50,8 +50,6 @@ typedef struct EightBpsContext {

unsigned char planes;
unsigned char planemap[4];

uint32_t pal[256];
} EightBpsContext;

@@ -131,16 +129,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
}

if (avctx->bits_per_coded_sample <= 8) {
const uint8_t *pal = av_packet_get_side_data(avpkt,
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
if (avctx->palctrl) {
memcpy (c->pic.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
if (avctx->palctrl->palette_changed) {
c->pic.palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}

memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
avctx->palctrl->palette_changed = 0;
} else
c->pic.palette_has_changed = 0;
}

*data_size = sizeof(AVFrame);
@@ -170,6 +165,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
if (avctx->palctrl == NULL) {
av_log(avctx, AV_LOG_ERROR, "Error: PAL8 format but no palette from demuxer.\n");
return -1;
}
break;
case 24:
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
@@ -44,7 +44,7 @@ typedef struct EightSvxContext {
/* buffer used to store the whole audio decoded/interleaved chunk,
* which is sent with the first packet */
uint8_t *samples;
int64_t samples_size;
size_t samples_size;
int samples_idx;
} EightSvxContext;
@@ -15,6 +15,7 @@ OBJS = allcodecs.o \
fmtconvert.o \
imgconvert.o \
jrevdct.o \
opt.o \
options.o \
parser.o \
raw.o \
@@ -567,7 +568,6 @@ OBJS-$(CONFIG_WEBM_MUXER) += xiph.o mpeg4audio.o \
OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o

# external codec libraries
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
@@ -588,7 +588,6 @@ OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
libschroedinger.o \
libdirac_libschro.o
OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
@@ -72,7 +72,7 @@ static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,
int pce_size = 0;
uint8_t pce_data[MAX_PCE_SIZE];
if (!hdr.chan_config) {
init_get_bits(&gb, buf, buf_size * 8);
init_get_bits(&gb, buf, buf_size);
if (get_bits(&gb, 3) != 5) {
av_log_missing_feature(avctx, "PCE based channel configuration, where the PCE is not the first syntax element is", 0);
return -1;
@@ -754,20 +754,19 @@ static int decode_band_types(AACContext *ac, enum BandType band_type[120],
av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
return -1;
}
do {
sect_len_incr = get_bits(gb, bits);
while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1)
sect_end += sect_len_incr;
if (get_bits_left(gb) < 0) {
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
return -1;
}
if (sect_end > ics->max_sfb) {
av_log(ac->avctx, AV_LOG_ERROR,
"Number of bands (%d) exceeds limit (%d).\n",
sect_end, ics->max_sfb);
return -1;
}
} while (sect_len_incr == (1 << bits) - 1);
sect_end += sect_len_incr;
if (get_bits_left(gb) < 0) {
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
return -1;
}
if (sect_end > ics->max_sfb) {
av_log(ac->avctx, AV_LOG_ERROR,
"Number of bands (%d) exceeds limit (%d).\n",
sect_end, ics->max_sfb);
return -1;
}
for (; k < sect_end; k++) {
band_type [idx] = sect_band_type;
band_type_run_end[idx++] = sect_end;
@@ -1091,7 +1090,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
GET_VLC(code, re, gb, vlc_tab, 8, 2);
cb_idx = cb_vector_idx[code];
nnz = cb_idx >> 8 & 15;
bits = nnz ? GET_CACHE(re, gb) : 0;
bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
LAST_SKIP_BITS(re, gb, nnz);
cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
} while (len -= 4);
@@ -1131,7 +1130,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
GET_VLC(code, re, gb, vlc_tab, 8, 2);
cb_idx = cb_vector_idx[code];
nnz = cb_idx >> 8 & 15;
sign = nnz ? SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12) : 0;
sign = SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12);
LAST_SKIP_BITS(re, gb, nnz);
cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
} while (len -= 2);
@@ -1756,10 +1755,12 @@ static void windowing_and_mdct_ltp(AACContext *ac, float *out,
} else {
memset(in, 0, 448 * sizeof(float));
ac->dsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
memcpy(in + 576, in + 576, 448 * sizeof(float));
}
if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
} else {
memcpy(in + 1024, in + 1024, 448 * sizeof(float));
ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
memset(in + 1024 + 576, 0, 448 * sizeof(float));
}
@@ -2077,7 +2078,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
ChannelElement *che = NULL, *che_prev = NULL;
enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
int err, elem_id, data_size_tmp;
int samples = 0, multiplier, audio_found = 0;
int samples = 0, multiplier;

if (show_bits(gb, 12) == 0xfff) {
if (parse_adts_frame_header(ac, gb) < 0) {
@@ -2108,12 +2109,10 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,

case TYPE_SCE:
err = decode_ics(ac, &che->ch[0], gb, 0, 0);
audio_found = 1;
break;

case TYPE_CPE:
err = decode_cpe(ac, gb, che);
audio_found = 1;
break;

case TYPE_CCE:
@@ -2122,7 +2121,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,

case TYPE_LFE:
err = decode_ics(ac, &che->ch[0], gb, 0, 0);
audio_found = 1;
break;

case TYPE_DSE:
@@ -2199,7 +2197,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
samples, avctx->channels);
}

if (ac->output_configured && audio_found)
if (ac->output_configured)
ac->output_configured = OC_LOCKED;

return 0;
@@ -813,17 +813,14 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
const float (*H_LUT)[8][4] = (PS_BASELINE || ps->icc_mode < 3) ? HA : HB;

//Remapping
if (ps->num_env_old) {
memcpy(H11[0][0], H11[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[0][0][0]));
memcpy(H11[1][0], H11[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[1][0][0]));
memcpy(H12[0][0], H12[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[0][0][0]));
memcpy(H12[1][0], H12[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[1][0][0]));
memcpy(H21[0][0], H21[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[0][0][0]));
memcpy(H21[1][0], H21[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[1][0][0]));
memcpy(H22[0][0], H22[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[0][0][0]));
memcpy(H22[1][0], H22[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[1][0][0]));
}

memcpy(H11[0][0], H11[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[0][0][0]));
memcpy(H11[1][0], H11[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[1][0][0]));
memcpy(H12[0][0], H12[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[0][0][0]));
memcpy(H12[1][0], H12[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[1][0][0]));
memcpy(H21[0][0], H21[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[0][0][0]));
memcpy(H21[1][0], H21[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[1][0][0]));
memcpy(H22[0][0], H22[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[0][0][0]));
memcpy(H22[1][0], H22[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[1][0][0]));
if (is34) {
remap34(&iid_mapped, ps->iid_par, ps->nr_iid_par, ps->num_env, 1);
remap34(&icc_mapped, ps->icc_par, ps->nr_icc_par, ps->num_env, 1);
@@ -33,7 +33,6 @@
#include "fft.h"
#include "aacps.h"
#include "libavutil/libm.h"
#include "libavutil/avassert.h"

#include <stdint.h>
#include <float.h>
@@ -1183,15 +1182,14 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
{
int i, n;
const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
const int step = 128 >> div;
float *v;
for (i = 0; i < 32; i++) {
if (*v_off < step) {
if (*v_off == 0) {
int saved_samples = (1280 - 128) >> div;
memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - (128 >> div);
} else {
*v_off -= step;
*v_off -= 128 >> div;
}
v = v0 + *v_off;
if (div) {
@@ -1459,7 +1457,6 @@ static void sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
uint16_t *table = ch_data->bs_freq_res[e + 1] ? sbr->f_tablehigh : sbr->f_tablelow;
int k;

av_assert0(sbr->kx[1] <= table[0]);
for (i = 0; i < ilim; i++)
for (m = table[i]; m < table[i + 1]; m++)
sbr->e_origmapped[e][m - sbr->kx[1]] = ch_data->env_facs[e+1][i];
@@ -108,7 +108,7 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
int snr_offset, int floor,
const uint8_t *bap_tab, uint8_t *bap)
{
int bin, band, band_end;
int bin, band;

/* special case, if snr offset is -960, set all bap's to zero */
if (snr_offset == -960) {
@@ -120,14 +120,12 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
band = ff_ac3_bin_to_band_tab[start];
do {
int m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
band_end = ff_ac3_band_start_tab[++band];
band_end = FFMIN(band_end, end);

int band_end = FFMIN(ff_ac3_band_start_tab[band+1], end);
for (; bin < band_end; bin++) {
int address = av_clip((psd[bin] - m) >> 5, 0, 63);
bap[bin] = bap_tab[address];
}
} while (end > band_end);
} while (end > ff_ac3_band_start_tab[band++]);
}

static void ac3_update_bap_counts_c(uint16_t mant_cnt[16], uint8_t *bap,
@@ -778,13 +778,9 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
ADPCMContext *c = avctx->priv_data;
unsigned int min_channels = 1;
unsigned int max_channels = 2;

switch(avctx->codec->id) {
case CODEC_ID_ADPCM_EA:
min_channels = 2;
break;
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3:
@@ -792,10 +788,8 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
max_channels = 6;
break;
}

if (avctx->channels < min_channels || avctx->channels > max_channels) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
return AVERROR(EINVAL);
if(avctx->channels > max_channels){
return -1;
}

switch(avctx->codec->id) {
@@ -1339,11 +1333,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
buf_size -= 128;
}
break;
case CODEC_ID_ADPCM_IMA_EA_EACS: {
unsigned header_size = 4 + (8<<st);
case CODEC_ID_ADPCM_IMA_EA_EACS:
samples_in_chunk = bytestream_get_le32(&src) >> (1-st);

if (buf_size < header_size || samples_in_chunk > buf_size - header_size) {
if (samples_in_chunk > buf_size-4-(8<<st)) {
src += buf_size - 4;
break;
}
@@ -1358,7 +1351,6 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
*samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
}
break;
}
case CODEC_ID_ADPCM_IMA_EA_SEAD:
for (; src < buf+buf_size; src++) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
@@ -1366,17 +1358,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
}
break;
case CODEC_ID_ADPCM_EA:
/* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
each coding 28 stereo samples. */
if (buf_size < 12) {
av_log(avctx, AV_LOG_ERROR, "frame too small\n");
return AVERROR(EINVAL);
if (buf_size < 12 || AV_RL32(src) > (buf_size - 12)/30*28) {
src += buf_size;
break;
}
samples_in_chunk = AV_RL32(src);
if (samples_in_chunk / 28 > (buf_size - 12) / 30) {
av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
return AVERROR(EINVAL);
}
src += 4;
current_left_sample = (int16_t)bytestream_get_le16(&src);
previous_left_sample = (int16_t)bytestream_get_le16(&src);
@@ -370,7 +370,6 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (XSUB, xsub);

/* external libraries */
REGISTER_ENCODER (LIBAACPLUS, libaacplus);
REGISTER_DECODER (LIBCELT, libcelt);
REGISTER_ENCDEC (LIBDIRAC, libdirac);
REGISTER_ENCODER (LIBFAAC, libfaac);
@@ -381,7 +380,7 @@ void avcodec_register_all(void)
REGISTER_DECODER (LIBOPENCORE_AMRWB, libopencore_amrwb);
REGISTER_DECODER (LIBOPENJPEG, libopenjpeg);
REGISTER_ENCDEC (LIBSCHROEDINGER, libschroedinger);
REGISTER_ENCDEC (LIBSPEEX, libspeex);
REGISTER_DECODER (LIBSPEEX, libspeex);
REGISTER_ENCODER (LIBTHEORA, libtheora);
REGISTER_ENCODER (LIBVO_AACENC, libvo_aacenc);
REGISTER_ENCODER (LIBVO_AMRWBENC, libvo_amrwbenc);
@@ -1010,7 +1010,7 @@ static void zero_remaining(unsigned int b, unsigned int b_max,
{
unsigned int count = 0;

for (; b < b_max; b++)
while (b < b_max)
count += div_blocks[b];

if (count)
@@ -79,8 +79,6 @@ static inline int op(uint8_t **dst, const uint8_t *dst_end,
int striplen = FFMIN(count, remaining);
if (buf) {
striplen = FFMIN(striplen, buf_end - *buf);
if (*buf >= buf_end)
goto exhausted;
memcpy(*dst, *buf, striplen);
*buf += striplen;
} else if (pixel >= 0)
@@ -163,18 +163,6 @@ typedef struct APEContext {

// TODO: dsputilize

static av_cold int ape_decode_close(AVCodecContext * avctx)
{
APEContext *s = avctx->priv_data;
int i;

for (i = 0; i < APE_FILTER_LEVELS; i++)
av_freep(&s->filterbuf[i]);

av_freep(&s->data);
return 0;
}

static av_cold int ape_decode_init(AVCodecContext * avctx)
{
APEContext *s = avctx->priv_data;
@@ -207,18 +195,25 @@ static av_cold int ape_decode_init(AVCodecContext * avctx)
for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[s->fset][i])
break;
FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
(ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
filter_alloc_fail);
s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4);
}

dsputil_init(&s->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
return 0;
filter_alloc_fail:
ape_decode_close(avctx);
return AVERROR(ENOMEM);
}

static av_cold int ape_decode_close(AVCodecContext * avctx)
{
APEContext *s = avctx->priv_data;
int i;

for (i = 0; i < APE_FILTER_LEVELS; i++)
av_freep(&s->filterbuf[i]);

av_freep(&s->data);
return 0;
}

/**
@@ -802,7 +797,7 @@ static int ape_decode_frame(AVCodecContext * avctx,
int buf_size = avpkt->size;
APEContext *s = avctx->priv_data;
int16_t *samples = data;
uint32_t nblocks;
int nblocks;
int i, n;
int blockstodecode;
int bytes_used;
@@ -819,15 +814,12 @@ static int ape_decode_frame(AVCodecContext * avctx,
}

if(!s->samples){
void *tmp_data = av_realloc(s->data, (buf_size + 3) & ~3);
if (!tmp_data)
return AVERROR(ENOMEM);
s->data = tmp_data;
s->data = av_realloc(s->data, (buf_size + 3) & ~3);
s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2);
s->ptr = s->last_ptr = s->data;
s->data_end = s->data + buf_size;

nblocks = bytestream_get_be32(&s->ptr);
nblocks = s->samples = bytestream_get_be32(&s->ptr);
n = bytestream_get_be32(&s->ptr);
if(n < 0 || n > 3){
av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
@@ -836,13 +828,12 @@ static int ape_decode_frame(AVCodecContext * avctx,
}
s->ptr += n;

s->currentframeblocks = nblocks;
buf += 4;
if (!nblocks || nblocks > INT_MAX) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
if (s->samples <= 0) {
*data_size = 0;
return AVERROR_INVALIDDATA;
return buf_size;
}
s->currentframeblocks = s->samples = nblocks;

memset(s->decoded0, 0, sizeof(s->decoded0));
memset(s->decoded1, 0, sizeof(s->decoded1));
@@ -56,7 +56,7 @@
vhsub.s16 \r0, \d0, \d1 @ t3, t4, t8, t7
vhsub.s16 \r1, \d1, \d0
vhadd.s16 \d0, \d0, \d1 @ t1, t2, t6, t5
vmov.i64 \d1, #0xffff00000000
vmov.i64 \d1, #0xffff<<32
vbit \r0, \r1, \d1
vrev64.16 \r1, \r0 @ t7, t8, t4, t3
vtrn.32 \r0, \r1 @ t3, t4, t7, t8
@@ -276,7 +276,7 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AT1Ctx *q = avctx->priv_data;
int ch, ret, i, out_size;
int ch, ret, i;
GetBitContext gb;
float* samples = data;

@@ -286,13 +286,6 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
return -1;
}

out_size = q->channels * AT1_SU_SAMPLES *
av_get_bytes_per_sample(avctx->sample_fmt);
if (*data_size < out_size) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
}

for (ch = 0; ch < q->channels; ch++) {
AT1SUCtx* su = &q->SUs[ch];

@@ -325,7 +318,7 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
}
}

*data_size = out_size;
*data_size = q->channels * AT1_SU_SAMPLES * sizeof(*samples);
return avctx->block_align;
}

@@ -336,11 +329,6 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)

avctx->sample_fmt = AV_SAMPLE_FMT_FLT;

if (avctx->channels < 1 || avctx->channels > AT1_MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n",
avctx->channels);
return AVERROR(EINVAL);
}
q->channels = avctx->channels;

/* Init the mdct transforms */
@@ -395,8 +395,6 @@ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent

for (k=0; k<coded_components; k++) {
sfIndx = get_bits(gb,6);
if (component_count >= 64)
return AVERROR_INVALIDDATA;
pComponent[component_count].pos = j * 64 + (get_bits(gb,6));
max_coded_values = 1024 - pComponent[component_count].pos;
coded_values = coded_values_per_component + 1;
@@ -30,10 +30,15 @@
#include "libavutil/samplefmt.h"
#include "libavutil/avutil.h"
#include "libavutil/cpu.h"
#include "libavutil/dict.h"

#include "libavcodec/version.h"

#if LIBAVCODEC_VERSION_MAJOR < 53
# define FF_INTERNALC_MEM_TYPE unsigned int
#else
# define FF_INTERNALC_MEM_TYPE size_t
#endif

/**
* Identify the syntax and semantics of the bitstream.
* The principle is roughly:
@@ -112,6 +117,9 @@ enum CodecID {
CODEC_ID_QDRAW,
CODEC_ID_VIXL,
CODEC_ID_QPEG,
#if LIBAVCODEC_VERSION_MAJOR < 53
CODEC_ID_XVID,
#endif
CODEC_ID_PNG,
CODEC_ID_PPM,
CODEC_ID_PBM,
@@ -360,6 +368,18 @@ enum CodecID {
CODEC_ID_FFMETADATA=0x21000, ///< Dummy codec for streams containing only metadata information.
};

#if LIBAVCODEC_VERSION_MAJOR < 53
#define CodecType AVMediaType

#define CODEC_TYPE_UNKNOWN AVMEDIA_TYPE_UNKNOWN
#define CODEC_TYPE_VIDEO AVMEDIA_TYPE_VIDEO
#define CODEC_TYPE_AUDIO AVMEDIA_TYPE_AUDIO
#define CODEC_TYPE_DATA AVMEDIA_TYPE_DATA
#define CODEC_TYPE_SUBTITLE AVMEDIA_TYPE_SUBTITLE
#define CODEC_TYPE_ATTACHMENT AVMEDIA_TYPE_ATTACHMENT
#define CODEC_TYPE_NB AVMEDIA_TYPE_NB
#endif

#if FF_API_OLD_SAMPLE_FMT
#define SampleFormat AVSampleFormat

@@ -523,7 +543,7 @@ enum AVChromaLocation{
/**
* LPC analysis type
*/
enum AVLPCType {
attribute_deprecated enum AVLPCType {
AV_LPC_TYPE_DEFAULT = -1, ///< use the codec default LPC type
AV_LPC_TYPE_NONE = 0, ///< do not use LPC prediction or use all zero coefficients
AV_LPC_TYPE_FIXED = 1, ///< fixed LPC coefficients
@@ -1071,10 +1091,6 @@ typedef struct AVPanScan{
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).

enum AVPacketSideDataType {
AV_PKT_DATA_PALETTE,
};

typedef struct AVPacket {
/**
* Presentation timestamp in AVStream->time_base units; the time at which
@@ -1096,17 +1112,6 @@ typedef struct AVPacket {
int size;
int stream_index;
int flags;
/**
* Additional packet data that can be provided by the container.
* Packet can contain several types of side information.
*/
struct {
uint8_t *data;
int size;
enum AVPacketSideDataType type;
} *side_data;
int side_data_elems;

/**
* Duration of this packet in AVStream->time_base units, 0 if unknown.
* Equals next_pts - this_pts in presentation order.
@@ -1136,6 +1141,9 @@ typedef struct AVPacket {
int64_t convergence_duration;
} AVPacket;
#define AV_PKT_FLAG_KEY 0x0001
#if LIBAVCODEC_VERSION_MAJOR < 53
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
#endif

/**
* Audio Video Frame.
@@ -1256,6 +1264,16 @@ typedef struct AVCodecContext {
*/
enum PixelFormat pix_fmt;

#if FF_API_RATE_EMU
/**
* Frame rate emulation. If not zero, the lower layer (i.e. format handler)
* has to read frames at native frame rate.
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int rate_emu;
#endif

/**
* If non NULL, 'draw_horiz_band' is called by the libavcodec
* decoder to draw a horizontal band. It improves cache usage. Not
@@ -1300,6 +1318,9 @@ typedef struct AVCodecContext {
*/
int frame_size;
int frame_number; ///< audio or video frame number
#if LIBAVCODEC_VERSION_MAJOR < 53
int real_pict_num; ///< Returns the real picture number of previous encoded frame.
#endif

/**
* Number of frames the decoded output will be delayed relative to
@@ -1357,6 +1378,16 @@ typedef struct AVCodecContext {

int b_frame_strategy;

#if FF_API_HURRY_UP
/**
* hurry up amount
* - encoding: unused
* - decoding: Set by user. 1-> Skip B-frames, 2-> Skip IDCT/dequant too, 5-> Skip everything except header
* @deprecated Deprecated in favor of skip_idct and skip_frame.
*/
attribute_deprecated int hurry_up;
#endif

struct AVCodec *codec;

void *priv_data;
@@ -1474,6 +1505,9 @@ typedef struct AVCodecContext {
#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
#define FF_COMPLIANCE_NORMAL 0
#if FF_API_INOFFICIAL
#define FF_COMPLIANCE_INOFFICIAL -1 ///< Allow inofficial extensions (deprecated - use FF_COMPLIANCE_UNOFFICIAL instead).
#endif
#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.

@@ -1747,6 +1781,25 @@ typedef struct AVCodecContext {
*/
unsigned dsp_mask;

#if FF_API_MM_FLAGS
#define FF_MM_FORCE AV_CPU_FLAG_FORCE
#define FF_MM_MMX AV_CPU_FLAG_MMX
#define FF_MM_3DNOW AV_CPU_FLAG_3DNOW
#define FF_MM_MMXEXT AV_CPU_FLAG_MMX2
#define FF_MM_MMX2 AV_CPU_FLAG_MMX2
#define FF_MM_SSE AV_CPU_FLAG_SSE
#define FF_MM_SSE2 AV_CPU_FLAG_SSE2
#define FF_MM_SSE2SLOW AV_CPU_FLAG_SSE2SLOW
#define FF_MM_3DNOWEXT AV_CPU_FLAG_3DNOWEXT
#define FF_MM_SSE3 AV_CPU_FLAG_SSE3
#define FF_MM_SSE3SLOW AV_CPU_FLAG_SSE3SLOW
#define FF_MM_SSSE3 AV_CPU_FLAG_SSSE3
#define FF_MM_SSE4 AV_CPU_FLAG_SSE4
#define FF_MM_SSE42 AV_CPU_FLAG_SSE42
#define FF_MM_IWMMXT AV_CPU_FLAG_IWMMXT
#define FF_MM_ALTIVEC AV_CPU_FLAG_ALTIVEC
#endif

/**
* bits per sample/pixel from the demuxer (needed for huffyuv).
* - encoding: Set by libavcodec.
@@ -1821,6 +1874,22 @@ typedef struct AVCodecContext {
*/
uint64_t error[4];

#if FF_API_MB_Q
/**
* minimum MB quantizer
* - encoding: unused
* - decoding: unused
*/
attribute_deprecated int mb_qmin;

/**
* maximum MB quantizer
* - encoding: unused
* - decoding: unused
*/
attribute_deprecated int mb_qmax;
#endif

/**
* motion estimation comparison function
* - encoding: Set by user.
@@ -2522,6 +2591,23 @@ typedef struct AVCodecContext {
int compression_level;
#define FF_COMPRESSION_DEFAULT -1

#if FF_API_USE_LPC
/**
* Sets whether to use LPC mode - used by FLAC encoder.
* - encoding: Set by user.
* - decoding: unused
* @deprecated Deprecated in favor of lpc_type and lpc_passes.
*/
int use_lpc;

/**
* LPC coefficient precision - used by FLAC encoder
* - encoding: Set by user.
* - decoding: unused
*/
int lpc_coeff_precision;
#endif

/**
* - encoding: Set by user.
* - decoding: unused
@@ -2541,13 +2627,6 @@ typedef struct AVCodecContext {
* @{
*/

/**
* LPC coefficient precision - used by FLAC encoder
* - encoding: Set by user.
* - decoding: unused
*/
attribute_deprecated int lpc_coeff_precision;

/**
* search method for selecting prediction order
* - encoding: Set by user.
@@ -2607,6 +2686,7 @@ typedef struct AVCodecContext {

/**
* Bits per sample/pixel of internal libavcodec pixel/sample format.
* This field is applicable only when sample_fmt is AV_SAMPLE_FMT_S32.
* - encoding: set by user.
* - decoding: set by libavcodec.
*/
@@ -3211,33 +3291,6 @@ int av_dup_packet(AVPacket *pkt);
*/
void av_free_packet(AVPacket *pkt);

/**
* Allocate new information of a packet.
*
* @param pkt packet
* @param type side information type
* @param size side information size
* @return pointer to fresh allocated data or NULL otherwise
*/
uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
int size);

/**
* Get side information from packet.
*
* @param pkt packet
* @param type desired side information type
* @param size pointer for side information size to store (optional)
* @return pointer to data if present or NULL otherwise
*/
uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
int *size);

int av_packet_merge_side_data(AVPacket *pkt);

int av_packet_split_side_data(AVPacket *pkt);

/* resample.c */

struct ReSampleContext;
@@ -3245,6 +3298,14 @@ struct AVResampleContext;

typedef struct ReSampleContext ReSampleContext;

#if FF_API_AUDIO_OLD
/**
* @deprecated Use av_audio_resample_init() instead.
*/
attribute_deprecated ReSampleContext *audio_resample_init(int output_channels, int input_channels,
int output_rate, int input_rate);
#endif

/**
* Initialize audio resampling context.
*
@@ -3408,6 +3469,23 @@ const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt);

void avcodec_set_dimensions(AVCodecContext *s, int width, int height);

#if LIBAVCODEC_VERSION_MAJOR < 53
/**
* Return the pixel format corresponding to the name name.
*
* If there is no pixel format with name name, then look for a
* pixel format with the name corresponding to the native endian
* format of name.
* For example in a little-endian system, first look for "gray16",
* then for "gray16le".
*
* Finally if no pixel format has been found, return PIX_FMT_NONE.
*
* @deprecated Deprecated in favor of av_get_pix_fmt().
*/
attribute_deprecated enum PixelFormat avcodec_get_pix_fmt(const char* name);
#endif

/**
* Return a value representing the fourCC code associated to the
* pixel format pix_fmt, or 0 if no associated fourCC code can be
@@ -3476,6 +3554,14 @@ int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_
enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt,
int has_alpha, int *loss_ptr);

#if LIBAVCODEC_VERSION_MAJOR < 53
/**
* @deprecated Use av_get_pix_fmt_string() instead.
*/
attribute_deprecated
void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt);
#endif

#define FF_ALPHA_TRANSP 0x0001 /* image has some totally transparent pixels */
#define FF_ALPHA_SEMI_TRANSP 0x0002 /* image has some transparent pixels */

@@ -3526,6 +3612,13 @@ const char *avcodec_license(void);
*/
void avcodec_init(void);

#if LIBAVCODEC_VERSION_MAJOR < 53
/**
* @deprecated Deprecated in favor of avcodec_register().
*/
attribute_deprecated void register_avcodec(AVCodec *codec);
#endif

/**
* Register the codec codec and initialize libavcodec.
*
@@ -3670,6 +3763,14 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
int linesize_align[4]);

#if LIBAVCODEC_VERSION_MAJOR < 53
/**
* @deprecated Deprecated in favor of av_check_image_size().
*/
attribute_deprecated
int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h);
#endif

enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);

#if FF_API_THREAD_INIT
@@ -3678,13 +3779,12 @@ enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum
*/
attribute_deprecated
int avcodec_thread_init(AVCodecContext *s, int thread_count);
void avcodec_thread_free(AVCodecContext *s);
#endif

int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
//FIXME func typedef

#if FF_API_AVCODEC_OPEN
/**
* Initialize the AVCodecContext to use the given AVCodec. Prior to using this
* function the context has to be allocated.
@@ -3711,44 +3811,27 @@ int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2,
* @param codec The codec to use within the context.
* @return zero on success, a negative value on error
* @see avcodec_alloc_context, avcodec_find_decoder, avcodec_find_encoder, avcodec_close
*
* @deprecated use avcodec_open2
*/
int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
#endif

#if FF_API_AUDIO_OLD
/**
* Initialize the AVCodecContext to use the given AVCodec. Prior to using this
* function the context has to be allocated with avcodec_alloc_context().
* Decode an audio frame from buf into samples.
* Wrapper function which calls avcodec_decode_audio3.
*
* The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
* avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
* retrieving a codec.
*
* @warning This function is not thread safe!
*
* @code
* avcodec_register_all();
* av_dict_set(&opts, "b", "2.5M", 0);
* codec = avcodec_find_decoder(CODEC_ID_H264);
* if (!codec)
* exit(1);
*
* context = avcodec_alloc_context();
*
* if (avcodec_open(context, codec, opts) < 0)
* exit(1);
* @endcode
*
* @param avctx The context to initialize.
* @param options A dictionary filled with AVCodecContext and codec-private options.
* On return this object will be filled with options that were not found.
*
* @return zero on success, a negative value on error
* @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
* av_dict_set(), av_opt_find().
* @deprecated Use avcodec_decode_audio3 instead.
* @param avctx the codec context
* @param[out] samples the output buffer
* @param[in,out] frame_size_ptr the output buffer size in bytes
* @param[in] buf the input buffer
* @param[in] buf_size the input buffer size in bytes
* @return On error a negative value is returned, otherwise the number of bytes
* used or zero if no frame could be decompressed.
*/
int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
attribute_deprecated int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
const uint8_t *buf, int buf_size);
#endif

/**
* Decode the audio frame of size avpkt->size from avpkt->data into samples.
@@ -3793,6 +3876,25 @@ int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
AVPacket *avpkt);

#if FF_API_VIDEO_OLD
/**
* Decode a video frame from buf into picture.
* Wrapper function which calls avcodec_decode_video2.
*
* @deprecated Use avcodec_decode_video2 instead.
* @param avctx the codec context
* @param[out] picture The AVFrame in which the decoded video frame will be stored.
* @param[in] buf the input buffer
* @param[in] buf_size the size of the input buffer in bytes
* @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
* @return On error a negative value is returned, otherwise the number of bytes
* used or zero if no frame could be decompressed.
*/
attribute_deprecated int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
const uint8_t *buf, int buf_size);
#endif

/**
* Decode the video frame of size avpkt->size from avpkt->data into picture.
* Some decoders may support multiple frames in a single AVPacket, such
@@ -3837,6 +3939,15 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
AVPacket *avpkt);

#if FF_API_SUBTITLE_OLD
/* Decode a subtitle message. Return -1 if error, otherwise return the
* number of bytes used. If no subtitle could be decompressed,
* got_sub_ptr is zero. Otherwise, the subtitle is stored in *sub. */
attribute_deprecated int avcodec_decode_subtitle(AVCodecContext *avctx, AVSubtitle *sub,
int *got_sub_ptr,
const uint8_t *buf, int buf_size);
#endif

/**
* Decode a subtitle message.
* Return a negative value on error, otherwise return the number of bytes used.
@@ -4105,6 +4216,15 @@ AVCodecParser *av_parser_next(AVCodecParser *c);
void av_register_codec_parser(AVCodecParser *parser);
AVCodecParserContext *av_parser_init(int codec_id);

#if LIBAVCODEC_VERSION_MAJOR < 53
attribute_deprecated
int av_parser_parse(AVCodecParserContext *s,
AVCodecContext *avctx,
uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size,
int64_t pts, int64_t dts);
#endif

/**
* Parse a packet.
*
@@ -4183,7 +4303,7 @@ AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f);
*
* @see av_realloc
*/
void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size);

/**
* Allocate a buffer, reusing the given one if large enough.
@@ -4197,7 +4317,17 @@ void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
* @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
* *size 0 if an error occurred.
*/
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
void av_fast_malloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size);

#if LIBAVCODEC_VERSION_MAJOR < 53
/**
* @deprecated Deprecated in favor of av_image_copy().
*/
attribute_deprecated
void av_picture_data_copy(uint8_t *dst_data[4], int dst_linesize[4],
uint8_t *src_data[4], int src_linesize[4],
enum PixelFormat pix_fmt, int width, int height);
#endif

/**
* Copy image src to dst. Wraps av_picture_data_copy() above.
@@ -4226,6 +4356,22 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
*/
unsigned int av_xiphlacing(unsigned char *s, unsigned int v);

#if LIBAVCODEC_VERSION_MAJOR < 53
/**
* Parse str and put in width_ptr and height_ptr the detected values.
*
* @deprecated Deprecated in favor of av_parse_video_size().
*/
attribute_deprecated int av_parse_video_frame_size(int *width_ptr, int *height_ptr, const char *str);

/**
* Parse str and store the detected values in *frame_rate.
*
* @deprecated Deprecated in favor of av_parse_video_rate().
*/
attribute_deprecated int av_parse_video_frame_rate(AVRational *frame_rate, const char *str);
#endif

/**
* Logs a generic warning message about a missing feature. This function is
* intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
@@ -26,21 +26,12 @@
void av_destruct_packet_nofree(AVPacket *pkt)
{
pkt->data = NULL; pkt->size = 0;
pkt->side_data = NULL;
pkt->side_data_elems = 0;
}

void av_destruct_packet(AVPacket *pkt)
{
int i;

av_free(pkt->data);
pkt->data = NULL; pkt->size = 0;

for (i = 0; i < pkt->side_data_elems; i++)
av_free(pkt->side_data[i].data);
av_freep(&pkt->side_data);
pkt->side_data_elems = 0;
}

void av_init_packet(AVPacket *pkt)
@@ -53,8 +44,6 @@ void av_init_packet(AVPacket *pkt)
pkt->flags = 0;
pkt->stream_index = 0;
pkt->destruct= NULL;
pkt->side_data = NULL;
pkt->side_data_elems = 0;
}

int av_new_packet(AVPacket *pkt, int size)
@@ -100,52 +89,23 @@ int av_grow_packet(AVPacket *pkt, int grow_by)
return 0;
}

#define DUP_DATA(dst, src, size, padding) \
do { \
void *data; \
if (padding) { \
if ((unsigned)(size) > (unsigned)(size) + FF_INPUT_BUFFER_PADDING_SIZE) \
goto failed_alloc; \
data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); \
} else { \
data = av_malloc(size); \
} \
if (!data) \
goto failed_alloc; \
memcpy(data, src, size); \
if (padding) \
memset((uint8_t*)data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE); \
dst = data; \
} while(0)

int av_dup_packet(AVPacket *pkt)
{
AVPacket tmp_pkt;

if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
tmp_pkt = *pkt;

pkt->data = NULL;
pkt->side_data = NULL;
DUP_DATA(pkt->data, tmp_pkt.data, pkt->size, 1);
pkt->destruct = av_destruct_packet;

if (pkt->side_data_elems) {
int i;

DUP_DATA(pkt->side_data, tmp_pkt.side_data,
pkt->side_data_elems * sizeof(*pkt->side_data), 0);
memset(pkt->side_data, 0, pkt->side_data_elems * sizeof(*pkt->side_data));
for (i = 0; i < pkt->side_data_elems; i++) {
DUP_DATA(pkt->side_data[i].data, tmp_pkt.side_data[i].data,
pkt->side_data[i].size, 1);
}
uint8_t *data;
/* We duplicate the packet and don't forget to add the padding again. */
if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
return AVERROR(ENOMEM);
data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!data) {
return AVERROR(ENOMEM);
}
memcpy(data, pkt->data, pkt->size);
memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
pkt->data = data;
pkt->destruct = av_destruct_packet;
}
return 0;
failed_alloc:
av_destruct_packet(pkt);
return AVERROR(ENOMEM);
}

void av_free_packet(AVPacket *pkt)
@@ -153,125 +113,6 @@ void av_free_packet(AVPacket *pkt)
if (pkt) {
if (pkt->destruct) pkt->destruct(pkt);
pkt->data = NULL; pkt->size = 0;
pkt->side_data = NULL;
pkt->side_data_elems = 0;
}
}

uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
int size)
{
int elems = pkt->side_data_elems;

if ((unsigned)elems + 1 > INT_MAX / sizeof(*pkt->side_data))
return NULL;
if ((unsigned)size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
return NULL;

pkt->side_data = av_realloc(pkt->side_data, (elems + 1) * sizeof(*pkt->side_data));
if (!pkt->side_data)
return NULL;

pkt->side_data[elems].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!pkt->side_data[elems].data)
return NULL;
pkt->side_data[elems].size = size;
pkt->side_data[elems].type = type;
pkt->side_data_elems++;

return pkt->side_data[elems].data;
}

uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
int *size)
{
int i;

for (i = 0; i < pkt->side_data_elems; i++) {
if (pkt->side_data[i].type == type) {
if (size)
*size = pkt->side_data[i].size;
return pkt->side_data[i].data;
}
}
return NULL;
}

#define FF_MERGE_MARKER 0x8c4d9d108e25e9feULL

int av_packet_merge_side_data(AVPacket *pkt){
if(pkt->side_data_elems){
int i;
uint8_t *p;
uint64_t size= pkt->size + 8LL + FF_INPUT_BUFFER_PADDING_SIZE;
AVPacket old= *pkt;
for (i=0; i<old.side_data_elems; i++) {
size += old.side_data[i].size + 5LL;
}
if (size > INT_MAX)
return AVERROR(EINVAL);
p = av_malloc(size);
if (!p)
return AVERROR(ENOMEM);
pkt->data = p;
pkt->destruct = av_destruct_packet;
pkt->size = size - FF_INPUT_BUFFER_PADDING_SIZE;
bytestream_put_buffer(&p, old.data, old.size);
for (i=old.side_data_elems-1; i>=0; i--) {
bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
bytestream_put_be32(&p, old.side_data[i].size);
*p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
}
bytestream_put_be64(&p, FF_MERGE_MARKER);
av_assert0(p-pkt->data == pkt->size);
memset(p, 0, FF_INPUT_BUFFER_PADDING_SIZE);
av_free_packet(&old);
pkt->side_data_elems = 0;
pkt->side_data = NULL;
return 1;
}
return 0;
}

int av_packet_split_side_data(AVPacket *pkt){
if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
int i;
unsigned int size;
uint8_t *p= pkt->data + pkt->size - 8 - 5;

av_dup_packet(pkt);

for (i=1; ; i++){
size = AV_RB32(p);
if (size>INT_MAX || p - pkt->data <= size)
return 0;
if (p[4]&128)
break;
p-= size+5;
}

pkt->side_data = av_malloc(i * sizeof(*pkt->side_data));
if (!pkt->side_data)
return AVERROR(ENOMEM);

p= pkt->data + pkt->size - 8 - 5;
for (i=0; ; i++){
size= AV_RB32(p);
av_assert0(size<=INT_MAX && p - pkt->data > size);
pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
pkt->side_data[i].size = size;
pkt->side_data[i].type = p[4]&127;
if (!pkt->side_data[i].data)
return AVERROR(ENOMEM);
memcpy(pkt->side_data[i].data, p-size, size);
pkt->size -= size + 5;
if(p[4]&128)
break;
p-= size+5;
}
pkt->size -= 8;
pkt->side_data_elems = i+1;
return 1;
}
return 0;
}
@@ -47,7 +47,6 @@ avs_decode_frame(AVCodecContext * avctx,
|
||||
void *data, int *data_size, AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
const uint8_t *buf_end = avpkt->data + avpkt->size;
|
||||
int buf_size = avpkt->size;
|
||||
AvsContext *const avs = avctx->priv_data;
|
||||
AVFrame *picture = data;
|
||||
@@ -70,8 +69,6 @@ avs_decode_frame(AVCodecContext * avctx,
|
||||
out = avs->picture.data[0];
|
||||
stride = avs->picture.linesize[0];
|
||||
|
||||
if (buf_end - buf < 4)
|
||||
return AVERROR_INVALIDDATA;
|
||||
sub_type = buf[0];
|
||||
type = buf[1];
|
||||
buf += 4;
|
||||
@@ -82,8 +79,6 @@ avs_decode_frame(AVCodecContext * avctx,
|
||||
|
||||
first = AV_RL16(buf);
|
||||
last = first + AV_RL16(buf + 2);
|
||||
if (first >= 256 || last > 256 || buf_end - buf < 4 + 4 + 3 * (last - first))
|
||||
return AVERROR_INVALIDDATA;
|
||||
buf += 4;
|
||||
for (i=first; i<last; i++, buf+=3)
|
||||
pal[i] = (buf[0] << 18) | (buf[1] << 10) | (buf[2] << 2);
|
||||
@@ -119,22 +114,16 @@ avs_decode_frame(AVCodecContext * avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (buf_end - buf < 256 * vect_w * vect_h)
|
||||
return AVERROR_INVALIDDATA;
|
||||
table = buf + (256 * vect_w * vect_h);
|
||||
if (sub_type != AVS_I_FRAME) {
|
||||
int map_size = ((318 / vect_w + 7) / 8) * (198 / vect_h);
|
||||
if (buf_end - table < map_size)
|
||||
return AVERROR_INVALIDDATA;
|
||||
init_get_bits(&change_map, table, map_size * 8);
|
||||
init_get_bits(&change_map, table, map_size);
|
||||
table += map_size;
|
||||
}
|
||||
|
||||
for (y=0; y<198; y+=vect_h) {
|
||||
for (x=0; x<318; x+=vect_w) {
|
||||
if (sub_type == AVS_I_FRAME || get_bits1(&change_map)) {
|
||||
if (buf_end - table < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
vect = &buf[*table++ * (vect_w * vect_h)];
|
||||
for (j=0; j<vect_w; j++) {
|
||||
out[(y + 0) * stride + x + j] = vect[(0 * vect_w) + j];
|
||||
|
@@ -246,7 +246,7 @@ static void read_tree(GetBitContext *gb, Tree *tree)
|
||||
tree->syms[i] = get_bits(gb, 4);
|
||||
tmp1[tree->syms[i]] = 1;
|
||||
}
|
||||
for (i = 0; i < 16 && len < 16 - 1; i++)
|
||||
for (i = 0; i < 16; i++)
|
||||
if (!tmp1[i])
|
||||
tree->syms[++len] = i;
|
||||
} else {
|
||||
@@ -343,14 +343,14 @@ static int read_motion_values(AVCodecContext *avctx, GetBitContext *gb, Bundle *
|
||||
memset(b->cur_dec, v, t);
|
||||
b->cur_dec += t;
|
||||
} else {
|
||||
while (b->cur_dec < dec_end) {
|
||||
do {
|
||||
v = GET_HUFF(gb, b->tree);
|
||||
if (v) {
|
||||
sign = -get_bits1(gb);
|
||||
v = (v ^ sign) - sign;
|
||||
}
|
||||
*b->cur_dec++ = v;
|
||||
}
|
||||
} while (b->cur_dec < dec_end);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -374,7 +374,7 @@ static int read_block_types(AVCodecContext *avctx, GetBitContext *gb, Bundle *b)
|
||||
memset(b->cur_dec, v, t);
|
||||
b->cur_dec += t;
|
||||
} else {
|
||||
while (b->cur_dec < dec_end) {
|
||||
do {
|
||||
v = GET_HUFF(gb, b->tree);
|
||||
if (v < 12) {
|
||||
last = v;
|
||||
@@ -382,12 +382,10 @@ static int read_block_types(AVCodecContext *avctx, GetBitContext *gb, Bundle *b)
|
||||
} else {
|
||||
int run = bink_rlelens[v - 12];
|
||||
|
||||
if (dec_end - b->cur_dec < run)
|
||||
return -1;
|
||||
memset(b->cur_dec, last, run);
|
||||
b->cur_dec += run;
|
||||
}
|
||||
}
|
||||
} while (b->cur_dec < dec_end);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -457,8 +455,7 @@ static int read_dcs(AVCodecContext *avctx, GetBitContext *gb, Bundle *b,
|
||||
int start_bits, int has_sign)
|
||||
{
|
||||
int i, j, len, len2, bsize, sign, v, v2;
|
||||
int16_t *dst = (int16_t*)b->cur_dec;
|
||||
int16_t *dst_end = (int16_t*)b->data_end;
|
||||
int16_t *dst = (int16_t*)b->cur_dec;
|
||||
|
||||
CHECK_READ_VAL(gb, b, len);
|
||||
v = get_bits(gb, start_bits - has_sign);
|
||||
@@ -466,14 +463,10 @@ static int read_dcs(AVCodecContext *avctx, GetBitContext *gb, Bundle *b,
|
||||
sign = -get_bits1(gb);
|
||||
v = (v ^ sign) - sign;
|
||||
}
|
||||
if (dst_end - dst < 1)
|
||||
return -1;
|
||||
*dst++ = v;
|
||||
len--;
|
||||
for (i = 0; i < len; i += 8) {
|
||||
len2 = FFMIN(len - i, 8);
|
||||
if (dst_end - dst < len2)
|
||||
return -1;
|
||||
bsize = get_bits(gb, 4);
|
||||
if (bsize) {
|
||||
for (j = 0; j < len2; j++) {
|
||||
@@ -541,8 +534,6 @@ static int binkb_read_bundle(BinkContext *c, GetBitContext *gb, int bundle_num)
|
||||
int i, len;
|
||||
|
||||
CHECK_READ_VAL(gb, b, len);
|
||||
if (b->data_end - b->cur_dec < len * (1 + (bits > 8)))
|
||||
return -1;
|
||||
if (bits <= 8) {
|
||||
if (!issigned) {
|
||||
for (i = 0; i < len; i++)
|
||||
@@ -973,9 +964,8 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
|
||||
for (i = 0; i < BINK_NB_SRC; i++)
|
||||
read_bundle(gb, c, i);
|
||||
|
||||
ref_start = c->last.data[plane_idx] ? c->last.data[plane_idx]
|
||||
: c->pic.data[plane_idx];
|
||||
ref_end = ref_start
|
||||
ref_start = c->last.data[plane_idx];
|
||||
ref_end = c->last.data[plane_idx]
|
||||
+ (bw - 1 + c->last.linesize[plane_idx] * (bh - 1)) * 8;
|
||||
|
||||
for (i = 0; i < 64; i++)
|
||||
@@ -1004,8 +994,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
|
||||
if (by == bh)
|
||||
break;
|
||||
dst = c->pic.data[plane_idx] + 8*by*stride;
|
||||
prev = (c->last.data[plane_idx] ? c->last.data[plane_idx]
|
||||
: c->pic.data[plane_idx]) + 8*by*stride;
|
||||
prev = c->last.data[plane_idx] + 8*by*stride;
|
||||
for (bx = 0; bx < bw; bx++, dst += 8, prev += 8) {
|
||||
blk = get_value(c, BINK_SRC_BLOCK_TYPES);
|
||||
// 16x16 block type on odd line means part of the already decoded block, so skip it
|
||||
|
@@ -85,9 +85,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
frame_len_bits = 11;
|
||||
}
|
||||
|
||||
if (avctx->channels < 1 || avctx->channels > MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", avctx->channels);
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (avctx->channels > MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "too many channels: %d\n", avctx->channels);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (avctx->extradata && avctx->extradata_size > 0)
|
||||
@@ -153,18 +153,11 @@ static const uint8_t rle_length_tab[16] = {
|
||||
2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 64
|
||||
};
|
||||
|
||||
#define GET_BITS_SAFE(out, nbits) do { \
|
||||
if (get_bits_left(gb) < nbits) \
|
||||
return AVERROR_INVALIDDATA; \
|
||||
out = get_bits(gb, nbits); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* Decode Bink Audio block
|
||||
* @param[out] out Output buffer (must contain s->block_size elements)
|
||||
* @return 0 on success, negative error code on failure
|
||||
*/
|
||||
static int decode_block(BinkAudioContext *s, short *out, int use_dct)
|
||||
static void decode_block(BinkAudioContext *s, short *out, int use_dct)
|
||||
{
|
||||
int ch, i, j, k;
|
||||
float q, quant[25];
|
||||
@@ -177,19 +170,13 @@ static int decode_block(BinkAudioContext *s, short *out, int use_dct)
|
||||
for (ch = 0; ch < s->channels; ch++) {
|
||||
FFTSample *coeffs = s->coeffs_ptr[ch];
|
||||
if (s->version_b) {
|
||||
if (get_bits_left(gb) < 64)
|
||||
return AVERROR_INVALIDDATA;
|
||||
coeffs[0] = av_int2flt(get_bits(gb, 32)) * s->root;
|
||||
coeffs[1] = av_int2flt(get_bits(gb, 32)) * s->root;
|
||||
} else {
|
||||
if (get_bits_left(gb) < 58)
|
||||
return AVERROR_INVALIDDATA;
|
||||
coeffs[0] = get_float(gb) * s->root;
|
||||
coeffs[1] = get_float(gb) * s->root;
|
||||
}
|
||||
|
||||
if (get_bits_left(gb) < s->num_bands * 8)
|
||||
return AVERROR_INVALIDDATA;
|
||||
for (i = 0; i < s->num_bands; i++) {
|
||||
/* constant is result of 0.066399999/log10(M_E) */
|
||||
int value = get_bits(gb, 8);
|
||||
@@ -204,20 +191,15 @@ static int decode_block(BinkAudioContext *s, short *out, int use_dct)
|
||||
while (i < s->frame_len) {
|
||||
if (s->version_b) {
|
||||
j = i + 16;
|
||||
} else if (get_bits1(gb)) {
|
||||
j = i + rle_length_tab[get_bits(gb, 4)] * 8;
|
||||
} else {
|
||||
int v;
|
||||
GET_BITS_SAFE(v, 1);
|
||||
if (v) {
|
||||
GET_BITS_SAFE(v, 4);
|
||||
j = i + rle_length_tab[v] * 8;
|
||||
} else {
|
||||
j = i + 8;
|
||||
}
|
||||
j = i + 8;
|
||||
}
|
||||
|
||||
j = FFMIN(j, s->frame_len);
|
||||
|
||||
GET_BITS_SAFE(width, 4);
|
||||
width = get_bits(gb, 4);
|
||||
if (width == 0) {
|
||||
memset(coeffs + i, 0, (j - i) * sizeof(*coeffs));
|
||||
i = j;
|
||||
@@ -227,11 +209,9 @@ static int decode_block(BinkAudioContext *s, short *out, int use_dct)
|
||||
while (i < j) {
|
||||
if (s->bands[k] == i)
|
||||
q = quant[k++];
|
||||
GET_BITS_SAFE(coeff, width);
|
||||
coeff = get_bits(gb, width);
|
||||
if (coeff) {
|
||||
int v;
|
||||
GET_BITS_SAFE(v, 1);
|
||||
if (v)
|
||||
if (get_bits1(gb))
|
||||
coeffs[i] = -q * coeff;
|
||||
else
|
||||
coeffs[i] = q * coeff;
|
||||
@@ -267,8 +247,6 @@ static int decode_block(BinkAudioContext *s, short *out, int use_dct)
|
||||
s->overlap_len * s->channels * sizeof(*out));
|
||||
|
||||
s->first = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int decode_end(AVCodecContext *avctx)
|
||||
@@ -300,17 +278,12 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
int reported_size;
|
||||
GetBitContext *gb = &s->gb;
|
||||
|
||||
if (buf_size < 4) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
init_get_bits(gb, buf, buf_size * 8);
|
||||
|
||||
reported_size = get_bits_long(gb, 32);
|
||||
while (samples + s->block_size <= samples_end) {
|
||||
if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT))
|
||||
break;
|
||||
while (get_bits_count(gb) / 8 < buf_size &&
|
||||
samples + s->block_size <= samples_end) {
|
||||
decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT);
|
||||
samples += s->block_size;
|
||||
get_bits_align32(gb);
|
||||
}
|
||||
|
@@ -109,8 +109,8 @@ static int alloc_table(VLC *vlc, int size, int use_static)
|
||||
if(use_static)
|
||||
abort(); //cant do anything, init_vlc() is used with too little memory
|
||||
vlc->table_allocated += (1 << vlc->bits);
|
||||
vlc->table = av_realloc_f(vlc->table,
|
||||
vlc->table_allocated, sizeof(VLC_TYPE) * 2);
|
||||
vlc->table = av_realloc(vlc->table,
|
||||
sizeof(VLC_TYPE) * 2 * vlc->table_allocated);
|
||||
if (!vlc->table)
|
||||
return -1;
|
||||
}
|
||||
|
@@ -161,14 +161,10 @@ void ff_init_cabac_states(CABACContext *c){
|
||||
ff_h264_mps_state[2*i+1]= 2*mps_state[i]+1;
|
||||
|
||||
if( i ){
|
||||
ff_h264_lps_state[2*i+0]=
|
||||
ff_h264_mlps_state[128-2*i-1]= 2*lps_state[i]+0;
|
||||
ff_h264_lps_state[2*i+1]=
|
||||
ff_h264_mlps_state[128-2*i-2]= 2*lps_state[i]+1;
|
||||
}else{
|
||||
ff_h264_lps_state[2*i+0]=
|
||||
ff_h264_mlps_state[128-2*i-1]= 1;
|
||||
ff_h264_lps_state[2*i+1]=
|
||||
ff_h264_mlps_state[128-2*i-2]= 0;
|
||||
}
|
||||
}
|
||||
@@ -194,8 +190,7 @@ int main(void){
|
||||
ff_init_cabac_states(&c);
|
||||
|
||||
for(i=0; i<SIZE; i++){
|
||||
if(2*i<SIZE) r[i] = av_lfg_get(&prng) % 7;
|
||||
else r[i] = (i>>8)&1;
|
||||
r[i] = av_lfg_get(&prng) % 7;
|
||||
}
|
||||
|
||||
for(i=0; i<SIZE; i++){
|
||||
@@ -210,7 +205,6 @@ START_TIMER
|
||||
STOP_TIMER("put_cabac")
|
||||
}
|
||||
|
||||
#if 0
|
||||
for(i=0; i<SIZE; i++){
|
||||
START_TIMER
|
||||
put_cabac_u(&c, state, r[i], 6, 3, i&1);
|
||||
@@ -222,7 +216,7 @@ START_TIMER
|
||||
put_cabac_ueg(&c, state, r[i], 3, 0, 1, 2);
|
||||
STOP_TIMER("put_cabac_ueg")
|
||||
}
|
||||
#endif
|
||||
|
||||
put_cabac_terminate(&c, 1);
|
||||
|
||||
ff_init_cabac_decoder(&c, b, SIZE);
|
||||
|
@@ -280,10 +280,6 @@ static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
|
||||
av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
ret = avctx->reget_buffer(avctx, &cc->frame);
|
||||
if (ret) {
|
||||
|
@@ -133,8 +133,9 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
|
||||
out2 -= val * old_out2;
|
||||
out3 -= val * old_out3;
|
||||
|
||||
old_out3 = out[-5];
|
||||
|
||||
for (i = 5; i <= filter_length; i += 2) {
|
||||
old_out3 = out[-i];
|
||||
val = filter_coeffs[i-1];
|
||||
|
||||
out0 -= val * old_out3;
|
||||
@@ -153,6 +154,7 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
|
||||
|
||||
FFSWAP(float, old_out0, old_out2);
|
||||
old_out1 = old_out3;
|
||||
old_out3 = out[-i-2];
|
||||
}
|
||||
|
||||
tmp0 = out0;
|
||||
|
@@ -67,7 +67,6 @@ typedef struct CinepakContext {
|
||||
|
||||
int sega_film_skip_bytes;
|
||||
|
||||
uint32_t pal[256];
|
||||
} CinepakContext;
|
||||
|
||||
static void cinepak_decode_codebook (cvid_codebook *codebook,
|
||||
@@ -336,8 +335,7 @@ static int cinepak_decode (CinepakContext *s)
|
||||
* If the frame header is followed by the bytes FE 00 00 06 00 00 then
|
||||
* this is probably one of the two known files that have 6 extra bytes
|
||||
* after the frame header. Else, assume 2 extra bytes. */
|
||||
if (s->size >= 16 &&
|
||||
(s->data[10] == 0xFE) &&
|
||||
if ((s->data[10] == 0xFE) &&
|
||||
(s->data[11] == 0x00) &&
|
||||
(s->data[12] == 0x00) &&
|
||||
(s->data[13] == 0x06) &&
|
||||
@@ -366,8 +364,6 @@ static int cinepak_decode (CinepakContext *s)
|
||||
s->strips[i].x2 = s->avctx->width;
|
||||
|
||||
strip_size = AV_RB24 (&s->data[1]) - 12;
|
||||
if(strip_size < 0)
|
||||
return -1;
|
||||
s->data += 12;
|
||||
strip_size = ((s->data + strip_size) > eod) ? (eod - s->data) : strip_size;
|
||||
|
||||
@@ -399,7 +395,7 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
|
||||
s->sega_film_skip_bytes = -1; /* uninitialized state */
|
||||
|
||||
// check for paletted data
|
||||
if (avctx->bits_per_coded_sample != 8) {
|
||||
if ((avctx->palctrl == NULL) || (avctx->bits_per_coded_sample == 40)) {
|
||||
s->palette_video = 0;
|
||||
avctx->pix_fmt = PIX_FMT_YUV420P;
|
||||
} else {
|
||||
@@ -432,18 +428,16 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (s->palette_video) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
cinepak_decode(s);
|
||||
|
||||
if (s->palette_video)
|
||||
memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
if (s->palette_video) {
|
||||
memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (avctx->palctrl->palette_changed) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
avctx->palctrl->palette_changed = 0;
|
||||
} else
|
||||
s->frame.palette_has_changed = 0;
|
||||
}
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->frame;
|
||||
|
@@ -67,7 +67,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
p->pict_type= AV_PICTURE_TYPE_I;
|
||||
p->key_frame= 1;
|
||||
|
||||
init_get_bits(&a->gb, buf, buf_size * 8);
|
||||
init_get_bits(&a->gb, buf, buf_size);
|
||||
|
||||
for(y=0; y<avctx->height; y++){
|
||||
uint8_t *luma= &a->picture.data[0][ y*a->picture.linesize[0] ];
|
||||
|
@@ -1066,10 +1066,6 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
|
||||
q->sample_rate = avctx->sample_rate;
|
||||
q->nb_channels = avctx->channels;
|
||||
q->bit_rate = avctx->bit_rate;
|
||||
if (!q->nb_channels) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* Initialize RNG. */
|
||||
av_lfg_init(&q->random_state, 0);
|
||||
@@ -1083,7 +1079,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
|
||||
q->subpacket[s].subbands = bytestream_get_be16(&edata_ptr);
|
||||
extradata_size -= 8;
|
||||
}
|
||||
if (extradata_size >= 8){
|
||||
if (avctx->extradata_size >= 8){
|
||||
bytestream_get_be32(&edata_ptr); //Unknown unused
|
||||
q->subpacket[s].js_subband_start = bytestream_get_be16(&edata_ptr);
|
||||
q->subpacket[s].js_vlc_bits = bytestream_get_be16(&edata_ptr);
|
||||
@@ -1179,9 +1175,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((q->subpacket[s].js_vlc_bits > 6) || (q->subpacket[s].js_vlc_bits < 2*q->subpacket[s].joint_stereo)) {
|
||||
av_log(avctx,AV_LOG_ERROR,"js_vlc_bits = %d, only >= %d and <= 6 allowed!\n",
|
||||
q->subpacket[s].js_vlc_bits, 2*q->subpacket[s].joint_stereo);
|
||||
if ((q->subpacket[s].js_vlc_bits > 6) || (q->subpacket[s].js_vlc_bits < 0)) {
|
||||
av_log(avctx,AV_LOG_ERROR,"js_vlc_bits = %d, only >= 0 and <= 6 allowed!\n",q->subpacket[s].js_vlc_bits);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@@ -228,7 +228,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"CamStudio codec error: invalid depth %i bpp\n",
|
||||
avctx->bits_per_coded_sample);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return 1;
|
||||
}
|
||||
c->bpp = avctx->bits_per_coded_sample;
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
@@ -242,7 +242,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
|
||||
c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
|
||||
if (!c->decomp_buf) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@@ -29,7 +29,6 @@
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/intmath.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/audioconvert.h"
|
||||
#include "avcodec.h"
|
||||
#include "dsputil.h"
|
||||
@@ -899,17 +898,15 @@ static void qmf_32_subbands(DCAContext * s, int chans,
|
||||
else /* Perfect reconstruction */
|
||||
prCoeff = fir_32bands_perfect;
|
||||
|
||||
for (i = sb_act; i < 32; i++)
|
||||
s->raXin[i] = 0.0;
|
||||
|
||||
/* Reconstructed channel sample index */
|
||||
for (subindex = 0; subindex < 8; subindex++) {
|
||||
/* Load in one sample from each subband and clear inactive subbands */
|
||||
for (i = 0; i < sb_act; i++){
|
||||
unsigned sign = (i - 1) & 2;
|
||||
uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ sign << 30;
|
||||
uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ ((i-1)&2)<<30;
|
||||
AV_WN32A(&s->raXin[i], v);
|
||||
}
|
||||
for (; i < 32; i++)
|
||||
s->raXin[i] = 0.0;
|
||||
|
||||
s->synth.synth_filter_float(&s->imdct,
|
||||
s->subband_fir_hist[chans], &s->hist_index[chans],
|
||||
@@ -1653,7 +1650,6 @@ static int dca_decode_frame(AVCodecContext * avctx,
|
||||
//set AVCodec values with parsed data
|
||||
avctx->sample_rate = s->sample_rate;
|
||||
avctx->bit_rate = s->bit_rate;
|
||||
avctx->frame_size = s->sample_blocks * 32;
|
||||
|
||||
s->profile = FF_PROFILE_DTS;
|
||||
|
||||
|
@@ -120,7 +120,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
|
||||
// chroma subsampling
|
||||
if (get_bits1(gb))
|
||||
source->chroma_format = svq3_get_ue_golomb(gb);
|
||||
if (source->chroma_format > 2U) {
|
||||
if (source->chroma_format > 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unknown chroma format %d\n",
|
||||
source->chroma_format);
|
||||
return -1;
|
||||
@@ -128,14 +128,14 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
|
||||
|
||||
if (get_bits1(gb))
|
||||
source->interlaced = svq3_get_ue_golomb(gb);
|
||||
if (source->interlaced > 1U)
|
||||
if (source->interlaced > 1)
|
||||
return -1;
|
||||
|
||||
// frame rate
|
||||
if (get_bits1(gb)) {
|
||||
source->frame_rate_index = svq3_get_ue_golomb(gb);
|
||||
|
||||
if (source->frame_rate_index > 10U)
|
||||
if (source->frame_rate_index > 10)
|
||||
return -1;
|
||||
|
||||
if (!source->frame_rate_index) {
|
||||
@@ -156,7 +156,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
|
||||
if (get_bits1(gb)) {
|
||||
source->aspect_ratio_index = svq3_get_ue_golomb(gb);
|
||||
|
||||
if (source->aspect_ratio_index > 6U)
|
||||
if (source->aspect_ratio_index > 6)
|
||||
return -1;
|
||||
|
||||
if (!source->aspect_ratio_index) {
|
||||
@@ -179,7 +179,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
|
||||
if (get_bits1(gb)) {
|
||||
source->pixel_range_index = svq3_get_ue_golomb(gb);
|
||||
|
||||
if (source->pixel_range_index > 4U)
|
||||
if (source->pixel_range_index > 4)
|
||||
return -1;
|
||||
|
||||
// This assumes either fullrange or MPEG levels only
|
||||
@@ -207,7 +207,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
|
||||
if (get_bits1(gb)) {
|
||||
idx = source->color_spec_index = svq3_get_ue_golomb(gb);
|
||||
|
||||
if (source->color_spec_index > 4U)
|
||||
if (source->color_spec_index > 4)
|
||||
return -1;
|
||||
|
||||
avctx->color_primaries = dirac_color_presets[idx].color_primaries;
|
||||
@@ -217,7 +217,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
|
||||
if (!source->color_spec_index) {
|
||||
if (get_bits1(gb)) {
|
||||
idx = svq3_get_ue_golomb(gb);
|
||||
if (idx < 3U)
|
||||
if (idx < 3)
|
||||
avctx->color_primaries = dirac_primaries[idx];
|
||||
}
|
||||
|
||||
@@ -259,7 +259,7 @@ int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
|
||||
else if (version_major > 2)
|
||||
av_log(avctx, AV_LOG_WARNING, "Stream may have unhandled features\n");
|
||||
|
||||
if (video_format > 20U)
|
||||
if (video_format > 20)
|
||||
return -1;
|
||||
|
||||
// Fill in defaults for the source parameters.
|
||||
|
@@ -169,7 +169,6 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
|
||||
int in, out = 0;
|
||||
int predictor[2];
|
||||
int channel_number = 0;
|
||||
int stereo = s->channels - 1;
|
||||
short *output_samples = data;
|
||||
int shift[2];
|
||||
unsigned char byte;
|
||||
@@ -178,9 +177,6 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
|
||||
if (!buf_size)
|
||||
return 0;
|
||||
|
||||
if (stereo && (buf_size & 1))
|
||||
buf_size--;
|
||||
|
||||
// almost every DPCM variant expands one byte of data into two
|
||||
if(*data_size/2 < buf_size)
|
||||
return -1;
|
||||
@@ -299,7 +295,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
*data_size = out * sizeof(short);
|
||||
return avpkt->size;
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
#define DPCM_DECODER(id, name, long_name_) \
|
||||
|
@@ -146,11 +146,11 @@ static int cin_decode_huffman(const unsigned char *src, int src_size, unsigned c
|
||||
return dst_cur - dst;
|
||||
}
|
||||
|
||||
static int cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
{
|
||||
uint16_t cmd;
|
||||
int i, sz, offset, code;
|
||||
unsigned char *dst_end = dst + dst_size, *dst_start = dst;
|
||||
unsigned char *dst_end = dst + dst_size;
|
||||
const unsigned char *src_end = src + src_size;
|
||||
|
||||
while (src < src_end && dst < dst_end) {
|
||||
@@ -161,8 +161,6 @@ static int cin_decode_lzss(const unsigned char *src, int src_size, unsigned char
|
||||
} else {
|
||||
cmd = AV_RL16(src); src += 2;
|
||||
offset = cmd >> 4;
|
||||
if ((int) (dst - dst_start) < offset + 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
sz = (cmd & 0xF) + 2;
|
||||
/* don't use memcpy/memmove here as the decoding routine (ab)uses */
|
||||
/* buffer overlappings to repeat bytes in the destination */
|
||||
@@ -174,8 +172,6 @@ static int cin_decode_lzss(const unsigned char *src, int src_size, unsigned char
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cin_decode_rle(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
@@ -205,7 +201,13 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
CinVideoContext *cin = avctx->priv_data;
|
||||
int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size, res = 0;
|
||||
int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size;
|
||||
|
||||
cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
|
||||
if (avctx->reget_buffer(avctx, &cin->frame)) {
|
||||
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
palette_type = buf[0];
|
||||
palette_colors_count = AV_RL16(buf+1);
|
||||
@@ -215,11 +217,7 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
bitmap_frame_size = buf_size - 4;
|
||||
|
||||
/* handle palette */
|
||||
if (bitmap_frame_size < palette_colors_count * (3 + (palette_type != 0)))
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (palette_type == 0) {
|
||||
if (palette_colors_count > 256)
|
||||
return AVERROR_INVALIDDATA;
|
||||
for (i = 0; i < palette_colors_count; ++i) {
|
||||
cin->palette[i] = bytestream_get_le24(&buf);
|
||||
bitmap_frame_size -= 3;
|
||||
@@ -231,6 +229,8 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
bitmap_frame_size -= 4;
|
||||
}
|
||||
}
|
||||
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
|
||||
cin->frame.palette_has_changed = 1;
|
||||
|
||||
/* note: the decoding routines below assumes that surface.width = surface.pitch */
|
||||
switch (bitmap_frame_type) {
|
||||
@@ -263,31 +263,17 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 38:
|
||||
res = cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP],
|
||||
cin->bitmap_size);
|
||||
if (res < 0)
|
||||
return res;
|
||||
cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 39:
|
||||
res = cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP],
|
||||
cin->bitmap_size);
|
||||
if (res < 0)
|
||||
return res;
|
||||
cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
}
|
||||
|
||||
cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
|
||||
if (avctx->reget_buffer(avctx, &cin->frame)) {
|
||||
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
|
||||
cin->frame.palette_has_changed = 1;
|
||||
for (y = 0; y < cin->avctx->height; ++y)
|
||||
memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0],
|
||||
cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
|
||||
@@ -320,11 +306,6 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
|
||||
CinAudioContext *cin = avctx->priv_data;
|
||||
|
||||
cin->avctx = avctx;
|
||||
if (avctx->channels != 1) {
|
||||
av_log_ask_for_sample(avctx, "Number of channels is not supported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
cin->initial_decode_frame = 1;
|
||||
cin->delta = 0;
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
|
@@ -120,6 +120,14 @@ void ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);
|
||||
void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);
|
||||
|
||||
/* 1/2^n downscaling functions from imgconvert.c */
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Use av_image_copy_plane() instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
#endif
|
||||
|
||||
void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
|
@@ -1360,7 +1360,7 @@ static int dvbsub_display_end_segment(AVCodecContext *avctx, const uint8_t *buf,
|
||||
rect->y = display->y_pos + offset_y;
|
||||
rect->w = region->width;
|
||||
rect->h = region->height;
|
||||
rect->nb_colors = (1 << region->depth);
|
||||
rect->nb_colors = 16;
|
||||
rect->type = SUBTITLE_BITMAP;
|
||||
rect->pict.linesize[0] = region->width;
|
||||
|
||||
|
@@ -248,13 +248,11 @@ static const DVprofile dv_profiles[] = {
|
||||
const DVprofile* ff_dv_frame_profile(const DVprofile *sys,
|
||||
const uint8_t* frame, unsigned buf_size)
|
||||
{
|
||||
int i, dsf, stype;
|
||||
int i;
|
||||
|
||||
if(buf_size < DV_PROFILE_BYTES)
|
||||
return NULL;
|
||||
int dsf = (frame[3] & 0x80) >> 7;
|
||||
|
||||
dsf = (frame[3] & 0x80) >> 7;
|
||||
stype = frame[80*5 + 48 + 3] & 0x1f;
|
||||
int stype = frame[80*5 + 48 + 3] & 0x1f;
|
||||
|
||||
/* 576i50 25Mbps 4:1:1 is a special case */
|
||||
if (dsf == 1 && stype == 0 && frame[4] & 0x07 /* the APT field */) {
|
||||
|
@@ -158,10 +158,9 @@ static void fill_scaling_lists(const H264Context *h, DXVA_Qmatrix_H264 *qm)
|
||||
for (j = 0; j < 16; j++)
|
||||
qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][zigzag_scan[j]];
|
||||
|
||||
for (j = 0; j < 64; j++) {
|
||||
qm->bScalingLists8x8[0][j] = h->pps.scaling_matrix8[0][ff_zigzag_direct[j]];
|
||||
qm->bScalingLists8x8[1][j] = h->pps.scaling_matrix8[3][ff_zigzag_direct[j]];
|
||||
}
|
||||
for (i = 0; i < 2; i++)
|
||||
for (j = 0; j < 64; j++)
|
||||
qm->bScalingLists8x8[i][j] = h->pps.scaling_matrix8[i][ff_zigzag_direct[j]];
|
||||
}
|
||||
|
||||
static int is_slice_short(struct dxva_context *ctx)
|
||||
|
@@ -56,7 +56,7 @@ static void cmv_decode_intra(CmvContext * s, const uint8_t *buf, const uint8_t *
|
||||
unsigned char *dst = s->frame.data[0];
|
||||
int i;
|
||||
|
||||
for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) {
|
||||
for (i=0; i < s->avctx->height && buf+s->avctx->width<=buf_end; i++) {
|
||||
memcpy(dst, buf, s->avctx->width);
|
||||
dst += s->frame.linesize[0];
|
||||
buf += s->avctx->width;
|
||||
@@ -88,7 +88,7 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
|
||||
|
||||
i = 0;
|
||||
for(y=0; y<s->avctx->height/4; y++)
|
||||
for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) {
|
||||
for(x=0; x<s->avctx->width/4 && buf+i<buf_end; x++) {
|
||||
if (buf[i]==0xFF) {
|
||||
unsigned char *dst = s->frame.data[0] + (y*4)*s->frame.linesize[0] + x*4;
|
||||
if (raw+16<buf_end && *raw==0xFF) { /* intra */
|
||||
@@ -110,10 +110,9 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
|
||||
}else{ /* inter using last frame as reference */
|
||||
int xoffset = (buf[i] & 0xF) - 7;
|
||||
int yoffset = ((buf[i] >> 4)) - 7;
|
||||
if (s->last_frame.data[0])
|
||||
cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
|
||||
s->last_frame.data[0], s->last_frame.linesize[0],
|
||||
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
|
||||
cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
|
||||
s->last_frame.data[0], s->last_frame.linesize[0],
|
||||
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
@@ -123,7 +122,7 @@ static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t
|
||||
{
|
||||
int pal_start, pal_count, i;
|
||||
|
||||
if(buf_end - buf < 16) {
|
||||
if(buf+16>=buf_end) {
|
||||
av_log(s->avctx, AV_LOG_WARNING, "truncated header\n");
|
||||
return;
|
||||
}
|
||||
@@ -140,7 +139,7 @@ static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t
|
||||
pal_count = AV_RL16(&buf[14]);
|
||||
|
||||
buf += 16;
|
||||
for (i=pal_start; i<pal_start+pal_count && i<AVPALETTE_COUNT && buf_end - buf >= 3; i++) {
|
||||
for (i=pal_start; i<pal_start+pal_count && i<AVPALETTE_COUNT && buf+2<buf_end; i++) {
|
||||
s->palette[i] = AV_RB24(buf);
|
||||
buf += 3;
|
||||
}
|
||||
@@ -158,9 +157,6 @@ static int cmv_decode_frame(AVCodecContext *avctx,
|
||||
CmvContext *s = avctx->priv_data;
|
||||
const uint8_t *buf_end = buf + buf_size;
|
||||
|
||||
if (buf_end - buf < EA_PREAMBLE_SIZE)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (AV_RL32(buf)==MVIh_TAG||AV_RB32(buf)==MVIh_TAG) {
|
||||
cmv_process_header(s, buf+EA_PREAMBLE_SIZE, buf_end);
|
||||
return buf_size;
|
||||
|
@@ -85,21 +85,15 @@ static inline void comp_block(MadContext *t, int mb_x, int mb_y,
|
||||
{
|
||||
MpegEncContext *s = &t->s;
|
||||
if (j < 4) {
|
||||
unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
|
||||
if (offset >= (s->height - 7) * t->last_frame.linesize[0] - 7)
|
||||
return;
|
||||
comp(t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
|
||||
t->frame.linesize[0],
|
||||
t->last_frame.data[0] + offset,
|
||||
t->last_frame.data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x,
|
||||
t->last_frame.linesize[0], add);
|
||||
} else if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
|
||||
int index = j - 3;
|
||||
unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2);
|
||||
if (offset >= (s->height/2 - 7) * t->last_frame.linesize[index] - 7)
|
||||
return;
|
||||
comp(t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x * 8,
|
||||
t->frame.linesize[index],
|
||||
t->last_frame.data[index] + offset,
|
||||
t->last_frame.data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2),
|
||||
t->last_frame.linesize[index], add);
|
||||
}
|
||||
}
|
||||
@@ -211,8 +205,7 @@ static void decode_mb(MadContext *t, int inter)
|
||||
for (j=0; j<6; j++) {
|
||||
if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map
|
||||
int add = 2*decode_motion(&s->gb);
|
||||
if (t->last_frame.data[0])
|
||||
comp_block(t, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
|
||||
comp_block(t, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
|
||||
} else {
|
||||
s->dsp.clear_block(t->block);
|
||||
decode_block_intra(t, t->block);
|
||||
@@ -273,8 +266,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
avcodec_set_dimensions(avctx, s->width, s->height);
|
||||
if (t->frame.data[0])
|
||||
avctx->release_buffer(avctx, &t->frame);
|
||||
if (t->last_frame.data[0])
|
||||
avctx->release_buffer(avctx, &t->last_frame);
|
||||
}
|
||||
|
||||
t->frame.reference = 1;
|
||||
@@ -289,7 +280,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
if (!t->bitstream_buf)
|
||||
return AVERROR(ENOMEM);
|
||||
bswap16_buf(t->bitstream_buf, (const uint16_t*)buf, (buf_end-buf)/2);
|
||||
memset((uint8_t*)t->bitstream_buf + (buf_end-buf), 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
init_get_bits(&s->gb, t->bitstream_buf, 8*(buf_end-buf));
|
||||
|
||||
for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
|
||||
|
@@ -74,7 +74,7 @@ static int unpack(const uint8_t *src, const uint8_t *src_end, unsigned char *dst
|
||||
else
|
||||
src += 2;
|
||||
|
||||
if (src_end - src < 3)
|
||||
if (src+3>src_end)
|
||||
return -1;
|
||||
size = AV_RB24(src);
|
||||
src += 3;
|
||||
@@ -138,7 +138,7 @@ static int unpack(const uint8_t *src, const uint8_t *src_end, unsigned char *dst
|
||||
* @return 0 on success, -1 on critical buffer underflow
|
||||
*/
|
||||
static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *buf_end){
|
||||
unsigned last_frame_size = s->avctx->height*s->last_frame.linesize[0];
|
||||
unsigned char *frame0_end = s->last_frame.data[0] + s->avctx->width*s->last_frame.linesize[0];
|
||||
int num_mvs;
|
||||
int num_blocks_raw;
|
||||
int num_blocks_packed;
|
||||
@@ -148,7 +148,7 @@ static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *b
|
||||
int mvbits;
|
||||
const unsigned char *blocks_raw;
|
||||
|
||||
if(buf_end - buf < 12)
|
||||
if(buf+12>buf_end)
|
||||
return -1;
|
||||
|
||||
num_mvs = AV_RL16(&buf[0]);
|
||||
@@ -171,7 +171,7 @@ static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *b
|
||||
/* read motion vectors */
|
||||
mvbits = (num_mvs*2*10+31) & ~31;
|
||||
|
||||
if (buf_end - buf < (mvbits>>3)+16*num_blocks_raw+8*num_blocks_packed)
|
||||
if (buf+(mvbits>>3)+16*num_blocks_raw+8*num_blocks_packed>buf_end)
|
||||
return -1;
|
||||
|
||||
init_get_bits(&gb, buf, mvbits);
|
||||
@@ -207,14 +207,12 @@ static int tgv_decode_inter(TgvContext * s, const uint8_t *buf, const uint8_t *b
|
||||
int src_stride;
|
||||
|
||||
if (vector < num_mvs) {
|
||||
unsigned offset =
|
||||
(y*4 + s->mv_codebook[vector][1])*s->last_frame.linesize[0] +
|
||||
x*4 + s->mv_codebook[vector][0];
|
||||
|
||||
src = s->last_frame.data[0] +
|
||||
(y*4 + s->mv_codebook[vector][1])*s->last_frame.linesize[0] +
|
||||
x*4 + s->mv_codebook[vector][0];
|
||||
src_stride = s->last_frame.linesize[0];
|
||||
if (offset >= last_frame_size - (3*src_stride+3))
|
||||
if (src+3*src_stride+3>=frame0_end)
|
||||
continue;
|
||||
src = s->last_frame.data[0] + offset;
|
||||
}else{
|
||||
int offset = vector - num_mvs;
|
||||
if (offset<num_blocks_raw)
|
||||
@@ -254,15 +252,12 @@ static int tgv_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf_end = buf + buf_size;
|
||||
int chunk_type;
|
||||
|
||||
if (buf_end - buf < EA_PREAMBLE_SIZE)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
chunk_type = AV_RL32(&buf[0]);
|
||||
buf += EA_PREAMBLE_SIZE;
|
||||
|
||||
if (chunk_type==kVGT_TAG) {
|
||||
int pal_count, i;
|
||||
if(buf_end - buf < 12) {
|
||||
if(buf+12>buf_end) {
|
||||
av_log(avctx, AV_LOG_WARNING, "truncated header\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -277,7 +272,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
pal_count = AV_RL16(&buf[6]);
|
||||
buf += 12;
|
||||
for(i=0; i<pal_count && i<AVPALETTE_COUNT && buf_end - buf >= 3; i++) {
|
||||
for(i=0; i<pal_count && i<AVPALETTE_COUNT && buf+2<buf_end; i++) {
|
||||
s->palette[i] = AV_RB24(buf);
|
||||
buf += 3;
|
||||
}
|
||||
|
@@ -59,15 +59,12 @@ static av_cold int tqi_decode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
|
||||
static void tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
|
||||
{
|
||||
int n;
|
||||
s->dsp.clear_blocks(block[0]);
|
||||
for (n=0; n<6; n++)
|
||||
if (ff_mpeg1_decode_block_intra(s, block[n], n) < 0)
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
ff_mpeg1_decode_block_intra(s, block[n], n);
|
||||
}
|
||||
|
||||
static inline void tqi_idct_put(TqiContext *t, DCTELEM (*block)[64])
|
||||
@@ -139,8 +136,7 @@ static int tqi_decode_frame(AVCodecContext *avctx,
|
||||
for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++)
|
||||
for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++)
|
||||
{
|
||||
if (tqi_decode_mb(s, t->block) < 0)
|
||||
break;
|
||||
tqi_decode_mb(s, t->block);
|
||||
tqi_idct_put(t, t->block);
|
||||
}
|
||||
|
||||
|
@@ -660,7 +660,7 @@ static int is_intra_more_likely(MpegEncContext *s){
|
||||
|
||||
if(s->codec_id == CODEC_ID_H264){
|
||||
H264Context *h= (void*)s;
|
||||
if (h->list_count <= 0 || h->ref_count[0] <= 0 || !h->ref_list[0][0].data[0])
|
||||
if(h->ref_count[0] <= 0 || !h->ref_list[0][0].data[0])
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@@ -1805,7 +1805,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
bytes_read = c->bytestream - c->bytestream_start - 1;
|
||||
if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME
|
||||
//printf("pos=%d\n", bytes_read);
|
||||
init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, (buf_size - bytes_read) * 8);
|
||||
init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, buf_size - bytes_read);
|
||||
} else {
|
||||
bytes_read = 0; /* avoid warning */
|
||||
}
|
||||
@@ -1822,7 +1822,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
if(fs->ac){
|
||||
ff_init_range_decoder(&fs->c, buf_p, v);
|
||||
}else{
|
||||
init_get_bits(&fs->gb, buf_p, v * 8);
|
||||
init_get_bits(&fs->gb, buf_p, v);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -228,11 +228,9 @@ static int get_metadata_size(const uint8_t *buf, int buf_size)
|
||||
|
||||
buf += 4;
|
||||
do {
|
||||
if (buf_end - buf < 4)
|
||||
return 0;
|
||||
ff_flac_parse_block_header(buf, &metadata_last, NULL, &metadata_size);
|
||||
buf += 4;
|
||||
if (buf_end - buf < metadata_size) {
|
||||
if (buf + metadata_size > buf_end) {
|
||||
/* need more data in order to read the complete header */
|
||||
return 0;
|
||||
}
|
||||
@@ -420,16 +418,7 @@ static inline int decode_subframe(FLACContext *s, int channel)
|
||||
type = get_bits(&s->gb, 6);
|
||||
|
||||
if (get_bits1(&s->gb)) {
|
||||
int left = get_bits_left(&s->gb);
|
||||
wasted = 1;
|
||||
if ( left < 0 ||
|
||||
(left < s->curr_bps && !show_bits_long(&s->gb, left)) ||
|
||||
!show_bits_long(&s->gb, s->curr_bps)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Invalid number of wasted bits > available bits (%d) - left=%d\n",
|
||||
s->curr_bps, left);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
while (!get_bits1(&s->gb))
|
||||
wasted++;
|
||||
s->curr_bps -= wasted;
|
||||
|
@@ -296,6 +296,17 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
|
||||
s->options.max_partition_order = ((int[]){ 2, 2, 3, 3, 3, 8, 8, 8, 8, 8, 8, 8, 8})[level];
|
||||
|
||||
/* set compression option overrides from AVCodecContext */
|
||||
#if FF_API_USE_LPC
|
||||
/* for compatibility with deprecated AVCodecContext.use_lpc */
|
||||
if (avctx->use_lpc == 0) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_FIXED;
|
||||
} else if (avctx->use_lpc == 1) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_LEVINSON;
|
||||
} else if (avctx->use_lpc > 1) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_CHOLESKY;
|
||||
s->options.lpc_passes = avctx->use_lpc - 1;
|
||||
}
|
||||
#endif
|
||||
#if FF_API_FLAC_GLOBAL_OPTS
|
||||
if (avctx->lpc_type > FF_LPC_TYPE_DEFAULT) {
|
||||
if (avctx->lpc_type > FF_LPC_TYPE_CHOLESKY) {
|
||||
|
@@ -132,6 +132,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
FlicDecodeContext *s = avctx->priv_data;
|
||||
|
||||
int stream_ptr = 0;
|
||||
int stream_ptr_after_color_chunk;
|
||||
int pixel_ptr;
|
||||
int palette_ptr;
|
||||
unsigned char palette_idx1;
|
||||
@@ -171,11 +172,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
pixels = s->frame.data[0];
|
||||
pixel_limit = s->avctx->height * s->frame.linesize[0];
|
||||
|
||||
if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE))
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame_size = AV_RL32(&buf[stream_ptr]);
|
||||
if (frame_size > buf_size)
|
||||
frame_size = buf_size;
|
||||
stream_ptr += 6; /* skip the magic number */
|
||||
num_chunks = AV_RL16(&buf[stream_ptr]);
|
||||
stream_ptr += 10; /* skip padding */
|
||||
@@ -183,16 +180,13 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
frame_size -= 16;
|
||||
|
||||
/* iterate through the chunks */
|
||||
while ((frame_size >= 6) && (num_chunks > 0)) {
|
||||
int stream_ptr_after_chunk;
|
||||
while ((frame_size > 0) && (num_chunks > 0)) {
|
||||
chunk_size = AV_RL32(&buf[stream_ptr]);
|
||||
if (chunk_size > frame_size) {
|
||||
av_log(avctx, AV_LOG_WARNING,
|
||||
"Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
|
||||
chunk_size = frame_size;
|
||||
}
|
||||
stream_ptr_after_chunk = stream_ptr + chunk_size;
|
||||
|
||||
stream_ptr += 4;
|
||||
chunk_type = AV_RL16(&buf[stream_ptr]);
|
||||
stream_ptr += 2;
|
||||
@@ -200,6 +194,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
switch (chunk_type) {
|
||||
case FLI_256_COLOR:
|
||||
case FLI_COLOR:
|
||||
stream_ptr_after_color_chunk = stream_ptr + chunk_size - 6;
|
||||
|
||||
/* check special case: If this file is from the Magic Carpet
|
||||
* game and uses 6-bit colors even though it reports 256-color
|
||||
* chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
|
||||
@@ -223,9 +219,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
if (color_changes == 0)
|
||||
color_changes = 256;
|
||||
|
||||
if (stream_ptr + color_changes * 3 > stream_ptr_after_chunk)
|
||||
break;
|
||||
|
||||
for (j = 0; j < color_changes; j++) {
|
||||
unsigned int entry;
|
||||
|
||||
@@ -242,6 +235,13 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
s->palette[palette_ptr++] = entry;
|
||||
}
|
||||
}
|
||||
|
||||
/* color chunks sometimes have weird 16-bit alignment issues;
|
||||
* therefore, take the hardline approach and set the stream_ptr
|
||||
* to the value calculated w.r.t. the size specified by the color
|
||||
* chunk header */
|
||||
stream_ptr = stream_ptr_after_color_chunk;
|
||||
|
||||
break;
|
||||
|
||||
case FLI_DELTA:
|
||||
@@ -249,8 +249,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
compressed_lines = AV_RL16(&buf[stream_ptr]);
|
||||
stream_ptr += 2;
|
||||
while (compressed_lines > 0) {
|
||||
if (stream_ptr + 2 > stream_ptr_after_chunk)
|
||||
break;
|
||||
line_packets = AV_RL16(&buf[stream_ptr]);
|
||||
stream_ptr += 2;
|
||||
if ((line_packets & 0xC000) == 0xC000) {
|
||||
@@ -270,8 +268,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
CHECK_PIXEL_PTR(0);
|
||||
pixel_countdown = s->avctx->width;
|
||||
for (i = 0; i < line_packets; i++) {
|
||||
if (stream_ptr + 2 > stream_ptr_after_chunk)
|
||||
break;
|
||||
/* account for the skip bytes */
|
||||
pixel_skip = buf[stream_ptr++];
|
||||
pixel_ptr += pixel_skip;
|
||||
@@ -288,8 +284,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
}
|
||||
} else {
|
||||
CHECK_PIXEL_PTR(byte_run * 2);
|
||||
if (stream_ptr + byte_run * 2 > stream_ptr_after_chunk)
|
||||
break;
|
||||
for (j = 0; j < byte_run * 2; j++, pixel_countdown--) {
|
||||
palette_idx1 = buf[stream_ptr++];
|
||||
pixels[pixel_ptr++] = palette_idx1;
|
||||
@@ -316,8 +310,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
CHECK_PIXEL_PTR(0);
|
||||
pixel_countdown = s->avctx->width;
|
||||
line_packets = buf[stream_ptr++];
|
||||
if (stream_ptr + 2 * line_packets > stream_ptr_after_chunk)
|
||||
break;
|
||||
if (line_packets > 0) {
|
||||
for (i = 0; i < line_packets; i++) {
|
||||
/* account for the skip bytes */
|
||||
@@ -327,8 +319,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
byte_run = (signed char)(buf[stream_ptr++]);
|
||||
if (byte_run > 0) {
|
||||
CHECK_PIXEL_PTR(byte_run);
|
||||
if (stream_ptr + byte_run > stream_ptr_after_chunk)
|
||||
break;
|
||||
for (j = 0; j < byte_run; j++, pixel_countdown--) {
|
||||
palette_idx1 = buf[stream_ptr++];
|
||||
pixels[pixel_ptr++] = palette_idx1;
|
||||
@@ -366,8 +356,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
stream_ptr++;
|
||||
pixel_countdown = s->avctx->width;
|
||||
while (pixel_countdown > 0) {
|
||||
if (stream_ptr + 1 > stream_ptr_after_chunk)
|
||||
break;
|
||||
byte_run = (signed char)(buf[stream_ptr++]);
|
||||
if (byte_run > 0) {
|
||||
palette_idx1 = buf[stream_ptr++];
|
||||
@@ -382,8 +370,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
} else { /* copy bytes if byte_run < 0 */
|
||||
byte_run = -byte_run;
|
||||
CHECK_PIXEL_PTR(byte_run);
|
||||
if (stream_ptr + byte_run > stream_ptr_after_chunk)
|
||||
break;
|
||||
for (j = 0; j < byte_run; j++) {
|
||||
palette_idx1 = buf[stream_ptr++];
|
||||
pixels[pixel_ptr++] = palette_idx1;
|
||||
@@ -401,9 +387,10 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
|
||||
case FLI_COPY:
|
||||
/* copy the chunk (uncompressed frame) */
|
||||
if (chunk_size - 6 != s->avctx->width * s->avctx->height) {
|
||||
if (chunk_size - 6 > s->avctx->width * s->avctx->height) {
|
||||
av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
|
||||
"has incorrect size, skipping chunk\n", chunk_size - 6);
|
||||
"bigger than image, skipping chunk\n", chunk_size - 6);
|
||||
stream_ptr += chunk_size - 6;
|
||||
} else {
|
||||
for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
|
||||
y_ptr += s->frame.linesize[0]) {
|
||||
@@ -416,6 +403,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
|
||||
case FLI_MINI:
|
||||
/* some sort of a thumbnail? disregard this chunk... */
|
||||
stream_ptr += chunk_size - 6;
|
||||
break;
|
||||
|
||||
default:
|
||||
@@ -423,8 +411,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
break;
|
||||
}
|
||||
|
||||
stream_ptr = stream_ptr_after_chunk;
|
||||
|
||||
frame_size -= chunk_size;
|
||||
num_chunks--;
|
||||
}
|
||||
|
@@ -135,7 +135,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
uint32_t *luma1,*luma2,*cb,*cr;
|
||||
uint32_t offs[4];
|
||||
int i, j, is_chroma, planes;
|
||||
enum PixelFormat pix_fmt;
|
||||
|
||||
|
||||
header = AV_RL32(buf);
|
||||
version = header & 0xff;
|
||||
@@ -152,16 +152,12 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
if (header_size == 8)
|
||||
buf+=4;
|
||||
|
||||
pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P;
|
||||
if (avctx->pix_fmt != pix_fmt && f->data[0]) {
|
||||
avctx->release_buffer(avctx, f);
|
||||
}
|
||||
avctx->pix_fmt = pix_fmt;
|
||||
|
||||
switch(version) {
|
||||
case 0:
|
||||
default:
|
||||
/* Fraps v0 is a reordered YUV420 */
|
||||
avctx->pix_fmt = PIX_FMT_YUVJ420P;
|
||||
|
||||
if ( (buf_size != avctx->width*avctx->height*3/2+header_size) &&
|
||||
(buf_size != header_size) ) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
@@ -209,6 +205,8 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
case 1:
|
||||
/* Fraps v1 is an upside-down BGR24 */
|
||||
avctx->pix_fmt = PIX_FMT_BGR24;
|
||||
|
||||
if ( (buf_size != avctx->width*avctx->height*3+header_size) &&
|
||||
(buf_size != header_size) ) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
@@ -243,6 +241,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
* Fraps v2 is Huffman-coded YUV420 planes
|
||||
* Fraps v4 is virtually the same
|
||||
*/
|
||||
avctx->pix_fmt = PIX_FMT_YUVJ420P;
|
||||
planes = 3;
|
||||
f->reference = 1;
|
||||
f->buffer_hints = FF_BUFFER_HINTS_VALID |
|
||||
@@ -287,6 +286,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
case 3:
|
||||
case 5:
|
||||
/* Virtually the same as version 4, but is for RGB24 */
|
||||
avctx->pix_fmt = PIX_FMT_BGR24;
|
||||
planes = 3;
|
||||
f->reference = 1;
|
||||
f->buffer_hints = FF_BUFFER_HINTS_VALID |
|
||||
|
@@ -75,20 +75,6 @@ static inline int get_ue_golomb(GetBitContext *gb){
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
|
||||
*/
|
||||
static inline unsigned get_ue_golomb_long(GetBitContext *gb)
|
||||
{
|
||||
unsigned buf, log;
|
||||
|
||||
buf = show_bits_long(gb, 32);
|
||||
log = 31 - av_log2(buf);
|
||||
skip_bits_long(gb, log);
|
||||
|
||||
return get_bits_long(gb, log + 1) - 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* read unsigned exp golomb code, constraint to a max of 31.
|
||||
* the return value is undefined if the stored value exceeds 31.
|
||||
@@ -123,7 +109,7 @@ static inline int svq3_get_ue_golomb(GetBitContext *gb){
|
||||
}else{
|
||||
int ret = 1;
|
||||
|
||||
do {
|
||||
while (1) {
|
||||
buf >>= 32 - 8;
|
||||
LAST_SKIP_BITS(re, gb, FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));
|
||||
|
||||
@@ -135,7 +121,7 @@ static inline int svq3_get_ue_golomb(GetBitContext *gb){
|
||||
ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
|
||||
UPDATE_CACHE(re, gb);
|
||||
buf = GET_CACHE(re, gb);
|
||||
} while (ret);
|
||||
}
|
||||
|
||||
CLOSE_READER(re, gb);
|
||||
return ret - 1;
|
||||
@@ -301,7 +287,7 @@ static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, int
|
||||
return buf;
|
||||
}else{
|
||||
int i;
|
||||
for (i = 0; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) {
|
||||
for(i=0; SHOW_UBITS(re, gb, 1) == 0; i++){
|
||||
LAST_SKIP_BITS(re, gb, 1);
|
||||
UPDATE_CACHE(re, gb);
|
||||
}
|
||||
|
@@ -599,6 +599,10 @@ retry:
|
||||
s->current_picture.pict_type= s->pict_type;
|
||||
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|
||||
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
|
@@ -438,13 +438,6 @@ retry:
|
||||
if (ret < 0){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
|
||||
return -1;
|
||||
} else if ((s->width != avctx->coded_width ||
|
||||
s->height != avctx->coded_height ||
|
||||
(s->width + 15) >> 4 != s->mb_width ||
|
||||
(s->height + 15) >> 4 != s->mb_height) &&
|
||||
(HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))) {
|
||||
av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
|
||||
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
|
||||
}
|
||||
|
||||
avctx->has_b_frames= !s->low_delay;
|
||||
@@ -571,7 +564,8 @@ retry:
|
||||
#if HAVE_MMX
|
||||
if (s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (av_get_cpu_flags() & AV_CPU_FLAG_MMX)) {
|
||||
avctx->idct_algo= FF_IDCT_XVIDMMX;
|
||||
ff_dct_common_init(s);
|
||||
avctx->coded_width= 0; // force reinit
|
||||
// dsputil_init(&s->dsp, avctx);
|
||||
s->picture_number=0;
|
||||
}
|
||||
#endif
|
||||
@@ -585,12 +579,6 @@ retry:
|
||||
|| s->height != avctx->coded_height) {
|
||||
/* H.263 could change picture size any time */
|
||||
ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat
|
||||
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME)) {
|
||||
av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
|
||||
return -1; // width / height changed during parallelized decoding
|
||||
}
|
||||
|
||||
s->parse_context.buffer=0;
|
||||
MPV_common_end(s);
|
||||
s->parse_context= pc;
|
||||
@@ -610,10 +598,18 @@ retry:
|
||||
|
||||
/* skip B-frames if we don't have reference frames */
|
||||
if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip b frames if we are in a hurry */
|
||||
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
return get_consumed_bytes(s, buf_size);
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
|
||||
if(s->next_p_frame_damaged){
|
||||
if(s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|
@@ -106,12 +106,12 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){
}

return 0;
} //FIXME cleanup like check_intra_pred_mode
} //FIXME cleanup like ff_h264_check_intra_pred_mode

/**
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
MpegEncContext * const s = &h->s;
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};

@@ -131,7 +131,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma){

if((h->left_samples_available&0x8080) != 0x8080){
mode= left[ mode ];
if(is_chroma && (h->left_samples_available&0x8080)){ //mad cow disease mode, aka MBAFF + constrained_intra_pred
if(h->left_samples_available&0x8080){ //mad cow disease mode, aka MBAFF + constrained_intra_pred
mode= ALZHEIMER_DC_L0T_PRED8x8 + (!(h->left_samples_available&0x8000)) + 2*(mode == DC_128_PRED8x8);
}
if(mode<0){

@@ -183,26 +183,18 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_l
i-= RS;
}

bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data
si=h->rbsp_buffer_size[bufidx];
av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE+MAX_MBPAIR_SIZE);
dst= h->rbsp_buffer[bufidx];
if(si != h->rbsp_buffer_size[bufidx])
memset(dst + length, 0, FF_INPUT_BUFFER_PADDING_SIZE+MAX_MBPAIR_SIZE);

if (dst == NULL){
return NULL;
}

if(i>=length-1){ //no escaped 0
*dst_length= length;
*consumed= length+1; //+1 for the header
if(h->s.avctx->flags2 & CODEC_FLAG2_FAST){
return src;
}else{
memcpy(dst, src, length);
return dst;
}
return src;
}

bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data
av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE);
dst= h->rbsp_buffer[bufidx];

if (dst == NULL){
return NULL;
}

//printf("decoding esc\n");

@@ -1005,12 +997,8 @@ static av_cold void common_init(H264Context *h){
s->height = s->avctx->height;
s->codec_id= s->avctx->codec->id;

s->avctx->bits_per_raw_sample = 8;

ff_h264dsp_init(&h->h264dsp,
s->avctx->bits_per_raw_sample);
ff_h264_pred_init(&h->hpc, s->codec_id,
s->avctx->bits_per_raw_sample);
ff_h264dsp_init(&h->h264dsp, 8);
ff_h264_pred_init(&h->hpc, s->codec_id, 8);

h->dequant_coeff_pps= -1;
s->unrestricted_mv=1;

@@ -1022,20 +1010,17 @@ static av_cold void common_init(H264Context *h){
memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
}

int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
int ff_h264_decode_extradata(H264Context *h)
{
AVCodecContext *avctx = h->s.avctx;

if(!buf || size <= 0)
return -1;

if(buf[0] == 1){
if(avctx->extradata[0] == 1){
int i, cnt, nalsize;
const unsigned char *p = buf;
unsigned char *p = avctx->extradata;

h->is_avc = 1;

if(size < 7) {
if(avctx->extradata_size < 7) {
av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
return -1;
}

@@ -1047,8 +1032,6 @@ int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
p += 6;
for (i = 0; i < cnt; i++) {
nalsize = AV_RB16(p) + 2;
if(nalsize > size - (p-buf))
return -1;
if(decode_nal_units(h, p, nalsize) < 0) {
av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i);
return -1;

@@ -1059,8 +1042,6 @@ int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
cnt = *(p++); // Number of pps
for (i = 0; i < cnt; i++) {
nalsize = AV_RB16(p) + 2;
if(nalsize > size - (p-buf))
return -1;
if (decode_nal_units(h, p, nalsize) < 0) {
av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i);
return -1;

@@ -1068,10 +1049,10 @@ int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
p += nalsize;
}
// Now store right nal length size, that will be use to parse all other nals
h->nal_length_size = (buf[4] & 0x03) + 1;
h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
} else {
h->is_avc = 0;
if(decode_nal_units(h, buf, size) < 0)
if(decode_nal_units(h, avctx->extradata, avctx->extradata_size) < 0)
return -1;
}
return 0;

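The hunks above switch ff_h264_decode_extradata() from a caller-supplied buffer to avctx->extradata. For readers who do not have the avcC layout in their head, here is a minimal standalone sketch of the record walk the diff relies on (0x01 version byte, 6-byte header, SPS/PPS counts, big-endian NAL sizes, NAL length size in the low two bits of byte 4). The parse_avcc() name and the handle_nal callback are illustrative assumptions, not libavcodec API; the real decoder feeds each NAL through decode_nal_units() instead.

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical callback standing in for decode_nal_units(). */
    typedef int (*handle_nal)(const uint8_t *nal, int size, void *opaque);

    /* Sketch of an avcC (ISO/IEC 14496-15) extradata walk, mirroring the
     * bounds checks visible in the hunks above. */
    static int parse_avcc(const uint8_t *buf, int size,
                          int *nal_length_size, handle_nal cb, void *opaque)
    {
        const uint8_t *p = buf;
        int i, cnt, nalsize;

        if (!buf || size < 7 || buf[0] != 1)
            return -1;

        cnt = buf[5] & 0x1f;                 /* number of SPS NAL units */
        p  += 6;
        for (i = 0; i < cnt; i++) {
            if (size - (int)(p - buf) < 2)
                return -1;
            nalsize = (p[0] << 8 | p[1]) + 2;        /* AV_RB16(p) + 2 */
            if (nalsize > size - (int)(p - buf))
                return -1;
            if (cb(p + 2, nalsize - 2, opaque) < 0)  /* payload after 2-byte size */
                return -1;
            p += nalsize;
        }

        if (p >= buf + size)
            return -1;
        cnt = *p++;                          /* number of PPS NAL units */
        for (i = 0; i < cnt; i++) {
            if (size - (int)(p - buf) < 2)
                return -1;
            nalsize = (p[0] << 8 | p[1]) + 2;
            if (nalsize > size - (int)(p - buf))
                return -1;
            if (cb(p + 2, nalsize - 2, opaque) < 0)
                return -1;
            p += nalsize;
        }

        /* Size of the length prefix used by every following NAL unit. */
        *nal_length_size = (buf[4] & 0x03) + 1;
        return 0;
    }
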
@@ -1115,7 +1096,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
}

if(avctx->extradata_size > 0 && avctx->extradata &&
ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size))
ff_h264_decode_extradata(h))
return -1;

if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){

@@ -1184,10 +1165,7 @@ static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContex
memcpy(&h->s + 1, &h1->s + 1, sizeof(H264Context) - sizeof(MpegEncContext)); //copy all fields after MpegEnc
memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
if (ff_h264_alloc_tables(h) < 0) {
av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
return AVERROR(ENOMEM);
}
ff_h264_alloc_tables(h);
context_init(h);

for(i=0; i<2; i++){

@@ -1425,7 +1403,7 @@ static void decode_postinit(H264Context *h, int setup_finished){
pics = 0;
while(h->delayed_pic[pics]) pics++;

av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
assert(pics <= MAX_DELAYED_PIC_COUNT);

h->delayed_pic[pics++] = cur;
if(cur->reference == 0)

@@ -1870,30 +1848,15 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
tmp_y[j] = get_bits(&gb, bit_depth);
}
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
if (!h->sps.chroma_format_idc) {
for (i = 0; i < 8; i++) {
uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize);
for (j = 0; j < 8; j++) {
tmp_cb[j] = 1 << (bit_depth - 1);
}
}
for (i = 0; i < 8; i++) {
uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize);
for (j = 0; j < 8; j++) {
tmp_cr[j] = 1 << (bit_depth - 1);
}
}
} else {
for (i = 0; i < 8; i++) {
uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize);
for (j = 0; j < 8; j++)
tmp_cb[j] = get_bits(&gb, bit_depth);
}
for (i = 0; i < 8; i++) {
uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize);
for (j = 0; j < 8; j++)
tmp_cr[j] = get_bits(&gb, bit_depth);
}
for (i = 0; i < 8; i++) {
uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize);
for (j = 0; j < 8; j++)
tmp_cb[j] = get_bits(&gb, bit_depth);
}
for (i = 0; i < 8; i++) {
uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize);
for (j = 0; j < 8; j++)
tmp_cr[j] = get_bits(&gb, bit_depth);
}
}
} else {

@@ -1901,16 +1864,9 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
memcpy(dest_y + i* linesize, h->mb + i*8, 16);
}
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
if (!h->sps.chroma_format_idc) {
for (i = 0; i < 8; i++) {
memset(dest_cb + i*uvlinesize, 128, 8);
memset(dest_cr + i*uvlinesize, 128, 8);
}
} else {
for (i = 0; i < 8; i++) {
memcpy(dest_cb + i*uvlinesize, h->mb + 128 + i*4, 8);
memcpy(dest_cr + i*uvlinesize, h->mb + 160 + i*4, 8);
}
for (i=0; i<8; i++) {
memcpy(dest_cb+ i*uvlinesize, h->mb + 128 + i*4, 8);
memcpy(dest_cr+ i*uvlinesize, h->mb + 160 + i*4, 8);
}
}
}

@@ -2217,11 +2173,7 @@ static void implicit_weight_table(H264Context *h, int field){
}

if(field < 0){
if (s->picture_structure == PICT_FRAME) {
cur_poc = s->current_picture_ptr->poc;
} else {
cur_poc = s->current_picture_ptr->field_poc[s->picture_structure - 1];
}
cur_poc = s->current_picture_ptr->poc;
if( h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF
&& h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2*cur_poc){
h->use_weight= 0;

@@ -2246,17 +2198,15 @@ static void implicit_weight_table(H264Context *h, int field){
for(ref0=ref_start; ref0 < ref_count0; ref0++){
int poc0 = h->ref_list[0][ref0].poc;
for(ref1=ref_start; ref1 < ref_count1; ref1++){
int w = 32;
if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
int poc1 = h->ref_list[1][ref1].poc;
int td = av_clip(poc1 - poc0, -128, 127);
if(td){
int tb = av_clip(cur_poc - poc0, -128, 127);
int tx = (16384 + (FFABS(td) >> 1)) / td;
int dist_scale_factor = (tb*tx + 32) >> 8;
if(dist_scale_factor >= -64 && dist_scale_factor <= 128)
w = 64 - dist_scale_factor;
}
int poc1 = h->ref_list[1][ref1].poc;
int td = av_clip(poc1 - poc0, -128, 127);
int w= 32;
if(td){
int tb = av_clip(cur_poc - poc0, -128, 127);
int tx = (16384 + (FFABS(td) >> 1)) / td;
int dist_scale_factor = (tb*tx + 32) >> 8;
if(dist_scale_factor >= -64 && dist_scale_factor <= 128)
w = 64 - dist_scale_factor;
}
if(field<0){
h->implicit_weight[ref0][ref1][0]=
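
The implicit_weight_table() hunks above fold the POC distance scaling into a long-term-reference check. As a quick reference, here is a standalone sketch of the arithmetic that yields the implicit B-prediction weight; the clip and absolute-value helpers are written out here (libavcodec uses av_clip() and FFABS()), and cur_poc/poc0/poc1 are the picture order counts seen in the hunk.

    #include <stdlib.h>

    static int clip_int(int v, int lo, int hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    /* H.264 implicit weighted prediction: derive one of the two bi-prediction
     * weights from the POC distances; the other list's weight is 64 - w. */
    static int implicit_weight(int cur_poc, int poc0, int poc1)
    {
        int w  = 32;                          /* fall back to equal weighting */
        int td = clip_int(poc1 - poc0, -128, 127);
        if (td) {
            int tb = clip_int(cur_poc - poc0, -128, 127);
            int tx = (16384 + (abs(td) >> 1)) / td;
            int dist_scale_factor = (tb * tx + 32) >> 8;
            if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
                w = 64 - dist_scale_factor;
        }
        return w;
    }
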
@@ -2283,7 +2233,7 @@ static void idr(H264Context *h){
|
||||
static void flush_dpb(AVCodecContext *avctx){
|
||||
H264Context *h= avctx->priv_data;
|
||||
int i;
|
||||
for(i=0; i<=MAX_DELAYED_PIC_COUNT; i++) {
|
||||
for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
|
||||
if(h->delayed_pic[i])
|
||||
h->delayed_pic[i]->reference= 0;
|
||||
h->delayed_pic[i]= NULL;
|
||||
@@ -2620,14 +2570,13 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
if (s->context_initialized
|
||||
&& ( s->width != s->avctx->width || s->height != s->avctx->height
|
||||
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
|
||||
if(h != h0 || (HAVE_THREADS && h->s.avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
if(h != h0) {
|
||||
av_log_missing_feature(s->avctx, "Width/height changing with threads is", 0);
|
||||
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
|
||||
return -1; // width / height changed during parallelized decoding
|
||||
}
|
||||
free_tables(h, 0);
|
||||
flush_dpb(s->avctx);
|
||||
MPV_common_end(s);
|
||||
h->list_count = 0;
|
||||
}
|
||||
if (!s->context_initialized) {
|
||||
if (h != h0) {
|
||||
@@ -2689,10 +2638,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->prev_interlaced_frame = 1;
|
||||
|
||||
init_scan_tables(h);
|
||||
if (ff_h264_alloc_tables(h) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Could not allocate memory for h264\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
ff_h264_alloc_tables(h);
|
||||
|
||||
if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_SLICE)) {
|
||||
if (context_init(h) < 0) {
|
||||
@@ -2800,9 +2746,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
s0->first_field = FIELD_PICTURE;
|
||||
|
||||
} else {
|
||||
if (s0->current_picture_ptr->frame_num != h->frame_num) {
|
||||
if (h->nal_ref_idc &&
|
||||
s0->current_picture_ptr->reference &&
|
||||
s0->current_picture_ptr->frame_num != h->frame_num) {
|
||||
/*
|
||||
* This and previous field had
|
||||
* This and previous field were reference, but had
|
||||
* different frame_nums. Consider this field first in
|
||||
* pair. Throw away previous field except for reference
|
||||
* purposes.
|
||||
@@ -2886,8 +2834,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->ref_count[1]= h->pps.ref_count[1];
|
||||
|
||||
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
|
||||
unsigned max= s->picture_structure == PICT_FRAME ? 15 : 31;
|
||||
|
||||
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
|
||||
h->direct_spatial_mv_pred= get_bits1(&s->gb);
|
||||
}
|
||||
@@ -2897,29 +2843,26 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
|
||||
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
|
||||
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
|
||||
}
|
||||
|
||||
if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
|
||||
h->ref_count[0] = h->ref_count[1] = 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
|
||||
h->ref_count[0]= h->ref_count[1]= 1;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if(h->slice_type_nos == AV_PICTURE_TYPE_B)
|
||||
h->list_count= 2;
|
||||
else
|
||||
h->list_count= 1;
|
||||
}else
|
||||
h->ref_count[1]= h->ref_count[0]= h->list_count= 0;
|
||||
h->list_count= 0;
|
||||
|
||||
if(!default_ref_list_done){
|
||||
ff_h264_fill_default_ref_list(h);
|
||||
}
|
||||
|
||||
if(h->slice_type_nos!=AV_PICTURE_TYPE_I && ff_h264_decode_ref_pic_list_reordering(h) < 0) {
|
||||
h->ref_count[1]= h->ref_count[0]= 0;
|
||||
if(h->slice_type_nos!=AV_PICTURE_TYPE_I && ff_h264_decode_ref_pic_list_reordering(h) < 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(h->slice_type_nos!=AV_PICTURE_TYPE_I){
|
||||
s->last_picture_ptr= &h->ref_list[0][0];
|
||||
@@ -3725,7 +3668,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
s->workaround_bugs |= FF_BUG_TRUNCATED;
|
||||
|
||||
if(!(s->workaround_bugs & FF_BUG_TRUNCATED)){
|
||||
while(dst_length > 0 && ptr[dst_length - 1] == 0)
|
||||
while(ptr[dst_length - 1] == 0 && dst_length > 0)
|
||||
dst_length--;
|
||||
}
|
||||
bit_length= !dst_length ? 0 : (8*dst_length - ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
|
||||
@@ -3748,19 +3691,19 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
switch (hx->nal_unit_type) {
|
||||
case NAL_SPS:
|
||||
case NAL_PPS:
|
||||
nals_needed = nal_index;
|
||||
break;
|
||||
case NAL_IDR_SLICE:
|
||||
case NAL_SLICE:
|
||||
init_get_bits(&hx->s.gb, ptr, bit_length);
|
||||
if (!get_ue_golomb(&hx->s.gb))
|
||||
nals_needed = nal_index;
|
||||
nals_needed = nal_index;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
//FIXME do not discard SEI id
|
||||
if(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
|
||||
if(
|
||||
#if FF_API_HURRY_UP
|
||||
(s->hurry_up == 1 && h->nal_ref_idc == 0) ||
|
||||
#endif
|
||||
(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
|
||||
continue;
|
||||
|
||||
again:
|
||||
@@ -3797,6 +3740,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
|
||||
if(hx->redundant_pic_count==0
|
||||
#if FF_API_HURRY_UP
|
||||
&& hx->s.hurry_up < 5
|
||||
#endif
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
|
||||
@@ -3834,6 +3780,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
|
||||
if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
|
||||
&& s->context_initialized
|
||||
#if FF_API_HURRY_UP
|
||||
&& s->hurry_up < 5
|
||||
#endif
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
|
||||
@@ -3972,7 +3921,11 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){
|
||||
if (avctx->skip_frame >= AVDISCARD_NONREF)
|
||||
if (avctx->skip_frame >= AVDISCARD_NONREF
|
||||
#if FF_API_HURRY_UP
|
||||
|| s->hurry_up
|
||||
#endif
|
||||
)
|
||||
return 0;
|
||||
av_log(avctx, AV_LOG_ERROR, "no frame!\n");
|
||||
return -1;
|
||||
|
@@ -53,8 +53,6 @@
|
||||
|
||||
#define MAX_DELAYED_PIC_COUNT 16
|
||||
|
||||
#define MAX_MBPAIR_SIZE (256*1024) // a tighter bound could be calculated if someone cares about a few bytes
|
||||
|
||||
/* Compiling in interlaced support reduces the speed
|
||||
* of progressive decoding by about 2%. */
|
||||
#define ALLOW_INTERLACE
|
||||
@@ -101,7 +99,7 @@
|
||||
*/
|
||||
#define DELAYED_PIC_REF 4
|
||||
|
||||
#define QP_MAX_NUM (51 + 4*6) // The maximum supported qp
|
||||
#define QP_MAX_NUM (51 + 2*6) // The maximum supported qp
|
||||
|
||||
/* NAL unit types */
|
||||
enum {
|
||||
@@ -227,7 +225,7 @@ typedef struct PPS{
|
||||
int transform_8x8_mode; ///< transform_8x8_mode_flag
|
||||
uint8_t scaling_matrix4[6][16];
|
||||
uint8_t scaling_matrix8[6][64];
|
||||
uint8_t chroma_qp_table[2][QP_MAX_NUM+1]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
|
||||
uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
|
||||
int chroma_qp_diff;
|
||||
}PPS;
|
||||
|
||||
@@ -584,7 +582,7 @@ typedef struct H264Context{
|
||||
}H264Context;
|
||||
|
||||
|
||||
extern const uint8_t ff_h264_chroma_qp[5][QP_MAX_NUM+1]; ///< One chroma qp table for each possible bit depth (8-12).
|
||||
extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1]; ///< One chroma qp table for each supported bit depth (8, 9, 10).
|
||||
|
||||
/**
|
||||
* Decode SEI
|
||||
@@ -658,12 +656,12 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h);
|
||||
/**
|
||||
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma);
|
||||
int ff_h264_check_intra_pred_mode(H264Context *h, int mode);
|
||||
|
||||
void ff_h264_write_back_intra_pred_mode(H264Context *h);
|
||||
void ff_h264_hl_decode_mb(H264Context *h);
|
||||
int ff_h264_frame_start(H264Context *h);
|
||||
int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size);
|
||||
int ff_h264_decode_extradata(H264Context *h);
|
||||
av_cold int ff_h264_decode_init(AVCodecContext *avctx);
|
||||
av_cold int ff_h264_decode_end(AVCodecContext *avctx);
|
||||
av_cold void ff_h264_decode_init_vlc(void);
|
||||
@@ -1070,7 +1068,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
|
||||
AV_ZERO32(h->mv_cache [list][scan8[0] + 4 - 1*8]);
|
||||
h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
|
||||
}
|
||||
if(h->ref_cache[list][scan8[0] + 2 - 1*8] < 0 || h->ref_cache[list][scan8[0] + 4 - 1*8] < 0){
|
||||
if(h->ref_cache[list][scan8[0] + 4 - 1*8] < 0){
|
||||
if(USES_LIST(topleft_type, list)){
|
||||
const int b_xy = h->mb2b_xy [topleft_xy] + 3 + h->b_stride + (h->topleft_partition & 2*h->b_stride);
|
||||
const int b8_xy= 4*topleft_xy + 1 + (h->topleft_partition & 2);
|
||||
|
@@ -1959,8 +1959,6 @@ decode_intra_mb:
|
||||
}
|
||||
|
||||
// The pixels are stored in the same order as levels in h->mb array.
|
||||
if ((int) (h->cabac.bytestream_end - ptr) < mb_size)
|
||||
return -1;
|
||||
memcpy(h->mb, ptr, mb_size); ptr+=mb_size;
|
||||
|
||||
ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);
|
||||
@@ -2005,14 +2003,14 @@ decode_intra_mb:
|
||||
ff_h264_write_back_intra_pred_mode(h);
|
||||
if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1;
|
||||
} else {
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode, 0 );
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode );
|
||||
if( h->intra16x16_pred_mode < 0 ) return -1;
|
||||
}
|
||||
if(decode_chroma){
|
||||
h->chroma_pred_mode_table[mb_xy] =
|
||||
pred_mode = decode_cabac_mb_chroma_pre_mode( h );
|
||||
|
||||
pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode, 1 );
|
||||
pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode );
|
||||
if( pred_mode < 0 ) return -1;
|
||||
h->chroma_pred_mode= pred_mode;
|
||||
} else {
|
||||
|
@@ -238,18 +238,17 @@ static inline int pred_non_zero_count(H264Context *h, int n){
|
||||
}
|
||||
|
||||
static av_cold void init_cavlc_level_tab(void){
|
||||
int suffix_length;
|
||||
int suffix_length, mask;
|
||||
unsigned int i;
|
||||
|
||||
for(suffix_length=0; suffix_length<7; suffix_length++){
|
||||
for(i=0; i<(1<<LEVEL_TAB_BITS); i++){
|
||||
int prefix= LEVEL_TAB_BITS - av_log2(2*i);
|
||||
int level_code= (prefix<<suffix_length) + (i>>(LEVEL_TAB_BITS-prefix-1-suffix_length)) - (1<<suffix_length);
|
||||
|
||||
mask= -(level_code&1);
|
||||
level_code= (((2+level_code)>>1) ^ mask) - mask;
|
||||
if(prefix + 1 + suffix_length <= LEVEL_TAB_BITS){
|
||||
int level_code = (prefix << suffix_length) +
|
||||
(i >> (av_log2(i) - suffix_length)) - (1 << suffix_length);
|
||||
int mask = -(level_code&1);
|
||||
level_code = (((2 + level_code) >> 1) ^ mask) - mask;
|
||||
cavlc_level_tab[suffix_length][i][0]= level_code;
|
||||
cavlc_level_tab[suffix_length][i][1]= prefix + 1 + suffix_length;
|
||||
}else if(prefix + 1 <= LEVEL_TAB_BITS){
|
||||
@@ -736,12 +735,12 @@ decode_intra_mb:
|
||||
if( ff_h264_check_intra4x4_pred_mode(h) < 0)
|
||||
return -1;
|
||||
}else{
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode, 0);
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode);
|
||||
if(h->intra16x16_pred_mode < 0)
|
||||
return -1;
|
||||
}
|
||||
if(decode_chroma){
|
||||
pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb), 1);
|
||||
pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb));
|
||||
if(pred_mode < 0)
|
||||
return -1;
|
||||
h->chroma_pred_mode= pred_mode;
|
||||
|
@@ -89,8 +89,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
|
||||
for(j=start; j<end; j++){
|
||||
if(4*h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference&3) == poc){
|
||||
int cur_ref= mbafi ? (j-16)^field : j;
|
||||
if(ref1->mbaff)
|
||||
map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
|
||||
map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
|
||||
if(rfield == field || !interl)
|
||||
map[list][old_ref] = cur_ref;
|
||||
break;
|
||||
@@ -253,10 +252,6 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
|
||||
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
|
||||
b8_stride = 2+4*s->mb_stride;
|
||||
b4_stride *= 6;
|
||||
if(IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])){
|
||||
mb_type_col[0] &= ~MB_TYPE_INTERLACED;
|
||||
mb_type_col[1] &= ~MB_TYPE_INTERLACED;
|
||||
}
|
||||
|
||||
sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
|
||||
if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
|
||||
|
@@ -251,13 +251,7 @@ static int h264_parse(AVCodecParserContext *s,
|
||||
h->got_first = 1;
|
||||
if (avctx->extradata_size) {
|
||||
h->s.avctx = avctx;
|
||||
// must be done like in decoder, otherwise opening the parser,
|
||||
// letting it create extradata and then closing and opening again
|
||||
// will cause has_b_frames to be always set.
|
||||
// Note that estimate_timings_from_pts does exactly this.
|
||||
if (!avctx->has_b_frames)
|
||||
h->s.low_delay = 1;
|
||||
ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
|
||||
ff_h264_decode_extradata(h);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -70,7 +70,7 @@ static const AVRational pixel_aspect[17]={
|
||||
QP(37,d), QP(37,d), QP(37,d), QP(38,d), QP(38,d), QP(38,d),\
|
||||
QP(39,d), QP(39,d), QP(39,d), QP(39,d)
|
||||
|
||||
const uint8_t ff_h264_chroma_qp[5][QP_MAX_NUM+1] = {
|
||||
const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1] = {
|
||||
{
|
||||
CHROMA_QP_TABLE_END(8)
|
||||
},
|
||||
@@ -83,19 +83,6 @@ const uint8_t ff_h264_chroma_qp[5][QP_MAX_NUM+1] = {
|
||||
6, 7, 8, 9, 10, 11,
|
||||
CHROMA_QP_TABLE_END(10)
|
||||
},
|
||||
{
|
||||
0, 1, 2, 3, 4, 5,
|
||||
6, 7, 8, 9, 10, 11,
|
||||
12,13,14,15, 16, 17,
|
||||
CHROMA_QP_TABLE_END(11)
|
||||
},
|
||||
{
|
||||
0, 1, 2, 3, 4, 5,
|
||||
6, 7, 8, 9, 10, 11,
|
||||
12,13,14,15, 16, 17,
|
||||
18,19,20,21, 22, 23,
|
||||
CHROMA_QP_TABLE_END(12)
|
||||
},
|
||||
};
|
||||
|
||||
static const uint8_t default_scaling4[2][16]={
|
||||
@@ -143,8 +130,8 @@ static inline int decode_hrd_parameters(H264Context *h, SPS *sps){
|
||||
get_bits(&s->gb, 4); /* bit_rate_scale */
|
||||
get_bits(&s->gb, 4); /* cpb_size_scale */
|
||||
for(i=0; i<cpb_count; i++){
|
||||
get_ue_golomb_long(&s->gb); /* bit_rate_value_minus1 */
|
||||
get_ue_golomb_long(&s->gb); /* cpb_size_value_minus1 */
|
||||
get_ue_golomb(&s->gb); /* bit_rate_value_minus1 */
|
||||
get_ue_golomb(&s->gb); /* cpb_size_value_minus1 */
|
||||
get_bits1(&s->gb); /* cbr_flag */
|
||||
}
|
||||
sps->initial_cpb_removal_delay_length = get_bits(&s->gb, 5) + 1;
|
||||
@@ -342,19 +329,10 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
|
||||
if(sps->profile_idc >= 100){ //high profile
|
||||
sps->chroma_format_idc= get_ue_golomb_31(&s->gb);
|
||||
if (sps->chroma_format_idc > 3U) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc %d is illegal\n", sps->chroma_format_idc);
|
||||
goto fail;
|
||||
} else if(sps->chroma_format_idc == 3) {
|
||||
if(sps->chroma_format_idc == 3)
|
||||
sps->residual_color_transform_flag = get_bits1(&s->gb);
|
||||
}
|
||||
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
|
||||
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
|
||||
if (sps->bit_depth_luma > 12U || sps->bit_depth_chroma > 12U) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
|
||||
sps->bit_depth_luma, sps->bit_depth_chroma);
|
||||
goto fail;
|
||||
}
|
||||
sps->transform_bypass = get_bits1(&s->gb);
|
||||
decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8);
|
||||
}else{
|
||||
@@ -387,7 +365,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
}
|
||||
|
||||
sps->ref_frame_count= get_ue_golomb_31(&s->gb);
|
||||
if(sps->ref_frame_count > MAX_PICTURE_COUNT-2 || sps->ref_frame_count > 16U){
|
||||
if(sps->ref_frame_count > MAX_PICTURE_COUNT-2 || sps->ref_frame_count >= 32U){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n");
|
||||
goto fail;
|
||||
}
|
||||
@@ -485,14 +463,10 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
|
||||
unsigned int pps_id= get_ue_golomb(&s->gb);
|
||||
PPS *pps;
|
||||
const int qp_bd_offset = 6*(h->sps.bit_depth_luma-8);
|
||||
int bits_left;
|
||||
|
||||
if(pps_id >= MAX_PPS_COUNT) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id);
|
||||
return -1;
|
||||
} else if (h->sps.bit_depth_luma > 10) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
pps= av_mallocz(sizeof(PPS));
|
||||
@@ -565,9 +539,7 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
|
||||
memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4, sizeof(pps->scaling_matrix4));
|
||||
memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8, sizeof(pps->scaling_matrix8));
|
||||
|
||||
bits_left = bit_length - get_bits_count(&s->gb);
|
||||
if (bits_left && (bits_left > 8 ||
|
||||
show_bits(&s->gb, bits_left) != 1 << (bits_left - 1))) {
|
||||
if(get_bits_count(&s->gb) < bit_length){
|
||||
pps->transform_8x8_mode= get_bits1(&s->gb);
|
||||
decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0, pps->scaling_matrix4, pps->scaling_matrix8);
|
||||
pps->chroma_qp_index_offset[1]= get_se_golomb(&s->gb); //second_chroma_qp_index_offset
|
||||
|
@@ -301,7 +301,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
|
||||
|
||||
void ff_h264_fill_mbaff_ref_list(H264Context *h){
|
||||
int list, i, j;
|
||||
for(list=0; list<h->list_count; list++){
|
||||
for(list=0; list<2; list++){ //FIXME try list_count
|
||||
for(i=0; i<h->ref_count[list]; i++){
|
||||
Picture *frame = &h->ref_list[list][i];
|
||||
Picture *field = &h->ref_list[list][16+2*i];
|
||||
@@ -678,7 +678,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb){
|
||||
}
|
||||
if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){
|
||||
unsigned int long_arg= get_ue_golomb_31(gb);
|
||||
if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG && long_arg == 16) && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
|
||||
if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode);
|
||||
return -1;
|
||||
}
|
||||
|
@@ -40,7 +40,7 @@
|
||||
#undef BIT_DEPTH
|
||||
|
||||
static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
|
||||
const unsigned lt = src[-1-1*stride];
|
||||
const int lt= src[-1-1*stride];
|
||||
LOAD_TOP_EDGE
|
||||
LOAD_TOP_RIGHT_EDGE
|
||||
uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
|
||||
@@ -55,7 +55,7 @@ static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int st
|
||||
}
|
||||
|
||||
static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
|
||||
const unsigned lt = src[-1-1*stride];
|
||||
const int lt= src[-1-1*stride];
|
||||
LOAD_LEFT_EDGE
|
||||
|
||||
AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
|
||||
@@ -292,7 +292,7 @@ static void pred16x16_tm_vp8_c(uint8_t *src, int stride){
|
||||
|
||||
static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
|
||||
int i;
|
||||
unsigned dc0;
|
||||
int dc0;
|
||||
|
||||
dc0=0;
|
||||
for(i=0;i<8; i++)
|
||||
@@ -307,7 +307,7 @@ static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
|
||||
|
||||
static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
|
||||
int i;
|
||||
unsigned dc0;
|
||||
int dc0;
|
||||
|
||||
dc0=0;
|
||||
for(i=0;i<8; i++)
|
||||
@@ -322,7 +322,7 @@ static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
|
||||
|
||||
static void pred8x8_dc_rv40_c(uint8_t *src, int stride){
|
||||
int i;
|
||||
unsigned dc0 = 0;
|
||||
int dc0=0;
|
||||
|
||||
for(i=0;i<4; i++){
|
||||
dc0+= src[-1+i*stride] + src[i-stride];
|
||||
|
@@ -120,28 +120,28 @@ static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright, int _s
|
||||
|
||||
|
||||
#define LOAD_TOP_RIGHT_EDGE\
|
||||
const unsigned av_unused t4 = topright[0];\
|
||||
const unsigned av_unused t5 = topright[1];\
|
||||
const unsigned av_unused t6 = topright[2];\
|
||||
const unsigned av_unused t7 = topright[3];\
|
||||
const int av_unused t4= topright[0];\
|
||||
const int av_unused t5= topright[1];\
|
||||
const int av_unused t6= topright[2];\
|
||||
const int av_unused t7= topright[3];\
|
||||
|
||||
#define LOAD_DOWN_LEFT_EDGE\
|
||||
const unsigned av_unused l4 = src[-1+4*stride];\
|
||||
const unsigned av_unused l5 = src[-1+5*stride];\
|
||||
const unsigned av_unused l6 = src[-1+6*stride];\
|
||||
const unsigned av_unused l7 = src[-1+7*stride];\
|
||||
const int av_unused l4= src[-1+4*stride];\
|
||||
const int av_unused l5= src[-1+5*stride];\
|
||||
const int av_unused l6= src[-1+6*stride];\
|
||||
const int av_unused l7= src[-1+7*stride];\
|
||||
|
||||
#define LOAD_LEFT_EDGE\
|
||||
const unsigned av_unused l0 = src[-1+0*stride];\
|
||||
const unsigned av_unused l1 = src[-1+1*stride];\
|
||||
const unsigned av_unused l2 = src[-1+2*stride];\
|
||||
const unsigned av_unused l3 = src[-1+3*stride];\
|
||||
const int av_unused l0= src[-1+0*stride];\
|
||||
const int av_unused l1= src[-1+1*stride];\
|
||||
const int av_unused l2= src[-1+2*stride];\
|
||||
const int av_unused l3= src[-1+3*stride];\
|
||||
|
||||
#define LOAD_TOP_EDGE\
|
||||
const unsigned av_unused t0 = src[ 0-1*stride];\
|
||||
const unsigned av_unused t1 = src[ 1-1*stride];\
|
||||
const unsigned av_unused t2 = src[ 2-1*stride];\
|
||||
const unsigned av_unused t3 = src[ 3-1*stride];\
|
||||
const int av_unused t0= src[ 0-1*stride];\
|
||||
const int av_unused t1= src[ 1-1*stride];\
|
||||
const int av_unused t2= src[ 2-1*stride];\
|
||||
const int av_unused t3= src[ 3-1*stride];\
|
||||
|
||||
static void FUNCC(pred4x4_down_right)(uint8_t *_src, const uint8_t *topright, int _stride){
|
||||
pixel *src = (pixel*)_src;
|
||||
|
@@ -82,15 +82,13 @@ typedef struct HYuvContext{
|
||||
DSPContext dsp;
|
||||
}HYuvContext;
|
||||
|
||||
#define classic_shift_luma_table_size 42
|
||||
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
|
||||
static const unsigned char classic_shift_luma[] = {
|
||||
34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
|
||||
16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
|
||||
69,68, 0
|
||||
};
|
||||
|
||||
#define classic_shift_chroma_table_size 59
|
||||
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
|
||||
static const unsigned char classic_shift_chroma[] = {
|
||||
66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
|
||||
56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
|
||||
214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
|
||||
@@ -186,7 +184,7 @@ static int read_len_table(uint8_t *dst, GetBitContext *gb){
|
||||
if(repeat==0)
|
||||
repeat= get_bits(gb, 8);
|
||||
//printf("%d %d\n", val, repeat);
|
||||
if(i+repeat > 256 || get_bits_left(gb) < 0) {
|
||||
if(i+repeat > 256) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -368,10 +366,10 @@ static int read_old_huffman_tables(HYuvContext *s){
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
|
||||
init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
|
||||
init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
|
||||
if(read_len_table(s->len[0], &gb)<0)
|
||||
return -1;
|
||||
init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
|
||||
init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
|
||||
if(read_len_table(s->len[1], &gb)<0)
|
||||
return -1;
|
||||
|
||||
@@ -517,7 +515,7 @@ s->bgr32=1;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return AVERROR_INVALIDDATA;
|
||||
assert(0);
|
||||
}
|
||||
|
||||
alloc_temp(s);
|
||||
|
@@ -72,7 +72,6 @@ typedef struct IdcinContext {
|
||||
hnode huff_nodes[256][HUF_TOKENS*2];
|
||||
int num_huff_nodes[256];
|
||||
|
||||
uint32_t pal[256];
|
||||
} IdcinContext;
|
||||
|
||||
/*
|
||||
@@ -215,7 +214,7 @@ static int idcin_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
IdcinContext *s = avctx->priv_data;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
AVPaletteControl *palette_control = avctx->palctrl;
|
||||
|
||||
s->buf = buf;
|
||||
s->size = buf_size;
|
||||
@@ -230,12 +229,13 @@ static int idcin_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
idcin_decode_vlcs(s);
|
||||
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
memcpy(s->frame.data[1], palette_control->palette, PALETTE_COUNT * 4);
|
||||
/* If palette changed inform application*/
|
||||
if (palette_control->palette_changed) {
|
||||
palette_control->palette_changed = 0;
|
||||
s->frame.palette_has_changed = 1;
|
||||
}
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->frame;
|
||||
|
@@ -176,13 +176,7 @@ static int extract_header(AVCodecContext *const avctx,
|
||||
const uint8_t *buf;
|
||||
unsigned buf_size;
|
||||
IffContext *s = avctx->priv_data;
|
||||
int palette_size;
|
||||
|
||||
if (avctx->extradata_size < 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
|
||||
if (avpkt) {
|
||||
int image_size;
|
||||
@@ -198,6 +192,8 @@ static int extract_header(AVCodecContext *const avctx,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else {
|
||||
if (avctx->extradata_size < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
buf = avctx->extradata;
|
||||
buf_size = bytestream_get_be16(&buf);
|
||||
if (buf_size <= 1 || palette_size < 0) {
|
||||
@@ -285,12 +281,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
int err;
|
||||
|
||||
if (avctx->bits_per_coded_sample <= 8) {
|
||||
int palette_size;
|
||||
|
||||
if (avctx->extradata_size >= 2)
|
||||
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
else
|
||||
palette_size = 0;
|
||||
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
|
||||
avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
|
||||
(avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
|
||||
} else if (avctx->bits_per_coded_sample <= 32) {
|
||||
|
@@ -104,15 +104,10 @@ static VLC_TYPE vlc_tables[VLC_TABLES_SIZE][2];
|
||||
|
||||
static av_cold int imc_decode_init(AVCodecContext * avctx)
|
||||
{
|
||||
int i, j, ret;
|
||||
int i, j;
|
||||
IMCContext *q = avctx->priv_data;
|
||||
double r1, r2;
|
||||
|
||||
if (avctx->channels != 1) {
|
||||
av_log_ask_for_sample(avctx, "Number of channels is not supported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
q->decoder_reset = 1;
|
||||
|
||||
for(i = 0; i < BANDS; i++)
|
||||
@@ -161,10 +156,7 @@ static av_cold int imc_decode_init(AVCodecContext * avctx)
|
||||
}
|
||||
q->one_div_log2 = 1/log(2);
|
||||
|
||||
if ((ret = ff_fft_init(&q->fft, 7, 1))) {
|
||||
av_log(avctx, AV_LOG_INFO, "FFT init failed\n");
|
||||
return ret;
|
||||
}
|
||||
ff_fft_init(&q->fft, 7, 1);
|
||||
dsputil_init(&q->dsp, avctx);
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
|
||||
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
|
||||
|
@@ -424,11 +424,40 @@ const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
enum PixelFormat avcodec_get_pix_fmt(const char *name)
|
||||
{
|
||||
return av_get_pix_fmt(name);
|
||||
}
|
||||
|
||||
void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt)
|
||||
{
|
||||
av_get_pix_fmt_string(buf, buf_size, pix_fmt);
|
||||
}
|
||||
#endif
|
||||
|
||||
int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt)
|
||||
{
|
||||
return av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
|
||||
return ff_set_systematic_pal2(pal, pix_fmt);
|
||||
}
|
||||
|
||||
int ff_fill_linesize(AVPicture *picture, enum PixelFormat pix_fmt, int width)
|
||||
{
|
||||
return av_image_fill_linesizes(picture->linesize, pix_fmt, width);
|
||||
}
|
||||
|
||||
int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt,
|
||||
int height)
|
||||
{
|
||||
return av_image_fill_pointers(picture->data, pix_fmt, height, ptr, picture->linesize);
|
||||
}
|
||||
#endif
|
||||
|
||||
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
@@ -470,16 +499,6 @@ int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width,
|
||||
}
|
||||
}
|
||||
|
||||
switch (pix_fmt) {
|
||||
case PIX_FMT_RGB8:
|
||||
case PIX_FMT_BGR8:
|
||||
case PIX_FMT_RGB4_BYTE:
|
||||
case PIX_FMT_BGR4_BYTE:
|
||||
case PIX_FMT_GRAY8:
|
||||
// do not include palette for these pseudo-paletted formats
|
||||
return size;
|
||||
}
|
||||
|
||||
if (desc->flags & PIX_FMT_PAL)
|
||||
memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
|
||||
|
||||
@@ -673,6 +692,28 @@ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelForma
|
||||
return dst_pix_fmt;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
|
||||
const uint8_t *src, int src_wrap,
|
||||
int width, int height)
|
||||
{
|
||||
av_image_copy_plane(dst, dst_wrap, src, src_wrap, width, height);
|
||||
}
|
||||
|
||||
int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
|
||||
{
|
||||
return av_image_get_linesize(pix_fmt, width, plane);
|
||||
}
|
||||
|
||||
void av_picture_data_copy(uint8_t *dst_data[4], int dst_linesize[4],
|
||||
uint8_t *src_data[4], int src_linesize[4],
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
av_image_copy(dst_data, dst_linesize, src_data, src_linesize,
|
||||
pix_fmt, width, height);
|
||||
}
|
||||
#endif
|
||||
|
||||
void av_picture_copy(AVPicture *dst, const AVPicture *src,
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
|
@@ -153,13 +153,6 @@ static int ir2_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
start = 48; /* hardcoded for now */
|
||||
|
||||
if (start >= buf_size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "input buffer size too small (%d)\n", buf_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->decode_delta = buf[18];
|
||||
|
||||
/* decide whether frame uses deltas or not */
|
||||
@@ -167,8 +160,9 @@ static int ir2_decode_frame(AVCodecContext *avctx,
|
||||
for (i = 0; i < buf_size; i++)
|
||||
buf[i] = av_reverse[buf[i]];
|
||||
#endif
|
||||
start = 48; /* hardcoded for now */
|
||||
|
||||
init_get_bits(&s->gb, buf + start, (buf_size - start) * 8);
|
||||
init_get_bits(&s->gb, buf + start, buf_size - start);
|
||||
|
||||
if (s->decode_delta) { /* intraframe */
|
||||
ir2_decode_plane(s, avctx->width, avctx->height,
|
||||
|
@@ -219,10 +219,6 @@ static int decode_gop_header(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
if (band->blk_size == 8) {
|
||||
if(quant_mat >= 5){
|
||||
av_log(avctx, AV_LOG_ERROR, "quant_mat %d too large!\n", quant_mat);
|
||||
return -1;
|
||||
}
|
||||
band->intra_base = &ivi5_base_quant_8x8_intra[quant_mat][0];
|
||||
band->inter_base = &ivi5_base_quant_8x8_inter[quant_mat][0];
|
||||
band->intra_scale = &ivi5_scale_quant_8x8_intra[quant_mat][0];
|
||||
|
@@ -77,7 +77,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
|
||||
}
|
||||
if(get_bits(&s->gb, 2))
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
|
||||
s->loop_filter = get_bits1(&s->gb) * !s->avctx->lowres;
|
||||
s->loop_filter = get_bits1(&s->gb);
|
||||
if(get_bits1(&s->gb))
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
|
||||
if(get_bits1(&s->gb))
|
||||
|
@@ -69,7 +69,6 @@ typedef struct IpvideoContext {
|
||||
int stride;
|
||||
int upper_motion_limit_offset;
|
||||
|
||||
uint32_t pal[256];
|
||||
} IpvideoContext;
|
||||
|
||||
#define CHECK_STREAM_PTR(stream_ptr, stream_end, n) \
|
||||
@@ -962,7 +961,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
|
||||
|
||||
if (!s->is_16bpp) {
|
||||
/* this is PAL8, so make the palette available */
|
||||
memcpy(s->current_frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
memcpy(s->current_frame.data[1], s->avctx->palctrl->palette, PALETTE_COUNT * 4);
|
||||
|
||||
s->stride = s->current_frame.linesize[0];
|
||||
s->stream_ptr = s->buf + 14; /* data starts 14 bytes in */
|
||||
@@ -1016,6 +1015,10 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
|
||||
|
||||
s->is_16bpp = avctx->bits_per_coded_sample == 16;
|
||||
avctx->pix_fmt = s->is_16bpp ? PIX_FMT_RGB555 : PIX_FMT_PAL8;
|
||||
if (!s->is_16bpp && s->avctx->palctrl == NULL) {
|
||||
av_log(avctx, AV_LOG_ERROR, " Interplay video: palette expected.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
dsputil_init(&s->dsp, avctx);
|
||||
|
||||
@@ -1038,6 +1041,7 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
IpvideoContext *s = avctx->priv_data;
|
||||
AVPaletteControl *palette_control = avctx->palctrl;
|
||||
|
||||
/* compressed buffer needs to be large enough to at least hold an entire
|
||||
* decoding map */
|
||||
@@ -1054,16 +1058,13 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!s->is_16bpp) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
if (pal) {
|
||||
s->current_frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
ipvideo_decode_opcodes(s);
|
||||
|
||||
if (!s->is_16bpp && palette_control->palette_changed) {
|
||||
palette_control->palette_changed = 0;
|
||||
s->current_frame.palette_has_changed = 1;
|
||||
}
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->current_frame;
|
||||
|
||||
|
@@ -961,8 +961,6 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */
|
||||
s->loop_filter= get_bits1(&s->gb);
|
||||
s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter;
|
||||
if(s->avctx->lowres)
|
||||
s->loop_filter = 0;
|
||||
|
||||
s->h263_slice_structured= get_bits1(&s->gb);
|
||||
if (get_bits1(&s->gb) != 0) {
|
||||
|
@@ -321,8 +321,6 @@ int ff_j2k_dwt_init(DWTContext *s, uint16_t border[2][2], int decomp_levels, int
|
||||
int i, j, lev = decomp_levels, maxlen,
|
||||
b[2][2];
|
||||
|
||||
if ((unsigned)decomp_levels >= FF_DWT_MAX_DECLVLS)
|
||||
return AVERROR_INVALIDDATA;
|
||||
s->ndeclevels = decomp_levels;
|
||||
s->type = type;
|
||||
|
||||
|
@@ -359,7 +359,7 @@ static int get_qcx(J2kDecoderContext *s, int n, J2kQuantStyle *q)
|
||||
|
||||
if (q->quantsty == J2K_QSTY_NONE){
|
||||
n -= 3;
|
||||
if (s->buf_end - s->buf < n || 32*3 < n)
|
||||
if (s->buf_end - s->buf < n)
|
||||
return AVERROR(EINVAL);
|
||||
for (i = 0; i < n; i++)
|
||||
q->expn[i] = bytestream_get_byte(&s->buf) >> 3;
|
||||
@@ -376,7 +376,7 @@ static int get_qcx(J2kDecoderContext *s, int n, J2kQuantStyle *q)
|
||||
}
|
||||
} else{
|
||||
n = (n - 3) >> 1;
|
||||
if (s->buf_end - s->buf < n || 32*3 < n)
|
||||
if (s->buf_end - s->buf < n)
|
||||
return AVERROR(EINVAL);
|
||||
for (i = 0; i < n; i++){
|
||||
x = bytestream_get_be16(&s->buf);
|
||||
@@ -421,10 +421,6 @@ static uint8_t get_sot(J2kDecoderContext *s)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
s->curtileno = bytestream_get_be16(&s->buf); ///< Isot
|
||||
if((unsigned)s->curtileno >= s->numXtiles * s->numYtiles){
|
||||
s->curtileno=0;
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
s->buf += 4; ///< Psot (ignored)
|
||||
|
||||
@@ -965,20 +961,18 @@ static int decode_codestream(J2kDecoderContext *s)
|
||||
|
||||
static int jp2_find_codestream(J2kDecoderContext *s)
|
||||
{
|
||||
uint32_t atom_size;
|
||||
int32_t atom_size;
|
||||
int found_codestream = 0, search_range = 10;
|
||||
|
||||
// skip jpeg2k signature atom
|
||||
s->buf += 12;
|
||||
|
||||
while(!found_codestream && search_range && s->buf_end - s->buf >= 8) {
|
||||
while(!found_codestream && search_range) {
|
||||
atom_size = AV_RB32(s->buf);
|
||||
if(AV_RB32(s->buf + 4) == JP2_CODESTREAM) {
|
||||
found_codestream = 1;
|
||||
s->buf += 8;
|
||||
} else {
|
||||
if (s->buf_end - s->buf < atom_size)
|
||||
return 0;
|
||||
s->buf += atom_size;
|
||||
search_range--;
|
||||
}
|
||||
@@ -1011,8 +1005,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
// check if the image is in jp2 format
|
||||
if(s->buf_end - s->buf >= 12 &&
|
||||
(AV_RB32(s->buf) == 12) && (AV_RB32(s->buf + 4) == JP2_SIG_TYPE) &&
|
||||
if((AV_RB32(s->buf) == 12) && (AV_RB32(s->buf + 4) == JP2_SIG_TYPE) &&
|
||||
(AV_RB32(s->buf + 8) == JP2_SIG_VALUE)) {
|
||||
if(!jp2_find_codestream(s)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "couldn't find jpeg2k codestream atom\n");
|
||||
|
@@ -86,8 +86,6 @@ static inline void ff_jpegls_downscale_state(JLSState *state, int Q){
|
||||
}
|
||||
|
||||
static inline int ff_jpegls_update_state_regular(JLSState *state, int Q, int err){
|
||||
if(FFABS(err) > 0xFFFF)
|
||||
return -0x10000;
|
||||
state->A[Q] += FFABS(err);
|
||||
err *= state->twonear;
|
||||
state->B[Q] += err;
|
||||
|
@@ -143,10 +143,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
buf += 5;
|
||||
|
||||
if (video_size) {
|
||||
if(video_size < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "video size %d invalid\n", video_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (avctx->reget_buffer(avctx, &s->frame) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
@@ -154,7 +150,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if (video_type == 0 || video_type == 1) {
|
||||
GetBitContext gb;
|
||||
init_get_bits(&gb, buf, 8 * FFMIN(video_size, buf_end - buf));
|
||||
init_get_bits(&gb, buf, FFMIN(video_size, buf_end - buf));
|
||||
|
||||
for (j = 0; j < avctx->height; j += 8)
|
||||
for (i = 0; i < avctx->width; i += 8)
|
||||
|
@@ -30,26 +30,19 @@
|
||||
|
||||
typedef struct {
|
||||
AVCodecContext *avctx;
|
||||
AVFrame prev, cur;
|
||||
AVFrame pic;
|
||||
uint16_t *prev, *cur;
|
||||
} KgvContext;
|
||||
|
||||
static void decode_flush(AVCodecContext *avctx)
|
||||
{
|
||||
KgvContext * const c = avctx->priv_data;
|
||||
|
||||
if (c->prev.data[0])
|
||||
avctx->release_buffer(avctx, &c->prev);
|
||||
}
|
||||
|
||||
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
const uint8_t *buf_end = buf + avpkt->size;
|
||||
KgvContext * const c = avctx->priv_data;
|
||||
int offsets[8];
|
||||
int offsets[7];
|
||||
uint16_t *out, *prev;
|
||||
int outcnt = 0, maxcnt;
|
||||
int w, h, i, res;
|
||||
int w, h, i;
|
||||
|
||||
if (avpkt->size < 2)
|
||||
return -1;
|
||||
@@ -61,25 +54,22 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
if (av_image_check_size(w, h, 0, avctx))
|
||||
return -1;
|
||||
|
||||
if (w != avctx->width || h != avctx->height) {
|
||||
if (c->prev.data[0])
|
||||
avctx->release_buffer(avctx, &c->prev);
|
||||
if (w != avctx->width || h != avctx->height)
|
||||
avcodec_set_dimensions(avctx, w, h);
|
||||
}
|
||||
|
||||
maxcnt = w * h;
|
||||
|
||||
c->cur.reference = 3;
|
||||
if ((res = avctx->get_buffer(avctx, &c->cur)) < 0)
|
||||
return res;
|
||||
out = (uint16_t *) c->cur.data[0];
|
||||
if (c->prev.data[0]) {
|
||||
prev = (uint16_t *) c->prev.data[0];
|
||||
} else {
|
||||
prev = NULL;
|
||||
}
|
||||
out = av_realloc(c->cur, w * h * 2);
|
||||
if (!out)
|
||||
return -1;
|
||||
c->cur = out;
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
prev = av_realloc(c->prev, w * h * 2);
|
||||
if (!prev)
|
||||
return -1;
|
||||
c->prev = prev;
|
||||
|
||||
for (i = 0; i < 7; i++)
|
||||
offsets[i] = -1;
|
||||
|
||||
while (outcnt < maxcnt && buf_end - 2 > buf) {
|
||||
@@ -90,7 +80,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
out[outcnt++] = code; // rgb555 pixel coded directly
|
||||
} else {
|
||||
int count;
|
||||
int inp_off;
|
||||
uint16_t *inp;
|
||||
|
||||
if ((code & 0x6000) == 0x6000) {
|
||||
@@ -112,14 +101,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
if (maxcnt - start < count)
|
||||
break;
|
||||
|
||||
if (!prev) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Frame reference does not exist\n");
|
||||
break;
|
||||
}
|
||||
|
||||
inp = prev;
|
||||
inp_off = start;
|
||||
inp = prev + start;
|
||||
} else {
|
||||
// copy from earlier in this frame
|
||||
int offset = (code & 0x1FFF) + 1;
|
||||
@@ -137,28 +119,27 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
if (outcnt < offset)
|
||||
break;
|
||||
|
||||
inp = out;
|
||||
inp_off = outcnt - offset;
|
||||
inp = out + outcnt - offset;
|
||||
}
|
||||
|
||||
if (maxcnt - outcnt < count)
|
||||
break;
|
||||
|
||||
for (i = inp_off; i < count + inp_off; i++) {
|
||||
for (i = 0; i < count; i++)
|
||||
out[outcnt++] = inp[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (outcnt - maxcnt)
|
||||
av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = c->cur;
|
||||
c->pic.data[0] = (uint8_t *)c->cur;
|
||||
c->pic.linesize[0] = w * 2;
|
||||
|
||||
if (c->prev.data[0])
|
||||
avctx->release_buffer(avctx, &c->prev);
|
||||
FFSWAP(AVFrame, c->cur, c->prev);
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = c->pic;
|
||||
|
||||
FFSWAP(uint16_t *, c->cur, c->prev);
|
||||
|
||||
return avpkt->size;
|
||||
}
|
||||
@@ -169,25 +150,30 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
|
||||
c->avctx = avctx;
|
||||
avctx->pix_fmt = PIX_FMT_RGB555;
|
||||
avctx->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
decode_flush(avctx);
|
||||
KgvContext * const c = avctx->priv_data;
|
||||
|
||||
av_freep(&c->cur);
|
||||
av_freep(&c->prev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVCodec ff_kgv1_decoder = {
|
||||
.name = "kgv1",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_KGV1,
|
||||
.priv_data_size = sizeof(KgvContext),
|
||||
.init = decode_init,
|
||||
.close = decode_end,
|
||||
.decode = decode_frame,
|
||||
.flush = decode_flush,
|
||||
"kgv1",
|
||||
AVMEDIA_TYPE_VIDEO,
|
||||
CODEC_ID_KGV1,
|
||||
sizeof(KgvContext),
|
||||
decode_init,
|
||||
NULL,
|
||||
decode_end,
|
||||
decode_frame,
|
||||
.max_lowres = 1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"),
|
||||
};
|
||||
|
@@ -33,7 +33,6 @@
|
||||
#define KMVC_KEYFRAME 0x80
|
||||
#define KMVC_PALETTE 0x40
|
||||
#define KMVC_METHOD 0x0F
|
||||
#define MAX_PALSIZE 256
|
||||
|
||||
/*
|
||||
* Decoder context
|
||||
@@ -44,7 +43,7 @@ typedef struct KmvcContext {
|
||||
|
||||
int setpal;
|
||||
int palsize;
|
||||
uint32_t pal[MAX_PALSIZE];
|
||||
uint32_t pal[256];
|
||||
uint8_t *cur, *prev;
|
||||
uint8_t *frm0, *frm1;
|
||||
} KmvcContext;
|
||||
@@ -58,21 +57,17 @@ typedef struct BitBuf {
|
||||
|
||||
#define kmvc_init_getbits(bb, src) bb.bits = 7; bb.bitbuf = *src++;
|
||||
|
||||
#define kmvc_getbit(bb, src, src_end, res) {\
|
||||
#define kmvc_getbit(bb, src, res) {\
|
||||
res = 0; \
|
||||
if (bb.bitbuf & (1 << bb.bits)) res = 1; \
|
||||
bb.bits--; \
|
||||
if(bb.bits == -1) { \
|
||||
if (src >= src_end) { \
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n"); \
|
||||
return AVERROR_INVALIDDATA; \
|
||||
} \
|
||||
bb.bitbuf = *src++; \
|
||||
bb.bits = 7; \
|
||||
} \
|
||||
}
|
||||
|

static int kmvc_decode_intra_8x8(KmvcContext * ctx, const uint8_t * src, int src_size, int w, int h)
static void kmvc_decode_intra_8x8(KmvcContext * ctx, const uint8_t * src, int w, int h)
{
BitBuf bb;
int res, val;
@@ -80,18 +75,13 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, const uint8_t * src, int src
int bx, by;
int l0x, l1x, l0y, l1y;
int mx, my;
const uint8_t *src_end = src + src_size;

kmvc_init_getbits(bb, src);

for (by = 0; by < h; by += 8)
for (bx = 0; bx < w; bx += 8) {
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 8x8 block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val;
@@ -99,22 +89,14 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, const uint8_t * src, int src
for (i = 0; i < 4; i++) {
l0x = bx + (i & 1) * 4;
l0y = by + (i & 2) * 2;
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 4x4 block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val;
} else { // copy block from already decoded place
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
mx = val & 0xF;
my = val >> 4;
@@ -126,24 +108,16 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, const uint8_t * src, int src
for (j = 0; j < 4; j++) {
l1x = l0x + (j & 1) * 2;
l1y = l0y + (j & 2);
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 2x2 block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
BLK(ctx->cur, l1x, l1y) = val;
BLK(ctx->cur, l1x + 1, l1y) = val;
BLK(ctx->cur, l1x, l1y + 1) = val;
BLK(ctx->cur, l1x + 1, l1y + 1) = val;
} else { // copy block from already decoded place
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
mx = val & 0xF;
my = val >> 4;
@@ -166,11 +140,9 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, const uint8_t * src, int src
}
}
}

return 0;
}

static int kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int src_size, int w, int h)
static void kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int w, int h)
{
BitBuf bb;
int res, val;
@@ -178,20 +150,15 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int src
int bx, by;
int l0x, l1x, l0y, l1y;
int mx, my;
const uint8_t *src_end = src + src_size;

kmvc_init_getbits(bb, src);

for (by = 0; by < h; by += 8)
for (bx = 0; bx < w; bx += 8) {
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 8x8 block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val;
@@ -204,22 +171,14 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int src
for (i = 0; i < 4; i++) {
l0x = bx + (i & 1) * 4;
l0y = by + (i & 2) * 2;
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 4x4 block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val;
} else { // copy block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
@@ -231,24 +190,16 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int src
for (j = 0; j < 4; j++) {
l1x = l0x + (j & 1) * 2;
l1y = l0y + (j & 2);
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, src_end, res);
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 2x2 block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
BLK(ctx->cur, l1x, l1y) = val;
BLK(ctx->cur, l1x + 1, l1y) = val;
BLK(ctx->cur, l1x, l1y + 1) = val;
BLK(ctx->cur, l1x + 1, l1y + 1) = val;
} else { // copy block
if (src >= src_end) {
av_log(ctx->avctx, AV_LOG_ERROR, "Data overrun\n");
return AVERROR_INVALIDDATA;
}
val = *src++;
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
@@ -271,8 +222,6 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int src
}
}
}

return 0;
}

static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt)
@@ -284,7 +233,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
int i;
int header;
int blocksize;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);

if (ctx->pic.data[0])
avctx->release_buffer(avctx, &ctx->pic);
@@ -316,6 +264,13 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
ctx->pic.pict_type = AV_PICTURE_TYPE_P;
}

/* if palette has been changed, copy it from palctrl */
if (ctx->avctx->palctrl && ctx->avctx->palctrl->palette_changed) {
memcpy(ctx->pal, ctx->avctx->palctrl->palette, AVPALETTE_SIZE);
ctx->setpal = 1;
ctx->avctx->palctrl->palette_changed = 0;
}

if (header & KMVC_PALETTE) {
ctx->pic.palette_has_changed = 1;
// palette starts from index 1 and has 127 entries
@@ -324,11 +279,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
}
}

if (pal) {
ctx->pic.palette_has_changed = 1;
memcpy(ctx->pal, pal, AVPALETTE_SIZE);
}

if (ctx->setpal) {
ctx->setpal = 0;
ctx->pic.palette_has_changed = 1;
@@ -350,10 +300,10 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
memcpy(ctx->cur, ctx->prev, 320 * 200);
break;
case 3:
kmvc_decode_intra_8x8(ctx, buf, buf_size, avctx->width, avctx->height);
kmvc_decode_intra_8x8(ctx, buf, avctx->width, avctx->height);
break;
case 4:
kmvc_decode_inter_8x8(ctx, buf, buf_size, avctx->width, avctx->height);
kmvc_decode_inter_8x8(ctx, buf, avctx->width, avctx->height);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown compression method %i\n", header & KMVC_METHOD);
@@ -415,10 +365,6 @@ static av_cold int decode_init(AVCodecContext * avctx)
c->palsize = 127;
} else {
c->palsize = AV_RL16(avctx->extradata + 10);
if (c->palsize >= MAX_PALSIZE) {
av_log(avctx, AV_LOG_ERROR, "KMVC palette too large\n");
return AVERROR_INVALIDDATA;
}
}

if (avctx->extradata_size == 1036) { // palette in extradata
@@ -428,6 +374,9 @@ static av_cold int decode_init(AVCodecContext * avctx)
src += 4;
}
c->setpal = 1;
if (c->avctx->palctrl) {
c->avctx->palctrl->palette_changed = 0;
}
}

avcodec_get_frame_defaults(&c->pic);
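One side of the decode_frame hunk above drops the avctx->palctrl polling and instead picks the palette up from packet side data via av_packet_get_side_data(). A minimal sketch of that lookup, assuming the demuxer attached an AVPALETTE_SIZE palette blob to the packet, is:

    #include <stdint.h>
    #include <string.h>
    #include "avcodec.h"   /* AVPacket, av_packet_get_side_data(), AVPALETTE_SIZE */

    /* Copies a palette carried as packet side data into pal_out (256 uint32_t
     * entries).  Returns 1 if the palette changed, 0 if the packet had none. */
    static int update_palette_from_packet(AVPacket *avpkt, uint32_t *pal_out)
    {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
        if (!pal)
            return 0;
        memcpy(pal_out, pal, AVPALETTE_SIZE);
        return 1;   /* caller sets pic.palette_has_changed */
    }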

@@ -223,29 +223,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
len = mszh_dlen;
}
break;
case COMP_MSZH_NOCOMP: {
int bppx2;
switch (c->imgtype) {
case IMGTYPE_YUV111:
case IMGTYPE_RGB24:
bppx2 = 6;
break;
case IMGTYPE_YUV422:
case IMGTYPE_YUV211:
bppx2 = 4;
break;
case IMGTYPE_YUV411:
case IMGTYPE_YUV420:
bppx2 = 3;
break;
default:
bppx2 = 0; // will error out below
break;
}
if (len < ((width * height * bppx2) >> 1))
return AVERROR_INVALIDDATA;
case COMP_MSZH_NOCOMP:
break;
}
default:
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown MSZH compression in frame decoder.\n");
return -1;
@@ -477,7 +456,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avcodec_get_frame_defaults(&c->pic);
if (avctx->extradata_size < 8) {
av_log(avctx, AV_LOG_ERROR, "Extradata size too small.\n");
return AVERROR_INVALIDDATA;
return 1;
}

/* Check codec type */
@@ -526,7 +505,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported image format %d.\n", c->imgtype);
return AVERROR_INVALIDDATA;
return 1;
}

/* Detect compression method */
@@ -543,7 +522,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported compression format for MSZH (%d).\n", c->compression);
return AVERROR_INVALIDDATA;
return 1;
}
break;
#if CONFIG_ZLIB_DECODER
@@ -561,7 +540,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
default:
if (c->compression < Z_NO_COMPRESSION || c->compression > Z_BEST_COMPRESSION) {
av_log(avctx, AV_LOG_ERROR, "Unsupported compression level for ZLIB: (%d).\n", c->compression);
return AVERROR_INVALIDDATA;
return 1;
}
av_log(avctx, AV_LOG_DEBUG, "Compression level for ZLIB: (%d).\n", c->compression);
}
@@ -569,14 +548,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
#endif
default:
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown codec in compression switch.\n");
return AVERROR_INVALIDDATA;
return 1;
}

/* Allocate decompression buffer */
if (c->decomp_size) {
if ((c->decomp_buf = av_malloc(max_decomp_size)) == NULL) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return AVERROR(ENOMEM);
return 1;
}
}

@@ -602,7 +581,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
av_freep(&c->decomp_buf);
return AVERROR_INVALIDDATA;
return 1;
}
}
#endif
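The COMP_MSZH_NOCOMP hunk above rejects packets that are too short by computing twice the bytes per pixel (bppx2) for the image type and requiring len >= width * height * bppx2 / 2. A hedged helper showing just that arithmetic, reusing the IMGTYPE_* constants visible in the hunk (assumed to come from the decoder's header), might read:

    /* Minimum uncompressed payload for one frame, in bytes.  bppx2 is twice the
     * bytes per pixel, so the shift by one undoes the doubling; e.g. a 320x200
     * YUV420 frame (bppx2 = 3) needs at least 320 * 200 * 3 / 2 = 96000 bytes. */
    static int min_nocomp_payload(int imgtype, int width, int height)
    {
        int bppx2;
        switch (imgtype) {
        case IMGTYPE_YUV111:
        case IMGTYPE_RGB24:  bppx2 = 6; break;   /* 3   bytes per pixel */
        case IMGTYPE_YUV422:
        case IMGTYPE_YUV211: bppx2 = 4; break;   /* 2   bytes per pixel */
        case IMGTYPE_YUV411:
        case IMGTYPE_YUV420: bppx2 = 3; break;   /* 1.5 bytes per pixel */
        default:             return -1;          /* unknown layout      */
        }
        return (width * height * bppx2) >> 1;
    }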

@@ -1,134 +0,0 @@
/*
* Interface to libaacplus for aac+ (sbr+ps) encoding
* Copyright (c) 2010 tipok <piratfm@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

/**
* @file
* Interface to libaacplus for aac+ (sbr+ps) encoding.
*/

#include "avcodec.h"
#include <aacplus.h>

typedef struct aacPlusAudioContext {
aacplusEncHandle aacplus_handle;
} aacPlusAudioContext;

static av_cold int aacPlus_encode_init(AVCodecContext *avctx)
{
aacPlusAudioContext *s = avctx->priv_data;
aacplusEncConfiguration *aacplus_cfg;
unsigned long samples_input, max_bytes_output;

/* number of channels */
if (avctx->channels < 1 || avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels);
return -1;
}

s->aacplus_handle = aacplusEncOpen(avctx->sample_rate,
avctx->channels,
&samples_input, &max_bytes_output);
if(!s->aacplus_handle) {
av_log(avctx, AV_LOG_ERROR, "can't open encoder\n");
return -1;
}

/* check aacplus version */
aacplus_cfg = aacplusEncGetCurrentConfiguration(s->aacplus_handle);

/* put the options in the configuration struct */
if(avctx->profile != FF_PROFILE_AAC_LOW && avctx->profile != FF_PROFILE_UNKNOWN) {
av_log(avctx, AV_LOG_ERROR, "invalid AAC profile: %d, only LC supported\n", avctx->profile);
aacplusEncClose(s->aacplus_handle);
return -1;
}

aacplus_cfg->bitRate = avctx->bit_rate;
aacplus_cfg->bandWidth = avctx->cutoff;
aacplus_cfg->outputFormat = !(avctx->flags & CODEC_FLAG_GLOBAL_HEADER);
aacplus_cfg->inputFormat = AACPLUS_INPUT_16BIT;
if (!aacplusEncSetConfiguration(s->aacplus_handle, aacplus_cfg)) {
av_log(avctx, AV_LOG_ERROR, "libaacplus doesn't support this output format!\n");
return -1;
}

avctx->frame_size = samples_input / avctx->channels;

avctx->coded_frame= avcodec_alloc_frame();
avctx->coded_frame->key_frame= 1;

/* Set decoder specific info */
avctx->extradata_size = 0;
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {

unsigned char *buffer = NULL;
unsigned long decoder_specific_info_size;

if (aacplusEncGetDecoderSpecificInfo(s->aacplus_handle, &buffer,
&decoder_specific_info_size) == 1) {
avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE);
avctx->extradata_size = decoder_specific_info_size;
memcpy(avctx->extradata, buffer, avctx->extradata_size);
}
#undef free
free(buffer);
#define free please_use_av_free
}
return 0;
}

static int aacPlus_encode_frame(AVCodecContext *avctx,
unsigned char *frame, int buf_size, void *data)
{
aacPlusAudioContext *s = avctx->priv_data;
int bytes_written;

bytes_written = aacplusEncEncode(s->aacplus_handle,
data,
avctx->frame_size * avctx->channels,
frame,
buf_size);

return bytes_written;
}

static av_cold int aacPlus_encode_close(AVCodecContext *avctx)
{
aacPlusAudioContext *s = avctx->priv_data;

av_freep(&avctx->coded_frame);
av_freep(&avctx->extradata);

aacplusEncClose(s->aacplus_handle);
return 0;
}

AVCodec ff_libaacplus_encoder = {
"libaacplus",
AVMEDIA_TYPE_AUDIO,
CODEC_ID_AAC,
sizeof(aacPlusAudioContext),
aacPlus_encode_init,
aacPlus_encode_frame,
aacPlus_encode_close,
.sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("libaacplus AAC+ (Advanced Audio Codec with SBR+PS)"),
};
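The deleted libaacplus wrapper above only exports extradata when the caller requested global headers, copying the encoder's decoder-specific info into avctx->extradata with input-buffer padding. A condensed sketch of that pattern, where get_dsi() is a hypothetical stand-in for aacplusEncGetDecoderSpecificInfo(), could be:

    #include <stdint.h>
    #include <string.h>
    #include "avcodec.h"          /* AVCodecContext, CODEC_FLAG_GLOBAL_HEADER,
                                     FF_INPUT_BUFFER_PADDING_SIZE */
    #include "libavutil/mem.h"    /* av_malloc() */

    /* get_dsi() is hypothetical: it exposes a codec-owned buffer holding the
     * decoder-specific info and its size, returning 1 on success. */
    static int export_global_header(AVCodecContext *avctx,
                                    int (*get_dsi)(uint8_t **buf, unsigned long *size))
    {
        uint8_t *buffer = NULL;
        unsigned long size = 0;

        avctx->extradata_size = 0;
        if (!(avctx->flags & CODEC_FLAG_GLOBAL_HEADER))
            return 0;                             /* headers travel in-band instead */

        if (get_dsi(&buffer, &size) == 1) {
            avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata)
                return AVERROR(ENOMEM);
            avctx->extradata_size = size;
            memcpy(avctx->extradata, buffer, size);
        }
        return 0;
    }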

@@ -141,25 +141,18 @@ static int libgsm_decode_frame(AVCodecContext *avctx,
AVPacket *avpkt) {
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int out_size = avctx->frame_size * av_get_bytes_per_sample(avctx->sample_fmt);

if (*data_size < out_size) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
}

*data_size = 0; /* In case of error */
if(buf_size < avctx->block_align) return -1;
switch(avctx->codec_id) {
case CODEC_ID_GSM:
if(gsm_decode(avctx->priv_data,buf,data)) return -1;
*data_size = GSM_FRAME_SIZE*sizeof(int16_t);
break;
case CODEC_ID_GSM_MS:
if(gsm_decode(avctx->priv_data,buf,data) ||
gsm_decode(avctx->priv_data,buf+33,((int16_t*)data)+GSM_FRAME_SIZE)) return -1;
*data_size = GSM_FRAME_SIZE*sizeof(int16_t)*2;
}

*data_size = out_size;
return avctx->block_align;
}

Some files were not shown because too many files have changed in this diff.