Compare commits

55 Commits (SHA1):

a248117f26
bd66456866
90ee388b28
e3d643cf75
1b05b0005b
38ca79b04d
0dff3171ce
ff29290e26
ba7cd748c1
510da4fe2b
eec833b10d
9b0736c08a
70a1182a48
49d597f058
44ebb2556d
0a41da3e9d
afe09e490a
f8c4dbe45e
c997dcd38b
2a1bebfc83
23d835f611
d04dc7b5a7
3197a9c4fa
ade4f3e746
053c19cd88
96481c5e18
9b052bfb86
f844cb9bce
76c97f1963
280998b13c
96cf80609a
33c9e18b09
9c713f30e4
530d10792d
799000af70
f8d3bb8961
78889be3fb
c65763a2c6
6d4d186e9e
5ebb5a32bd
a694b2b158
d785f69401
5025dbc577
5bfa208e65
d86a5ce03f
7d4c38d58d
c313f3160a
7e6625a9af
f13f6f82c6
8489c0599f
ee6b868ac8
b6783b8826
e2d529424f
537c173853
c10582e703
Doxyfile (2 changed lines)

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.7.15
PROJECT_NUMBER = 0.8.15

# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
doc/APIchanges (120 changed lines)
@@ -66,16 +66,16 @@ API changes, most recent first:
|
||||
2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
|
||||
Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.
|
||||
|
||||
2011-06-09 - d9f80ea - lavu 51.8.0 - AVMetadata
|
||||
2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
|
||||
Move AVMetadata from lavf to lavu and rename it to
|
||||
AVDictionary -- new installed header dict.h.
|
||||
All av_metadata_* functions renamed to av_dict_*.
|
||||
|
||||
2011-06-07 - a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
|
||||
2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
|
||||
Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
|
||||
Deprecate av_get_bits_per_sample_fmt().
|
||||
|
||||
2011-06-xx - b39b062 - lavu 51.8.0 - opt.h
|
||||
2011-06-xx - f956924 / b39b062 - lavu 51.8.0 - opt.h
|
||||
Add av_opt_free convenience function.
|
||||
|
||||
2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps
|
||||
@@ -105,7 +105,7 @@ API changes, most recent first:
|
||||
Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
|
||||
avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.
|
||||
|
||||
2011-05-25 - 30315a8 - lavf 53.3.0 - avformat.h
|
||||
2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
|
||||
Add fps_probe_size to AVFormatContext.
|
||||
|
||||
2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h
|
||||
@@ -121,10 +121,10 @@ API changes, most recent first:
|
||||
2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
|
||||
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.
|
||||
|
||||
2011-05-18 - 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
|
||||
2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
|
||||
Add request_sample_fmt field to AVCodecContext.
|
||||
|
||||
2011-05-10 - 188dea1 - lavc 53.6.0 - avcodec.h
|
||||
2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
|
||||
Deprecate AVLPCType and the following fields in
|
||||
AVCodecContext: lpc_coeff_precision, prediction_order_method,
|
||||
min_partition_order, max_partition_order, lpc_type, lpc_passes.
|
||||
@@ -154,81 +154,81 @@ API changes, most recent first:
|
||||
Add av_dynarray_add function for adding
|
||||
an element to a dynamic array.
|
||||
|
||||
2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
|
||||
2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
|
||||
Add AVPictureType enum and av_get_picture_type_char(), deprecate
|
||||
FF_*_TYPE defines and av_get_pict_type_char() defined in
|
||||
libavcodec/avcodec.h.
|
||||
|
||||
2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
|
||||
2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
|
||||
Add pict_type and key_frame fields to AVFilterBufferRefVideo.
|
||||
|
||||
2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
|
||||
2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
|
||||
Add sample_aspect_ratio fields to vsrc_buffer arguments
|
||||
|
||||
2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
|
||||
2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
|
||||
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.
|
||||
|
||||
2011-04-15 - lavc 52.120.0 - avcodec.h
|
||||
AVPacket structure got additional members for passing side information:
|
||||
4de339e introduce side information for AVPacket
|
||||
2d8591c make containers pass palette change in AVPacket
|
||||
c407984 / 4de339e introduce side information for AVPacket
|
||||
c407984 / 2d8591c make containers pass palette change in AVPacket
|
||||
|
||||
2011-04-12 - lavf 52.107.0 - avio.h
|
||||
Avio cleanup, part II - deprecate the entire URLContext API:
|
||||
175389c add avio_check as a replacement for url_exist
|
||||
ff1ec0c add avio_pause and avio_seek_time as replacements
|
||||
c55780d / 175389c add avio_check as a replacement for url_exist
|
||||
9891004 / ff1ec0c add avio_pause and avio_seek_time as replacements
|
||||
for _av_url_read_fseek/fpause
|
||||
cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
|
||||
d4d0932 / cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
|
||||
should be used instead.
|
||||
80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
|
||||
f87b1b3 rename open flags: URL_* -> AVIO_*
|
||||
f8270bb add avio_enum_protocols.
|
||||
5593f03 deprecate URLProtocol.
|
||||
c486dad deprecate URLContext.
|
||||
026e175 deprecate the typedef for URLInterruptCB
|
||||
8e76a19 deprecate av_register_protocol2.
|
||||
b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
|
||||
1305d93 deprecate av_url_read_seek
|
||||
fa104e1 deprecate av_url_read_pause
|
||||
727c7aa deprecate url_get_filename().
|
||||
5958df3 deprecate url_max_packet_size().
|
||||
1869ea0 deprecate url_get_file_handle().
|
||||
32a97d4 deprecate url_filesize().
|
||||
e52a914 deprecate url_close().
|
||||
58a48c6 deprecate url_seek().
|
||||
925e908 deprecate url_write().
|
||||
dce3756 deprecate url_read_complete().
|
||||
bc371ac deprecate url_read().
|
||||
0589da0 deprecate url_open().
|
||||
62eaaea deprecate url_connect.
|
||||
5652bb9 deprecate url_alloc.
|
||||
333e894 deprecate url_open_protocol
|
||||
e230705 deprecate url_poll and URLPollEntry
|
||||
c88caa5 / 80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
|
||||
c88caa5 / f87b1b3 rename open flags: URL_* -> AVIO_*
|
||||
d4d0932 / f8270bb add avio_enum_protocols.
|
||||
d4d0932 / 5593f03 deprecate URLProtocol.
|
||||
d4d0932 / c486dad deprecate URLContext.
|
||||
d4d0932 / 026e175 deprecate the typedef for URLInterruptCB
|
||||
c88caa5 / 8e76a19 deprecate av_register_protocol2.
|
||||
11d7841 / b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
|
||||
11d7841 / 1305d93 deprecate av_url_read_seek
|
||||
11d7841 / fa104e1 deprecate av_url_read_pause
|
||||
434f248 / 727c7aa deprecate url_get_filename().
|
||||
434f248 / 5958df3 deprecate url_max_packet_size().
|
||||
434f248 / 1869ea0 deprecate url_get_file_handle().
|
||||
434f248 / 32a97d4 deprecate url_filesize().
|
||||
434f248 / e52a914 deprecate url_close().
|
||||
434f248 / 58a48c6 deprecate url_seek().
|
||||
434f248 / 925e908 deprecate url_write().
|
||||
434f248 / dce3756 deprecate url_read_complete().
|
||||
434f248 / bc371ac deprecate url_read().
|
||||
434f248 / 0589da0 deprecate url_open().
|
||||
434f248 / 62eaaea deprecate url_connect.
|
||||
434f248 / 5652bb9 deprecate url_alloc.
|
||||
434f248 / 333e894 deprecate url_open_protocol
|
||||
434f248 / e230705 deprecate url_poll and URLPollEntry
|
||||
|
||||
2011-04-08 - lavf 52.106.0 - avformat.h
|
||||
Minor avformat.h cleanup:
|
||||
a9bf9d8 deprecate av_guess_image2_codec
|
||||
c3675df rename avf_sdp_create->av_sdp_create
|
||||
d4d0932 / a9bf9d8 deprecate av_guess_image2_codec
|
||||
d4d0932 / c3675df rename avf_sdp_create->av_sdp_create
|
||||
|
||||
2011-04-03 - lavf 52.105.0 - avio.h
|
||||
Large-scale renaming/deprecating of AVIOContext-related functions:
|
||||
724f6a0 deprecate url_fdopen
|
||||
403ee83 deprecate url_open_dyn_packet_buf
|
||||
6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
|
||||
b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
|
||||
8978fed introduce an AVIOContext.seekable field as a replacement for
|
||||
2cae980 / 724f6a0 deprecate url_fdopen
|
||||
2cae980 / 403ee83 deprecate url_open_dyn_packet_buf
|
||||
2cae980 / 6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
|
||||
2cae980 / b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
|
||||
2cae980 / 8978fed introduce an AVIOContext.seekable field as a replacement for
|
||||
AVIOContext.is_streamed and url_is_streamed()
|
||||
b64030f deprecate get_checksum()
|
||||
4c4427a deprecate init_checksum()
|
||||
4ec153b deprecate udp_set_remote_url/get_local_port
|
||||
933e90a deprecate av_url_read_fseek/fpause
|
||||
8d9769a deprecate url_fileno
|
||||
b7f2fdd rename put_flush_packet -> avio_flush
|
||||
35f1023 deprecate url_close_buf
|
||||
83fddae deprecate url_open_buf
|
||||
d9d86e0 rename url_fprintf -> avio_printf
|
||||
59f65d9 deprecate url_setbufsize
|
||||
3e68b3b deprecate url_ferror
|
||||
1caa412 / b64030f deprecate get_checksum()
|
||||
1caa412 / 4c4427a deprecate init_checksum()
|
||||
2fd41c9 / 4ec153b deprecate udp_set_remote_url/get_local_port
|
||||
4fa0e24 / 933e90a deprecate av_url_read_fseek/fpause
|
||||
4fa0e24 / 8d9769a deprecate url_fileno
|
||||
0fecf26 / b7f2fdd rename put_flush_packet -> avio_flush
|
||||
0fecf26 / 35f1023 deprecate url_close_buf
|
||||
0fecf26 / 83fddae deprecate url_open_buf
|
||||
0fecf26 / d9d86e0 rename url_fprintf -> avio_printf
|
||||
0fecf26 / 59f65d9 deprecate url_setbufsize
|
||||
6947b0c / 3e68b3b deprecate url_ferror
|
||||
66e5b1d deprecate url_feof
|
||||
e8bb2e2 deprecate url_fget_max_packet_size
|
||||
76aa876 rename url_fsize -> avio_size
|
||||
@@ -250,7 +250,7 @@ API changes, most recent first:
|
||||
b3db9ce deprecate get_partial_buffer
|
||||
8d9ac96 rename av_alloc_put_byte -> avio_alloc_context
|
||||
|
||||
2011-03-25 - 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
|
||||
2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
|
||||
Add audio_service_type field to AVCodecContext.
|
||||
|
||||
2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h
|
||||
@@ -288,11 +288,11 @@ API changes, most recent first:
|
||||
2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
|
||||
Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.
|
||||
|
||||
2011-02-09 - 5592734 - lavc 52.112.0 - avcodec_thread_init()
|
||||
2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
|
||||
Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
|
||||
set thread_count before calling avcodec_open.
|
||||
|
||||
2011-02-09 - 778b08a - lavc 52.111.0 - threading API
|
||||
2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
|
||||
Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
|
||||
release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
|
||||
Add thread_type and active_thread_type fields to AVCodecContext.
|
||||
|
@@ -2,7 +2,7 @@ Release Notes
=============

* 0.8 "Love" June, 2011
* 0.7.1 "Peace" June, 2011 (identical to 0.8 but using 0.6 ABI/API)
* 0.7 "Peace" June, 2011 (identical to 0.8 but using 0.6 ABI/API)


General notes
@@ -479,7 +479,7 @@ int main(int argc, char **argv)

/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&oc->pb, filename, AVIO_WRONLY) < 0) {
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
fprintf(stderr, "Could not open '%s'\n", filename);
exit(1);
}
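For reference, a minimal sketch (not part of this changeset) of how the renamed flag is used when a muxer opens its output file; the helper name open_output and its error handling are illustrative assumptions, while avio_open(), AVIO_FLAG_WRITE and AVFMT_NOFILE come from the hunk above:

#include <stdio.h>
#include "libavformat/avformat.h"

/* Open oc's output file unless the container format needs no file at all.
 * oc must already have its oformat set; returns 0 or a negative AVERROR. */
static int open_output(AVFormatContext *oc, const char *filename)
{
    int err;
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        /* AVIO_FLAG_WRITE replaces the old AVIO_WRONLY flag */
        if ((err = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE)) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return err;
        }
    }
    return 0;
}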
@@ -82,7 +82,7 @@ Follows a BNF description for the filtergraph syntax:
@var{LINKLABEL} ::= "[" @var{NAME} "]"
@var{LINKLABELS} ::= @var{LINKLABEL} [@var{LINKLABELS}]
@var{FILTER_ARGUMENTS} ::= sequence of chars (eventually quoted)
@var{FILTER} ::= [@var{LINKNAMES}] @var{NAME} ["=" @var{ARGUMENTS}] [@var{LINKNAMES}]
@var{FILTER} ::= [@var{LINKLABELS}] @var{NAME} ["=" @var{FILTER_ARGUMENTS}] [@var{LINKLABELS}]
@var{FILTERCHAIN} ::= @var{FILTER} [,@var{FILTERCHAIN}]
@var{FILTERGRAPH} ::= @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@end example
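As an illustration of the grammar above (not taken from the patch), a string such as

[in] scale=640:360 [mid]; [mid] vflip [out]

parses as a FILTERGRAPH made of two FILTERCHAINs; scale and vflip are standard libavfilter filters, and in/mid/out are arbitrary LINKLABEL names.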
@@ -15,7 +15,7 @@ be properly added to the respective issue.
The subscription URL for the ffmpeg-trac list is:
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
http(s)://ffmpeg.org/trac/ffmpeg
http(s)://trac.ffmpeg.org

NOTE: issue = (bug report || patch || feature request)
ffmpeg.c (11 changed lines)
@@ -31,7 +31,7 @@
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavdevice/avdevice.h"
|
||||
#include "libswscale/swscale.h"
|
||||
#include "libavcodec/opt.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavcodec/audioconvert.h"
|
||||
#include "libavutil/audioconvert.h"
|
||||
#include "libavutil/parseutils.h"
|
||||
@@ -113,9 +113,7 @@ typedef struct AVChapterMap {
|
||||
static const OptionDef options[];
|
||||
|
||||
#define MAX_FILES 100
|
||||
#if !FF_API_MAX_STREAMS
|
||||
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
|
||||
#endif
|
||||
|
||||
static const char *last_asked_format = NULL;
|
||||
static int64_t input_files_ts_offset[MAX_FILES];
|
||||
@@ -715,6 +713,7 @@ static int read_ffserver_streams(AVFormatContext *s, const char *filename)
|
||||
return err;
|
||||
/* copy stream format */
|
||||
s->nb_streams = 0;
|
||||
s->streams = av_mallocz(sizeof(AVStream *) * ic->nb_streams);
|
||||
for(i=0;i<ic->nb_streams;i++) {
|
||||
AVStream *st;
|
||||
AVCodec *codec;
|
||||
@@ -3964,7 +3963,7 @@ static int opt_output_file(const char *opt, const char *filename)
|
||||
/* check filename in case of an image number is expected */
|
||||
if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
|
||||
if (!av_filename_number_test(oc->filename)) {
|
||||
print_error(oc->filename, AVERROR_NUMEXPECTED);
|
||||
print_error(oc->filename, AVERROR(EINVAL));
|
||||
ffmpeg_exit(1);
|
||||
}
|
||||
}
|
||||
@@ -3975,7 +3974,7 @@ static int opt_output_file(const char *opt, const char *filename)
|
||||
(strchr(filename, ':') == NULL ||
|
||||
filename[1] == ':' ||
|
||||
av_strstart(filename, "file:", NULL))) {
|
||||
if (url_exist(filename)) {
|
||||
if (avio_check(filename, 0) == 0) {
|
||||
if (!using_stdin) {
|
||||
fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
|
||||
fflush(stderr);
|
||||
@@ -3992,7 +3991,7 @@ static int opt_output_file(const char *opt, const char *filename)
|
||||
}
|
||||
|
||||
/* open the file */
|
||||
if ((err = avio_open(&oc->pb, filename, AVIO_WRONLY)) < 0) {
|
||||
if ((err = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE)) < 0) {
|
||||
print_error(filename, err);
|
||||
ffmpeg_exit(1);
|
||||
}
|
||||
|
ffplay.c (2 changed lines)
@@ -35,7 +35,7 @@
|
||||
#include "libavdevice/avdevice.h"
|
||||
#include "libswscale/swscale.h"
|
||||
#include "libavcodec/audioconvert.h"
|
||||
#include "libavcodec/opt.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavcodec/avfft.h"
|
||||
|
||||
#if CONFIG_AVFILTER
|
||||
|
@@ -23,7 +23,7 @@
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavcodec/opt.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavdevice/avdevice.h"
|
||||
|
ffserver.c (42 changed lines)
@@ -39,7 +39,7 @@
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/random_seed.h"
|
||||
#include "libavutil/parseutils.h"
|
||||
#include "libavcodec/opt.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include <stdarg.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
@@ -94,9 +94,7 @@ static const char *http_state[] = {
|
||||
"RTSP_SEND_PACKET",
|
||||
};
|
||||
|
||||
#if !FF_API_MAX_STREAMS
|
||||
#define MAX_STREAMS 20
|
||||
#endif
|
||||
|
||||
#define IOBUFFER_INIT_SIZE 8192
|
||||
|
||||
@@ -2232,11 +2230,11 @@ static int http_prepare_data(HTTPContext *c)
|
||||
av_dict_set(&c->fmt_ctx.metadata, "copyright", c->stream->copyright, 0);
|
||||
av_dict_set(&c->fmt_ctx.metadata, "title" , c->stream->title , 0);
|
||||
|
||||
c->fmt_ctx.streams = av_mallocz(sizeof(AVStream *) * c->stream->nb_streams);
|
||||
|
||||
for(i=0;i<c->stream->nb_streams;i++) {
|
||||
AVStream *st;
|
||||
AVStream *src;
|
||||
st = av_mallocz(sizeof(AVStream));
|
||||
c->fmt_ctx.streams[i] = st;
|
||||
c->fmt_ctx.streams[i] = av_mallocz(sizeof(AVStream));
|
||||
/* if file or feed, then just take streams from FFStream struct */
|
||||
if (!c->stream->feed ||
|
||||
c->stream->feed == c->stream)
|
||||
@@ -2244,9 +2242,9 @@ static int http_prepare_data(HTTPContext *c)
|
||||
else
|
||||
src = c->stream->feed->streams[c->stream->feed_streams[i]];
|
||||
|
||||
*st = *src;
|
||||
st->priv_data = 0;
|
||||
st->codec->frame_number = 0; /* XXX: should be done in
|
||||
*(c->fmt_ctx.streams[i]) = *src;
|
||||
c->fmt_ctx.streams[i]->priv_data = 0;
|
||||
c->fmt_ctx.streams[i]->codec->frame_number = 0; /* XXX: should be done in
|
||||
AVStream, not in codec */
|
||||
}
|
||||
/* set output format parameters */
|
||||
@@ -2944,11 +2942,9 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
|
||||
snprintf(avc->filename, 1024, "rtp://0.0.0.0");
|
||||
}
|
||||
|
||||
#if !FF_API_MAX_STREAMS
|
||||
if (avc->nb_streams >= INT_MAX/sizeof(*avc->streams) ||
|
||||
!(avc->streams = av_malloc(avc->nb_streams * sizeof(*avc->streams))))
|
||||
goto sdp_done;
|
||||
#endif
|
||||
if (avc->nb_streams >= INT_MAX/sizeof(*avs) ||
|
||||
!(avs = av_malloc(avc->nb_streams * sizeof(*avs))))
|
||||
goto sdp_done;
|
||||
@@ -2961,10 +2957,8 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
|
||||
av_sdp_create(&avc, 1, *pbuffer, 2048);
|
||||
|
||||
sdp_done:
|
||||
#if !FF_API_MAX_STREAMS
|
||||
av_free(avc->streams);
|
||||
#endif
|
||||
av_metadata_free(&avc->metadata);
|
||||
av_dict_free(&avc->metadata);
|
||||
av_free(avc);
|
||||
av_free(avs);
|
||||
|
||||
@@ -3392,6 +3386,9 @@ static int rtp_new_av_stream(HTTPContext *c,
|
||||
if (!st)
|
||||
goto fail;
|
||||
ctx->nb_streams = 1;
|
||||
ctx->streams = av_mallocz(sizeof(AVStream *) * ctx->nb_streams);
|
||||
if (!ctx->streams)
|
||||
goto fail;
|
||||
ctx->streams[0] = st;
|
||||
|
||||
if (!c->stream->feed ||
|
||||
@@ -3425,7 +3422,7 @@ static int rtp_new_av_stream(HTTPContext *c,
|
||||
"rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port));
|
||||
}
|
||||
|
||||
if (url_open(&h, ctx->filename, AVIO_WRONLY) < 0)
|
||||
if (url_open(&h, ctx->filename, AVIO_FLAG_WRITE) < 0)
|
||||
goto fail;
|
||||
c->rtp_handles[stream_index] = h;
|
||||
max_packet_size = url_get_max_packet_size(h);
|
||||
@@ -3678,7 +3675,7 @@ static void build_feed_streams(void)
|
||||
for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
|
||||
int fd;
|
||||
|
||||
if (url_exist(feed->feed_filename)) {
|
||||
if (avio_check(feed->feed_filename, AVIO_FLAG_READ) > 0) {
|
||||
/* See if it matches */
|
||||
AVFormatContext *s = NULL;
|
||||
int matches = 0;
|
||||
@@ -3751,7 +3748,7 @@ static void build_feed_streams(void)
|
||||
unlink(feed->feed_filename);
|
||||
}
|
||||
}
|
||||
if (!url_exist(feed->feed_filename)) {
|
||||
if (avio_check(feed->feed_filename, AVIO_FLAG_WRITE) <= 0) {
|
||||
AVFormatContext s1 = {0}, *s = &s1;
|
||||
|
||||
if (feed->readonly) {
|
||||
@@ -3761,20 +3758,15 @@ static void build_feed_streams(void)
|
||||
}
|
||||
|
||||
/* only write the header of the ffm file */
|
||||
if (avio_open(&s->pb, feed->feed_filename, AVIO_WRONLY) < 0) {
|
||||
if (avio_open(&s->pb, feed->feed_filename, AVIO_FLAG_WRITE) < 0) {
|
||||
http_log("Could not open output feed file '%s'\n",
|
||||
feed->feed_filename);
|
||||
exit(1);
|
||||
}
|
||||
s->oformat = feed->fmt;
|
||||
s->nb_streams = feed->nb_streams;
|
||||
for(i=0;i<s->nb_streams;i++) {
|
||||
AVStream *st;
|
||||
st = feed->streams[i];
|
||||
s->streams[i] = st;
|
||||
}
|
||||
av_set_parameters(s, NULL);
|
||||
if (av_write_header(s) < 0) {
|
||||
s->streams = feed->streams;
|
||||
if (avformat_write_header(s, NULL) < 0) {
|
||||
http_log("Container doesn't supports the required parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
|
@@ -50,6 +50,8 @@ typedef struct EightBpsContext {
|
||||
|
||||
unsigned char planes;
|
||||
unsigned char planemap[4];
|
||||
|
||||
uint32_t pal[256];
|
||||
} EightBpsContext;
|
||||
|
||||
|
||||
@@ -129,13 +131,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
}
|
||||
}
|
||||
|
||||
if (avctx->palctrl) {
|
||||
memcpy (c->pic.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (avctx->palctrl->palette_changed) {
|
||||
if (avctx->bits_per_coded_sample <= 8) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt,
|
||||
AV_PKT_DATA_PALETTE,
|
||||
NULL);
|
||||
if (pal) {
|
||||
c->pic.palette_has_changed = 1;
|
||||
avctx->palctrl->palette_changed = 0;
|
||||
} else
|
||||
c->pic.palette_has_changed = 0;
|
||||
memcpy(c->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
@@ -165,10 +170,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
c->planes = 1;
|
||||
c->planemap[0] = 0; // 1st plane is palette indexes
|
||||
if (avctx->palctrl == NULL) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error: PAL8 format but no palette from demuxer.\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case 24:
|
||||
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
|
||||
|
@@ -15,7 +15,6 @@ OBJS = allcodecs.o \
|
||||
fmtconvert.o \
|
||||
imgconvert.o \
|
||||
jrevdct.o \
|
||||
opt.o \
|
||||
options.o \
|
||||
parser.o \
|
||||
raw.o \
|
||||
|
@@ -183,6 +183,8 @@ static av_cold int che_configure(AACContext *ac,
|
||||
enum ChannelPosition che_pos[4][MAX_ELEM_ID],
|
||||
int type, int id, int *channels)
|
||||
{
|
||||
if (*channels >= MAX_CHANNELS)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (che_pos[type][id]) {
|
||||
if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
|
||||
return AVERROR(ENOMEM);
|
||||
|
@@ -257,7 +257,7 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
|
||||
// generate warm-up samples
|
||||
residual[0] = samples[0];
|
||||
for(i=1;i<=lpc.lpc_order;i++)
|
||||
residual[i] = samples[i] - samples[i-1];
|
||||
residual[i] = sign_extend(samples[i] - samples[i-1], s->write_sample_size);
|
||||
|
||||
// perform lpc on remaining samples
|
||||
for(i = lpc.lpc_order + 1; i < s->avctx->frame_size; i++) {
|
||||
|
@@ -179,8 +179,11 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
|
||||
uint32_t* obuf = (uint32_t*) out;
|
||||
|
||||
off = (intptr_t)inbuffer & 3;
|
||||
buf = (const uint32_t*) (inbuffer - off);
|
||||
c = av_be2ne32((0x537F6103 >> (off*8)) | (0x537F6103 << (32-(off*8))));
|
||||
buf = (const uint32_t *)(inbuffer - off);
|
||||
if (off)
|
||||
c = av_be2ne32((0x537F6103U >> (off * 8)) | (0x537F6103U << (32 - (off * 8))));
|
||||
else
|
||||
c = av_be2ne32(0x537F6103U);
|
||||
bytes += 3 + off;
|
||||
for (i = 0; i < bytes/4; i++)
|
||||
obuf[i] = c ^ buf[i];
|
||||
|
@@ -34,12 +34,6 @@
|
||||
|
||||
#include "libavcodec/version.h"
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
# define FF_INTERNALC_MEM_TYPE unsigned int
|
||||
#else
|
||||
# define FF_INTERNALC_MEM_TYPE size_t
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Identify the syntax and semantics of the bitstream.
|
||||
* The principle is roughly:
|
||||
@@ -118,9 +112,6 @@ enum CodecID {
|
||||
CODEC_ID_QDRAW,
|
||||
CODEC_ID_VIXL,
|
||||
CODEC_ID_QPEG,
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
CODEC_ID_XVID,
|
||||
#endif
|
||||
CODEC_ID_PNG,
|
||||
CODEC_ID_PPM,
|
||||
CODEC_ID_PBM,
|
||||
@@ -369,18 +360,6 @@ enum CodecID {
|
||||
CODEC_ID_FFMETADATA=0x21000, ///< Dummy codec for streams containing only metadata information.
|
||||
};
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
#define CodecType AVMediaType
|
||||
|
||||
#define CODEC_TYPE_UNKNOWN AVMEDIA_TYPE_UNKNOWN
|
||||
#define CODEC_TYPE_VIDEO AVMEDIA_TYPE_VIDEO
|
||||
#define CODEC_TYPE_AUDIO AVMEDIA_TYPE_AUDIO
|
||||
#define CODEC_TYPE_DATA AVMEDIA_TYPE_DATA
|
||||
#define CODEC_TYPE_SUBTITLE AVMEDIA_TYPE_SUBTITLE
|
||||
#define CODEC_TYPE_ATTACHMENT AVMEDIA_TYPE_ATTACHMENT
|
||||
#define CODEC_TYPE_NB AVMEDIA_TYPE_NB
|
||||
#endif
|
||||
|
||||
#if FF_API_OLD_SAMPLE_FMT
|
||||
#define SampleFormat AVSampleFormat
|
||||
|
||||
@@ -1092,6 +1071,10 @@ typedef struct AVPanScan{
|
||||
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
|
||||
#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
|
||||
|
||||
enum AVPacketSideDataType {
|
||||
AV_PKT_DATA_PALETTE,
|
||||
};
|
||||
|
||||
typedef struct AVPacket {
|
||||
/**
|
||||
* Presentation timestamp in AVStream->time_base units; the time at which
|
||||
@@ -1113,6 +1096,17 @@ typedef struct AVPacket {
|
||||
int size;
|
||||
int stream_index;
|
||||
int flags;
|
||||
/**
|
||||
* Additional packet data that can be provided by the container.
|
||||
* Packet can contain several types of side information.
|
||||
*/
|
||||
struct {
|
||||
uint8_t *data;
|
||||
int size;
|
||||
enum AVPacketSideDataType type;
|
||||
} *side_data;
|
||||
int side_data_elems;
|
||||
|
||||
/**
|
||||
* Duration of this packet in AVStream->time_base units, 0 if unknown.
|
||||
* Equals next_pts - this_pts in presentation order.
|
||||
@@ -1142,9 +1136,6 @@ typedef struct AVPacket {
|
||||
int64_t convergence_duration;
|
||||
} AVPacket;
|
||||
#define AV_PKT_FLAG_KEY 0x0001
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Audio Video Frame.
|
||||
@@ -1265,16 +1256,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
enum PixelFormat pix_fmt;
|
||||
|
||||
#if FF_API_RATE_EMU
|
||||
/**
|
||||
* Frame rate emulation. If not zero, the lower layer (i.e. format handler)
|
||||
* has to read frames at native frame rate.
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int rate_emu;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* If non NULL, 'draw_horiz_band' is called by the libavcodec
|
||||
* decoder to draw a horizontal band. It improves cache usage. Not
|
||||
@@ -1319,9 +1300,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
int frame_size;
|
||||
int frame_number; ///< audio or video frame number
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
int real_pict_num; ///< Returns the real picture number of previous encoded frame.
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Number of frames the decoded output will be delayed relative to
|
||||
@@ -1379,16 +1357,6 @@ typedef struct AVCodecContext {
|
||||
|
||||
int b_frame_strategy;
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
/**
|
||||
* hurry up amount
|
||||
* - encoding: unused
|
||||
* - decoding: Set by user. 1-> Skip B-frames, 2-> Skip IDCT/dequant too, 5-> Skip everything except header
|
||||
* @deprecated Deprecated in favor of skip_idct and skip_frame.
|
||||
*/
|
||||
attribute_deprecated int hurry_up;
|
||||
#endif
|
||||
|
||||
struct AVCodec *codec;
|
||||
|
||||
void *priv_data;
|
||||
@@ -1506,9 +1474,6 @@ typedef struct AVCodecContext {
|
||||
#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
|
||||
#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
|
||||
#define FF_COMPLIANCE_NORMAL 0
|
||||
#if FF_API_INOFFICIAL
|
||||
#define FF_COMPLIANCE_INOFFICIAL -1 ///< Allow inofficial extensions (deprecated - use FF_COMPLIANCE_UNOFFICIAL instead).
|
||||
#endif
|
||||
#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
|
||||
#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
|
||||
|
||||
@@ -1782,25 +1747,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
unsigned dsp_mask;
|
||||
|
||||
#if FF_API_MM_FLAGS
|
||||
#define FF_MM_FORCE AV_CPU_FLAG_FORCE
|
||||
#define FF_MM_MMX AV_CPU_FLAG_MMX
|
||||
#define FF_MM_3DNOW AV_CPU_FLAG_3DNOW
|
||||
#define FF_MM_MMXEXT AV_CPU_FLAG_MMX2
|
||||
#define FF_MM_MMX2 AV_CPU_FLAG_MMX2
|
||||
#define FF_MM_SSE AV_CPU_FLAG_SSE
|
||||
#define FF_MM_SSE2 AV_CPU_FLAG_SSE2
|
||||
#define FF_MM_SSE2SLOW AV_CPU_FLAG_SSE2SLOW
|
||||
#define FF_MM_3DNOWEXT AV_CPU_FLAG_3DNOWEXT
|
||||
#define FF_MM_SSE3 AV_CPU_FLAG_SSE3
|
||||
#define FF_MM_SSE3SLOW AV_CPU_FLAG_SSE3SLOW
|
||||
#define FF_MM_SSSE3 AV_CPU_FLAG_SSSE3
|
||||
#define FF_MM_SSE4 AV_CPU_FLAG_SSE4
|
||||
#define FF_MM_SSE42 AV_CPU_FLAG_SSE42
|
||||
#define FF_MM_IWMMXT AV_CPU_FLAG_IWMMXT
|
||||
#define FF_MM_ALTIVEC AV_CPU_FLAG_ALTIVEC
|
||||
#endif
|
||||
|
||||
/**
|
||||
* bits per sample/pixel from the demuxer (needed for huffyuv).
|
||||
* - encoding: Set by libavcodec.
|
||||
@@ -1875,22 +1821,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
uint64_t error[4];
|
||||
|
||||
#if FF_API_MB_Q
|
||||
/**
|
||||
* minimum MB quantizer
|
||||
* - encoding: unused
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int mb_qmin;
|
||||
|
||||
/**
|
||||
* maximum MB quantizer
|
||||
* - encoding: unused
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int mb_qmax;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* motion estimation comparison function
|
||||
* - encoding: Set by user.
|
||||
@@ -2592,23 +2522,6 @@ typedef struct AVCodecContext {
|
||||
int compression_level;
|
||||
#define FF_COMPRESSION_DEFAULT -1
|
||||
|
||||
#if FF_API_USE_LPC
|
||||
/**
|
||||
* Sets whether to use LPC mode - used by FLAC encoder.
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
* @deprecated Deprecated in favor of lpc_type and lpc_passes.
|
||||
*/
|
||||
int use_lpc;
|
||||
|
||||
/**
|
||||
* LPC coefficient precision - used by FLAC encoder
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
int lpc_coeff_precision;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
@@ -2628,6 +2541,13 @@ typedef struct AVCodecContext {
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* LPC coefficient precision - used by FLAC encoder
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int lpc_coeff_precision;
|
||||
|
||||
/**
|
||||
* search method for selecting prediction order
|
||||
* - encoding: Set by user.
|
||||
@@ -3291,6 +3211,33 @@ int av_dup_packet(AVPacket *pkt);
|
||||
*/
|
||||
void av_free_packet(AVPacket *pkt);
|
||||
|
||||
/**
|
||||
* Allocate new information of a packet.
|
||||
*
|
||||
* @param pkt packet
|
||||
* @param type side information type
|
||||
* @param size side information size
|
||||
* @return pointer to fresh allocated data or NULL otherwise
|
||||
*/
|
||||
uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
|
||||
int size);
|
||||
|
||||
/**
|
||||
* Get side information from packet.
|
||||
*
|
||||
* @param pkt packet
|
||||
* @param type desired side information type
|
||||
* @param size pointer for side information size to store (optional)
|
||||
* @return pointer to data if present or NULL otherwise
|
||||
*/
|
||||
uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
|
||||
int *size);
|
||||
|
||||
int av_packet_merge_side_data(AVPacket *pkt);
|
||||
|
||||
int av_packet_split_side_data(AVPacket *pkt);
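A hedged usage sketch (not part of the header itself) of the side-data getter declared above, mirroring the 8bps and Cinepak decoder changes elsewhere in this comparison; the s->pal array in the decoder context is an assumed uint32_t[256] field:

/* In a palettised decoder's decode_frame(): fetch a palette update that the
 * demuxer attached to the packet and keep a local copy in the context. */
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
if (pal) {
    s->frame.palette_has_changed = 1;
    memcpy(s->pal, pal, AVPALETTE_SIZE); /* AVPALETTE_SIZE == 1024 bytes (256 RGBA entries) */
}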
|
||||
|
||||
|
||||
/* resample.c */
|
||||
|
||||
struct ReSampleContext;
|
||||
@@ -3298,14 +3245,6 @@ struct AVResampleContext;
|
||||
|
||||
typedef struct ReSampleContext ReSampleContext;
|
||||
|
||||
#if FF_API_AUDIO_OLD
|
||||
/**
|
||||
* @deprecated Use av_audio_resample_init() instead.
|
||||
*/
|
||||
attribute_deprecated ReSampleContext *audio_resample_init(int output_channels, int input_channels,
|
||||
int output_rate, int input_rate);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Initialize audio resampling context.
|
||||
*
|
||||
@@ -3469,23 +3408,6 @@ const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt);
|
||||
|
||||
void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* Return the pixel format corresponding to the name name.
|
||||
*
|
||||
* If there is no pixel format with name name, then look for a
|
||||
* pixel format with the name corresponding to the native endian
|
||||
* format of name.
|
||||
* For example in a little-endian system, first look for "gray16",
|
||||
* then for "gray16le".
|
||||
*
|
||||
* Finally if no pixel format has been found, return PIX_FMT_NONE.
|
||||
*
|
||||
* @deprecated Deprecated in favor of av_get_pix_fmt().
|
||||
*/
|
||||
attribute_deprecated enum PixelFormat avcodec_get_pix_fmt(const char* name);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Return a value representing the fourCC code associated to the
|
||||
* pixel format pix_fmt, or 0 if no associated fourCC code can be
|
||||
@@ -3554,14 +3476,6 @@ int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_
|
||||
enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt,
|
||||
int has_alpha, int *loss_ptr);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Use av_get_pix_fmt_string() instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt);
|
||||
#endif
|
||||
|
||||
#define FF_ALPHA_TRANSP 0x0001 /* image has some totally transparent pixels */
|
||||
#define FF_ALPHA_SEMI_TRANSP 0x0002 /* image has some transparent pixels */
|
||||
|
||||
@@ -3612,13 +3526,6 @@ const char *avcodec_license(void);
|
||||
*/
|
||||
void avcodec_init(void);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Deprecated in favor of avcodec_register().
|
||||
*/
|
||||
attribute_deprecated void register_avcodec(AVCodec *codec);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Register the codec codec and initialize libavcodec.
|
||||
*
|
||||
@@ -3763,14 +3670,6 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
|
||||
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
int linesize_align[4]);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Deprecated in favor of av_check_image_size().
|
||||
*/
|
||||
attribute_deprecated
|
||||
int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h);
|
||||
#endif
|
||||
|
||||
enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
|
||||
|
||||
#if FF_API_THREAD_INIT
|
||||
@@ -3779,8 +3678,8 @@ enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum
|
||||
*/
|
||||
attribute_deprecated
|
||||
int avcodec_thread_init(AVCodecContext *s, int thread_count);
|
||||
void avcodec_thread_free(AVCodecContext *s);
|
||||
#endif
|
||||
|
||||
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
|
||||
int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
|
||||
//FIXME func typedef
|
||||
@@ -3851,25 +3750,6 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
|
||||
*/
|
||||
int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
|
||||
|
||||
#if FF_API_AUDIO_OLD
|
||||
/**
|
||||
* Decode an audio frame from buf into samples.
|
||||
* Wrapper function which calls avcodec_decode_audio3.
|
||||
*
|
||||
* @deprecated Use avcodec_decode_audio3 instead.
|
||||
* @param avctx the codec context
|
||||
* @param[out] samples the output buffer
|
||||
* @param[in,out] frame_size_ptr the output buffer size in bytes
|
||||
* @param[in] buf the input buffer
|
||||
* @param[in] buf_size the input buffer size in bytes
|
||||
* @return On error a negative value is returned, otherwise the number of bytes
|
||||
* used or zero if no frame could be decompressed.
|
||||
*/
|
||||
attribute_deprecated int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
|
||||
int *frame_size_ptr,
|
||||
const uint8_t *buf, int buf_size);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Decode the audio frame of size avpkt->size from avpkt->data into samples.
|
||||
* Some decoders may support multiple frames in a single AVPacket, such
|
||||
@@ -3913,25 +3793,6 @@ int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
|
||||
int *frame_size_ptr,
|
||||
AVPacket *avpkt);
|
||||
|
||||
#if FF_API_VIDEO_OLD
|
||||
/**
|
||||
* Decode a video frame from buf into picture.
|
||||
* Wrapper function which calls avcodec_decode_video2.
|
||||
*
|
||||
* @deprecated Use avcodec_decode_video2 instead.
|
||||
* @param avctx the codec context
|
||||
* @param[out] picture The AVFrame in which the decoded video frame will be stored.
|
||||
* @param[in] buf the input buffer
|
||||
* @param[in] buf_size the size of the input buffer in bytes
|
||||
* @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
|
||||
* @return On error a negative value is returned, otherwise the number of bytes
|
||||
* used or zero if no frame could be decompressed.
|
||||
*/
|
||||
attribute_deprecated int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
|
||||
int *got_picture_ptr,
|
||||
const uint8_t *buf, int buf_size);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Decode the video frame of size avpkt->size from avpkt->data into picture.
|
||||
* Some decoders may support multiple frames in a single AVPacket, such
|
||||
@@ -3976,15 +3837,6 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
|
||||
int *got_picture_ptr,
|
||||
AVPacket *avpkt);
|
||||
|
||||
#if FF_API_SUBTITLE_OLD
|
||||
/* Decode a subtitle message. Return -1 if error, otherwise return the
|
||||
* number of bytes used. If no subtitle could be decompressed,
|
||||
* got_sub_ptr is zero. Otherwise, the subtitle is stored in *sub. */
|
||||
attribute_deprecated int avcodec_decode_subtitle(AVCodecContext *avctx, AVSubtitle *sub,
|
||||
int *got_sub_ptr,
|
||||
const uint8_t *buf, int buf_size);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Decode a subtitle message.
|
||||
* Return a negative value on error, otherwise return the number of bytes used.
|
||||
@@ -4253,15 +4105,6 @@ AVCodecParser *av_parser_next(AVCodecParser *c);
|
||||
void av_register_codec_parser(AVCodecParser *parser);
|
||||
AVCodecParserContext *av_parser_init(int codec_id);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
attribute_deprecated
|
||||
int av_parser_parse(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
uint8_t **poutbuf, int *poutbuf_size,
|
||||
const uint8_t *buf, int buf_size,
|
||||
int64_t pts, int64_t dts);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Parse a packet.
|
||||
*
|
||||
@@ -4340,7 +4183,7 @@ AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f);
|
||||
*
|
||||
* @see av_realloc
|
||||
*/
|
||||
void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size);
|
||||
void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
|
||||
|
||||
/**
|
||||
* Allocate a buffer, reusing the given one if large enough.
|
||||
@@ -4354,17 +4197,7 @@ void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_s
|
||||
* @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
|
||||
* *size 0 if an error occurred.
|
||||
*/
|
||||
void av_fast_malloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Deprecated in favor of av_image_copy().
|
||||
*/
|
||||
attribute_deprecated
|
||||
void av_picture_data_copy(uint8_t *dst_data[4], int dst_linesize[4],
|
||||
uint8_t *src_data[4], int src_linesize[4],
|
||||
enum PixelFormat pix_fmt, int width, int height);
|
||||
#endif
|
||||
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
|
||||
|
||||
/**
|
||||
* Copy image src to dst. Wraps av_picture_data_copy() above.
|
||||
@@ -4393,22 +4226,6 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
|
||||
*/
|
||||
unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* Parse str and put in width_ptr and height_ptr the detected values.
|
||||
*
|
||||
* @deprecated Deprecated in favor of av_parse_video_size().
|
||||
*/
|
||||
attribute_deprecated int av_parse_video_frame_size(int *width_ptr, int *height_ptr, const char *str);
|
||||
|
||||
/**
|
||||
* Parse str and store the detected values in *frame_rate.
|
||||
*
|
||||
* @deprecated Deprecated in favor of av_parse_video_rate().
|
||||
*/
|
||||
attribute_deprecated int av_parse_video_frame_rate(AVRational *frame_rate, const char *str);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Logs a generic warning message about a missing feature. This function is
|
||||
* intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
|
||||
|
@@ -26,12 +26,21 @@
|
||||
void av_destruct_packet_nofree(AVPacket *pkt)
|
||||
{
|
||||
pkt->data = NULL; pkt->size = 0;
|
||||
pkt->side_data = NULL;
|
||||
pkt->side_data_elems = 0;
|
||||
}
|
||||
|
||||
void av_destruct_packet(AVPacket *pkt)
|
||||
{
|
||||
int i;
|
||||
|
||||
av_free(pkt->data);
|
||||
pkt->data = NULL; pkt->size = 0;
|
||||
|
||||
for (i = 0; i < pkt->side_data_elems; i++)
|
||||
av_free(pkt->side_data[i].data);
|
||||
av_freep(&pkt->side_data);
|
||||
pkt->side_data_elems = 0;
|
||||
}
|
||||
|
||||
void av_init_packet(AVPacket *pkt)
|
||||
@@ -44,6 +53,8 @@ void av_init_packet(AVPacket *pkt)
|
||||
pkt->flags = 0;
|
||||
pkt->stream_index = 0;
|
||||
pkt->destruct= NULL;
|
||||
pkt->side_data = NULL;
|
||||
pkt->side_data_elems = 0;
|
||||
}
|
||||
|
||||
int av_new_packet(AVPacket *pkt, int size)
|
||||
@@ -89,23 +100,52 @@ int av_grow_packet(AVPacket *pkt, int grow_by)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define DUP_DATA(dst, src, size, padding) \
|
||||
do { \
|
||||
void *data; \
|
||||
if (padding) { \
|
||||
if ((unsigned)(size) > (unsigned)(size) + FF_INPUT_BUFFER_PADDING_SIZE) \
|
||||
goto failed_alloc; \
|
||||
data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); \
|
||||
} else { \
|
||||
data = av_malloc(size); \
|
||||
} \
|
||||
if (!data) \
|
||||
goto failed_alloc; \
|
||||
memcpy(data, src, size); \
|
||||
if (padding) \
|
||||
memset((uint8_t*)data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE); \
|
||||
dst = data; \
|
||||
} while(0)
|
||||
|
||||
int av_dup_packet(AVPacket *pkt)
|
||||
{
|
||||
AVPacket tmp_pkt;
|
||||
|
||||
if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
|
||||
uint8_t *data;
|
||||
/* We duplicate the packet and don't forget to add the padding again. */
|
||||
if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
|
||||
return AVERROR(ENOMEM);
|
||||
data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!data) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(data, pkt->data, pkt->size);
|
||||
memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->data = data;
|
||||
tmp_pkt = *pkt;
|
||||
|
||||
pkt->data = NULL;
|
||||
pkt->side_data = NULL;
|
||||
DUP_DATA(pkt->data, tmp_pkt.data, pkt->size, 1);
|
||||
pkt->destruct = av_destruct_packet;
|
||||
|
||||
if (pkt->side_data_elems) {
|
||||
int i;
|
||||
|
||||
DUP_DATA(pkt->side_data, tmp_pkt.side_data,
|
||||
pkt->side_data_elems * sizeof(*pkt->side_data), 0);
|
||||
memset(pkt->side_data, 0, pkt->side_data_elems * sizeof(*pkt->side_data));
|
||||
for (i = 0; i < pkt->side_data_elems; i++) {
|
||||
DUP_DATA(pkt->side_data[i].data, tmp_pkt.side_data[i].data,
|
||||
pkt->side_data[i].size, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
failed_alloc:
|
||||
av_destruct_packet(pkt);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
void av_free_packet(AVPacket *pkt)
|
||||
@@ -113,6 +153,125 @@ void av_free_packet(AVPacket *pkt)
|
||||
if (pkt) {
|
||||
if (pkt->destruct) pkt->destruct(pkt);
|
||||
pkt->data = NULL; pkt->size = 0;
|
||||
pkt->side_data = NULL;
|
||||
pkt->side_data_elems = 0;
|
||||
}
|
||||
}
|
||||
|
||||
uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
|
||||
int size)
|
||||
{
|
||||
int elems = pkt->side_data_elems;
|
||||
|
||||
if ((unsigned)elems + 1 > INT_MAX / sizeof(*pkt->side_data))
|
||||
return NULL;
|
||||
if ((unsigned)size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
|
||||
return NULL;
|
||||
|
||||
pkt->side_data = av_realloc(pkt->side_data, (elems + 1) * sizeof(*pkt->side_data));
|
||||
if (!pkt->side_data)
|
||||
return NULL;
|
||||
|
||||
pkt->side_data[elems].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!pkt->side_data[elems].data)
|
||||
return NULL;
|
||||
pkt->side_data[elems].size = size;
|
||||
pkt->side_data[elems].type = type;
|
||||
pkt->side_data_elems++;
|
||||
|
||||
return pkt->side_data[elems].data;
|
||||
}
|
||||
|
||||
uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
|
||||
int *size)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pkt->side_data_elems; i++) {
|
||||
if (pkt->side_data[i].type == type) {
|
||||
if (size)
|
||||
*size = pkt->side_data[i].size;
|
||||
return pkt->side_data[i].data;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#define FF_MERGE_MARKER 0x8c4d9d108e25e9feULL
|
||||
|
||||
int av_packet_merge_side_data(AVPacket *pkt){
|
||||
if(pkt->side_data_elems){
|
||||
int i;
|
||||
uint8_t *p;
|
||||
uint64_t size= pkt->size + 8LL + FF_INPUT_BUFFER_PADDING_SIZE;
|
||||
AVPacket old= *pkt;
|
||||
for (i=0; i<old.side_data_elems; i++) {
|
||||
size += old.side_data[i].size + 5LL;
|
||||
}
|
||||
if (size > INT_MAX)
|
||||
return AVERROR(EINVAL);
|
||||
p = av_malloc(size);
|
||||
if (!p)
|
||||
return AVERROR(ENOMEM);
|
||||
pkt->data = p;
|
||||
pkt->destruct = av_destruct_packet;
|
||||
pkt->size = size - FF_INPUT_BUFFER_PADDING_SIZE;
|
||||
bytestream_put_buffer(&p, old.data, old.size);
|
||||
for (i=old.side_data_elems-1; i>=0; i--) {
|
||||
bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
|
||||
bytestream_put_be32(&p, old.side_data[i].size);
|
||||
*p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
|
||||
}
|
||||
bytestream_put_be64(&p, FF_MERGE_MARKER);
|
||||
av_assert0(p-pkt->data == pkt->size);
|
||||
memset(p, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
av_free_packet(&old);
|
||||
pkt->side_data_elems = 0;
|
||||
pkt->side_data = NULL;
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int av_packet_split_side_data(AVPacket *pkt){
|
||||
if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
|
||||
int i;
|
||||
unsigned int size;
|
||||
uint8_t *p= pkt->data + pkt->size - 8 - 5;
|
||||
|
||||
av_dup_packet(pkt);
|
||||
|
||||
for (i=1; ; i++){
|
||||
size = AV_RB32(p);
|
||||
if (size>INT_MAX || p - pkt->data <= size)
|
||||
return 0;
|
||||
if (p[4]&128)
|
||||
break;
|
||||
p-= size+5;
|
||||
}
|
||||
|
||||
pkt->side_data = av_malloc(i * sizeof(*pkt->side_data));
|
||||
if (!pkt->side_data)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
p= pkt->data + pkt->size - 8 - 5;
|
||||
for (i=0; ; i++){
|
||||
size= AV_RB32(p);
|
||||
av_assert0(size<=INT_MAX && p - pkt->data > size);
|
||||
pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->side_data[i].size = size;
|
||||
pkt->side_data[i].type = p[4]&127;
|
||||
if (!pkt->side_data[i].data)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(pkt->side_data[i].data, p-size, size);
|
||||
pkt->size -= size + 5;
|
||||
if(p[4]&128)
|
||||
break;
|
||||
p-= size+5;
|
||||
}
|
||||
pkt->size -= 8;
|
||||
pkt->side_data_elems = i+1;
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@@ -67,6 +67,7 @@ typedef struct CinepakContext {
|
||||
|
||||
int sega_film_skip_bytes;
|
||||
|
||||
uint32_t pal[256];
|
||||
} CinepakContext;
|
||||
|
||||
static void cinepak_decode_codebook (cvid_codebook *codebook,
|
||||
@@ -398,7 +399,7 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
|
||||
s->sega_film_skip_bytes = -1; /* uninitialized state */
|
||||
|
||||
// check for paletted data
|
||||
if ((avctx->palctrl == NULL) || (avctx->bits_per_coded_sample == 40)) {
|
||||
if (avctx->bits_per_coded_sample != 8) {
|
||||
s->palette_video = 0;
|
||||
avctx->pix_fmt = PIX_FMT_YUV420P;
|
||||
} else {
|
||||
@@ -431,16 +432,18 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (s->palette_video) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
cinepak_decode(s);
|
||||
|
||||
if (s->palette_video) {
|
||||
memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (avctx->palctrl->palette_changed) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
avctx->palctrl->palette_changed = 0;
|
||||
} else
|
||||
s->frame.palette_has_changed = 0;
|
||||
}
|
||||
if (s->palette_video)
|
||||
memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->frame;
|
||||
|
@@ -263,6 +263,8 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
|
||||
segments = bytestream_get_le16(&src);
|
||||
}
|
||||
line_ptr = frame;
|
||||
if (frame_end - frame < width)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += width;
|
||||
y++;
|
||||
while (segments--) {
|
||||
|
@@ -1914,7 +1914,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
|
||||
|
||||
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
|
||||
long i;
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src+i);
|
||||
long b = *(long*)(dst+i);
|
||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||
@@ -1939,7 +1939,7 @@ static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
|
||||
}
|
||||
}else
|
||||
#endif
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src1+i);
|
||||
long b = *(long*)(src2+i);
|
||||
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
|
||||
@@ -2836,7 +2836,7 @@ int ff_check_alignment(void){
|
||||
|
||||
av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
|
||||
{
|
||||
int i;
|
||||
int i, j;
|
||||
|
||||
ff_check_alignment();
|
||||
|
||||
@@ -3222,11 +3222,15 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
|
||||
if (ARCH_SH4) dsputil_init_sh4 (c, avctx);
|
||||
if (ARCH_BFIN) dsputil_init_bfin (c, avctx);
|
||||
|
||||
for(i=0; i<64; i++){
|
||||
if(!c->put_2tap_qpel_pixels_tab[0][i])
|
||||
c->put_2tap_qpel_pixels_tab[0][i]= c->put_h264_qpel_pixels_tab[0][i];
|
||||
if(!c->avg_2tap_qpel_pixels_tab[0][i])
|
||||
c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
|
||||
for (i = 0; i < 4; i++) {
|
||||
for (j = 0; j < 16; j++) {
|
||||
if(!c->put_2tap_qpel_pixels_tab[i][j])
|
||||
c->put_2tap_qpel_pixels_tab[i][j] =
|
||||
c->put_h264_qpel_pixels_tab[i][j];
|
||||
if(!c->avg_2tap_qpel_pixels_tab[i][j])
|
||||
c->avg_2tap_qpel_pixels_tab[i][j] =
|
||||
c->avg_h264_qpel_pixels_tab[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
c->put_rv30_tpel_pixels_tab[0][0] = c->put_h264_qpel_pixels_tab[0][0];
|
||||
|
@@ -120,14 +120,6 @@ void ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);
|
||||
void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);
|
||||
|
||||
/* 1/2^n downscaling functions from imgconvert.c */
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Use av_image_copy_plane() instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
#endif
|
||||
|
||||
void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
|
||||
|
@@ -522,7 +522,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
|
||||
int run_mode=0;
|
||||
|
||||
if(s->ac){
|
||||
if(c->bytestream_end - c->bytestream < w*20){
|
||||
if(c->bytestream_end - c->bytestream < w*35){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
|
||||
return -1;
|
||||
}
|
||||
|
@@ -27,7 +27,7 @@ const int ff_flac_sample_rate_table[16] =
|
||||
8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
|
||||
0, 0, 0, 0 };
|
||||
|
||||
const int16_t ff_flac_blocksize_table[16] = {
|
||||
const int32_t ff_flac_blocksize_table[16] = {
|
||||
0, 192, 576<<0, 576<<1, 576<<2, 576<<3, 0, 0,
|
||||
256<<0, 256<<1, 256<<2, 256<<3, 256<<4, 256<<5, 256<<6, 256<<7
|
||||
};
|
||||
|
@@ -26,6 +26,6 @@
|
||||
|
||||
extern const int ff_flac_sample_rate_table[16];
|
||||
|
||||
extern const int16_t ff_flac_blocksize_table[16];
|
||||
extern const int32_t ff_flac_blocksize_table[16];
|
||||
|
||||
#endif /* AVCODEC_FLACDATA_H */
|
||||
|
@@ -296,17 +296,6 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
|
||||
s->options.max_partition_order = ((int[]){ 2, 2, 3, 3, 3, 8, 8, 8, 8, 8, 8, 8, 8})[level];
|
||||
|
||||
/* set compression option overrides from AVCodecContext */
|
||||
#if FF_API_USE_LPC
|
||||
/* for compatibility with deprecated AVCodecContext.use_lpc */
|
||||
if (avctx->use_lpc == 0) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_FIXED;
|
||||
} else if (avctx->use_lpc == 1) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_LEVINSON;
|
||||
} else if (avctx->use_lpc > 1) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_CHOLESKY;
|
||||
s->options.lpc_passes = avctx->use_lpc - 1;
|
||||
}
|
||||
#endif
|
||||
#if FF_API_FLAC_GLOBAL_OPTS
|
||||
if (avctx->lpc_type > FF_LPC_TYPE_DEFAULT) {
|
||||
if (avctx->lpc_type > FF_LPC_TYPE_CHOLESKY) {
|
||||
|
@@ -599,10 +599,6 @@ retry:
|
||||
s->current_picture.pict_type= s->pict_type;
|
||||
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|
||||
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
|
@@ -610,18 +610,10 @@ retry:
|
||||
|
||||
/* skip B-frames if we don't have reference frames */
|
||||
if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip b frames if we are in a hurry */
|
||||
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
return get_consumed_bytes(s, buf_size);
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
|
||||
if(s->next_p_frame_damaged){
|
||||
if(s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|
@@ -3777,11 +3777,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
|
||||
//FIXME do not discard SEI id
|
||||
if(
|
||||
#if FF_API_HURRY_UP
|
||||
(s->hurry_up == 1 && h->nal_ref_idc == 0) ||
|
||||
#endif
|
||||
(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
|
||||
if(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
|
||||
continue;
|
||||
|
||||
again:
|
||||
@@ -3818,9 +3814,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
|
||||
if(hx->redundant_pic_count==0
|
||||
#if FF_API_HURRY_UP
|
||||
&& hx->s.hurry_up < 5
|
||||
#endif
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
|
||||
@@ -3859,9 +3852,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
|
||||
&& s->current_picture_ptr
|
||||
&& s->context_initialized
|
||||
#if FF_API_HURRY_UP
|
||||
&& s->hurry_up < 5
|
||||
#endif
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
|
||||
@@ -3890,6 +3880,12 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
if(avctx->has_b_frames < 2)
|
||||
avctx->has_b_frames= !s->low_delay;
|
||||
|
||||
if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
|
||||
av_log_missing_feature(s->avctx,
|
||||
"Different bit depth between chroma and luma", 1);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
if (avctx->bits_per_raw_sample != h->sps.bit_depth_luma) {
|
||||
if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
|
||||
avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
|
||||
@@ -4007,11 +4003,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){
|
||||
if (avctx->skip_frame >= AVDISCARD_NONREF
|
||||
#if FF_API_HURRY_UP
|
||||
|| s->hurry_up
|
||||
#endif
|
||||
)
|
||||
if (avctx->skip_frame >= AVDISCARD_NONREF)
|
||||
return 0;
|
||||
av_log(avctx, AV_LOG_ERROR, "no frame!\n");
|
||||
return -1;
|
||||
|
@@ -621,7 +621,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
              down the code */
    if(h->slice_type_nos != AV_PICTURE_TYPE_I){
        if(s->mb_skip_run==-1)
            s->mb_skip_run= get_ue_golomb(&s->gb);
            s->mb_skip_run= get_ue_golomb_long(&s->gb);

    if (s->mb_skip_run--) {
        if(FRAME_MBAFF && (s->mb_y&1) == 0){
@@ -72,6 +72,7 @@ typedef struct IdcinContext {
|
||||
hnode huff_nodes[256][HUF_TOKENS*2];
|
||||
int num_huff_nodes[256];
|
||||
|
||||
uint32_t pal[256];
|
||||
} IdcinContext;
|
||||
|
||||
/*
|
||||
@@ -214,7 +215,7 @@ static int idcin_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
IdcinContext *s = avctx->priv_data;
|
||||
AVPaletteControl *palette_control = avctx->palctrl;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
s->buf = buf;
|
||||
s->size = buf_size;
|
||||
@@ -229,13 +230,12 @@ static int idcin_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
idcin_decode_vlcs(s);
|
||||
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], palette_control->palette, PALETTE_COUNT * 4);
|
||||
/* If palette changed inform application*/
|
||||
if (palette_control->palette_changed) {
|
||||
palette_control->palette_changed = 0;
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->frame;
|
||||
|
@@ -424,40 +424,11 @@ const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
enum PixelFormat avcodec_get_pix_fmt(const char *name)
|
||||
{
|
||||
return av_get_pix_fmt(name);
|
||||
}
|
||||
|
||||
void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt)
|
||||
{
|
||||
av_get_pix_fmt_string(buf, buf_size, pix_fmt);
|
||||
}
|
||||
#endif
|
||||
|
||||
int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt)
|
||||
{
|
||||
return av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
|
||||
return ff_set_systematic_pal2(pal, pix_fmt);
|
||||
}
|
||||
|
||||
int ff_fill_linesize(AVPicture *picture, enum PixelFormat pix_fmt, int width)
|
||||
{
|
||||
return av_image_fill_linesizes(picture->linesize, pix_fmt, width);
|
||||
}
|
||||
|
||||
int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt,
|
||||
int height)
|
||||
{
|
||||
return av_image_fill_pointers(picture->data, pix_fmt, height, ptr, picture->linesize);
|
||||
}
|
||||
#endif
|
||||
|
||||
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
@@ -703,28 +674,6 @@ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelForma
|
||||
return dst_pix_fmt;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
|
||||
const uint8_t *src, int src_wrap,
|
||||
int width, int height)
|
||||
{
|
||||
av_image_copy_plane(dst, dst_wrap, src, src_wrap, width, height);
|
||||
}
|
||||
|
||||
int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
|
||||
{
|
||||
return av_image_get_linesize(pix_fmt, width, plane);
|
||||
}
|
||||
|
||||
void av_picture_data_copy(uint8_t *dst_data[4], int dst_linesize[4],
|
||||
uint8_t *src_data[4], int src_linesize[4],
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
av_image_copy(dst_data, dst_linesize, src_data, src_linesize,
|
||||
pix_fmt, width, height);
|
||||
}
|
||||
#endif
|
||||
|
||||
void av_picture_copy(AVPicture *dst, const AVPicture *src,
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
|
@@ -804,6 +804,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
|
||||
ctx->frame.reference = 0;
|
||||
avcodec_set_dimensions(avctx, ctx->planes[0].width, ctx->planes[0].height);
|
||||
if (avctx->get_buffer(avctx, &ctx->frame) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
|
@@ -69,6 +69,7 @@ typedef struct IpvideoContext {
|
||||
int stride;
|
||||
int upper_motion_limit_offset;
|
||||
|
||||
uint32_t pal[256];
|
||||
} IpvideoContext;
|
||||
|
||||
#define CHECK_STREAM_PTR(stream_ptr, stream_end, n) \
|
||||
@@ -961,7 +962,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
|
||||
|
||||
if (!s->is_16bpp) {
|
||||
/* this is PAL8, so make the palette available */
|
||||
memcpy(s->current_frame.data[1], s->avctx->palctrl->palette, PALETTE_COUNT * 4);
|
||||
memcpy(s->current_frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
|
||||
s->stride = s->current_frame.linesize[0];
|
||||
s->stream_ptr = s->buf + 14; /* data starts 14 bytes in */
|
||||
@@ -1015,10 +1016,6 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
|
||||
|
||||
s->is_16bpp = avctx->bits_per_coded_sample == 16;
|
||||
avctx->pix_fmt = s->is_16bpp ? PIX_FMT_RGB555 : PIX_FMT_PAL8;
|
||||
if (!s->is_16bpp && s->avctx->palctrl == NULL) {
|
||||
av_log(avctx, AV_LOG_ERROR, " Interplay video: palette expected.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
dsputil_init(&s->dsp, avctx);
|
||||
|
||||
@@ -1041,7 +1038,6 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
IpvideoContext *s = avctx->priv_data;
|
||||
AVPaletteControl *palette_control = avctx->palctrl;
|
||||
|
||||
/* compressed buffer needs to be large enough to at least hold an entire
|
||||
* decoding map */
|
||||
@@ -1058,13 +1054,16 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
ipvideo_decode_opcodes(s);
|
||||
|
||||
if (!s->is_16bpp && palette_control->palette_changed) {
|
||||
palette_control->palette_changed = 0;
|
||||
s->current_frame.palette_has_changed = 1;
|
||||
if (!s->is_16bpp) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
if (pal) {
|
||||
s->current_frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
ipvideo_decode_opcodes(s);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->current_frame;
|
||||
|
||||
|
@@ -28,6 +28,7 @@
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
#include "j2k.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/common.h"
|
||||
|
||||
#define JP2_SIG_TYPE 0x6A502020
|
||||
@@ -283,6 +284,10 @@ static int get_cox(J2kDecoderContext *s, J2kCodingStyle *c)
|
||||
c->log2_cblk_width = bytestream_get_byte(&s->buf) + 2; // cblk width
|
||||
c->log2_cblk_height = bytestream_get_byte(&s->buf) + 2; // cblk height
|
||||
|
||||
if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
c->cblk_style = bytestream_get_byte(&s->buf);
|
||||
if (c->cblk_style != 0){ // cblk style
|
||||
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);
|
||||
@@ -699,6 +704,9 @@ static int decode_cblk(J2kDecoderContext *s, J2kCodingStyle *codsty, J2kT1Contex
|
||||
int bpass_csty_symbol = J2K_CBLK_BYPASS & codsty->cblk_style;
|
||||
int vert_causal_ctx_csty_symbol = J2K_CBLK_VSC & codsty->cblk_style;
|
||||
|
||||
av_assert0(width <= J2K_MAX_CBLKW);
|
||||
av_assert0(height <= J2K_MAX_CBLKH);
|
||||
|
||||
for (y = 0; y < height+2; y++)
|
||||
memset(t1->flags[y], 0, (width+2)*sizeof(int));
|
||||
|
||||
|
@@ -284,6 +284,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
|
||||
int i;
|
||||
int header;
|
||||
int blocksize;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (ctx->pic.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->pic);
|
||||
@@ -315,13 +316,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
|
||||
ctx->pic.pict_type = AV_PICTURE_TYPE_P;
|
||||
}
|
||||
|
||||
/* if palette has been changed, copy it from palctrl */
|
||||
if (ctx->avctx->palctrl && ctx->avctx->palctrl->palette_changed) {
|
||||
memcpy(ctx->pal, ctx->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
ctx->setpal = 1;
|
||||
ctx->avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
|
||||
if (header & KMVC_PALETTE) {
|
||||
ctx->pic.palette_has_changed = 1;
|
||||
// palette starts from index 1 and has 127 entries
|
||||
@@ -330,6 +324,11 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
|
||||
}
|
||||
}
|
||||
|
||||
if (pal) {
|
||||
ctx->pic.palette_has_changed = 1;
|
||||
memcpy(ctx->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
if (ctx->setpal) {
|
||||
ctx->setpal = 0;
|
||||
ctx->pic.palette_has_changed = 1;
|
||||
@@ -429,9 +428,6 @@ static av_cold int decode_init(AVCodecContext * avctx)
|
||||
src += 4;
|
||||
}
|
||||
c->setpal = 1;
|
||||
if (c->avctx->palctrl) {
|
||||
c->avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
}
|
||||
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
|
@@ -1288,7 +1288,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
|
||||
s1->save_width != s->width ||
|
||||
s1->save_height != s->height ||
|
||||
s1->save_aspect_info != s->aspect_ratio_info||
|
||||
s1->save_progressive_seq != s->progressive_sequence ||
|
||||
(s1->save_progressive_seq != s->progressive_sequence && (s->height&31)) ||
|
||||
0)
|
||||
{
|
||||
|
||||
@@ -2478,18 +2478,10 @@ static int decode_chunks(AVCodecContext *avctx,
|
||||
/* Skip P-frames if we do not have a reference frame or we have an invalid header. */
|
||||
if(s2->pict_type==AV_PICTURE_TYPE_P && !s->sync) break;
|
||||
}
|
||||
#if FF_API_HURRY_UP
|
||||
/* Skip B-frames if we are in a hurry. */
|
||||
if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break;
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==AV_PICTURE_TYPE_B)
|
||||
||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
break;
|
||||
#if FF_API_HURRY_UP
|
||||
/* Skip everything if we are in a hurry>=5. */
|
||||
if(avctx->hurry_up>=5) break;
|
||||
#endif
|
||||
|
||||
if (!s->mpeg_enc_ctx_allocated) break;
|
||||
|
||||
|
@@ -1138,9 +1138,6 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
|
||||
}
|
||||
}
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
s->hurry_up= s->avctx->hurry_up;
|
||||
#endif
|
||||
s->error_recognition= avctx->error_recognition;
|
||||
|
||||
/* set dequantizer, we can't do it during init as it might change for mpeg4
|
||||
@@ -2150,9 +2147,6 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
|
||||
}
|
||||
|
||||
/* skip dequant / idct if we are really late ;) */
|
||||
#if FF_API_HURRY_UP
|
||||
if(s->hurry_up>1) goto skip_idct;
|
||||
#endif
|
||||
if(s->avctx->skip_idct){
|
||||
if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
|
||||
||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
|
||||
|
@@ -391,11 +391,6 @@ typedef struct MpegEncContext {
|
||||
int no_rounding; /**< apply no rounding to motion compensation (MPEG4, msmpeg4, ...)
|
||||
for b-frames rounding mode is always 0 */
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
int hurry_up; /**< when set to 1 during decoding, b frames will be skipped
|
||||
when set to 2 idct/dequant will be skipped too */
|
||||
#endif
|
||||
|
||||
/* macroblock layer */
|
||||
int mb_x, mb_y;
|
||||
int mb_skip_run;
|
||||
|
@@ -26,9 +26,6 @@
|
||||
* http://www.pcisys.net/~melanson/codecs/
|
||||
*
|
||||
* The MS RLE decoder outputs PAL8 colorspace data.
|
||||
*
|
||||
* Note that this decoder expects the palette colors from the end of the
|
||||
* BITMAPINFO header passed through palctrl.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -46,6 +43,7 @@ typedef struct MsrleContext {
|
||||
const unsigned char *buf;
|
||||
int size;
|
||||
|
||||
uint32_t pal[256];
|
||||
} MsrleContext;
|
||||
|
||||
static av_cold int msrle_decode_init(AVCodecContext *avctx)
|
||||
@@ -95,13 +93,16 @@ static int msrle_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (s->avctx->palctrl) {
|
||||
/* make the palette available */
|
||||
memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (s->avctx->palctrl->palette_changed) {
|
||||
if (avctx->bits_per_coded_sample > 1 && avctx->bits_per_coded_sample <= 8) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
s->avctx->palctrl->palette_changed = 0;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
/* make the palette available */
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
/* FIXME how to correctly detect RLE ??? */
|
||||
|
@@ -25,9 +25,6 @@
|
||||
* For more information about the MS Video-1 format, visit:
|
||||
* http://www.pcisys.net/~melanson/codecs/
|
||||
*
|
||||
* This decoder outputs either PAL8 or RGB555 data, depending on the
|
||||
* whether a RGB palette was passed through palctrl;
|
||||
* if it's present, then the data is PAL8; RGB555 otherwise.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -55,6 +52,7 @@ typedef struct Msvideo1Context {
|
||||
|
||||
int mode_8bit; /* if it's not 8-bit, it's 16-bit */
|
||||
|
||||
uint32_t pal[256];
|
||||
} Msvideo1Context;
|
||||
|
||||
static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
|
||||
@@ -64,7 +62,7 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
|
||||
s->avctx = avctx;
|
||||
|
||||
/* figure out the colorspace based on the presence of a palette */
|
||||
if (s->avctx->palctrl) {
|
||||
if (s->avctx->bits_per_coded_sample == 8) {
|
||||
s->mode_8bit = 1;
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
} else {
|
||||
@@ -174,13 +172,8 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
|
||||
}
|
||||
|
||||
/* make the palette available on the way out */
|
||||
if (s->avctx->pix_fmt == PIX_FMT_PAL8) {
|
||||
memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (s->avctx->palctrl->palette_changed) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
s->avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
}
|
||||
if (s->avctx->pix_fmt == PIX_FMT_PAL8)
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
static void msvideo1_decode_16bit(Msvideo1Context *s)
|
||||
@@ -310,6 +303,15 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (s->mode_8bit) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
s->frame.palette_has_changed = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (s->mode_8bit)
|
||||
msvideo1_decode_8bit(s);
|
||||
else
|
||||
|
@@ -1,89 +0,0 @@
|
||||
/*
|
||||
* AVOptions ABI compatibility wrapper
|
||||
* Copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "opt.h"
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
|
||||
|
||||
FF_SYMVER(const AVOption *, av_find_opt, (void *obj, const char *name, const char *unit, int mask, int flags), "LIBAVCODEC_52"){
|
||||
return av_find_opt(obj, name, unit, mask, flags);
|
||||
}
|
||||
FF_SYMVER(int, av_set_string3, (void *obj, const char *name, const char *val, int alloc, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_set_string3(obj, name, val, alloc, o_out);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_set_double, (void *obj, const char *name, double n), "LIBAVCODEC_52"){
|
||||
return av_set_double(obj, name, n);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_set_q, (void *obj, const char *name, AVRational n), "LIBAVCODEC_52"){
|
||||
return av_set_q(obj, name, n);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_set_int, (void *obj, const char *name, int64_t n), "LIBAVCODEC_52"){
|
||||
return av_set_int(obj, name, n);
|
||||
}
|
||||
FF_SYMVER(double, av_get_double, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_get_double(obj, name, o_out);
|
||||
}
|
||||
FF_SYMVER(AVRational, av_get_q, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_get_q(obj, name, o_out);
|
||||
}
|
||||
FF_SYMVER(int64_t, av_get_int, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_get_int(obj, name, o_out);
|
||||
}
|
||||
FF_SYMVER(const char *, av_get_string, (void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len), "LIBAVCODEC_52"){
|
||||
return av_get_string(obj, name, o_out, buf, buf_len);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_next_option, (void *obj, const AVOption *last), "LIBAVCODEC_52"){
|
||||
return av_next_option(obj, last);
|
||||
}
|
||||
FF_SYMVER(int, av_opt_show2, (void *obj, void *av_log_obj, int req_flags, int rej_flags), "LIBAVCODEC_52"){
|
||||
return av_opt_show2(obj, av_log_obj, req_flags, rej_flags);
|
||||
}
|
||||
FF_SYMVER(void, av_opt_set_defaults, (void *s), "LIBAVCODEC_52"){
|
||||
return av_opt_set_defaults(s);
|
||||
}
|
||||
FF_SYMVER(void, av_opt_set_defaults2, (void *s, int mask, int flags), "LIBAVCODEC_52"){
|
||||
return av_opt_set_defaults2(s, mask, flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if FF_API_SET_STRING_OLD
|
||||
const AVOption *av_set_string2(void *obj, const char *name, const char *val, int alloc){
|
||||
const AVOption *o;
|
||||
if (av_set_string3(obj, name, val, alloc, &o) < 0)
|
||||
return NULL;
|
||||
return o;
|
||||
}
|
||||
|
||||
const AVOption *av_set_string(void *obj, const char *name, const char *val){
|
||||
const AVOption *o;
|
||||
if (av_set_string3(obj, name, val, 0, &o) < 0)
|
||||
return NULL;
|
||||
return o;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if FF_API_OPT_SHOW
|
||||
int av_opt_show(void *obj, void *av_log_obj){
|
||||
return av_opt_show2(obj, av_log_obj,
|
||||
AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0);
|
||||
}
|
||||
#endif
|
@@ -1,21 +1,18 @@
|
||||
/*
|
||||
* AVOptions
|
||||
* copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
@@ -28,34 +25,10 @@
|
||||
#ifndef AVCODEC_OPT_H
|
||||
#define AVCODEC_OPT_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* AVOptions
|
||||
*/
|
||||
#include "libavcodec/version.h"
|
||||
|
||||
#include "libavutil/rational.h"
|
||||
#include "avcodec.h"
|
||||
#if FF_API_OPT_H
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#if FF_API_SET_STRING_OLD
|
||||
/**
|
||||
* @see av_set_string2()
|
||||
*/
|
||||
attribute_deprecated const AVOption *av_set_string(void *obj, const char *name, const char *val);
|
||||
|
||||
/**
|
||||
* @return a pointer to the AVOption corresponding to the field set or
|
||||
* NULL if no matching AVOption exists, or if the value val is not
|
||||
* valid
|
||||
* @see av_set_string3()
|
||||
*/
|
||||
attribute_deprecated const AVOption *av_set_string2(void *obj, const char *name, const char *val, int alloc);
|
||||
#endif
|
||||
#if FF_API_OPT_SHOW
|
||||
/**
|
||||
* @deprecated Use av_opt_show2() instead.
|
||||
*/
|
||||
attribute_deprecated int av_opt_show(void *obj, void *av_log_obj);
|
||||
#endif
|
||||
|
||||
#endif /* AVCODEC_OPT_H */
|
||||
|
@@ -438,7 +438,6 @@ static const AVOption options[]={
|
||||
{"crf_max", "in crf mode, prevents vbv from lowering quality beyond this point", OFFSET(crf_max), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 51, V|E},
|
||||
{"log_level_offset", "set the log level offset", OFFSET(log_level_offset), FF_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX },
|
||||
#if FF_API_FLAC_GLOBAL_OPTS
|
||||
{"use_lpc", "sets whether to use LPC mode (FLAC)", OFFSET(use_lpc), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
|
||||
{"lpc_type", "deprecated, use flac-specific options", OFFSET(lpc_type), FF_OPT_TYPE_INT, {.dbl = AV_LPC_TYPE_DEFAULT }, AV_LPC_TYPE_DEFAULT, AV_LPC_TYPE_NB-1, A|E},
|
||||
{"none", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_NONE }, INT_MIN, INT_MAX, A|E, "lpc_type"},
|
||||
{"fixed", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_FIXED }, INT_MIN, INT_MAX, A|E, "lpc_type"},
|
||||
|
@@ -105,43 +105,6 @@ void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove){
|
||||
}
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
*
|
||||
* @param buf input
|
||||
* @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output)
|
||||
* @param pts input presentation timestamp
|
||||
* @param dts input decoding timestamp
|
||||
* @param poutbuf will contain a pointer to the first byte of the output frame
|
||||
* @param poutbuf_size will contain the length of the output frame
|
||||
* @return the number of bytes of the input bitstream used
|
||||
*
|
||||
* Example:
|
||||
* @code
|
||||
* while(in_len){
|
||||
* len = av_parser_parse(myparser, AVCodecContext, &data, &size,
|
||||
* in_data, in_len,
|
||||
* pts, dts);
|
||||
* in_data += len;
|
||||
* in_len -= len;
|
||||
*
|
||||
* if(size)
|
||||
* decode_frame(data, size);
|
||||
* }
|
||||
* @endcode
|
||||
*
|
||||
* @deprecated Use av_parser_parse2() instead.
|
||||
*/
|
||||
int av_parser_parse(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
uint8_t **poutbuf, int *poutbuf_size,
|
||||
const uint8_t *buf, int buf_size,
|
||||
int64_t pts, int64_t dts)
|
||||
{
|
||||
return av_parser_parse2(s, avctx, poutbuf, poutbuf_size, buf, buf_size, pts, dts, AV_NOPTS_VALUE);
|
||||
}
|
||||
#endif
|
||||
|
||||
int av_parser_parse2(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
uint8_t **poutbuf, int *poutbuf_size,
|
||||
@@ -279,8 +242,10 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
|
||||
if(next == END_NOT_FOUND){
|
||||
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
if(!new_buffer)
|
||||
if(!new_buffer) {
|
||||
pc->index = 0;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
pc->buffer = new_buffer;
|
||||
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
|
||||
pc->index += *buf_size;
|
||||
@@ -293,11 +258,15 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
|
||||
/* append to buffer */
|
||||
if(pc->index){
|
||||
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
if(!new_buffer)
|
||||
if(!new_buffer) {
|
||||
pc->overread_index =
|
||||
pc->index = 0;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
pc->buffer = new_buffer;
|
||||
memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
|
||||
if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
|
||||
memcpy(&pc->buffer[pc->index], *buf,
|
||||
next + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pc->index = 0;
|
||||
*buf= pc->buffer;
|
||||
}
|
||||
|
@@ -107,7 +107,7 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
    long i;
    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
        long a = *(long*)(src1+i);
        long b = *(long*)(src2+i);
        *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
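A short aside on the loop-bound change above (my reading of it, not text from the diff): w is an int while sizeof(long) is a size_t, so w - sizeof(long) is evaluated in unsigned arithmetic; when w is smaller than sizeof(long) the subtraction wraps to a huge value, the comparison is always true and the loop reads past the end of the buffers. Casting sizeof(long) to int keeps the bound signed. A tiny demonstration:

#include <stdio.h>

int main(void)
{
    int w = 3;   /* smaller than sizeof(long) on typical 64-bit targets */

    /* unsigned arithmetic: wraps around instead of going negative */
    printf("w - sizeof(long)      = %zu\n", w - sizeof(long));
    /* signed arithmetic: negative, so "i <= bound" is false and the loop is skipped */
    printf("w - (int)sizeof(long) = %d\n", w - (int)sizeof(long));
    return 0;
}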
@@ -383,9 +383,6 @@ static void update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
|
||||
dst->release_buffer = src->release_buffer;
|
||||
|
||||
dst->opaque = src->opaque;
|
||||
#if FF_API_HURRY_UP
|
||||
dst->hurry_up = src->hurry_up;
|
||||
#endif
|
||||
dst->dsp_mask = src->dsp_mask;
|
||||
dst->debug = src->debug;
|
||||
dst->debug_mv = src->debug_mv;
|
||||
|
@@ -1884,6 +1884,10 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
        av_log(avctx, AV_LOG_ERROR, "Unknown FFT order (%d), contact the developers!\n", s->fft_order);
        return -1;
    }
    if (s->fft_size != (1 << (s->fft_order - 1))) {
        av_log(avctx, AV_LOG_ERROR, "FFT size %d not power of 2.\n", s->fft_size);
        return AVERROR_INVALIDDATA;
    }

    ff_rdft_init(&s->rdft_ctx, s->fft_order, IDFT_C2R);
    ff_mpadsp_init(&s->mpadsp);
@@ -260,6 +260,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
AVFrame * ref= (AVFrame*)&a->ref;
|
||||
uint8_t* outdata;
|
||||
int delta;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if(ref->data[0])
|
||||
avctx->release_buffer(avctx, ref);
|
||||
@@ -279,11 +280,11 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
/* make the palette available on the way out */
|
||||
memcpy(a->pic.data[1], a->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (a->avctx->palctrl->palette_changed) {
|
||||
if (pal) {
|
||||
a->pic.palette_has_changed = 1;
|
||||
a->avctx->palctrl->palette_changed = 0;
|
||||
memcpy(a->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = a->pic;
|
||||
@@ -294,10 +295,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
static av_cold int decode_init(AVCodecContext *avctx){
|
||||
QpegContext * const a = avctx->priv_data;
|
||||
|
||||
if (!avctx->palctrl) {
|
||||
av_log(avctx, AV_LOG_FATAL, "Missing required palette via palctrl\n");
|
||||
return -1;
|
||||
}
|
||||
avcodec_get_frame_defaults(&a->pic);
|
||||
avcodec_get_frame_defaults(&a->ref);
|
||||
a->avctx = avctx;
|
||||
|
@@ -46,6 +46,7 @@ typedef struct QtrleContext {
|
||||
const unsigned char *buf;
|
||||
int size;
|
||||
|
||||
uint32_t pal[256];
|
||||
} QtrleContext;
|
||||
|
||||
#define CHECK_STREAM_PTR(n) \
|
||||
@@ -519,12 +520,15 @@ static int qtrle_decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
if(has_palette) {
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (s->avctx->palctrl->palette_changed) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
s->avctx->palctrl->palette_changed = 0;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
done:
|
||||
|
@@ -184,9 +184,13 @@ static int raw_decode(AVCodecContext *avctx,
       (av_pix_fmt_descriptors[avctx->pix_fmt].flags & PIX_FMT_PAL))){
        frame->data[1]= context->palette;
    }
    if (avctx->palctrl && avctx->palctrl->palette_changed) {
        memcpy(frame->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
        avctx->palctrl->palette_changed = 0;
    if (avctx->pix_fmt == PIX_FMT_PAL8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);

        if (pal) {
            memcpy(frame->data[1], pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        }
    }
    if(avctx->pix_fmt==PIX_FMT_BGR24 && ((frame->linesize[0]+3)&~3)*avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0]+3)&~3;
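The raw-video hunk above, like the idcin, Interplay, KMVC, MS RLE, MS Video-1, QPEG, QuickTime RLE, SMC, Targa and TSCC hunks elsewhere in this changeset, replaces the deprecated AVPaletteControl (avctx->palctrl) with palette data attached to the packet as AV_PKT_DATA_PALETTE side data. A minimal sketch of the new pattern as used by the PAL8 decoders here (the context type and its pal field are illustrative names, not a real API):

#include <string.h>
#include "avcodec.h"   /* AVPacket, AVFrame, av_packet_get_side_data() */

typedef struct MyPal8Context {
    uint32_t pal[256];             /* decoder-local palette cache */
} MyPal8Context;

static void update_palette(MyPal8Context *s, AVFrame *frame, AVPacket *avpkt)
{
    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);

    if (pal) {                                   /* the demuxer sent a new palette */
        memcpy(s->pal, pal, AVPALETTE_SIZE);
        frame->palette_has_changed = 1;
    }
    /* make the (possibly unchanged) palette available on the way out */
    memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
}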
@@ -275,17 +275,6 @@ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
|
||||
return s;
|
||||
}
|
||||
|
||||
#if FF_API_AUDIO_OLD
|
||||
ReSampleContext *audio_resample_init(int output_channels, int input_channels,
|
||||
int output_rate, int input_rate)
|
||||
{
|
||||
return av_audio_resample_init(output_channels, input_channels,
|
||||
output_rate, input_rate,
|
||||
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16,
|
||||
TAPS, 10, 0, 0.8);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* resample audio. 'nb_samples' is the number of input samples */
|
||||
/* XXX: optimize it ! */
|
||||
int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples)
|
||||
|
@@ -83,7 +83,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
unsigned short *pixels = (unsigned short *)s->frame.data[0];
|
||||
|
||||
int row_ptr = 0;
|
||||
int pixel_ptr = 0;
|
||||
int pixel_ptr = -4;
|
||||
int block_ptr;
|
||||
int pixel_x, pixel_y;
|
||||
int total_blocks;
|
||||
@@ -139,6 +139,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
colorA = AV_RB16 (&s->buf[stream_ptr]);
|
||||
stream_ptr += 2;
|
||||
while (n_blocks--) {
|
||||
ADVANCE_BLOCK()
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){
|
||||
@@ -147,7 +148,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -186,6 +186,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
if (s->size - stream_ptr < n_blocks * 4)
|
||||
return;
|
||||
while (n_blocks--) {
|
||||
ADVANCE_BLOCK();
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
index = s->buf[stream_ptr++];
|
||||
@@ -196,7 +197,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -204,6 +204,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
case 0x00:
|
||||
if (s->size - stream_ptr < 16)
|
||||
return;
|
||||
ADVANCE_BLOCK();
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){
|
||||
@@ -217,7 +218,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
break;
|
||||
|
||||
/* Unknown opcode */
|
||||
|
@@ -362,6 +362,11 @@ static int rv20_decode_picture_header(MpegEncContext *s)
            f= get_bits(&s->gb, av_log2(v)+1);

            if(f){
                if (s->avctx->extradata_size < 8 + 2 * f) {
                    av_log(s->avctx, AV_LOG_ERROR, "Extradata too small.\n");
                    return AVERROR_INVALIDDATA;
                }

                new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
                new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
            }else{
@@ -1477,19 +1477,10 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip b frames if we are in a hurry */
|
||||
if(avctx->hurry_up && si.type==FF_B_TYPE) return buf_size;
|
||||
#endif
|
||||
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL) return avpkt->size;
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5)
|
||||
return buf_size;
|
||||
#endif
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
return avpkt->size;
|
||||
|
||||
for(i=0; i<slice_count; i++){
|
||||
int offset= get_slice_offset(avctx, slices_hdr, i);
|
||||
|
@@ -78,7 +78,7 @@ typedef struct ShortenContext {
|
||||
GetBitContext gb;
|
||||
|
||||
int min_framesize, max_framesize;
|
||||
int channels;
|
||||
unsigned channels;
|
||||
|
||||
int32_t *decoded[MAX_CHANNELS];
|
||||
int32_t *decoded_base[MAX_CHANNELS];
|
||||
@@ -119,11 +119,11 @@ static int allocate_buffers(ShortenContext *s)
|
||||
for (chan=0; chan<s->channels; chan++) {
|
||||
if(FFMAX(1, s->nmean) >= UINT_MAX/sizeof(int32_t)){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "nmean too large\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if(s->blocksize + s->nwrap >= UINT_MAX/sizeof(int32_t) || s->blocksize + s->nwrap <= (unsigned)s->nwrap){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "s->blocksize + s->nwrap too large\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
tmp_ptr = av_realloc(s->offset[chan], sizeof(int32_t)*FFMAX(1, s->nmean));
|
||||
@@ -209,14 +209,14 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
init_get_bits(&hb, header, header_size*8);
|
||||
if (get_le32(&hb) != MKTAG('R','I','F','F')) {
|
||||
av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
skip_bits_long(&hb, 32); /* chunk_size */
|
||||
|
||||
if (get_le32(&hb) != MKTAG('W','A','V','E')) {
|
||||
av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
while (get_le32(&hb) != MKTAG('f','m','t',' ')) {
|
||||
@@ -227,7 +227,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
|
||||
if (len < 16) {
|
||||
av_log(avctx, AV_LOG_ERROR, "fmt chunk was too short\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
wave_format = get_le16(&hb);
|
||||
@@ -237,7 +237,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "unsupported wave format\n");
|
||||
return -1;
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
avctx->channels = get_le16(&hb);
|
||||
@@ -248,7 +248,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
|
||||
if (avctx->bits_per_coded_sample != 16) {
|
||||
av_log(avctx, AV_LOG_ERROR, "unsupported number of bits per sample\n");
|
||||
return -1;
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
len -= 16;
|
||||
@@ -342,8 +342,13 @@ static int shorten_decode_frame(AVCodecContext *avctx,
|
||||
s->internal_ftype = get_uint(s, TYPESIZE);
|
||||
|
||||
s->channels = get_uint(s, CHANSIZE);
|
||||
if (s->channels > MAX_CHANNELS) {
|
||||
if (!s->channels) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "No channels reported\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->channels <= 0 || s->channels > MAX_CHANNELS) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels);
|
||||
s->channels = 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -506,7 +511,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
|
||||
s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE);
|
||||
break;
|
||||
case FN_BLOCKSIZE: {
|
||||
int blocksize = get_uint(s, av_log2(s->blocksize));
|
||||
unsigned blocksize = get_uint(s, av_log2(s->blocksize));
|
||||
if (blocksize > s->blocksize) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Increasing block size is not supported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
@@ -534,7 +539,7 @@ frame_done:
|
||||
av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size);
|
||||
s->bitstream_size=0;
|
||||
s->bitstream_index=0;
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->bitstream_size) {
|
||||
s->bitstream_index += i;
|
||||
|
@@ -54,6 +54,7 @@ typedef struct SmcContext {
|
||||
unsigned char color_quads[COLORS_PER_TABLE * CQUAD];
|
||||
unsigned char color_octets[COLORS_PER_TABLE * COCTET];
|
||||
|
||||
uint32_t pal[256];
|
||||
} SmcContext;
|
||||
|
||||
#define GET_BLOCK_COUNT() \
|
||||
@@ -110,11 +111,7 @@ static void smc_decode_stream(SmcContext *s)
|
||||
int color_octet_index = 0;
|
||||
|
||||
/* make the palette available */
|
||||
memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (s->avctx->palctrl->palette_changed) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
s->avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
|
||||
chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
|
||||
stream_ptr += 4;
|
||||
@@ -441,6 +438,7 @@ static int smc_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
SmcContext *s = avctx->priv_data;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
s->buf = buf;
|
||||
s->size = buf_size;
|
||||
@@ -453,6 +451,11 @@ static int smc_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
smc_decode_stream(s);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
|
@@ -664,9 +664,6 @@ static int svq1_decode_frame(AVCodecContext *avctx,
    //this should be removed after libavcodec can handle more flexible picture types & ordering
    if(s->pict_type==AV_PICTURE_TYPE_B && s->last_picture_ptr==NULL) return buf_size;

#if FF_API_HURRY_UP
    if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return buf_size;
#endif
    if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
      ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
      || avctx->skip_frame >= AVDISCARD_ALL)
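This hunk, together with the matching ones in the h263, h264, mpeg12, mpegvideo, rv34, svq3 and vc1 decoders above, removes the code kept under FF_API_HURRY_UP: the deprecated hurry_up field is gone and the AVDISCARD_* based skip_* fields are the only remaining knob. A rough user-side sketch of the substitution (the mapping is approximate, inferred from the removed branches, not an exact equivalence):

#include "avcodec.h"

/* Ask a decoder to shed work without the removed hurry_up field. */
static void request_fast_decoding(AVCodecContext *avctx)
{
    avctx->skip_frame       = AVDISCARD_NONREF;  /* roughly hurry_up == 1: drop non-reference frames */
    avctx->skip_idct        = AVDISCARD_NONREF;  /* roughly hurry_up  > 1: skip dequant/idct         */
    avctx->skip_loop_filter = AVDISCARD_NONREF;
    /* avctx->skip_frame = AVDISCARD_ALL would be the closest match to hurry_up >= 5 */
}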
@@ -995,14 +995,6 @@ static int svq3_decode_frame(AVCodecContext *avctx,
|
||||
/* Skip B-frames if we do not have reference frames. */
|
||||
if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
|
||||
return 0;
|
||||
#if FF_API_HURRY_UP
|
||||
/* Skip B-frames if we are in a hurry. */
|
||||
if (avctx->hurry_up && s->pict_type == FF_B_TYPE)
|
||||
return 0;
|
||||
/* Skip everything if we are in a hurry >= 5. */
|
||||
if (avctx->hurry_up >= 5)
|
||||
return 0;
|
||||
#endif
|
||||
if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
|
||||
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
|
@@ -171,13 +171,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
stride = -p->linesize[0];
|
||||
}
|
||||
|
||||
if(avctx->pix_fmt == PIX_FMT_PAL8 && avctx->palctrl){
|
||||
memcpy(p->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if(avctx->palctrl->palette_changed){
|
||||
p->palette_has_changed = 1;
|
||||
avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
}
|
||||
if(colors){
|
||||
size_t pal_size;
|
||||
if((colors + first_clr) > 256){
|
||||
|
@@ -60,6 +60,8 @@ typedef struct TsccContext {
|
||||
unsigned char* decomp_buf;
|
||||
int height;
|
||||
z_stream zstream;
|
||||
|
||||
uint32_t pal[256];
|
||||
} CamtasiaContext;
|
||||
|
||||
/*
|
||||
@@ -108,11 +110,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
|
||||
/* make the palette available on the way out */
|
||||
if (c->avctx->pix_fmt == PIX_FMT_PAL8) {
|
||||
memcpy(c->pic.data[1], c->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (c->avctx->palctrl->palette_changed) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
c->pic.palette_has_changed = 1;
|
||||
c->avctx->palctrl->palette_changed = 0;
|
||||
memcpy(c->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
|
@@ -49,7 +49,7 @@ static int volatile entangled_thread_counter=0;
static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op);
static void *codec_mutex;

void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size)
void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
{
    if(min_size < *size)
        return ptr;
@@ -65,7 +65,7 @@ void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_s
    return ptr;
}

void av_fast_malloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size)
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
{
    void **p = ptr;
    if (min_size < *size)
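The two prototypes above switch the min_size parameter from the internal FF_INTERNALC_MEM_TYPE to a plain size_t. As a reminder of how callers in this changeset use the helper (for instance ff_combine_frame further up), a minimal sketch; ensure_buffer is an illustrative name, not an existing function:

#include <stdint.h>
#include "avcodec.h"   /* av_fast_realloc(), AVERROR */

/* Grow *buf so it can hold at least "needed" bytes; *buf_size tracks the
 * allocated (not the used) size, exactly as av_fast_realloc() expects. */
static int ensure_buffer(uint8_t **buf, unsigned int *buf_size, size_t needed)
{
    void *tmp = av_fast_realloc(*buf, buf_size, needed);
    if (!tmp)
        return AVERROR(ENOMEM);   /* *buf is left untouched and still owned by the caller */
    *buf = tmp;
    return 0;
}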
@@ -95,13 +95,6 @@ void avcodec_register(AVCodec *codec)
|
||||
codec->next = NULL;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
void register_avcodec(AVCodec *codec)
|
||||
{
|
||||
avcodec_register(codec);
|
||||
}
|
||||
#endif
|
||||
|
||||
unsigned avcodec_get_edge_width(void)
|
||||
{
|
||||
return EDGE_WIDTH;
|
||||
@@ -238,12 +231,6 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
|
||||
*width=FFALIGN(*width, align);
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h){
|
||||
return av_image_check_size(w, h, 0, av_log_ctx);
|
||||
}
|
||||
#endif
|
||||
|
||||
int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
|
||||
int i;
|
||||
int w= s->width;
|
||||
@@ -612,8 +599,6 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
|
||||
if (avctx->codec->encode) {
|
||||
int i;
|
||||
if (avctx->codec->sample_fmts) {
|
||||
if (avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
|
||||
if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
|
||||
break;
|
||||
@@ -771,22 +756,6 @@ static int64_t guess_correct_pts(AVCodecContext *ctx,
|
||||
return pts;
|
||||
}
|
||||
|
||||
#if FF_API_VIDEO_OLD
|
||||
int attribute_align_arg avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
|
||||
int *got_picture_ptr,
|
||||
const uint8_t *buf, int buf_size)
|
||||
{
|
||||
AVPacket avpkt;
|
||||
av_init_packet(&avpkt);
|
||||
avpkt.data = buf;
|
||||
avpkt.size = buf_size;
|
||||
// HACK for CorePNG to decode as normal PNG by default
|
||||
avpkt.flags = AV_PKT_FLAG_KEY;
|
||||
|
||||
return avcodec_decode_video2(avctx, picture, got_picture_ptr, &avpkt);
|
||||
}
|
||||
#endif
|
||||
|
||||
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
|
||||
int *got_picture_ptr,
|
||||
AVPacket *avpkt)
|
||||
@@ -798,6 +767,7 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
|
||||
return -1;
|
||||
|
||||
if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type&FF_THREAD_FRAME)){
|
||||
av_packet_split_side_data(avpkt);
|
||||
avctx->pkt = avpkt;
|
||||
if (HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME)
|
||||
ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
|
||||
@@ -836,20 +806,6 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if FF_API_AUDIO_OLD
|
||||
int attribute_align_arg avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
|
||||
int *frame_size_ptr,
|
||||
const uint8_t *buf, int buf_size)
|
||||
{
|
||||
AVPacket avpkt;
|
||||
av_init_packet(&avpkt);
|
||||
avpkt.data = buf;
|
||||
avpkt.size = buf_size;
|
||||
|
||||
return avcodec_decode_audio3(avctx, samples, frame_size_ptr, &avpkt);
|
||||
}
|
||||
#endif
|
||||
|
||||
int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
|
||||
int *frame_size_ptr,
|
||||
AVPacket *avpkt)
|
||||
@@ -884,20 +840,6 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if FF_API_SUBTITLE_OLD
|
||||
int avcodec_decode_subtitle(AVCodecContext *avctx, AVSubtitle *sub,
|
||||
int *got_sub_ptr,
|
||||
const uint8_t *buf, int buf_size)
|
||||
{
|
||||
AVPacket avpkt;
|
||||
av_init_packet(&avpkt);
|
||||
avpkt.data = buf;
|
||||
avpkt.size = buf_size;
|
||||
|
||||
return avcodec_decode_subtitle2(avctx, sub, got_sub_ptr, &avpkt);
|
||||
}
|
||||
#endif
|
||||
|
||||
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
|
||||
int *got_sub_ptr,
|
||||
AVPacket *avpkt)
|
||||
@@ -1330,20 +1272,6 @@ unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
|
||||
return n;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
#include "libavutil/parseutils.h"
|
||||
|
||||
int av_parse_video_frame_size(int *width_ptr, int *height_ptr, const char *str)
|
||||
{
|
||||
return av_parse_video_size(width_ptr, height_ptr, str);
|
||||
}
|
||||
|
||||
int av_parse_video_frame_rate(AVRational *frame_rate, const char *arg)
|
||||
{
|
||||
return av_parse_video_rate(frame_rate, arg);
|
||||
}
|
||||
#endif
|
||||
|
||||
int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b){
|
||||
int i;
|
||||
for(i=0; i<size && !(tab[i][0]==a && tab[i][1]==b); i++);
|
||||
@@ -1460,12 +1388,4 @@ int avcodec_thread_init(AVCodecContext *s, int thread_count)
|
||||
s->thread_count = thread_count;
|
||||
return ff_thread_init(s);
|
||||
}
|
||||
|
||||
void avcodec_thread_free(AVCodecContext *s)
|
||||
{
|
||||
#if HAVE_THREADS
|
||||
ff_thread_free(s);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -3716,21 +3716,11 @@ static int vc1_decode_frame(AVCodecContext *avctx,
|
||||
if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)){
|
||||
goto err;
|
||||
}
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip b frames if we are in a hurry */
|
||||
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return -1;//buf_size;
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL) {
|
||||
goto end;
|
||||
}
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) {
|
||||
goto err;
|
||||
}
|
||||
#endif
|
||||
|
||||
if(s->next_p_frame_damaged){
|
||||
if(s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|
@@ -67,6 +67,13 @@ struct vdpau_render_state {
|
||||
|
||||
int state; ///< Holds FF_VDPAU_STATE_* values.
|
||||
|
||||
/** Describe size/location of the compressed video data.
|
||||
Set to 0 when freeing bitstream_buffers. */
|
||||
int bitstream_buffers_allocated;
|
||||
int bitstream_buffers_used;
|
||||
/** The user is responsible for freeing this buffer using av_freep(). */
|
||||
VdpBitstreamBuffer *bitstream_buffers;
|
||||
|
||||
/** picture parameter information for all supported codecs */
|
||||
union VdpPictureInfo {
|
||||
VdpPictureInfoH264 h264;
|
||||
@@ -74,13 +81,6 @@ struct vdpau_render_state {
|
||||
VdpPictureInfoVC1 vc1;
|
||||
VdpPictureInfoMPEG4Part2 mpeg4;
|
||||
} info;
|
||||
|
||||
/** Describe size/location of the compressed video data.
|
||||
Set to 0 when freeing bitstream_buffers. */
|
||||
int bitstream_buffers_allocated;
|
||||
int bitstream_buffers_used;
|
||||
/** The user is responsible for freeing this buffer using av_freep(). */
|
||||
VdpBitstreamBuffer *bitstream_buffers;
|
||||
};
|
||||
|
||||
/* @}*/
|
||||
|
@@ -20,8 +20,8 @@
|
||||
#ifndef AVCODEC_VERSION_H
|
||||
#define AVCODEC_VERSION_H
|
||||
|
||||
#define LIBAVCODEC_VERSION_MAJOR 52
|
||||
#define LIBAVCODEC_VERSION_MINOR 123
|
||||
#define LIBAVCODEC_VERSION_MAJOR 53
|
||||
#define LIBAVCODEC_VERSION_MINOR 8
|
||||
#define LIBAVCODEC_VERSION_MICRO 0
|
||||
|
||||
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
|
||||
@@ -41,45 +41,12 @@
|
||||
#ifndef FF_API_PALETTE_CONTROL
|
||||
#define FF_API_PALETTE_CONTROL (LIBAVCODEC_VERSION_MAJOR < 54)
|
||||
#endif
|
||||
#ifndef FF_API_MM_FLAGS
|
||||
#define FF_API_MM_FLAGS (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_OPT_SHOW
|
||||
#define FF_API_OPT_SHOW (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_AUDIO_OLD
|
||||
#define FF_API_AUDIO_OLD (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_VIDEO_OLD
|
||||
#define FF_API_VIDEO_OLD (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_SUBTITLE_OLD
|
||||
#define FF_API_SUBTITLE_OLD (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_USE_LPC
|
||||
#define FF_API_USE_LPC (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_SET_STRING_OLD
|
||||
#define FF_API_SET_STRING_OLD (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_INOFFICIAL
|
||||
#define FF_API_INOFFICIAL (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_OLD_SAMPLE_FMT
|
||||
#define FF_API_OLD_SAMPLE_FMT (LIBAVCODEC_VERSION_MAJOR < 54)
|
||||
#endif
|
||||
#ifndef FF_API_OLD_AUDIOCONVERT
|
||||
#define FF_API_OLD_AUDIOCONVERT (LIBAVCODEC_VERSION_MAJOR < 54)
|
||||
#endif
|
||||
#ifndef FF_API_HURRY_UP
|
||||
#define FF_API_HURRY_UP (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_RATE_EMU
|
||||
#define FF_API_RATE_EMU (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_MB_Q
|
||||
#define FF_API_MB_Q (LIBAVCODEC_VERSION_MAJOR < 53)
|
||||
#endif
|
||||
#ifndef FF_API_ANTIALIAS_ALGO
|
||||
#define FF_API_ANTIALIAS_ALGO (LIBAVCODEC_VERSION_MAJOR < 54)
|
||||
#endif
|
||||
|
@@ -572,6 +572,11 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
|
||||
floor_setup->data.t0.order = get_bits(gb, 8);
|
||||
floor_setup->data.t0.rate = get_bits(gb, 16);
|
||||
floor_setup->data.t0.bark_map_size = get_bits(gb, 16);
|
||||
if (floor_setup->data.t0.bark_map_size == 0) {
|
||||
av_log(vc->avccontext, AV_LOG_ERROR,
|
||||
"Floor 0 bark map size is 0.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
floor_setup->data.t0.amplitude_bits = get_bits(gb, 6);
|
||||
/* zero would result in a div by zero later *
|
||||
* 2^0 - 1 == 0 */
|
||||
|
@@ -85,6 +85,11 @@ static int wma_decode_init(AVCodecContext * avctx)
|
||||
int i, flags2;
|
||||
uint8_t *extradata;
|
||||
|
||||
if (!avctx->block_align) {
|
||||
av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
s->avctx = avctx;
|
||||
|
||||
/* extract flag infos */
|
||||
|
@@ -277,6 +277,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
int log2_max_num_subframes;
|
||||
int num_possible_block_sizes;
|
||||
|
||||
if (!avctx->block_align) {
|
||||
av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
s->avctx = avctx;
|
||||
dsputil_init(&s->dsp, avctx);
|
||||
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
|
||||
@@ -1502,8 +1507,11 @@ static int decode_packet(AVCodecContext *avctx,
|
||||
s->packet_done = 0;
|
||||
|
||||
/** sanity check for the buffer length */
|
||||
if (buf_size < avctx->block_align)
|
||||
return 0;
|
||||
if (buf_size < avctx->block_align) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
|
||||
buf_size, avctx->block_align);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->next_packet_start = buf_size - avctx->block_align;
|
||||
buf_size = avctx->block_align;
|
@@ -25,11 +25,6 @@

#include "avcodec.h"

#if LIBAVCODEC_VERSION_MAJOR < 53
#define AV_XVMC_STATE_DISPLAY_PENDING 1 /** the surface should be shown, the video driver manipulates this */
#define AV_XVMC_STATE_PREDICTION 2 /** the surface is needed for prediction, the codec manipulates this */
#define AV_XVMC_STATE_OSD_SOURCE 4 /** the surface is needed for subpicture rendering */
#endif
#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct
the number is 1337 speak for the letters IDCT MCo (motion compensation) */

@@ -151,22 +146,6 @@ struct xvmc_pix_fmt {
of coded blocks it contains.
*/
int next_free_data_block_num;

/** extensions may be placed here */
#if LIBAVCODEC_VERSION_MAJOR < 53
//@{
/** State flags used to work around limitations in the MPlayer video system.
0 - Surface is not used.
1 - Surface is still held in application to be displayed or is
still visible.
2 - Surface is still held in libavcodec buffer for prediction.
*/
int state;

/** pointer to the surface where the subpicture is rendered */
void* p_osd_target_surface_render;
//}@
#endif
};

#endif /* AVCODEC_XVMC_H */
@@ -301,7 +301,7 @@ static int xan_decode_frame_type0(AVCodecContext *avctx, AVPacket *avpkt)
corr_end = avpkt->size;
if (chroma_off > corr_off)
corr_end = chroma_off;
dec_size = xan_unpack(s->scratch_buffer, s->buffer_size,
dec_size = xan_unpack(s->scratch_buffer, s->buffer_size / 2,
avpkt->data + 8 + corr_off,
corr_end - corr_off);
if (dec_size < 0)
@@ -22,9 +22,9 @@
#include "libavutil/avutil.h"
#include "libavformat/avformat.h"

#define LIBAVDEVICE_VERSION_MAJOR 52
#define LIBAVDEVICE_VERSION_MINOR 5
#define LIBAVDEVICE_VERSION_MICRO 0
#define LIBAVDEVICE_VERSION_MAJOR 53
#define LIBAVDEVICE_VERSION_MINOR 1
#define LIBAVDEVICE_VERSION_MICRO 1

#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \

@@ -25,8 +25,8 @@
#include "libavutil/avutil.h"
#include "libavutil/samplefmt.h"

#define LIBAVFILTER_VERSION_MAJOR 1
#define LIBAVFILTER_VERSION_MINOR 80
#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 23
#define LIBAVFILTER_VERSION_MICRO 0

#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
@@ -23,6 +23,7 @@
#include <ctype.h>
#include <string.h>

#include "libavutil/avstring.h"
#include "avfilter.h"
#include "avfiltergraph.h"
#include "internal.h"

@@ -163,7 +164,11 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
/* couldn't merge format lists. auto-insert scale filter */
snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
scaler_count++);
snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
av_strlcpy(scale_args, "0:0", sizeof(scale_args));
if (graph->scale_sws_opts) {
av_strlcat(scale_args, ":", sizeof(scale_args));
av_strlcat(scale_args, graph->scale_sws_opts, sizeof(scale_args));
}
if ((ret = avfilter_graph_create_filter(&scale, avfilter_get_by_name("scale"),
inst_name, scale_args, NULL, graph)) < 0)
return ret;
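The scaler-argument hunk above replaces one snprintf with incremental av_strlcpy/av_strlcat calls, so a missing graph->scale_sws_opts no longer ends up as a literal "(null)" suffix. A minimal standalone sketch of that pattern (not from the patch; the buffer size and example options string are made up for illustration):

/* Sketch: build the "w:h[:sws_opts]" argument string the way the patched
 * query_formats() does, only appending options when they are present. */
#include <stdio.h>
#include <libavutil/avstring.h>

static void build_scale_args(char *out, size_t out_size, const char *sws_opts)
{
    av_strlcpy(out, "0:0", out_size);        /* keep the input dimensions */
    if (sws_opts) {                          /* append ":<opts>" only if set */
        av_strlcat(out, ":", out_size);
        av_strlcat(out, sws_opts, out_size);
    }
}

int main(void)
{
    char args[256];
    build_scale_args(args, sizeof(args), "flags=bicubic");
    printf("%s\n", args);                    /* prints 0:0:flags=bicubic */
    return 0;
}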
@@ -121,7 +121,8 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
return ret;
}

if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags")) {
if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") &&
ctx->scale_sws_opts) {
snprintf(tmp_args, sizeof(tmp_args), "%s:%s",
args, ctx->scale_sws_opts);
args = tmp_args;

@@ -158,7 +158,7 @@ static int read_shape_from_file(int *cols, int *rows, int **values, const char *
}
w++;
}
if (*rows > (FF_INTERNAL_MEM_TYPE_MAX_VALUE / (sizeof(int)) / *cols)) {
if (*rows > (SIZE_MAX / sizeof(int) / *cols)) {
av_log(log_ctx, AV_LOG_ERROR, "File with size %dx%d is too big\n",
*rows, *cols);
return AVERROR_INVALIDDATA;
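The shape-file check above moves from the private FF_INTERNAL_MEM_TYPE_MAX_VALUE to the standard SIZE_MAX, rejecting inputs whose rows*cols*sizeof(int) product would overflow before anything is allocated. A self-contained sketch of that guard, with an illustrative function name that is not part of the filter:

/* Sketch: refuse an allocation whose element count could overflow size_t. */
#include <stdint.h>
#include <stdlib.h>

static int *alloc_mask(size_t rows, size_t cols)
{
    if (!rows || !cols || rows > SIZE_MAX / sizeof(int) / cols)
        return NULL;                 /* rows * cols * sizeof(int) would wrap */
    return malloc(rows * cols * sizeof(int));
}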
@@ -10,7 +10,6 @@ OBJS = allformats.o \
id3v1.o \
id3v2.o \
metadata.o \
metadata_compat.o \
options.o \
os_support.o \
sdp.o \
@@ -208,7 +208,7 @@ static int parse_playlist(AppleHTTPContext *c, const char *url,

if (!in) {
close_in = 1;
if ((ret = avio_open(&in, url, AVIO_RDONLY)) < 0)
if ((ret = avio_open(&in, url, AVIO_FLAG_READ)) < 0)
return ret;
}

@@ -321,13 +321,13 @@ static int open_input(struct variant *var)
{
struct segment *seg = var->segments[var->cur_seq_no - var->start_seq_no];
if (seg->key_type == KEY_NONE) {
return ffurl_open(&var->input, seg->url, AVIO_RDONLY);
return ffurl_open(&var->input, seg->url, AVIO_FLAG_READ);
} else if (seg->key_type == KEY_AES_128) {
char iv[33], key[33], url[MAX_URL_SIZE];
int ret;
if (strcmp(seg->key, var->key_url)) {
URLContext *uc;
if (ffurl_open(&uc, seg->key, AVIO_RDONLY) == 0) {
if (ffurl_open(&uc, seg->key, AVIO_FLAG_READ) == 0) {
if (ffurl_read_complete(uc, var->key, sizeof(var->key))
!= sizeof(var->key)) {
av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",

@@ -347,7 +347,7 @@ static int open_input(struct variant *var)
snprintf(url, sizeof(url), "crypto+%s", seg->url);
else
snprintf(url, sizeof(url), "crypto:%s", seg->url);
if ((ret = ffurl_alloc(&var->input, url, AVIO_RDONLY)) < 0)
if ((ret = ffurl_alloc(&var->input, url, AVIO_FLAG_READ)) < 0)
return ret;
av_set_string3(var->input->priv_data, "key", key, 0, NULL);
av_set_string3(var->input->priv_data, "iv", iv, 0, NULL);

@@ -395,9 +395,7 @@ reload:
goto reload;
}

ret = ffurl_open(&v->input,
v->segments[v->cur_seq_no - v->start_seq_no]->url,
AVIO_RDONLY);
ret = open_input(v);
if (ret < 0)
return ret;
}

@@ -114,7 +114,7 @@ static int parse_playlist(URLContext *h, const char *url)
char line[1024];
const char *ptr;

if ((ret = avio_open(&in, url, AVIO_RDONLY)) < 0)
if ((ret = avio_open(&in, url, AVIO_FLAG_READ)) < 0)
return ret;

read_chomp_line(in, line, sizeof(line));

@@ -179,7 +179,7 @@ static int applehttp_open(URLContext *h, const char *uri, int flags)
int ret, i;
const char *nested_url;

if (flags & (AVIO_WRONLY | AVIO_RDWR))
if (flags & AVIO_FLAG_WRITE)
return AVERROR(ENOSYS);

s = av_mallocz(sizeof(AppleHTTPContext));

@@ -194,7 +194,7 @@ static int applehttp_open(URLContext *h, const char *uri, int flags)
av_strlcpy(s->playlisturl, "http://", sizeof(s->playlisturl));
av_strlcat(s->playlisturl, nested_url, sizeof(s->playlisturl));
} else {
av_log(NULL, AV_LOG_ERROR, "Unsupported url %s\n", uri);
av_log(h, AV_LOG_ERROR, "Unsupported url %s\n", uri);
ret = AVERROR(EINVAL);
goto fail;
}

@@ -217,7 +217,7 @@ static int applehttp_open(URLContext *h, const char *uri, int flags)
}

if (s->n_segments == 0) {
av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
av_log(h, AV_LOG_WARNING, "Empty playlist\n");
ret = AVERROR(EIO);
goto fail;
}

@@ -257,7 +257,7 @@ retry:
return ret;
}
if (s->cur_seq_no < s->start_seq_no) {
av_log(NULL, AV_LOG_WARNING,
av_log(h, AV_LOG_WARNING,
"skipping %d segments ahead, expired from playlist\n",
s->start_seq_no - s->cur_seq_no);
s->cur_seq_no = s->start_seq_no;

@@ -273,12 +273,12 @@ retry:
goto retry;
}
url = s->segments[s->cur_seq_no - s->start_seq_no]->url,
av_log(NULL, AV_LOG_DEBUG, "opening %s\n", url);
ret = ffurl_open(&s->seg_hd, url, AVIO_RDONLY);
av_log(h, AV_LOG_DEBUG, "opening %s\n", url);
ret = ffurl_open(&s->seg_hd, url, AVIO_FLAG_READ);
if (ret < 0) {
if (url_interrupt_cb())
return AVERROR_EXIT;
av_log(NULL, AV_LOG_WARNING, "Unable to open %s\n", url);
av_log(h, AV_LOG_WARNING, "Unable to open %s\n", url);
s->cur_seq_no++;
goto retry;
}
@@ -45,6 +45,8 @@ typedef struct {

uint16_t stream_language_index;

int palette_changed;
uint32_t palette[256];
} ASFStream;

typedef struct {

@@ -361,15 +361,14 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
/* This is true for all paletted codecs implemented in ffmpeg */
if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
int av_unused i;
st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
#if HAVE_BIGENDIAN
for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
asf_st->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
#else
memcpy(st->codec->palctrl->palette, st->codec->extradata,
FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
memcpy(asf_st->palette, st->codec->extradata,
FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
#endif
st->codec->palctrl->palette_changed = 1;
asf_st->palette_changed = 1;
}

st->codec->codec_tag = tag1;

@@ -982,6 +981,17 @@ static int ff_asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pk
asf_st->pkt.stream_index = asf->stream_index;
asf_st->pkt.pos =
asf_st->packet_pos= asf->packet_pos;
if (asf_st->pkt.data && asf_st->palette_changed) {
uint8_t *pal;
pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
AVPALETTE_SIZE);
if (!pal) {
av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
} else {
memcpy(pal, asf_st->palette, AVPALETTE_SIZE);
asf_st->palette_changed = 0;
}
}
//printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
//asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & AV_PKT_FLAG_KEY,
//s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO, asf->packet_obj_size);
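With AVPaletteControl on its way out, the demuxers in this set (ASF here, AVI, id CIN and Interplay MVE below) ship the palette with the packet as AV_PKT_DATA_PALETTE side data. On the receiving side a decoder or application pulls it back out with av_packet_get_side_data(); a hedged sketch, assuming the side-data API of the libavcodec version in this tree (function name pick_up_palette is illustrative only):

/* Sketch: read back the AV_PKT_DATA_PALETTE side data attached above. */
#include <string.h>
#include <libavcodec/avcodec.h>

static void pick_up_palette(AVPacket *pkt, uint32_t pal_out[256], int *changed)
{
    int size = 0;
    uint8_t *pal = av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE, &size);

    if (pal && size == AVPALETTE_SIZE) {   /* 256 entries, 4 bytes each */
        memcpy(pal_out, pal, AVPALETTE_SIZE);
        *changed = 1;
    }
}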
@@ -1138,14 +1148,8 @@ static void asf_reset_header(AVFormatContext *s)

static int asf_read_close(AVFormatContext *s)
{
int i;
asf_reset_header(s);

for(i=0;i<s->nb_streams;i++) {
AVStream *st = s->streams[i];
av_free(st->codec->palctrl);
}

return 0;
}

@@ -144,20 +144,6 @@ typedef struct AVMetadataConv AVMetadataConv;
attribute_deprecated AVDictionaryEntry *
av_metadata_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags);

#if FF_API_OLD_METADATA
/**
* Set the given tag in *pm, overwriting an existing tag.
*
* @param pm pointer to a pointer to a metadata struct. If *pm is NULL
* a metadata struct is allocated and put in *pm.
* @param key tag key to add to *pm (will be av_strduped)
* @param value tag value to add to *pm (will be av_strduped)
* @return >= 0 on success otherwise an error code <0
* @deprecated Use av_metadata_set2() instead.
*/
attribute_deprecated int av_metadata_set(AVMetadata **pm, const char *key, const char *value);
#endif

/**
* Set the given tag in *pm, overwriting an existing tag.
*

@@ -270,10 +256,6 @@ typedef struct AVFormatParameters {
immediately (RTSP only). */
attribute_deprecated unsigned int prealloced_context:1;
#endif
#if FF_API_PARAMETERS_CODEC_ID
attribute_deprecated enum CodecID video_codec_id;
attribute_deprecated enum CodecID audio_codec_id;
#endif
} AVFormatParameters;

//! Demuxer will use avio_open, no opened file should be provided by the caller.

@@ -566,10 +548,6 @@ typedef struct AVStream {
*/
int64_t duration;

#if FF_API_OLD_METADATA
attribute_deprecated char language[4]; /**< ISO 639-2/B 3-letter language code (empty string if undefined) */
#endif

/* av_read_frame() support */
enum AVStreamParseType need_parsing;
struct AVCodecParserContext *parser;

@@ -585,14 +563,6 @@ typedef struct AVStream {

int64_t nb_frames; ///< number of frames in this stream if known or 0

#if FF_API_LAVF_UNUSED
attribute_deprecated int64_t unused[4+1];
#endif

#if FF_API_OLD_METADATA
attribute_deprecated char *filename; /**< source filename of the stream */
#endif

int disposition; /**< AV_DISPOSITION_* bit field */

AVProbeData probe_data;

@@ -683,10 +653,6 @@ typedef struct AVStream {
*/
typedef struct AVProgram {
int id;
#if FF_API_OLD_METADATA
attribute_deprecated char *provider_name; ///< network name for DVB streams
attribute_deprecated char *name; ///< service name for DVB streams
#endif
int flags;
enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller
unsigned int *stream_index;

@@ -705,16 +671,9 @@ typedef struct AVChapter {
int id; ///< unique ID to identify the chapter
AVRational time_base; ///< time base in which the start/end timestamps are specified
int64_t start, end; ///< chapter start/end time in time_base units
#if FF_API_OLD_METADATA
attribute_deprecated char *title; ///< chapter title
#endif
AVDictionary *metadata;
} AVChapter;

#if FF_API_MAX_STREAMS
#define MAX_STREAMS 20
#endif

/**
* Format I/O context.
* New fields can be added to the end with minor version bumps.

@@ -730,24 +689,10 @@ typedef struct AVFormatContext {
void *priv_data;
AVIOContext *pb;
unsigned int nb_streams;
#if FF_API_MAX_STREAMS
AVStream *streams[MAX_STREAMS];
#else
AVStream **streams;
#endif
char filename[1024]; /**< input or output filename */
/* stream info */
int64_t timestamp;
#if FF_API_OLD_METADATA
attribute_deprecated char title[512];
attribute_deprecated char author[512];
attribute_deprecated char copyright[512];
attribute_deprecated char comment[512];
attribute_deprecated char album[512];
attribute_deprecated int year; /**< ID3 year, 0 if none */
attribute_deprecated int track; /**< track number, 0 if none */
attribute_deprecated char genre[32]; /**< ID3 genre */
#endif

int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */
/* private data for pts handling (do not modify directly). */

@@ -787,17 +732,9 @@ typedef struct AVFormatContext {

/* av_read_frame() support */
AVStream *cur_st;
#if FF_API_LAVF_UNUSED
const uint8_t *cur_ptr_deprecated;
int cur_len_deprecated;
AVPacket cur_pkt_deprecated;
#endif

/* av_seek_frame() support */
int64_t data_offset; /**< offset of the first packet */
#if FF_API_INDEX_BUILT
attribute_deprecated int index_built;
#endif

int mux_rate;
unsigned int packet_size;

@@ -937,11 +874,6 @@ typedef struct AVPacketList {
struct AVPacketList *next;
} AVPacketList;

#if FF_API_FIRST_FORMAT
attribute_deprecated extern AVInputFormat *first_iformat;
attribute_deprecated extern AVOutputFormat *first_oformat;
#endif

/**
* If f is NULL, returns the first registered input format,
* if f is non-NULL, returns the next registered input format after f

@@ -966,18 +898,6 @@ attribute_deprecated enum CodecID av_guess_image2_codec(const char *filename);
/* utils.c */
void av_register_input_format(AVInputFormat *format);
void av_register_output_format(AVOutputFormat *format);
#if FF_API_GUESS_FORMAT
attribute_deprecated AVOutputFormat *guess_stream_format(const char *short_name,
const char *filename,
const char *mime_type);

/**
* @deprecated Use av_guess_format() instead.
*/
attribute_deprecated AVOutputFormat *guess_format(const char *short_name,
const char *filename,
const char *mime_type);
#endif

/**
* Return the output format in the list of registered output formats

@@ -1194,12 +1114,6 @@ attribute_deprecated int av_open_input_file(AVFormatContext **ic_ptr, const char
*/
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);

#if FF_API_ALLOC_FORMAT_CONTEXT
/**
* @deprecated Use avformat_alloc_context() instead.
*/
attribute_deprecated AVFormatContext *av_alloc_format_context(void);
#endif
int av_demuxer_open(AVFormatContext *ic, AVFormatParameters *ap);

/**

@@ -1653,22 +1567,6 @@ void av_dump_format(AVFormatContext *ic,
const char *url,
int is_output);

#if FF_API_PARSE_FRAME_PARAM
/**
* Parse width and height out of string str.
* @deprecated Use av_parse_video_frame_size instead.
*/
attribute_deprecated int parse_image_size(int *width_ptr, int *height_ptr,
const char *str);

/**
* Convert framerate from a string to a fraction.
* @deprecated Use av_parse_video_frame_rate instead.
*/
attribute_deprecated int parse_frame_rate(int *frame_rate, int *frame_rate_base,
const char *arg);
#endif

#if FF_API_PARSE_DATE
/**
* Parse datestr and return a corresponding number of microseconds.
@@ -611,15 +611,18 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
/* This code assumes that extradata contains only palette. */
/* This is true for all paletted codecs implemented in FFmpeg. */
if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
int pal_size = (1 << st->codec->bits_per_coded_sample) << 2;
const uint8_t *pal_src;

pal_size = FFMIN(pal_size, st->codec->extradata_size);
pal_src = st->codec->extradata + st->codec->extradata_size - pal_size;
#if HAVE_BIGENDIAN
for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
for (i = 0; i < pal_size/4; i++)
ast->pal[i] = AV_RL32(pal_src+4*i);
#else
memcpy(st->codec->palctrl->palette, st->codec->extradata,
FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
memcpy(ast->pal, pal_src, pal_size);
#endif
st->codec->palctrl->palette_changed = 1;
ast->has_pal = 1;
}

print_tag("video", tag1, 0);

@@ -961,14 +964,14 @@ resync:
return err;

if(ast->has_pal && pkt->data && pkt->size<(unsigned)INT_MAX/2){
void *ptr= av_realloc(pkt->data, pkt->size + 4*256 + FF_INPUT_BUFFER_PADDING_SIZE);
if(ptr){
ast->has_pal=0;
pkt->size += 4*256;
pkt->data= ptr;
memcpy(pkt->data + pkt->size - 4*256, ast->pal, 4*256);
}else
av_log(s, AV_LOG_ERROR, "Failed to append palette\n");
uint8_t *pal;
pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
if(!pal){
av_log(s, AV_LOG_ERROR, "Failed to allocate data for palette\n");
}else{
memcpy(pal, ast->pal, AVPALETTE_SIZE);
ast->has_pal = 0;
}
}

if (CONFIG_DV_DEMUXER && avi->dv_demux) {

@@ -1398,7 +1401,6 @@ static int avi_read_close(AVFormatContext *s)
for(i=0;i<s->nb_streams;i++) {
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
av_free(st->codec->palctrl);
if (ast) {
if (ast->sub_ctx) {
av_freep(&ast->sub_ctx->pb);
@@ -30,7 +30,6 @@
#endif
#include "url.h"

#if FF_API_URL_CLASS
/** @name Logging context. */
/*@{*/
static const char *urlcontext_to_name(void *ptr)

@@ -47,7 +46,6 @@ static const AVClass urlcontext_class = {
.version = LIBAVUTIL_VERSION_INT,
};
/*@}*/
#endif

static int default_interrupt_cb(void);

@@ -85,29 +83,6 @@ int ffurl_register_protocol(URLProtocol *protocol, int size)
return 0;
}

#if FF_API_REGISTER_PROTOCOL
/* The layout of URLProtocol as of when major was bumped to 52 */
struct URLProtocol_compat {
const char *name;
int (*url_open)(URLContext *h, const char *filename, int flags);
int (*url_read)(URLContext *h, unsigned char *buf, int size);
int (*url_write)(URLContext *h, unsigned char *buf, int size);
int64_t (*url_seek)(URLContext *h, int64_t pos, int whence);
int (*url_close)(URLContext *h);
struct URLProtocol *next;
};

int av_register_protocol(URLProtocol *protocol)
{
return ffurl_register_protocol(protocol, sizeof(struct URLProtocol_compat));
}

int register_protocol(URLProtocol *protocol)
{
return ffurl_register_protocol(protocol, sizeof(struct URLProtocol_compat));
}
#endif

static int url_alloc_for_protocol (URLContext **puc, struct URLProtocol *up,
const char *filename, int flags)
{

@@ -123,9 +98,7 @@ static int url_alloc_for_protocol (URLContext **puc, struct URLProtocol *up,
err = AVERROR(ENOMEM);
goto fail;
}
#if FF_API_URL_CLASS
uc->av_class = &urlcontext_class;
#endif
uc->filename = (char *) &uc[1];
strcpy(uc->filename, filename);
uc->prot = up;

@@ -157,7 +130,7 @@ int ffurl_connect(URLContext* uc)
return err;
uc->is_connected = 1;
//We must be careful here as ffurl_seek() could be slow, for example for http
if( (uc->flags & (AVIO_WRONLY | AVIO_RDWR))
if( (uc->flags & AVIO_FLAG_WRITE)
|| !strcmp(uc->prot->name, "file"))
if(!uc->is_streamed && ffurl_seek(uc, 0, SEEK_SET) < 0)
uc->is_streamed= 1;

@@ -317,21 +290,21 @@ static inline int retry_transfer_wrapper(URLContext *h, unsigned char *buf, int

int ffurl_read(URLContext *h, unsigned char *buf, int size)
{
if (h->flags & AVIO_WRONLY)
if (!(h->flags & AVIO_FLAG_READ))
return AVERROR(EIO);
return retry_transfer_wrapper(h, buf, size, 1, h->prot->url_read);
}

int ffurl_read_complete(URLContext *h, unsigned char *buf, int size)
{
if (h->flags & AVIO_WRONLY)
if (!(h->flags & AVIO_FLAG_READ))
return AVERROR(EIO);
return retry_transfer_wrapper(h, buf, size, size, h->prot->url_read);
}

int ffurl_write(URLContext *h, const unsigned char *buf, int size)
{
if (!(h->flags & (AVIO_WRONLY | AVIO_RDWR)))
if (!(h->flags & AVIO_FLAG_WRITE))
return AVERROR(EIO);
/* avoid sending too big packets */
if (h->max_packet_size && size > h->max_packet_size)

@@ -370,7 +343,7 @@ int ffurl_close(URLContext *h)
int url_exist(const char *filename)
{
URLContext *h;
if (ffurl_open(&h, filename, AVIO_RDONLY) < 0)
if (ffurl_open(&h, filename, AVIO_FLAG_READ) < 0)
return 0;
ffurl_close(h);
return 1;
@@ -101,9 +101,7 @@ typedef struct {
* @deprecated This struct will be made private
*/
typedef struct URLContext {
#if FF_API_URL_CLASS
const AVClass *av_class; ///< information for av_log(). Set by url_open().
#endif
struct URLProtocol *prot;
int flags;
int is_streamed; /**< true if streamed (no seek possible), default = false */

@@ -152,9 +150,9 @@ attribute_deprecated int url_poll(URLPollEntry *poll_table, int n, int timeout);
* constants, optionally ORed with other flags.
* @{
*/
#define URL_RDONLY 0 /**< read-only */
#define URL_WRONLY 1 /**< write-only */
#define URL_RDWR 2 /**< read-write */
#define URL_RDONLY 1 /**< read-only */
#define URL_WRONLY 2 /**< write-only */
#define URL_RDWR (URL_RDONLY|URL_WRONLY) /**< read-write */
/**
* @}
*/

@@ -346,7 +344,7 @@ attribute_deprecated int url_exist(const char *url);
#endif // FF_API_OLD_AVIO

/**
* Return AVIO_* access flags corresponding to the access permissions
* Return AVIO_FLAG_* access flags corresponding to the access permissions
* of the resource in url, or a negative value corresponding to an
* AVERROR code in case of failure. The returned access flags are
* masked by the value in flags.

@@ -356,9 +354,6 @@ attribute_deprecated int url_exist(const char *url);
* one call to another. Thus you should not trust the returned value,
* unless you are sure that no other processes are accessing the
* checked resource.
*
* @note This function is slightly broken until next major bump
* because of AVIO_RDONLY == 0. Don't use it until then.
*/
int avio_check(const char *url, int flags);
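The removed note about AVIO_RDONLY == 0 reflects why avio_check() becomes usable here: the read flag is now a real bit. Under that assumption, a caller can probe access rights roughly as the img2 demuxer below does; a hedged sketch (helper name is illustrative):

/* Sketch: probe whether a resource is readable with avio_check(). */
#include <libavformat/avio.h>

static int is_readable(const char *url)
{
    int ret = avio_check(url, AVIO_FLAG_READ); /* negative values are AVERROR codes */
    return ret > 0 && (ret & AVIO_FLAG_READ);
}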
@@ -370,22 +365,6 @@ int avio_check(const char *url, int flags);
*/
void avio_set_interrupt_cb(int (*interrupt_cb)(void));

#if FF_API_REGISTER_PROTOCOL
extern URLProtocol *first_protocol;
#endif

#if FF_API_REGISTER_PROTOCOL
/**
* @deprecated Use av_register_protocol() instead.
*/
attribute_deprecated int register_protocol(URLProtocol *protocol);

/**
* @deprecated Use av_register_protocol2() instead.
*/
attribute_deprecated int av_register_protocol(URLProtocol *protocol);
#endif

/**
* Allocate and initialize an AVIOContext for buffered I/O. It must be later
* freed with av_free().

@@ -543,29 +522,15 @@ int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen);
int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen);

#if FF_API_URL_RESETBUF
/** Reset the buffer for reading or writing.
* @note Will drop any data currently in the buffer without transmitting it.
* @param flags URL_RDONLY to set up the buffer for reading, or URL_WRONLY
* to set up the buffer for writing. */
int url_resetbuf(AVIOContext *s, int flags);
#endif

/**
* @name URL open modes
* The flags argument to avio_open must be one of the following
* constants, optionally ORed with other flags.
* @{
*/
#if LIBAVFORMAT_VERSION_MAJOR < 53
#define AVIO_RDONLY 0 /**< read-only */
#define AVIO_WRONLY 1 /**< write-only */
#define AVIO_RDWR 2 /**< read-write */
#else
#define AVIO_RDONLY 1 /**< read-only */
#define AVIO_WRONLY 2 /**< write-only */
#define AVIO_RDWR 4 /**< read-write */
#endif
#define AVIO_FLAG_READ 1 /**< read-only */
#define AVIO_FLAG_WRITE 2 /**< write-only */
#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */
/**
* @}
*/
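The old tristate AVIO_RDONLY/AVIO_WRONLY/AVIO_RDWR constants give way to real flag bits, so read access is an explicit bit rather than the value zero and read-write is simply the OR of the two. A minimal sketch of opening and reading with the new names (assumes the lavf 53 headers from this tree; error handling trimmed):

/* Sketch: open a resource read-only with the new AVIO_FLAG_* names. */
#include <libavformat/avformat.h>

static int read_head(const char *url, unsigned char *buf, int buf_size)
{
    AVIOContext *in = NULL;
    int ret = avio_open(&in, url, AVIO_FLAG_READ);
    if (ret < 0)
        return ret;                 /* AVERROR code */
    ret = avio_read(in, buf, buf_size);
    avio_close(in);
    return ret;                     /* bytes read, or AVERROR */
}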
@@ -582,11 +547,7 @@ int url_resetbuf(AVIOContext *s, int flags);
* Warning: non-blocking protocols is work-in-progress; this flag may be
* silently ignored.
*/
#if LIBAVFORMAT_VERSION_MAJOR < 53
#define AVIO_FLAG_NONBLOCK 4
#else
#define AVIO_FLAG_NONBLOCK 8
#endif

/**
* Create and initialize a AVIOContext for accessing the

@@ -630,10 +591,6 @@ int avio_open_dyn_buf(AVIOContext **s);
*/
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);

#if FF_API_UDP_GET_FILE
int udp_get_file_handle(URLContext *h);
#endif

/**
* Iterate through names of available protocols.
* @note it is recommanded to use av_protocol_next() instead of this

@@ -38,9 +38,7 @@
#define SHORT_SEEK_THRESHOLD 4096

static void fill_buffer(AVIOContext *s);
#if !FF_API_URL_RESETBUF
static int url_resetbuf(AVIOContext *s, int flags);
#endif

int ffio_init_context(AVIOContext *s,
unsigned char *buffer,

@@ -55,7 +53,7 @@ int ffio_init_context(AVIOContext *s,
s->buffer_size = buffer_size;
s->buf_ptr = buffer;
s->opaque = opaque;
url_resetbuf(s, write_flag ? AVIO_WRONLY : AVIO_RDONLY);
url_resetbuf(s, write_flag ? AVIO_FLAG_WRITE : AVIO_FLAG_READ);
s->write_packet = write_packet;
s->read_packet = read_packet;
s->seek = seek;

@@ -859,7 +857,7 @@ int ffio_fdopen(AVIOContext **s, URLContext *h)
}

if (ffio_init_context(*s, buffer, buffer_size,
(h->flags & AVIO_WRONLY || h->flags & AVIO_RDWR), h,
h->flags & AVIO_FLAG_WRITE, h,
(void*)ffurl_read, (void*)ffurl_write, (void*)ffurl_seek) < 0) {
av_free(buffer);
av_freep(s);

@@ -888,24 +886,15 @@ int ffio_set_buf_size(AVIOContext *s, int buf_size)
s->buffer = buffer;
s->buffer_size = buf_size;
s->buf_ptr = buffer;
url_resetbuf(s, s->write_flag ? AVIO_WRONLY : AVIO_RDONLY);
url_resetbuf(s, s->write_flag ? AVIO_FLAG_WRITE : AVIO_FLAG_READ);
return 0;
}

#if FF_API_URL_RESETBUF
int url_resetbuf(AVIOContext *s, int flags)
#else
static int url_resetbuf(AVIOContext *s, int flags)
#endif
{
#if FF_API_URL_RESETBUF
if (flags & AVIO_RDWR)
return AVERROR(EINVAL);
#else
assert(flags == AVIO_WRONLY || flags == AVIO_RDONLY);
#endif
assert(flags == AVIO_FLAG_WRITE || flags == AVIO_FLAG_READ);

if (flags & AVIO_WRONLY) {
if (flags & AVIO_FLAG_WRITE) {
s->buf_end = s->buffer + s->buffer_size;
s->write_flag = 1;
} else {

@@ -1063,7 +1052,7 @@ int url_open_buf(AVIOContext **s, uint8_t *buf, int buf_size, int flags)
if(!*s)
return AVERROR(ENOMEM);
ret = ffio_init_context(*s, buf, buf_size,
(flags & AVIO_WRONLY || flags & AVIO_RDWR),
flags & AVIO_FLAG_WRITE,
NULL, NULL, NULL, NULL);
if(ret != 0)
av_freep(s);
@@ -76,12 +76,12 @@ static int crypto_open(URLContext *h, const char *uri, int flags)
ret = AVERROR(EINVAL);
goto err;
}
if (flags == AVIO_WRONLY) {
if (flags & AVIO_FLAG_WRITE) {
av_log(h, AV_LOG_ERROR, "Only decryption is supported currently\n");
ret = AVERROR(ENOSYS);
goto err;
}
if ((ret = ffurl_open(&c->hd, nested_url, AVIO_RDONLY)) < 0) {
if ((ret = ffurl_open(&c->hd, nested_url, AVIO_FLAG_READ)) < 0) {
av_log(h, AV_LOG_ERROR, "Unable to open input\n");
goto err;
}

@@ -58,9 +58,8 @@ static int file_check(URLContext *h, int mask)
if (ret < 0)
return AVERROR(errno);

ret |= st.st_mode&S_IRUSR ? mask&AVIO_RDONLY : 0;
ret |= st.st_mode&S_IWUSR ? mask&AVIO_WRONLY : 0;
ret |= st.st_mode&S_IWUSR && st.st_mode&S_IRUSR ? mask&AVIO_RDWR : 0;
ret |= st.st_mode&S_IRUSR ? mask&AVIO_FLAG_READ : 0;
ret |= st.st_mode&S_IWUSR ? mask&AVIO_FLAG_WRITE : 0;

return ret;
}

@@ -74,9 +73,9 @@ static int file_open(URLContext *h, const char *filename, int flags)

av_strstart(filename, "file:", &filename);

if (flags & AVIO_RDWR) {
if (flags & AVIO_FLAG_WRITE && flags & AVIO_FLAG_READ) {
access = O_CREAT | O_TRUNC | O_RDWR;
} else if (flags & AVIO_WRONLY) {
} else if (flags & AVIO_FLAG_WRITE) {
access = O_CREAT | O_TRUNC | O_WRONLY;
} else {
access = O_RDONLY;

@@ -132,7 +131,7 @@ static int pipe_open(URLContext *h, const char *filename, int flags)

fd = strtol(filename, &final, 10);
if((filename == final) || *final ) {/* No digits found, or something like 10ab */
if (flags & AVIO_WRONLY) {
if (flags & AVIO_FLAG_WRITE) {
fd = 1;
} else {
fd = 0;

@@ -116,11 +116,9 @@ static int flac_read_header(AVFormatContext *s,

static int flac_probe(AVProbeData *p)
{
uint8_t *bufptr = p->buf;
uint8_t *end = p->buf + p->buf_size;

if(bufptr > end-4 || memcmp(bufptr, "fLaC", 4)) return 0;
else return AVPROBE_SCORE_MAX/2;
if (p->buf_size < 4 || memcmp(p->buf, "fLaC", 4))
    return 0;
return AVPROBE_SCORE_MAX/2;
}

AVInputFormat ff_flac_demuxer = {

@@ -50,7 +50,7 @@ static int gopher_connect(URLContext *h, const char *path)
if (!path) return AVERROR(EINVAL);
break;
default:
av_log(NULL, AV_LOG_WARNING,
av_log(h, AV_LOG_WARNING,
"Gopher protocol type '%c' not supported yet!\n",
*path);
return AVERROR(EINVAL);

@@ -100,7 +100,7 @@ static int gopher_open(URLContext *h, const char *uri, int flags)
ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);

s->hd = NULL;
err = ffurl_open(&s->hd, buf, AVIO_RDWR);
err = ffurl_open(&s->hd, buf, AVIO_FLAG_READ_WRITE);
if (err < 0)
goto fail;
@@ -73,7 +73,7 @@ void ff_http_set_headers(URLContext *h, const char *headers)
int len = strlen(headers);

if (len && strcmp("\r\n", headers + len - 2))
av_log(NULL, AV_LOG_ERROR, "No trailing CRLF found in HTTP header.\n");
av_log(h, AV_LOG_ERROR, "No trailing CRLF found in HTTP header.\n");

av_strlcpy(s->headers, headers, sizeof(s->headers));
}

@@ -127,7 +127,7 @@ static int http_open_cnx(URLContext *h)
port = 80;

ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
err = ffurl_open(&hd, buf, AVIO_RDWR);
err = ffurl_open(&hd, buf, AVIO_FLAG_READ_WRITE);
if (err < 0)
goto fail;

@@ -235,7 +235,7 @@ static int process_line(URLContext *h, char *line, int line_count,
* don't abort until all headers have been parsed. */
if (s->http_code >= 400 && s->http_code < 600 && s->http_code != 401) {
end += strspn(end, SPACE_CHARS);
av_log(NULL, AV_LOG_WARNING, "HTTP error %d %s\n",
av_log(h, AV_LOG_WARNING, "HTTP error %d %s\n",
s->http_code, end);
return -1;
}

@@ -301,7 +301,7 @@ static int http_connect(URLContext *h, const char *path, const char *hoststr,

/* send http header */
post = h->flags & AVIO_WRONLY;
post = h->flags & AVIO_FLAG_WRITE;
authstr = ff_http_auth_create_response(&s->auth_state, auth, path,
post ? "POST" : "GET");

@@ -456,7 +456,7 @@ static int http_close(URLContext *h)
HTTPContext *s = h->priv_data;

/* signal end of chunked encoding if used */
if ((h->flags & AVIO_WRONLY) && s->chunksize != -1) {
if ((h->flags & AVIO_FLAG_WRITE) && s->chunksize != -1) {
ret = ffurl_write(s->hd, footer, sizeof(footer) - 1);
ret = ret > 0 ? 0 : ret;
}

@@ -86,13 +86,11 @@ typedef struct IdcinDemuxContext {
int audio_present;

int64_t pts;

AVPaletteControl palctrl;
} IdcinDemuxContext;

static int idcin_probe(AVProbeData *p)
{
unsigned int number;
unsigned int number, sample_rate;

/*
* This is what you could call a "probabilistic" file check: id CIN

@@ -121,18 +119,18 @@ static int idcin_probe(AVProbeData *p)
return 0;

/* check the audio sample rate */
number = AV_RL32(&p->buf[8]);
if ((number != 0) && ((number < 8000) | (number > 48000)))
sample_rate = AV_RL32(&p->buf[8]);
if (sample_rate && (sample_rate < 8000 || sample_rate > 48000))
return 0;

/* check the audio bytes/sample */
number = AV_RL32(&p->buf[12]);
if (number > 2)
if (number > 2 || sample_rate && !number)
return 0;

/* check the audio channels */
number = AV_RL32(&p->buf[16]);
if (number > 2)
if (number > 2 || sample_rate && !number)
return 0;

/* return half certainly since this check is a bit sketchy */

@@ -172,8 +170,6 @@ static int idcin_read_header(AVFormatContext *s,
if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
HUFFMAN_TABLE_SIZE)
return AVERROR(EIO);
/* save a reference in order to transport the palette */
st->codec->palctrl = &idcin->palctrl;

/* if sample rate is 0, assume no audio */
if (sample_rate) {

@@ -226,6 +222,7 @@ static int idcin_read_packet(AVFormatContext *s,
int palette_scale;
unsigned char r, g, b;
unsigned char palette_buffer[768];
uint32_t palette[256];

if (url_feof(s->pb))
return AVERROR(EIO);

@@ -236,7 +233,6 @@ static int idcin_read_packet(AVFormatContext *s,
return AVERROR(EIO);
} else if (command == 1) {
/* trigger a palette change */
idcin->palctrl.palette_changed = 1;
if (avio_read(pb, palette_buffer, 768) != 768)
return AVERROR(EIO);
/* scale the palette as necessary */

@@ -251,7 +247,7 @@ static int idcin_read_packet(AVFormatContext *s,
r = palette_buffer[i * 3 ] << palette_scale;
g = palette_buffer[i * 3 + 1] << palette_scale;
b = palette_buffer[i * 3 + 2] << palette_scale;
idcin->palctrl.palette[i] = (r << 16) | (g << 8) | (b);
palette[i] = (r << 16) | (g << 8) | (b);
}
}

@@ -262,6 +258,15 @@ static int idcin_read_packet(AVFormatContext *s,
ret= av_get_packet(pb, pkt, chunk_size);
if (ret < 0)
return ret;
if (command == 1) {
uint8_t *pal;

pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
AVPALETTE_SIZE);
if (ret < 0)
return ret;
memcpy(pal, palette, AVPALETTE_SIZE);
}
pkt->stream_index = idcin->video_stream_index;
pkt->pts = idcin->pts;
} else {

@@ -185,6 +185,11 @@ static int iff_read_header(AVFormatContext *s,
break;

case ID_CMAP:
if (data_size < 3 || data_size > 768 || data_size % 3) {
av_log(s, AV_LOG_ERROR, "Invalid CMAP chunk size %d\n",
data_size);
return AVERROR_INVALIDDATA;
}
st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
st->codec->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
@@ -143,11 +143,11 @@ static int find_image_range(int *pfirst_index, int *plast_index,
if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0){
*pfirst_index =
*plast_index = 1;
if(url_exist(buf))
if (avio_check(buf, AVIO_FLAG_READ) > 0)
return 0;
return -1;
}
if (url_exist(buf))
if (avio_check(buf, AVIO_FLAG_READ) > 0)
break;
}
if (first_index == 5)

@@ -165,7 +165,7 @@ static int find_image_range(int *pfirst_index, int *plast_index,
if (av_get_frame_filename(buf, sizeof(buf), path,
last_index + range1) < 0)
goto fail;
if (!url_exist(buf))
if (avio_check(buf, AVIO_FLAG_READ) <= 0)
break;
range = range1;
/* just in case... */

@@ -314,7 +314,7 @@ static int read_packet(AVFormatContext *s1, AVPacket *pkt)
s->path, s->img_number)<0 && s->img_number > 1)
return AVERROR(EIO);
for(i=0; i<3; i++){
if (avio_open(&f[i], filename, AVIO_RDONLY) < 0) {
if (avio_open(&f[i], filename, AVIO_FLAG_READ) < 0) {
if(i==1)
break;
av_log(s1, AV_LOG_ERROR, "Could not open file : %s\n",filename);

@@ -401,7 +401,7 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt)
return AVERROR(EINVAL);
}
for(i=0; i<3; i++){
if (avio_open(&pb[i], filename, AVIO_WRONLY) < 0) {
if (avio_open(&pb[i], filename, AVIO_FLAG_WRITE) < 0) {
av_log(s, AV_LOG_ERROR, "Could not open file : %s\n",filename);
return AVERROR(EIO);
}

@@ -83,18 +83,6 @@ void ff_read_frame_flush(AVFormatContext *s);
/** Get the current time since NTP epoch in microseconds. */
uint64_t ff_ntp_time(void);

#if FF_API_URL_SPLIT
/**
* @deprecated use av_url_split() instead
*/
void ff_url_split(char *proto, int proto_size,
char *authorization, int authorization_size,
char *hostname, int hostname_size,
int *port_ptr,
char *path, int path_size,
const char *url);
#endif

/**
* Assemble a URL string from components. This is the reverse operation
* of av_url_split.

@@ -86,6 +86,8 @@ typedef struct IPMVEContext {
unsigned int video_width;
unsigned int video_height;
int64_t video_pts;
uint32_t palette[256];
int has_palette;

unsigned int audio_bits;
unsigned int audio_channels;

@@ -105,8 +107,6 @@ typedef struct IPMVEContext {

int64_t next_chunk_offset;

AVPaletteControl palette_control;

} IPMVEContext;

static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,

@@ -151,6 +151,17 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
return CHUNK_NOMEM;

if (s->has_palette) {
uint8_t *pal;

pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
AVPALETTE_SIZE);
if (pal) {
memcpy(pal, s->palette, AVPALETTE_SIZE);
s->has_palette = 0;
}
}

pkt->pos= s->decode_map_chunk_offset;
avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
s->decode_map_chunk_offset = 0;

@@ -444,10 +455,9 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
r = scratch[j++] * 4;
g = scratch[j++] * 4;
b = scratch[j++] * 4;
s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
s->palette[i] = (r << 16) | (g << 8) | (b);
}
/* indicate a palette change */
s->palette_control.palette_changed = 1;
s->has_palette = 1;
break;

case OPCODE_SET_PALETTE_COMPRESSED:

@@ -561,9 +571,6 @@ static int ipmovie_read_header(AVFormatContext *s,
st->codec->height = ipmovie->video_height;
st->codec->bits_per_coded_sample = ipmovie->video_bpp;

/* palette considerations */
st->codec->palctrl = &ipmovie->palette_control;

if (ipmovie->audio_type) {
st = av_new_stream(s, 0);
if (!st)
@@ -123,6 +123,8 @@ typedef struct MOVStreamContext {
int width; ///< tkhd width
int height; ///< tkhd height
int dts_shift; ///< dts shift when ctts is negative
uint32_t palette[256];
int has_palette;
} MOVStreamContext;

typedef struct MOVContext {

@@ -94,7 +94,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
goto fail;
}

if (flags & AVIO_WRONLY)
if (flags & AVIO_FLAG_WRITE)
RTMP_EnableWrite(r);

if (!RTMP_Connect(r, NULL) || !RTMP_ConnectStream(r, 0)) {

@@ -1402,7 +1402,7 @@ static int matroska_read_header(AVFormatContext *s, AVFormatParameters *ap)
&& track->codec_priv.data != NULL) {
int ret;
ffio_init_context(&b, track->codec_priv.data, track->codec_priv.size,
AVIO_RDONLY, NULL, NULL, NULL, NULL);
0, NULL, NULL, NULL, NULL);
ret = ff_get_wav_header(&b, st->codec, track->codec_priv.size);
if (ret < 0)
return ret;

@@ -2028,10 +2028,11 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
if (tracks[i].type == MATROSKA_TRACK_TYPE_SUBTITLE
&& !tracks[i].stream->discard != AVDISCARD_ALL) {
index_sub = av_index_search_timestamp(tracks[i].stream, st->index_entries[index].timestamp, AVSEEK_FLAG_BACKWARD);
if (index_sub >= 0
&& st->index_entries[index_sub].pos < st->index_entries[index_min].pos
&& st->index_entries[index].timestamp - st->index_entries[index_sub].timestamp < 30000000000/matroska->time_scale)
index_min = index_sub;
while(index_sub >= 0
&& index_min >= 0
&& tracks[i].stream->index_entries[index_sub].pos < st->index_entries[index_min].pos
&& st->index_entries[index].timestamp - tracks[i].stream->index_entries[index_sub].timestamp < 30000000000/matroska->time_scale)
index_min--;
}
}

@@ -36,7 +36,7 @@ static int md5_open(URLContext *h, const char *filename, int flags)
return -1;
}

if (flags != AVIO_WRONLY)
if (!flags & AVIO_FLAG_WRITE)
return AVERROR(EINVAL);

av_md5_init(h->priv_data);

@@ -65,7 +65,7 @@ static int md5_close(URLContext *h)
av_strstart(filename, "md5:", &filename);

if (*filename) {
err = ffurl_open(&out, filename, AVIO_WRONLY);
err = ffurl_open(&out, filename, AVIO_FLAG_WRITE);
if (err)
return err;
err = ffurl_write(out, buf, i*2+1);
Some files were not shown because too many files have changed in this diff.