Compare commits
94 Commits
Commit SHA1s:

15466db69e 27816fb9ef f06d9dced4 26cb351452 762a5878a6 f0af6e705f
70b97a89d2 1d1adf5ff4 acfad331ad 43f8a422b3 95bd0f3a4b eddf146ada
7293372959 2e1226a695 f66d2bf949 4a6ac71742 08337cca05 9c655d2a57
f00f799833 05684cee42 e693af81b7 73ebc4046e 1cbd7b08f6 a330aca126
2e7bd0f725 a066b2cedd a18e8d82de 441ef87ea8 6e53134f98 237751eb25
264eb0074f 7db809a373 0df814cf97 88fa3243dd 2a6f2cd848 c001472226
1ec0541ae0 151554e1eb ac91bfe086 bbcf6f5c62 e740506d31 65aac419e5
662714abbe 51782e8690 0afb004d3c a9c3b588af f775a92054 cccb06b095
be54d1f104 e84d17c7c9 254fabe758 d1f8eaf3d2 483a02e25f 3f06023bd2
ce3a8c983f f5c880cecb 2af2c7ecff 12aa4220dd e40688ed80 d403242a28
f921a47fae a7fa1c9b2b 6ff54eb87b 7f2ab5e50f aebb9410c5 459090181f
071d7f4e17 620197d1ff 73e7fe8e64 973e67e5d2 d89cd16afa 7cbceb16ad
157dd52700 23614d09d5 bea4894d0c 250cf2d4da 93d076b4fd 8749b83e0b
a9b600cf39 c3c8365dbd 6432f8826d e04ae11fa0 08fadda68a 9473e5a05d
8a24ccfee3 3ba55ea4ae 265db41540 ed5041143e 965f96c5ed 3b6aeb148b
259bb2555b 665b67014f 1051c152f9 5a0862af55
Changelog (89 lines changed)
@@ -3,6 +3,93 @@ releases are sorted from youngest to oldest.

version <next>:

version 2.7.2:
- imc: use correct position for flcoeffs2 calculation
- hevc: check slice address length
- snow: remove an obsolete av_assert2
- webp: fix infinite loop in webp_decode_frame
- wavpack: limit extra_bits to 32 and use get_bits_long
- ffmpeg: only count got_output/errors in decode_error_stat
- ffmpeg: exit_on_error if decoding a packet failed
- pthread_frame: forward error codes when flushing
- huffyuvdec: validate image size
- wavpack: use get_bits_long to read up to 32 bits
- nutdec: check maxpos in read_sm_data before returning success
- s302m: fix arithmetic exception
- vc1dec: use get_bits_long and limit the read bits to 32
- mpegaudiodec: copy AVFloatDSPContext from first context to all contexts
- avcodec/vp8: Check buffer size in vp8_decode_frame_header()
- avcodec/vp8: Fix null pointer dereference in ff_vp8_decode_free()
- avcodec/diracdec: Check for hpel_base allocation failure
- avcodec/rv34: Clear pointers in ff_rv34_decode_init_thread_copy()
- avfilter/af_aresample: Check ff_all_* for allocation failures
- avcodec/pthread_frame: clear priv_data, avoid stale pointer in error case
- swscale/utils: Clear pix buffers
- avutil/fifo: Fix the case where func() returns less bytes than requested in av_fifo_generic_write()
- ffmpeg: Fix cleanup after failed allocation of output_files
- avformat/mov: Fix deallocation when MOVStreamContext failed to allocate
- ffmpeg: Fix crash with ost->last_frame allocation failure
- ffmpeg: Fix cleanup with ost = NULL
- avcodec/pthread_frame: check avctx on deallocation
- avcodec/sanm: Reset sizes in destroy_buffers()
- avcodec/alac: Clear pointers in allocate_buffers()
- bytestream2: set the reader to the end when reading more than available
- avcodec/utils: use a minimum 32pixel width in avcodec_align_dimensions2() for H.264
- avcodec/mpegvideo: Clear pointers in ff_mpv_common_init()
- oggparsedirac: check return value of init_get_bits
- wmalosslessdec: reset frame->nb_samples on packet loss
- wmalosslessdec: avoid reading 0 bits with get_bits
- Put a space between string literals and macros.
- avcodec/rawenc: Use ff_alloc_packet() instead of ff_alloc_packet2()
- avcodec/aacsbr: check that the element type matches before applying SBR
- avcodec/h264_slice: Use w/h from the AVFrame instead of mb_w/h
- vp9/update_prob: prevent out of bounds table read
- avfilter/vf_transpose: Fix rounding error
- avcodec/h264_refs: discard mismatching references
- avcodec/mjpegdec: Fix small picture upscale
- avcodec/pngdec: Check values before updating context in decode_fctl_chunk()
- avcodec/pngdec: Copy IHDR & plte state from last thread
- avcodec/pngdec: Require a IHDR chunk before fctl
- avcodec/pngdec: Only allow one IHDR chunk
- wmavoice: limit wmavoice_decode_packet return value to packet size
- swscale/swscale_unscaled: Fix rounding difference with RGBA output between little and big endian
- ffmpeg: Do not use the data/size of a bitstream filter after failure
- swscale/x86/rgb2rgb_template: fix signedness of v in shuffle_bytes_2103_{mmx,mmxext}
- vda: unlock the pixel buffer base address.
- swscale/rgb2rgb_template: Fix signedness of v in shuffle_bytes_2103_c()
- swscale/rgb2rgb_template: Implement shuffle_bytes_0321_c and fix shuffle_bytes_2103_c on BE
- swscale/rgb2rgb_template: Disable shuffle_bytes_2103_c on big endian
- swr: Remember previously set int_sample_format from user
- swresample: soxr implementation for swr_get_out_samples()
- avformat/swfdec: Do not error out on pixel format changes
- ffmpeg_opt: Fix forcing fourccs
- configure: Check for x265_api_get
- swscale/x86/rgb2rgb_template: don't call emms on sse2/avx functions
- swscale/x86/rgb2rgb_template: add missing xmm clobbers
- library.mak: Workaround SDL redefining main and breaking fate tests on mingw
- vaapi_h264: fix RefPicList[] field flags.

version 2.7.1:
- postproc: fix unaligned access
- avformat: clarify what package needs to be compiled with SSL support
- avcodec/libx264: Avoid reconfig on equivalent aspect ratios
- avcodec/flacenc: Fix Invalid Rice order
- tls_gnutls: fix hang on disconnection
- avcodec/hevc_ps: Only discard overread VPS if a previous is available
- ffmpeg: Free last_frame instead of just unref
- avcodec/ffv1enc: fix bps for >8bit yuv when not explicitly set
- avio: fix potential crashes when combining ffio_ensure_seekback + crc
- examples/demuxing_decoding: use properties from frame instead of video_dec_ctx
- h264: er: Copy from the previous reference only if compatible
- doc: fix spelling errors
- configure: only disable VSX for !ppc64el
- ffmpeg_opt: Check for localtime() failure
- avformat/singlejpeg: fix standalone compilation
- configure: Disable VSX on unspecified / generic CPUs
- avformat: Fix bug in parse_rps for HEVC.
- takdec: ensure chan2 is a valid channel index
- avcodec/h264_slice: Use AVFrame dimensions for grayscale handling


version 2.7:
- FFT video filter
@@ -573,7 +660,7 @@ easier to use. The changes are:
all the stream in the first input file, except for the second audio
stream'.
* There is a new option -c (or -codec) for choosing the decoder/encoder to
use, which allows to precisely specify target stream(s) consistently with
use, which makes it possible to precisely specify target stream(s) consistently with
other options. E.g. -c:v lib264 sets the codec for all video streams, -c:a:0
libvorbis sets the codec for the first audio stream and -c copy copies all
the streams without reencoding. Old -vcodec/-acodec/-scodec options are now
configure (9 lines changed, vendored)
@@ -4607,6 +4607,9 @@ unsigned int endian = 'B' << 24 | 'I' << 16 | 'G' << 8 | 'E';
EOF
od -t x1 $TMPO | grep -q '42 *49 *47 *45' && enable bigendian

if ! enabled ppc64 || enabled bigendian; then
disable vsx
fi

check_gas() {
log "check_gas using '$as' as AS"
@@ -5154,7 +5157,7 @@ enabled libx264 && { use_pkg_config x264 "stdint.h x264.h" x264_encode
warn "using libx264 without pkg-config"; } } &&
{ check_cpp_condition x264.h "X264_BUILD >= 118" ||
die "ERROR: libx264 must be installed and version must be >= 0.118."; }
enabled libx265 && require_pkg_config x265 x265.h x265_encoder_encode &&
enabled libx265 && require_pkg_config x265 x265.h x265_api_get &&
{ check_cpp_condition x265.h "X265_BUILD >= 57" ||
die "ERROR: libx265 version must be >= 57."; }
enabled libxavs && require libxavs xavs.h xavs_encoder_encode -lxavs
@@ -5236,8 +5239,8 @@ disabled securetransport || { check_func SecIdentityCreate "-Wl,-framework,CoreF
enable securetransport; }

makeinfo --version > /dev/null 2>&1 && enable makeinfo || disable makeinfo
enabled makeinfo && (makeinfo --version | \
grep -q 'makeinfo (GNU texinfo) 5' > /dev/null 2>&1) \
enabled makeinfo \
&& [ 0$(makeinfo --version | grep "texinfo" | sed 's/.*texinfo[^0-9]*\([0-9]*\)\..*/\1/') -ge 5 ] \
&& enable makeinfo_html || disable makeinfo_html
disabled makeinfo_html && texi2html --help 2> /dev/null | grep -q 'init-file' && enable texi2html || disable texi2html
perl -v > /dev/null 2>&1 && enable perl || disable perl
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.7
PROJECT_NUMBER = 2.7.2

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -95,7 +95,7 @@ This decoder aims to implement the complete FLAC specification from Xiph.

@item -use_buggy_lpc
The lavc FLAC encoder used to produce buggy streams with high lpc values
(like the default value). This option allows to decode such streams
(like the default value). This option makes it possible to decode such streams
correctly by using lavc's old buggy lpc logic for decoding.

@end table
@@ -81,22 +81,24 @@ static int decode_packet(int *got_frame, int cached)
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
return ret;
}
if (video_dec_ctx->width != width || video_dec_ctx->height != height ||
video_dec_ctx->pix_fmt != pix_fmt) {
/* To handle this change, one could call av_image_alloc again and
* decode the following frames into another rawvideo file. */
fprintf(stderr, "Error: Width, height and pixel format have to be "
"constant in a rawvideo file, but the width, height or "
"pixel format of the input video changed:\n"
"old: width = %d, height = %d, format = %s\n"
"new: width = %d, height = %d, format = %s\n",
width, height, av_get_pix_fmt_name(pix_fmt),
video_dec_ctx->width, video_dec_ctx->height,
av_get_pix_fmt_name(video_dec_ctx->pix_fmt));
return -1;
}

if (*got_frame) {

if (frame->width != width || frame->height != height ||
frame->format != pix_fmt) {
/* To handle this change, one could call av_image_alloc again and
* decode the following frames into another rawvideo file. */
fprintf(stderr, "Error: Width, height and pixel format have to be "
"constant in a rawvideo file, but the width, height or "
"pixel format of the input video changed:\n"
"old: width = %d, height = %d, format = %s\n"
"new: width = %d, height = %d, format = %s\n",
width, height, av_get_pix_fmt_name(pix_fmt),
frame->width, frame->height,
av_get_pix_fmt_name(frame->format));
return -1;
}

printf("video_frame%s n:%d coded_n:%d pts:%s\n",
cached ? "(cached)" : "",
video_frame_count++, frame->coded_picture_number,
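The comment in this hunk notes that, instead of aborting, one could call av_image_alloc() again and keep decoding into a new rawvideo file. Below is a minimal sketch of that idea; it is not part of the patch, and the helper name and the dst_data/dst_linesize variables are illustrative stand-ins for the example's video_dst_data/video_dst_linesize.

```c
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Hypothetical helper: replace the rawvideo destination buffer when the
 * decoded frame geometry changes, instead of returning an error. */
static int realloc_dst_on_change(const AVFrame *frame,
                                 uint8_t *dst_data[4], int dst_linesize[4],
                                 int *width, int *height,
                                 enum AVPixelFormat *pix_fmt)
{
    int bufsize;

    if (frame->width == *width && frame->height == *height &&
        frame->format == *pix_fmt)
        return 0;                       /* geometry unchanged, nothing to do */

    av_freep(&dst_data[0]);             /* av_image_alloc() used a single buffer */
    bufsize = av_image_alloc(dst_data, dst_linesize,
                             frame->width, frame->height,
                             frame->format, 1);
    if (bufsize < 0)
        return bufsize;                 /* allocation failed */

    *width   = frame->width;
    *height  = frame->height;
    *pix_fmt = frame->format;
    return bufsize;                     /* new destination buffer size */
}
```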
@@ -117,7 +117,7 @@ static int open_output_file(const char *filename)
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
}
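For context, the example resolves an encoder from the decoder's codec_id before opening it. The sketch below shows that lookup-and-open pattern with the 2.7-era API; it is illustrative only (the real example opens the encoder on the output stream's codec context, and the helper name here is made up).

```c
#include <libavcodec/avcodec.h>

/* Illustrative helper: find an encoder matching the decoder's codec ID and open it. */
static int open_same_codec_encoder(AVCodecContext *dec_ctx, AVCodecContext **penc_ctx)
{
    AVCodec *encoder = avcodec_find_encoder(dec_ctx->codec_id);
    AVCodecContext *enc_ctx;
    int ret;

    if (!encoder)
        return AVERROR_INVALIDDATA;     /* "Necessary encoder not found" */

    enc_ctx = avcodec_alloc_context3(encoder);
    if (!enc_ctx)
        return AVERROR(ENOMEM);

    /* ... copy stream parameters (width/height/pix_fmt or sample_rate/channels) ... */

    ret = avcodec_open2(enc_ctx, encoder, NULL);
    if (ret < 0) {
        avcodec_free_context(&enc_ctx);
        return ret;
    }
    *penc_ctx = enc_ctx;
    return 0;
}
```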
ffmpeg.c (32 lines changed)
@@ -456,7 +456,10 @@ static void ffmpeg_cleanup(int ret)
/* close files */
for (i = 0; i < nb_output_files; i++) {
OutputFile *of = output_files[i];
AVFormatContext *s = of->ctx;
AVFormatContext *s;
if (!of)
continue;
s = of->ctx;
if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
avio_closep(&s->pb);
avformat_free_context(s);
@@ -466,7 +469,12 @@ static void ffmpeg_cleanup(int ret)
}
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
AVBitStreamFilterContext *bsfc;

if (!ost)
continue;

bsfc = ost->bitstream_filters;
while (bsfc) {
AVBitStreamFilterContext *next = bsfc->next;
av_bitstream_filter_close(bsfc);
@@ -650,6 +658,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
if (!new_pkt.buf)
exit_program(1);
} else if (a < 0) {
new_pkt = *pkt;
av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
bsfc->filter->name, pkt->stream_index,
avctx->codec ? avctx->codec->name : "copy");
@@ -1176,8 +1185,10 @@ static void do_video_out(AVFormatContext *s,
if (!ost->last_frame)
ost->last_frame = av_frame_alloc();
av_frame_unref(ost->last_frame);
if (next_picture)
if (next_picture && ost->last_frame)
av_frame_ref(ost->last_frame, next_picture);
else
av_frame_free(&ost->last_frame);
}

static double psnr(double d)
@@ -1863,9 +1874,12 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
ret = AVERROR_INVALIDDATA;
}

if (*got_output || ret<0 || pkt->size)
if (*got_output || ret<0)
decode_error_stat[ret<0] ++;

if (ret < 0 && exit_on_error)
exit_program(1);

if (!*got_output || ret < 0) {
if (!pkt->size) {
for (i = 0; i < ist->nb_filters; i++)
@@ -2008,9 +2022,12 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
);
}

if (*got_output || ret<0 || pkt->size)
if (*got_output || ret<0)
decode_error_stat[ret<0] ++;

if (ret < 0 && exit_on_error)
exit_program(1);

if (*got_output && ret >= 0) {
if (ist->dec_ctx->width != decoded_frame->width ||
ist->dec_ctx->height != decoded_frame->height ||
@@ -2126,9 +2143,12 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
&subtitle, got_output, pkt);

if (*got_output || ret<0 || pkt->size)
if (*got_output || ret<0)
decode_error_stat[ret<0] ++;

if (ret < 0 && exit_on_error)
exit_program(1);

if (ret < 0 || !*got_output) {
if (!pkt->size)
sub2video_flush(ist);
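The last three hunks apply the same pattern to audio, video and subtitle decoding: a decode attempt is counted in decode_error_stat only when output was produced or the decoder returned an error (so empty flush packets no longer inflate the count), and -exit_on_error now aborts on a failed decode. A condensed sketch of that pattern follows, assuming a video decoder and using exit() in place of ffmpeg's exit_program(); it is not the actual ffmpeg.c code.

```c
#include <stdint.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>

/* Sketch of the error-accounting pattern shown in the hunks above.
 * decode_error_stat[0] counts successful decodes, decode_error_stat[1] failed ones. */
static uint64_t decode_error_stat[2];
static int exit_on_error;

static int decode_and_account(AVCodecContext *avctx, AVFrame *frame,
                              int *got_output, AVPacket *pkt)
{
    int ret = avcodec_decode_video2(avctx, frame, got_output, pkt);

    if (*got_output || ret < 0)        /* previously also counted when pkt->size != 0 */
        decode_error_stat[ret < 0]++;

    if (ret < 0 && exit_on_error)
        exit(1);                       /* ffmpeg.c calls exit_program(1) here */

    return ret;
}
```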
@@ -1203,6 +1203,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
uint32_t tag = strtol(codec_tag, &next, 0);
|
||||
if (*next)
|
||||
tag = AV_RL32(codec_tag);
|
||||
ost->st->codec->codec_tag =
|
||||
ost->enc_ctx->codec_tag = tag;
|
||||
}
|
||||
|
||||
@@ -2438,6 +2439,9 @@ static int opt_vstats(void *optctx, const char *opt, const char *arg)
|
||||
time_t today2 = time(NULL);
|
||||
struct tm *today = localtime(&today2);
|
||||
|
||||
if (!today)
|
||||
return AVERROR(errno);
|
||||
|
||||
snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
|
||||
today->tm_sec);
|
||||
return opt_vstats_file(NULL, opt, filename);
|
||||
|
@@ -77,6 +77,8 @@ static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
|
||||
frame->width, frame->height);
|
||||
|
||||
ret = av_frame_copy_props(vda->tmp_frame, frame);
|
||||
CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@@ -1019,6 +1019,8 @@ static unsigned int read_sbr_data(AACContext *ac, SpectralBandReplication *sbr,
|
||||
{
|
||||
unsigned int cnt = get_bits_count(gb);
|
||||
|
||||
sbr->id_aac = id_aac;
|
||||
|
||||
if (id_aac == TYPE_SCE || id_aac == TYPE_CCE) {
|
||||
if (read_sbr_single_channel_element(ac, sbr, gb)) {
|
||||
sbr_turnoff(sbr);
|
||||
@@ -1695,6 +1697,12 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
|
||||
int nch = (id_aac == TYPE_CPE) ? 2 : 1;
|
||||
int err;
|
||||
|
||||
if (id_aac != sbr->id_aac) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"element type mismatch %d != %d\n", id_aac, sbr->id_aac);
|
||||
sbr_turnoff(sbr);
|
||||
}
|
||||
|
||||
if (!sbr->kx_and_m_pushed) {
|
||||
sbr->kx[0] = sbr->kx[1];
|
||||
sbr->m[0] = sbr->m[1];
|
||||
@@ -1718,6 +1726,7 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
|
||||
sbr->c.sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1,
|
||||
(const float (*)[40][2]) sbr->X_low, sbr->k[0]);
|
||||
sbr_chirp(sbr, &sbr->data[ch]);
|
||||
av_assert0(sbr->data[ch].bs_num_env > 0);
|
||||
sbr_hf_gen(ac, sbr, sbr->X_high,
|
||||
(const float (*)[40][2]) sbr->X_low,
|
||||
(const float (*)[2]) sbr->alpha0,
|
||||
|
@@ -534,6 +534,12 @@ static int allocate_buffers(ALACContext *alac)
|
||||
int ch;
|
||||
int buf_size = alac->max_samples_per_frame * sizeof(int32_t);
|
||||
|
||||
for (ch = 0; ch < 2; ch++) {
|
||||
alac->predict_error_buffer[ch] = NULL;
|
||||
alac->output_samples_buffer[ch] = NULL;
|
||||
alac->extra_bits_buffer[ch] = NULL;
|
||||
}
|
||||
|
||||
for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) {
|
||||
FF_ALLOC_OR_GOTO(alac->avctx, alac->predict_error_buffer[ch],
|
||||
buf_size, buf_alloc_fail);
|
||||
|
@@ -103,7 +103,7 @@ typedef struct ASSSplitContext ASSSplitContext;
|
||||
* Split a full ASS file or a ASS header from a string buffer and store
|
||||
* the split structure in a newly allocated context.
|
||||
*
|
||||
* @param buf String containing the ASS formated data.
|
||||
* @param buf String containing the ASS formatted data.
|
||||
* @return Newly allocated struct containing split data.
|
||||
*/
|
||||
ASSSplitContext *ff_ass_split(const char *buf);
|
||||
|
@@ -247,7 +247,7 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
|
||||
|
||||
/* Build VLC decoding tables suitable for use with get_vlc().
|
||||
|
||||
'nb_bits' set thee decoding table size (2^nb_bits) entries. The
|
||||
'nb_bits' set the decoding table size (2^nb_bits) entries. The
|
||||
bigger it is, the faster is the decoding. But it should not be too
|
||||
big to save memory and L1 cache. '9' is a good compromise.
|
||||
|
||||
@@ -265,7 +265,7 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
|
||||
'xxx_size' : gives the number of bytes of each entry of the 'bits'
|
||||
or 'codes' tables.
|
||||
|
||||
'wrap' and 'size' allows to use any memory configuration and types
|
||||
'wrap' and 'size' make it possible to use any memory configuration and types
|
||||
(byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
|
||||
|
||||
'use_static' should be set to 1 for tables, which should be freed
|
||||
|
@@ -71,8 +71,10 @@ static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \
|
||||
} \
|
||||
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
|
||||
{ \
|
||||
if (g->buffer_end - g->buffer < bytes) \
|
||||
if (g->buffer_end - g->buffer < bytes) { \
|
||||
g->buffer = g->buffer_end; \
|
||||
return 0; \
|
||||
} \
|
||||
return bytestream2_get_ ## name ## u(g); \
|
||||
} \
|
||||
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \
|
||||
|
@@ -1563,7 +1563,7 @@ static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen,
|
||||
}
|
||||
}
|
||||
|
||||
static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
|
||||
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
|
||||
{
|
||||
/* chroma allocates an edge of 8 when subsampled
|
||||
which for 4:2:2 means an h edge of 16 and v edge of 8
|
||||
@@ -1575,11 +1575,14 @@ static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, in
|
||||
|
||||
/* no need for hpel if we only have fpel vectors */
|
||||
if (!s->mv_precision)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
for (i = 1; i < 4; i++) {
|
||||
if (!ref->hpel_base[plane][i])
|
||||
ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
|
||||
if (!ref->hpel_base[plane][i]) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
/* we need to be 16-byte aligned even for chroma */
|
||||
ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
|
||||
}
|
||||
@@ -1593,6 +1596,8 @@ static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, in
|
||||
s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
|
||||
}
|
||||
ref->interpolated[plane] = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1646,8 +1651,11 @@ static int dirac_decode_frame_internal(DiracContext *s)
|
||||
|
||||
select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
|
||||
|
||||
for (i = 0; i < s->num_refs; i++)
|
||||
interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
|
||||
for (i = 0; i < s->num_refs; i++) {
|
||||
int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
memset(s->mctmp, 0, 4*p->yoffset*p->stride);
|
||||
|
||||
|
@@ -75,17 +75,20 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define write16(p, value) \
|
||||
do { \
|
||||
if (s->big_endian) AV_WB16(p, value); \
|
||||
else AV_WL16(p, value); \
|
||||
} while(0)
|
||||
static av_always_inline void write16_internal(int big_endian, void *p, int value)
|
||||
{
|
||||
if (big_endian) AV_WB16(p, value);
|
||||
else AV_WL16(p, value);
|
||||
}
|
||||
|
||||
#define write32(p, value) \
|
||||
do { \
|
||||
if (s->big_endian) AV_WB32(p, value); \
|
||||
else AV_WL32(p, value); \
|
||||
} while(0)
|
||||
static av_always_inline void write32_internal(int big_endian, void *p, int value)
|
||||
{
|
||||
if (big_endian) AV_WB32(p, value);
|
||||
else AV_WL32(p, value);
|
||||
}
|
||||
|
||||
#define write16(p, value) write16_internal(s->big_endian, p, value)
|
||||
#define write32(p, value) write32_internal(s->big_endian, p, value)
|
||||
|
||||
static void encode_rgb48_10bit(AVCodecContext *avctx, const AVPicture *pic, uint8_t *dst)
|
||||
{
|
||||
|
@@ -753,7 +753,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
s->chroma_planes = desc->nb_components < 3 ? 0 : 1;
|
||||
s->colorspace = 0;
|
||||
s->transparency = desc->nb_components == 4;
|
||||
if (!avctx->bits_per_raw_sample)
|
||||
if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
|
||||
s->bits_per_raw_sample = 8;
|
||||
break;
|
||||
case AV_PIX_FMT_RGB32:
|
||||
|
@@ -705,7 +705,7 @@ static uint64_t calc_rice_params(RiceContext *rc,
|
||||
bits[pmin] = UINT32_MAX;
|
||||
for (i = pmax; ; ) {
|
||||
bits[i] = calc_optimal_rice_params(&tmp_rc, i, sums, n, pred_order, kmax, exact);
|
||||
if (bits[i] < bits[opt_porder]) {
|
||||
if (bits[i] < bits[opt_porder] || pmax == pmin) {
|
||||
opt_porder = i;
|
||||
*rc = tmp_rc;
|
||||
}
|
||||
|
@@ -125,6 +125,7 @@ static int add_sorted(H264Picture **sorted, H264Picture **src, int len, int limi
|
||||
int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
|
||||
{
|
||||
int i, len;
|
||||
int j;
|
||||
|
||||
if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
|
||||
H264Picture *sorted[32];
|
||||
@@ -188,6 +189,21 @@ int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
for (j = 0; j<1+(sl->slice_type_nos == AV_PICTURE_TYPE_B); j++) {
|
||||
for (i = 0; i < sl->ref_count[j]; i++) {
|
||||
if (h->default_ref_list[j][i].parent) {
|
||||
AVFrame *f = h->default_ref_list[j][i].parent->f;
|
||||
if (h->cur_pic_ptr->f->width != f->width ||
|
||||
h->cur_pic_ptr->f->height != f->height ||
|
||||
h->cur_pic_ptr->f->format != f->format) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "Discarding mismatching reference\n");
|
||||
memset(&h->default_ref_list[j][i], 0, sizeof(h->default_ref_list[j][i]));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -251,11 +251,11 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
|
||||
av_pix_fmt_get_chroma_sub_sample(pic->f->format,
|
||||
&h_chroma_shift, &v_chroma_shift);
|
||||
|
||||
for(i=0; i<FF_CEIL_RSHIFT(h->avctx->height, v_chroma_shift); i++) {
|
||||
for(i=0; i<FF_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
|
||||
memset(pic->f->data[1] + pic->f->linesize[1]*i,
|
||||
0x80, FF_CEIL_RSHIFT(h->avctx->width, h_chroma_shift));
|
||||
0x80, FF_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
|
||||
memset(pic->f->data[2] + pic->f->linesize[2]*i,
|
||||
0x80, FF_CEIL_RSHIFT(h->avctx->width, h_chroma_shift));
|
||||
0x80, FF_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1549,14 +1549,17 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
|
||||
* vectors. Given we are concealing a lost frame, this probably
|
||||
* is not noticeable by comparison, but it should be fixed. */
|
||||
if (h->short_ref_count) {
|
||||
if (prev) {
|
||||
if (prev &&
|
||||
h->short_ref[0]->f->width == prev->f->width &&
|
||||
h->short_ref[0]->f->height == prev->f->height &&
|
||||
h->short_ref[0]->f->format == prev->f->format) {
|
||||
av_image_copy(h->short_ref[0]->f->data,
|
||||
h->short_ref[0]->f->linesize,
|
||||
(const uint8_t **)prev->f->data,
|
||||
prev->f->linesize,
|
||||
h->avctx->pix_fmt,
|
||||
h->mb_width * 16,
|
||||
h->mb_height * 16);
|
||||
prev->f->format,
|
||||
prev->f->width,
|
||||
prev->f->height);
|
||||
h->short_ref[0]->poc = prev->poc + 2;
|
||||
}
|
||||
h->short_ref[0]->frame_num = h->prev_frame_num;
|
||||
|
@@ -456,7 +456,7 @@ static int hls_slice_header(HEVCContext *s)
|
||||
|
||||
slice_address_length = av_ceil_log2(s->sps->ctb_width *
|
||||
s->sps->ctb_height);
|
||||
sh->slice_segment_addr = get_bits(gb, slice_address_length);
|
||||
sh->slice_segment_addr = slice_address_length ? get_bits(gb, slice_address_length) : 0;
|
||||
if (sh->slice_segment_addr >= s->sps->ctb_width * s->sps->ctb_height) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Invalid slice segment address: %u.\n",
|
||||
|
@@ -200,7 +200,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, AVCodecContext *avctx
|
||||
|
||||
slice_address_length = av_ceil_log2_c(h->sps->ctb_width *
|
||||
h->sps->ctb_height);
|
||||
sh->slice_segment_addr = get_bits(gb, slice_address_length);
|
||||
sh->slice_segment_addr = slice_address_length ? get_bits(gb, slice_address_length) : 0;
|
||||
if (sh->slice_segment_addr >= h->sps->ctb_width * h->sps->ctb_height) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n",
|
||||
sh->slice_segment_addr);
|
||||
|
@@ -499,7 +499,8 @@ int ff_hevc_decode_nal_vps(HEVCContext *s)
|
||||
if (get_bits_left(gb) < 0) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Overread VPS by %d bits\n", -get_bits_left(gb));
|
||||
goto err;
|
||||
if (s->vps_list[vps_id])
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (s->vps_list[vps_id] &&
|
||||
|
@@ -37,6 +37,7 @@
|
||||
#include "huffyuv.h"
|
||||
#include "huffyuvdsp.h"
|
||||
#include "thread.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
|
||||
#define classic_shift_luma_table_size 42
|
||||
@@ -291,6 +292,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
HYuvContext *s = avctx->priv_data;
|
||||
int ret;
|
||||
|
||||
ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ff_huffyuvdsp_init(&s->hdsp);
|
||||
memset(s->vlc, 0, 4 * sizeof(VLC));
|
||||
|
||||
|
@@ -426,7 +426,7 @@ static void imc_decode_level_coefficients_raw(IMCContext *q, int *levlCoeffBuf,
|
||||
|
||||
pos = q->coef0_pos;
|
||||
flcoeffs1[pos] = 20000.0 / pow (2, levlCoeffBuf[0] * 0.18945); // 0.18945 = log2(10) * 0.05703125
|
||||
flcoeffs2[pos] = log2f(flcoeffs1[0]);
|
||||
flcoeffs2[pos] = log2f(flcoeffs1[pos]);
|
||||
tmp = flcoeffs1[pos];
|
||||
tmp2 = flcoeffs2[pos];
|
||||
|
||||
|
@@ -76,7 +76,7 @@
|
||||
* encodes them with just enough bits to reproduce the background noise.
|
||||
*
|
||||
* Discontinuous Transmission (DTX)
|
||||
* DTX is an addition to VAD/VBR operation, that allows to stop transmitting
|
||||
* DTX is an addition to VAD/VBR operation, that makes it possible to stop transmitting
|
||||
* completely when the background noise is stationary.
|
||||
* In file-based operation only 5 bits are used for such frames.
|
||||
*/
|
||||
|
@@ -191,8 +191,7 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
|
||||
x4->params.b_tff = frame->top_field_first;
|
||||
x264_encoder_reconfig(x4->enc, &x4->params);
|
||||
}
|
||||
if (x4->params.vui.i_sar_height != ctx->sample_aspect_ratio.den ||
|
||||
x4->params.vui.i_sar_width != ctx->sample_aspect_ratio.num) {
|
||||
if (x4->params.vui.i_sar_height*ctx->sample_aspect_ratio.num != ctx->sample_aspect_ratio.den * x4->params.vui.i_sar_width) {
|
||||
x4->params.vui.i_sar_height = ctx->sample_aspect_ratio.den;
|
||||
x4->params.vui.i_sar_width = ctx->sample_aspect_ratio.num;
|
||||
x264_encoder_reconfig(x4->enc, &x4->params);
|
||||
|
@@ -2192,11 +2192,13 @@ the_end:
|
||||
}
|
||||
} else if (s->upscale_h[p] == 2) {
|
||||
if (is16bit) {
|
||||
((uint16_t*)line)[w - 1] =
|
||||
((uint16_t*)line)[w - 2] = ((uint16_t*)line)[(w - 1) / 3];
|
||||
((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
|
||||
if (w > 1)
|
||||
((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
|
||||
} else {
|
||||
line[w - 1] =
|
||||
line[w - 2] = line[(w - 1) / 3];
|
||||
line[w - 1] = line[(w - 1) / 3];
|
||||
if (w > 1)
|
||||
line[w - 2] = line[w - 1];
|
||||
}
|
||||
for (index = w - 3; index > 0; index--) {
|
||||
line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
|
||||
|
@@ -1893,6 +1893,7 @@ static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
|
||||
s->mp3decctx[i]->adu_mode = 1;
|
||||
s->mp3decctx[i]->avctx = avctx;
|
||||
s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
|
||||
s->mp3decctx[i]->fdsp = s->mp3decctx[0]->fdsp;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -1227,6 +1227,82 @@ fail:
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
static void clear_context(MpegEncContext *s)
|
||||
{
|
||||
int i, j, k;
|
||||
|
||||
memset(&s->next_picture, 0, sizeof(s->next_picture));
|
||||
memset(&s->last_picture, 0, sizeof(s->last_picture));
|
||||
memset(&s->current_picture, 0, sizeof(s->current_picture));
|
||||
memset(&s->new_picture, 0, sizeof(s->new_picture));
|
||||
|
||||
memset(s->thread_context, 0, sizeof(s->thread_context));
|
||||
|
||||
s->me.map = NULL;
|
||||
s->me.score_map = NULL;
|
||||
s->dct_error_sum = NULL;
|
||||
s->block = NULL;
|
||||
s->blocks = NULL;
|
||||
memset(s->pblocks, 0, sizeof(s->pblocks));
|
||||
s->ac_val_base = NULL;
|
||||
s->ac_val[0] =
|
||||
s->ac_val[1] =
|
||||
s->ac_val[2] =NULL;
|
||||
s->sc.edge_emu_buffer = NULL;
|
||||
s->me.scratchpad = NULL;
|
||||
s->me.temp =
|
||||
s->sc.rd_scratchpad =
|
||||
s->sc.b_scratchpad =
|
||||
s->sc.obmc_scratchpad = NULL;
|
||||
|
||||
s->parse_context.buffer = NULL;
|
||||
s->parse_context.buffer_size = 0;
|
||||
s->bitstream_buffer = NULL;
|
||||
s->allocated_bitstream_buffer_size = 0;
|
||||
s->picture = NULL;
|
||||
s->mb_type = NULL;
|
||||
s->p_mv_table_base = NULL;
|
||||
s->b_forw_mv_table_base = NULL;
|
||||
s->b_back_mv_table_base = NULL;
|
||||
s->b_bidir_forw_mv_table_base = NULL;
|
||||
s->b_bidir_back_mv_table_base = NULL;
|
||||
s->b_direct_mv_table_base = NULL;
|
||||
s->p_mv_table = NULL;
|
||||
s->b_forw_mv_table = NULL;
|
||||
s->b_back_mv_table = NULL;
|
||||
s->b_bidir_forw_mv_table = NULL;
|
||||
s->b_bidir_back_mv_table = NULL;
|
||||
s->b_direct_mv_table = NULL;
|
||||
for (i = 0; i < 2; i++) {
|
||||
for (j = 0; j < 2; j++) {
|
||||
for (k = 0; k < 2; k++) {
|
||||
s->b_field_mv_table_base[i][j][k] = NULL;
|
||||
s->b_field_mv_table[i][j][k] = NULL;
|
||||
}
|
||||
s->b_field_select_table[i][j] = NULL;
|
||||
s->p_field_mv_table_base[i][j] = NULL;
|
||||
s->p_field_mv_table[i][j] = NULL;
|
||||
}
|
||||
s->p_field_select_table[i] = NULL;
|
||||
}
|
||||
|
||||
s->dc_val_base = NULL;
|
||||
s->coded_block_base = NULL;
|
||||
s->mbintra_table = NULL;
|
||||
s->cbp_table = NULL;
|
||||
s->pred_dir_table = NULL;
|
||||
|
||||
s->mbskip_table = NULL;
|
||||
|
||||
s->er.error_status_table = NULL;
|
||||
s->er.er_temp_buffer = NULL;
|
||||
s->mb_index2xy = NULL;
|
||||
s->lambda_table = NULL;
|
||||
|
||||
s->cplx_tab = NULL;
|
||||
s->bits_tab = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* init common structure for both encoder and decoder.
|
||||
* this assumes that some variables like width/height are already set
|
||||
@@ -1238,6 +1314,8 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
|
||||
s->avctx->active_thread_type & FF_THREAD_SLICE) ?
|
||||
s->avctx->thread_count : 1;
|
||||
|
||||
clear_context(s);
|
||||
|
||||
if (s->encoding && s->avctx->slices)
|
||||
nb_slices = s->avctx->slices;
|
||||
|
||||
@@ -1282,10 +1360,6 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
|
||||
if (!s->picture[i].f)
|
||||
goto fail;
|
||||
}
|
||||
memset(&s->next_picture, 0, sizeof(s->next_picture));
|
||||
memset(&s->last_picture, 0, sizeof(s->last_picture));
|
||||
memset(&s->current_picture, 0, sizeof(s->current_picture));
|
||||
memset(&s->new_picture, 0, sizeof(s->new_picture));
|
||||
s->next_picture.f = av_frame_alloc();
|
||||
if (!s->next_picture.f)
|
||||
goto fail;
|
||||
|
@@ -542,17 +542,17 @@ static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->width = bytestream2_get_be32(&s->gb);
|
||||
s->height = bytestream2_get_be32(&s->gb);
|
||||
if (av_image_check_size(s->width, s->height, 0, avctx)) {
|
||||
s->width = s->height = 0;
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
|
||||
if (s->state & PNG_IHDR) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->cur_w == 0 && s->cur_h == 0) {
|
||||
// Only set cur_w/h if update_thread_context() has not set it
|
||||
s->cur_w = s->width;
|
||||
s->cur_h = s->height;
|
||||
|
||||
s->width = s->cur_w = bytestream2_get_be32(&s->gb);
|
||||
s->height = s->cur_h = bytestream2_get_be32(&s->gb);
|
||||
if (av_image_check_size(s->width, s->height, 0, avctx)) {
|
||||
s->cur_w = s->cur_h = s->width = s->height = 0;
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
s->bit_depth = bytestream2_get_byte(&s->gb);
|
||||
s->color_type = bytestream2_get_byte(&s->gb);
|
||||
@@ -815,10 +815,16 @@ static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
|
||||
uint32_t length)
|
||||
{
|
||||
uint32_t sequence_number;
|
||||
int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;
|
||||
|
||||
if (length != 26)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (!(s->state & PNG_IHDR)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->last_w = s->cur_w;
|
||||
s->last_h = s->cur_h;
|
||||
s->last_x_offset = s->x_offset;
|
||||
@@ -826,23 +832,23 @@ static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
|
||||
s->last_dispose_op = s->dispose_op;
|
||||
|
||||
sequence_number = bytestream2_get_be32(&s->gb);
|
||||
s->cur_w = bytestream2_get_be32(&s->gb);
|
||||
s->cur_h = bytestream2_get_be32(&s->gb);
|
||||
s->x_offset = bytestream2_get_be32(&s->gb);
|
||||
s->y_offset = bytestream2_get_be32(&s->gb);
|
||||
cur_w = bytestream2_get_be32(&s->gb);
|
||||
cur_h = bytestream2_get_be32(&s->gb);
|
||||
x_offset = bytestream2_get_be32(&s->gb);
|
||||
y_offset = bytestream2_get_be32(&s->gb);
|
||||
bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
|
||||
s->dispose_op = bytestream2_get_byte(&s->gb);
|
||||
s->blend_op = bytestream2_get_byte(&s->gb);
|
||||
dispose_op = bytestream2_get_byte(&s->gb);
|
||||
blend_op = bytestream2_get_byte(&s->gb);
|
||||
bytestream2_skip(&s->gb, 4); /* crc */
|
||||
|
||||
if (sequence_number == 0 &&
|
||||
(s->cur_w != s->width ||
|
||||
s->cur_h != s->height ||
|
||||
s->x_offset != 0 ||
|
||||
s->y_offset != 0) ||
|
||||
s->cur_w <= 0 || s->cur_h <= 0 ||
|
||||
s->x_offset < 0 || s->y_offset < 0 ||
|
||||
s->cur_w > s->width - s->x_offset|| s->cur_h > s->height - s->y_offset)
|
||||
(cur_w != s->width ||
|
||||
cur_h != s->height ||
|
||||
x_offset != 0 ||
|
||||
y_offset != 0) ||
|
||||
cur_w <= 0 || cur_h <= 0 ||
|
||||
x_offset < 0 || y_offset < 0 ||
|
||||
cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (sequence_number == 0 && s->dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
|
||||
@@ -863,6 +869,13 @@ static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
|
||||
s->dispose_op = APNG_BLEND_OP_SOURCE;
|
||||
}
|
||||
|
||||
s->cur_w = cur_w;
|
||||
s->cur_h = cur_h;
|
||||
s->x_offset = x_offset;
|
||||
s->y_offset = y_offset;
|
||||
s->dispose_op = dispose_op;
|
||||
s->blend_op = blend_op;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1259,12 +1272,24 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
|
||||
(ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
|
||||
return ret;
|
||||
if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
|
||||
pdst->width = psrc->width;
|
||||
pdst->height = psrc->height;
|
||||
pdst->bit_depth = psrc->bit_depth;
|
||||
pdst->color_type = psrc->color_type;
|
||||
pdst->compression_type = psrc->compression_type;
|
||||
pdst->interlace_type = psrc->interlace_type;
|
||||
pdst->filter_type = psrc->filter_type;
|
||||
pdst->cur_w = psrc->cur_w;
|
||||
pdst->cur_h = psrc->cur_h;
|
||||
pdst->x_offset = psrc->x_offset;
|
||||
pdst->y_offset = psrc->y_offset;
|
||||
|
||||
pdst->dispose_op = psrc->dispose_op;
|
||||
|
||||
memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));
|
||||
|
||||
pdst->state |= psrc->state & (PNG_IHDR | PNG_PLTE);
|
||||
|
||||
ff_thread_release_buffer(dst, &pdst->last_picture);
|
||||
if (psrc->last_picture.f->data[0])
|
||||
return ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture);
|
||||
|
@@ -504,7 +504,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
|
||||
}
|
||||
|
||||
/*
|
||||
* 16x8 works with 16 elements; it allows to avoid replicating loads, and
|
||||
* 16x8 works with 16 elements; it can avoid replicating loads, and
|
||||
* gives the compiler more room for scheduling. It's only used from
|
||||
* inside hadamard8_diff16_altivec.
|
||||
*
|
||||
|
@@ -454,6 +454,9 @@ int ff_thread_decode_frame(AVCodecContext *avctx,
|
||||
*got_picture_ptr = p->got_frame;
|
||||
picture->pkt_dts = p->avpkt.dts;
|
||||
|
||||
if (p->result < 0)
|
||||
err = p->result;
|
||||
|
||||
/*
|
||||
* A later call with avkpt->size == 0 may loop over all threads,
|
||||
* including this one, searching for a frame to return before being
|
||||
@@ -471,6 +474,14 @@ int ff_thread_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
fctx->next_finished = finished;
|
||||
|
||||
/*
|
||||
* When no frame was found while flushing, but an error occured in
|
||||
* any thread, return it instead of 0.
|
||||
* Otherwise the error can get lost.
|
||||
*/
|
||||
if (!avpkt->size && !*got_picture_ptr)
|
||||
return err;
|
||||
|
||||
/* return the size of the consumed packet if no error occurred */
|
||||
return (p->result >= 0) ? avpkt->size : p->result;
|
||||
}
|
||||
@@ -572,7 +583,7 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
|
||||
pthread_join(p->thread, NULL);
|
||||
p->thread_init=0;
|
||||
|
||||
if (codec->close)
|
||||
if (codec->close && p->avctx)
|
||||
codec->close(p->avctx);
|
||||
|
||||
release_delayed_buffers(p);
|
||||
@@ -590,12 +601,13 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
|
||||
av_packet_unref(&p->avpkt);
|
||||
av_freep(&p->released_buffers);
|
||||
|
||||
if (i) {
|
||||
if (i && p->avctx) {
|
||||
av_freep(&p->avctx->priv_data);
|
||||
av_freep(&p->avctx->slice_offset);
|
||||
}
|
||||
|
||||
av_freep(&p->avctx->internal);
|
||||
if (p->avctx)
|
||||
av_freep(&p->avctx->internal);
|
||||
av_freep(&p->avctx);
|
||||
}
|
||||
|
||||
@@ -678,6 +690,7 @@ int ff_frame_thread_init(AVCodecContext *avctx)
|
||||
|
||||
copy->internal = av_malloc(sizeof(AVCodecInternal));
|
||||
if (!copy->internal) {
|
||||
copy->priv_data = NULL;
|
||||
err = AVERROR(ENOMEM);
|
||||
goto error;
|
||||
}
|
||||
|
@@ -54,7 +54,7 @@ static int raw_encode(AVCodecContext *avctx, AVPacket *pkt,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = ff_alloc_packet2(avctx, pkt, ret)) < 0)
|
||||
if ((ret = ff_alloc_packet(pkt, ret)) < 0)
|
||||
return ret;
|
||||
if ((ret = avpicture_layout((const AVPicture *)frame, avctx->pix_fmt, avctx->width,
|
||||
avctx->height, pkt->data, pkt->size)) < 0)
|
||||
|
@@ -1534,7 +1534,14 @@ int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
|
||||
|
||||
if (avctx->internal->is_copy) {
|
||||
r->tmp_b_block_base = NULL;
|
||||
r->cbp_chroma = NULL;
|
||||
r->cbp_luma = NULL;
|
||||
r->deblock_coefs = NULL;
|
||||
r->intra_types_hist = NULL;
|
||||
r->mb_type = NULL;
|
||||
|
||||
ff_mpv_idct_init(&r->s);
|
||||
|
||||
if ((err = ff_mpv_common_init(&r->s)) < 0)
|
||||
return err;
|
||||
if ((err = rv34_decoder_alloc(r)) < 0) {
|
||||
|
@@ -85,10 +85,6 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
|
||||
case 8:
|
||||
avctx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK | AV_CH_LAYOUT_STEREO_DOWNMIX;
|
||||
}
|
||||
avctx->bit_rate = 48000 * avctx->channels * (avctx->bits_per_raw_sample + 4) +
|
||||
32 * (48000 / (buf_size * 8 /
|
||||
(avctx->channels *
|
||||
(avctx->bits_per_raw_sample + 4))));
|
||||
|
||||
return frame_size;
|
||||
}
|
||||
@@ -117,6 +113,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
|
||||
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
avctx->bit_rate = 48000 * avctx->channels * (avctx->bits_per_raw_sample + 4) +
|
||||
32 * 48000 / frame->nb_samples;
|
||||
buf_size = (frame->nb_samples * avctx->channels / 2) * block_size;
|
||||
|
||||
if (avctx->bits_per_raw_sample == 24) {
|
||||
|
@@ -457,6 +457,7 @@ static void destroy_buffers(SANMVideoContext *ctx)
|
||||
ctx->frm0_size =
|
||||
ctx->frm1_size =
|
||||
ctx->frm2_size = 0;
|
||||
init_sizes(ctx, 0, 0);
|
||||
}
|
||||
|
||||
static av_cold int init_buffers(SANMVideoContext *ctx)
|
||||
|
@@ -137,6 +137,7 @@ typedef struct AACSBRContext {
|
||||
struct SpectralBandReplication {
|
||||
int sample_rate;
|
||||
int start;
|
||||
int id_aac;
|
||||
int reset;
|
||||
SpectrumParameters spectrum_params;
|
||||
int bs_amp_res_header;
|
||||
|
@@ -304,6 +304,8 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
||||
BlockNode *lb= lt+b_stride;
|
||||
BlockNode *rb= lb+1;
|
||||
uint8_t *block[4];
|
||||
// When src_stride is large enough, it is possible to interleave the blocks.
|
||||
// Otherwise the blocks are written sequentially in the tmp buffer.
|
||||
int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
|
||||
uint8_t *tmp = s->scratchbuf;
|
||||
uint8_t *ptmp;
|
||||
@@ -347,8 +349,6 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
||||
|
||||
if(b_w<=0 || b_h<=0) return;
|
||||
|
||||
av_assert2(src_stride > 2*MB_SIZE + 5);
|
||||
|
||||
if(!sliced && offset_dst)
|
||||
dst += src_x + src_y*dst_stride;
|
||||
dst8+= src_x + src_y*src_stride;
|
||||
|
@@ -801,6 +801,12 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
|
||||
if (s->mcdparams[i].present) {
|
||||
s->mcdparams[i].index = get_bits(gb, 2);
|
||||
s->mcdparams[i].chan2 = get_bits(gb, 4);
|
||||
if (s->mcdparams[i].chan2 >= avctx->channels) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"invalid channel 2 (%d) for %d channel(s)\n",
|
||||
s->mcdparams[i].chan2, avctx->channels);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->mcdparams[i].index == 1) {
|
||||
if ((nbit == s->mcdparams[i].chan2) ||
|
||||
(ch_mask & 1 << s->mcdparams[i].chan2))
|
||||
|
@@ -428,10 +428,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
|
||||
*width = FFALIGN(*width, w_align);
|
||||
*height = FFALIGN(*height, h_align);
|
||||
if (s->codec_id == AV_CODEC_ID_H264 || s->lowres)
|
||||
if (s->codec_id == AV_CODEC_ID_H264 || s->lowres) {
|
||||
// some of the optimized chroma MC reads one line too much
|
||||
// which is also done in mpeg decoders with lowres > 0
|
||||
*height += 2;
|
||||
*width = FFMAX(*width, 32);
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
linesize_align[i] = STRIDE_ALIGN;
|
||||
|
@@ -162,7 +162,8 @@ static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
|
||||
unsigned int i, n = 0;
|
||||
for (i = 0; i < ref_count; i++)
|
||||
if (ref_list[i].reference)
|
||||
fill_vaapi_pic(&RefPicList[n++], ref_list[i].parent, 0);
|
||||
fill_vaapi_pic(&RefPicList[n++], ref_list[i].parent,
|
||||
ref_list[i].reference);
|
||||
|
||||
for (; n < 32; n++)
|
||||
init_vaapi_pic(&RefPicList[n]);
|
||||
|
@@ -469,7 +469,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
|
||||
count = avctx->extradata_size*8 - get_bits_count(&gb);
|
||||
if (count > 0) {
|
||||
av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
|
||||
count, get_bits(&gb, count));
|
||||
count, get_bits_long(&gb, FFMIN(count, 32)));
|
||||
} else if (count < 0) {
|
||||
av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
|
||||
}
|
||||
|
@@ -639,6 +639,11 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
|
||||
int width = s->avctx->width;
|
||||
int height = s->avctx->height;
|
||||
|
||||
if (buf_size < 3) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->keyframe = !(buf[0] & 1);
|
||||
s->profile = (buf[0]>>1) & 7;
|
||||
s->invisible = !(buf[0] & 0x10);
|
||||
@@ -2689,6 +2694,9 @@ av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
|
||||
VP8Context *s = avctx->priv_data;
|
||||
int i;
|
||||
|
||||
if (!s)
|
||||
return 0;
|
||||
|
||||
vp8_decode_flush_impl(avctx, 1);
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
|
||||
av_frame_free(&s->frames[i].tf.f);
|
||||
|
@@ -425,7 +425,7 @@ static av_always_inline int inv_recenter_nonneg(int v, int m)
|
||||
// differential forward probability updates
|
||||
static int update_prob(VP56RangeCoder *c, int p)
|
||||
{
|
||||
static const int inv_map_table[254] = {
|
||||
static const int inv_map_table[255] = {
|
||||
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
|
||||
189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
|
||||
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
|
||||
@@ -444,7 +444,7 @@ static int update_prob(VP56RangeCoder *c, int p)
|
||||
207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
|
||||
222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
|
||||
237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
|
||||
252, 253,
|
||||
252, 253, 253,
|
||||
};
|
||||
int d;
|
||||
|
||||
@@ -474,6 +474,7 @@ static int update_prob(VP56RangeCoder *c, int p)
|
||||
if (d >= 65)
|
||||
d = (d << 1) - 65 + vp8_rac_get(c);
|
||||
d += 64;
|
||||
av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
|
||||
}
|
||||
|
||||
return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
|
||||
@@ -3286,7 +3287,7 @@ static void decode_b(AVCodecContext *ctx, int row, int col,
|
||||
}
|
||||
|
||||
// emulated overhangs if the stride of the target buffer can't hold. This
|
||||
// allows to support emu-edge and so on even if we have large block
|
||||
// makes it possible to support emu-edge and so on even if we have large block
|
||||
// overhangs
|
||||
emu[0] = (col + w4) * 8 > f->linesize[0] ||
|
||||
(row + h4) > s->rows;
|
||||
@@ -3988,7 +3989,8 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
|
||||
int size = pkt->size;
|
||||
VP9Context *s = ctx->priv_data;
|
||||
int res, tile_row, tile_col, i, ref, row, col;
|
||||
int retain_segmap_ref = s->segmentation.enabled && !s->segmentation.update_map;
|
||||
int retain_segmap_ref = s->segmentation.enabled && !s->segmentation.update_map
|
||||
&& s->frames[REF_FRAME_SEGMAP].segmentation_map;
|
||||
ptrdiff_t yoff, uvoff, ls_y, ls_uv;
|
||||
AVFrame *f;
|
||||
int bytesperpixel;
|
||||
|
@@ -155,7 +155,7 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
|
||||
if (t >= 2) {
|
||||
if (get_bits_left(gb) < t - 1)
|
||||
goto error;
|
||||
t = get_bits(gb, t - 1) | (1 << (t - 1));
|
||||
t = get_bits_long(gb, t - 1) | (1 << (t - 1));
|
||||
} else {
|
||||
if (get_bits_left(gb) < 0)
|
||||
goto error;
|
||||
@@ -186,7 +186,7 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
|
||||
} else {
|
||||
if (get_bits_left(gb) < t2 - 1)
|
||||
goto error;
|
||||
t += get_bits(gb, t2 - 1) | (1 << (t2 - 1));
|
||||
t += get_bits_long(gb, t2 - 1) | (1 << (t2 - 1));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -271,7 +271,7 @@ static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc,
|
||||
|
||||
if (s->got_extra_bits &&
|
||||
get_bits_left(&s->gb_extra_bits) >= s->extra_bits) {
|
||||
S |= get_bits(&s->gb_extra_bits, s->extra_bits);
|
||||
S |= get_bits_long(&s->gb_extra_bits, s->extra_bits);
|
||||
*crc = *crc * 9 + (S & 0xffff) * 3 + ((unsigned)S >> 16);
|
||||
}
|
||||
}
|
||||
@@ -835,7 +835,11 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
|
||||
continue;
|
||||
}
|
||||
bytestream2_get_buffer(&gb, val, 4);
|
||||
if (val[0]) {
|
||||
if (val[0] > 32) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid INT32INFO, extra_bits = %d (> 32)\n", val[0]);
|
||||
continue;
|
||||
} else if (val[0]) {
|
||||
s->extra_bits = val[0];
|
||||
} else if (val[1]) {
|
||||
s->shift = val[1];
|
||||
|
@@ -1387,7 +1387,7 @@ static int webp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
}
|
||||
|
||||
av_dict_free(&s->exif_metadata);
|
||||
while (bytestream2_get_bytes_left(&gb) > 0) {
|
||||
while (bytestream2_get_bytes_left(&gb) > 8) {
|
||||
char chunk_str[5] = { 0 };
|
||||
|
||||
chunk_type = bytestream2_get_le32(&gb);
|
||||
|
@@ -488,7 +488,7 @@ static int decode_cdlms(WmallDecodeCtx *s)
|
||||
if ((1 << cbits) < s->cdlms[c][i].scaling + 1)
|
||||
cbits++;
|
||||
|
||||
s->cdlms[c][i].bitsend = get_bits(&s->gb, cbits) + 2;
|
||||
s->cdlms[c][i].bitsend = (cbits ? get_bits(&s->gb, cbits) : 0) + 2;
|
||||
shift_l = 32 - s->cdlms[c][i].bitsend;
|
||||
shift_r = 32 - s->cdlms[c][i].scaling - 2;
|
||||
for (j = 0; j < s->cdlms[c][i].coefsend; j++)
|
||||
@@ -1005,6 +1005,7 @@ static int decode_frame(WmallDecodeCtx *s)
|
||||
if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0) {
|
||||
/* return an error if no frame could be decoded at all */
|
||||
s->packet_loss = 1;
|
||||
s->frame->nb_samples = 0;
|
||||
return ret;
|
||||
}
|
||||
for (i = 0; i < s->num_channels; i++) {
|
||||
|
@@ -1982,7 +1982,14 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
|
||||
*got_frame_ptr) {
|
||||
cnt += s->spillover_nbits;
|
||||
s->skip_bits_next = cnt & 7;
|
||||
return cnt >> 3;
|
||||
res = cnt >> 3;
|
||||
if (res > avpkt->size) {
|
||||
av_log(ctx, AV_LOG_ERROR,
|
||||
"Trying to skip %d bytes in packet of size %d\n",
|
||||
res, avpkt->size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
return res;
|
||||
} else
|
||||
skip_bits_long (gb, s->spillover_nbits - cnt +
|
||||
get_bits_count(gb)); // resync
|
||||
@@ -2001,7 +2008,14 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
|
||||
} else if (*got_frame_ptr) {
|
||||
int cnt = get_bits_count(gb);
|
||||
s->skip_bits_next = cnt & 7;
|
||||
return cnt >> 3;
|
||||
res = cnt >> 3;
|
||||
if (res > avpkt->size) {
|
||||
av_log(ctx, AV_LOG_ERROR,
|
||||
"Trying to skip %d bytes in packet of size %d\n",
|
||||
res, avpkt->size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
return res;
|
||||
} else if ((s->sframe_cache_size = pos) > 0) {
|
||||
/* rewind bit reader to start of last (incomplete) superframe... */
|
||||
init_get_bits(gb, avpkt->data, size << 3);
|
||||
|
@@ -80,7 +80,7 @@
|
||||
"movq %%mm"#R1", "#OFF"(%1) \n\t" \
|
||||
"add %2, %0 \n\t"
|
||||
|
||||
/** Sacrifying mm6 allows to pipeline loads from src */
|
||||
/** Sacrificing mm6 makes it possible to pipeline loads from src */
|
||||
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
|
||||
const uint8_t *src, x86_reg stride,
|
||||
int rnd, int64_t shift)
|
||||
|
@@ -27,7 +27,7 @@
|
||||
* @author Benoit Fouet ( benoit fouet free fr )
|
||||
* @author Nicolas George ( nicolas george normalesup org )
|
||||
*
|
||||
* This avdevice decoder allows to capture audio from an ALSA (Advanced
|
||||
* This avdevice decoder can capture audio from an ALSA (Advanced
|
||||
* Linux Sound Architecture) device.
|
||||
*
|
||||
* The filename parameter is the name of an ALSA PCM device capable of
|
||||
|
@@ -26,7 +26,7 @@
|
||||
* @author Luca Abeni ( lucabe72 email it )
|
||||
* @author Benoit Fouet ( benoit fouet free fr )
|
||||
*
|
||||
* This avdevice encoder allows to play audio to an ALSA (Advanced Linux
|
||||
* This avdevice encoder can play audio to an ALSA (Advanced Linux
|
||||
* Sound Architecture) device.
|
||||
*
|
||||
* The filename parameter is the name of an ALSA PCM device capable of
|
||||
|
@@ -324,7 +324,7 @@ int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
|
||||
* Following API allows user to probe device capabilities (supported codecs,
|
||||
* pixel formats, sample formats, resolutions, channel counts, etc).
|
||||
* It is build on top op AVOption API.
|
||||
* Queried capabilities allows to set up converters of video or audio
|
||||
* Queried capabilities make it possible to set up converters of video or audio
|
||||
* parameters that fit to the device.
|
||||
*
|
||||
* List of capabilities that can be queried:
|
||||
|
@@ -87,15 +87,24 @@ static int query_formats(AVFilterContext *ctx)
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    AVFilterFormats *out_formats;
    AVFilterFormats *in_samplerates = ff_all_samplerates();
    AVFilterFormats *out_samplerates;
    AVFilterChannelLayouts *in_layouts = ff_all_channel_counts();
    AVFilterChannelLayouts *out_layouts;
    AVFilterFormats *in_formats, *out_formats;
    AVFilterFormats *in_samplerates, *out_samplerates;
    AVFilterChannelLayouts *in_layouts, *out_layouts;

    in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    if (!in_formats)
        return AVERROR(ENOMEM);
    ff_formats_ref (in_formats, &inlink->out_formats);

    in_samplerates = ff_all_samplerates();
    if (!in_samplerates)
        return AVERROR(ENOMEM);
    ff_formats_ref (in_samplerates, &inlink->out_samplerates);

    in_layouts = ff_all_channel_counts();
    if (!in_layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts);

    if(out_rate > 0) {

@@ -151,7 +151,7 @@ static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr,
    int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
    int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
    int pixstep = trans->pixsteps[plane];
    int inh = in->height >> vsub;
    int inh = FF_CEIL_RSHIFT(in->height, vsub);
    int outw = FF_CEIL_RSHIFT(out->width, hsub);
    int outh = FF_CEIL_RSHIFT(out->height, vsub);
    int start = (outh * jobnr ) / nb_jobs;
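The point of the FF_CEIL_RSHIFT change above is that chroma planes of subsampled formats can have odd dimensions: a plain right shift rounds down and silently drops the last chroma row or column, while a ceiling shift keeps it. A standalone sketch (the macro below is an equivalent reimplementation for illustration, not the libavutil header):

    #include <stdio.h>

    /* ceiling right shift: rounds up instead of truncating */
    #define CEIL_RSHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))

    int main(void)
    {
        int height = 145; /* hypothetical odd frame height */
        int vsub   = 1;   /* 4:2:0 chroma: one bit of vertical subsampling */

        printf("floor: %d rows\n", height >> vsub);            /* 72: last row lost */
        printf("ceil:  %d rows\n", CEIL_RSHIFT(height, vsub)); /* 73: full plane */
        return 0;
    }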
@@ -67,7 +67,7 @@
 * with an AVFMT_NOFILE format).
 *
 * @section lavf_options Passing options to (de)muxers
 * Lavf allows to configure muxers and demuxers using the @ref avoptions
 * It is possible to configure lavf muxers and demuxers using the @ref avoptions
 * mechanism. Generic (format-independent) libavformat options are provided by
 * AVFormatContext, they can be examined from a user program by calling
 * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass
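As a rough illustration of the mechanism described in that comment (the private option name below is made up, and entries that no AVOption consumes stay in the dictionary so the caller can warn about them):

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>
    #include <libavutil/log.h>

    static int open_with_options(const char *filename, AVFormatContext **fmt_ctx)
    {
        AVDictionary *opts = NULL;
        AVDictionaryEntry *e = NULL;
        int ret;

        /* one generic libavformat option, one hypothetical demuxer-private one */
        av_dict_set(&opts, "fflags", "ignidx", 0);
        av_dict_set(&opts, "some_private_option", "1", 0);

        ret = avformat_open_input(fmt_ctx, filename, NULL, &opts);

        /* whatever is still in the dictionary was not recognized as an option */
        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(NULL, AV_LOG_WARNING, "Option %s not used\n", e->key);

        av_dict_free(&opts);
        return ret;
    }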
@@ -234,7 +234,7 @@
 * @defgroup lavf_io I/O Read/Write
 * @{
 * @section lavf_io_dirlist Directory listing
 * The directory listing API allows to list files on remote servers.
 * The directory listing API makes it possible to list files on remote servers.
 *
 * Some of possible use cases:
 * - an "open file" dialog to choose files from a remote location,
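A minimal sketch of how a caller might drive the directory listing API (error handling trimmed, URL purely illustrative):

    #include <inttypes.h>
    #include <libavformat/avio.h>
    #include <libavutil/log.h>

    static int list_dir(const char *url)
    {
        AVIODirContext *dir = NULL;
        AVIODirEntry *entry = NULL;
        int ret;

        if ((ret = avio_open_dir(&dir, url, NULL)) < 0)
            return ret;

        /* a NULL entry with ret >= 0 signals the end of the listing */
        while ((ret = avio_read_dir(dir, &entry)) >= 0 && entry) {
            av_log(NULL, AV_LOG_INFO, "%s (%"PRId64" bytes)\n",
                   entry->name, entry->size);
            avio_free_directory_entry(&entry);
        }

        avio_close_dir(&dir);
        return ret < 0 ? ret : 0;
    }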
@@ -2361,7 +2361,7 @@ int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
/**
 * Write a uncoded frame to an output media file.
 *
 * If the muxer supports it, this function allows to write an AVFrame
 * If the muxer supports it, this function makes it possible to write an AVFrame
 * structure directly, without encoding it into a packet.
 * It is mostly useful for devices and similar special muxers that use raw
 * video or PCM data and will not serialize it into a byte stream.
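A hedged sketch of the call pattern the doxygen above describes, assuming an output context that is already opened and has had its header written; av_write_uncoded_frame_query() is declared in the same header and reports whether the muxer accepts raw AVFrames on a given stream:

    #include <libavformat/avformat.h>
    #include <libavutil/frame.h>

    /* push one raw frame to stream 0 of an already configured muxer, if supported */
    static int push_uncoded(AVFormatContext *oc, AVFrame *frame)
    {
        int ret = av_write_uncoded_frame_query(oc, 0);
        if (ret < 0)
            return ret; /* muxer cannot take uncoded AVFrames on this stream */
        return av_interleaved_write_uncoded_frame(oc, 0, frame);
    }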
@@ -263,7 +263,7 @@ int ffurl_alloc(URLContext **puc, const char *filename, int flags,

    *puc = NULL;
    if (av_strstart(filename, "https:", NULL))
        av_log(NULL, AV_LOG_WARNING, "https protocol not found, recompile with "
        av_log(NULL, AV_LOG_WARNING, "https protocol not found, recompile FFmpeg with "
               "openssl, gnutls,\n"
               "or securetransport enabled.\n");
    return AVERROR_PROTOCOL_NOT_FOUND;

@@ -79,7 +79,7 @@ typedef struct AVIODirEntry {
    char *name; /**< Filename */
    int type; /**< Type of the entry */
    int utf8; /**< Set to 1 when name is encoded with UTF-8, 0 otherwise.
                   Name can be encoded with UTF-8 eventhough 0 is set. */
                   Name can be encoded with UTF-8 even though 0 is set. */
    int64_t size; /**< File size in bytes, -1 if unknown. */
    int64_t modification_timestamp; /**< Time of last modification in microseconds since unix
                                         epoch, -1 if unknown. */

@@ -812,6 +812,7 @@ int ffio_ensure_seekback(AVIOContext *s, int64_t buf_size)
    int max_buffer_size = s->max_packet_size ?
                          s->max_packet_size : IO_BUFFER_SIZE;
    int filled = s->buf_end - s->buffer;
    ptrdiff_t checksum_ptr_offset = s->checksum_ptr ? s->checksum_ptr - s->buffer : -1;

    buf_size += s->buf_ptr - s->buffer + max_buffer_size;

@@ -829,6 +830,8 @@ int ffio_ensure_seekback(AVIOContext *s, int64_t buf_size)
    s->buf_end = buffer + (s->buf_end - s->buffer);
    s->buffer = buffer;
    s->buffer_size = buf_size;
    if (checksum_ptr_offset >= 0)
        s->checksum_ptr = s->buffer + checksum_ptr_offset;
    return 0;
}
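The checksum_ptr handling added above is an instance of a general pattern: a pointer into a buffer has to be converted to an offset before the buffer is reallocated, then rebuilt from the new base address. In isolation, with plain C and no FFmpeg types:

    #include <stddef.h>
    #include <stdlib.h>

    /* grow buf to new_size while keeping *cursor pointing at the same element */
    static char *grow_keeping_cursor(char *buf, size_t new_size, char **cursor)
    {
        ptrdiff_t off = *cursor ? *cursor - buf : -1; /* save a position, not a pointer */
        char *nbuf = realloc(buf, new_size);
        if (!nbuf)
            return NULL;              /* old buffer is still valid on failure */
        if (off >= 0)
            *cursor = nbuf + off;     /* rebuild the pointer from the new base */
        return nbuf;
    }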
@@ -562,7 +562,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
    }

    if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
        /* check if extradata looks like mp4 formated */
        /* check if extradata looks like mp4 formatted */
        if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1)
            if ((ret = ff_avc_parse_nal_units_buf(pkt->data, &data, &size)) < 0)
                return ret;
@@ -449,7 +449,7 @@ static int parse_rps(GetBitContext *gb, unsigned int rps_idx,
     *
     * NumDeltaPocs[RefRpsIdx]: num_delta_pocs[rps_idx - 1]
     */
    for (i = 0; i < num_delta_pocs[rps_idx - 1]; i++) {
    for (i = 0; i <= num_delta_pocs[rps_idx - 1]; i++) {
        uint8_t use_delta_flag = 0;
        uint8_t used_by_curr_pic_flag = get_bits1(gb);
        if (!used_by_curr_pic_flag)
@@ -1668,6 +1668,14 @@ static int matroska_parse_tracks(AVFormatContext *s)
        if (!track->codec_id)
            continue;

        if (track->audio.samplerate < 0 || track->audio.samplerate > INT_MAX ||
            isnan(track->audio.samplerate)) {
            av_log(matroska->ctx, AV_LOG_WARNING,
                   "Invalid sample rate %f, defaulting to 8000 instead.\n",
                   track->audio.samplerate);
            track->audio.samplerate = 8000;
        }

        if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
            if (!track->default_duration && track->video.frame_rate > 0)
                track->default_duration = 1000000000 / track->video.frame_rate;

@@ -1869,6 +1877,18 @@ static int matroska_parse_tracks(AVFormatContext *s)
                          NULL, NULL, NULL, NULL);
            avio_write(&b, "TTA1", 4);
            avio_wl16(&b, 1);
            if (track->audio.channels > UINT16_MAX ||
                track->audio.bitdepth > UINT16_MAX) {
                av_log(matroska->ctx, AV_LOG_WARNING,
                       "Too large audio channel number %"PRIu64
                       " or bitdepth %"PRIu64". Skipping track.\n",
                       track->audio.channels, track->audio.bitdepth);
                av_freep(&extradata);
                if (matroska->ctx->error_recognition & AV_EF_EXPLODE)
                    return AVERROR_INVALIDDATA;
                else
                    continue;
            }
            avio_wl16(&b, track->audio.channels);
            avio_wl16(&b, track->audio.bitdepth);
            if (track->audio.out_samplerate < 0 || track->audio.out_samplerate > INT_MAX)
@@ -3964,6 +3964,9 @@ static int mov_read_close(AVFormatContext *s)
        AVStream *st = s->streams[i];
        MOVStreamContext *sc = st->priv_data;

        if (!sc)
            continue;

        av_freep(&sc->ctts_data);
        for (j = 0; j < sc->drefs_count; j++) {
            av_freep(&sc->drefs[j].path);
@@ -1005,6 +1005,9 @@ static int read_sm_data(AVFormatContext *s, AVIOContext *bc, AVPacket *pkt, int
        AV_WL32(dst+4, skip_end);
    }

    if (avio_tell(bc) >= maxpos)
        return AVERROR_INVALIDDATA;

    return 0;
}
@@ -31,14 +31,19 @@ static int dirac_header(AVFormatContext *s, int idx)
    AVStream *st = s->streams[idx];
    dirac_source_params source;
    GetBitContext gb;
    int ret;

    // already parsed the header
    if (st->codec->codec_id == AV_CODEC_ID_DIRAC)
        return 0;

    init_get_bits(&gb, os->buf + os->pstart + 13, (os->psize - 13) * 8);
    if (avpriv_dirac_parse_sequence_header(st->codec, &gb, &source) < 0)
        return -1;
    ret = init_get_bits8(&gb, os->buf + os->pstart + 13, (os->psize - 13));
    if (ret < 0)
        return ret;

    ret = avpriv_dirac_parse_sequence_header(st->codec, &gb, &source);
    if (ret < 0)
        return ret;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_DIRAC;
@@ -250,7 +250,9 @@ AVOutputFormat ff_mjpeg_muxer = {
    .write_packet = ff_raw_write_packet,
    .flags = AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_SINGLEJPEG_MUXER
AVOutputFormat ff_singlejpeg_muxer = {
    .name = "singlejpeg",
    .long_name = NULL_IF_CONFIG_SMALL("JPEG single image"),
@@ -407,10 +407,8 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
    }
    if (st->codec->pix_fmt != AV_PIX_FMT_NONE && st->codec->pix_fmt != pix_fmt) {
        av_log(s, AV_LOG_ERROR, "pixel format change unsupported\n");
        res = AVERROR_PATCHWELCOME;
        goto bitmap_end;
    }
    st->codec->pix_fmt = pix_fmt;
    }else
        st->codec->pix_fmt = pix_fmt;

    if (linesize * height > pkt->size) {
        res = AVERROR_INVALIDDATA;
@@ -87,7 +87,7 @@ static int tls_close(URLContext *h)
{
    TLSContext *c = h->priv_data;
    if (c->need_shutdown)
        gnutls_bye(c->session, GNUTLS_SHUT_RDWR);
        gnutls_bye(c->session, GNUTLS_SHUT_WR);
    if (c->session)
        gnutls_deinit(c->session);
    if (c->cred)
@@ -502,7 +502,7 @@ int main(void)
    printf("Testing av_append_path_component()\n");
    #define TEST_APPEND_PATH_COMPONENT(path, component, expected) \
        fullpath = av_append_path_component((path), (component)); \
        printf("%s = %s\n", fullpath, expected); \
        printf("%s = %s\n", fullpath ? fullpath : "(null)", expected); \
        av_free(fullpath);
    TEST_APPEND_PATH_COMPONENT(NULL, NULL, "(null)")
    TEST_APPEND_PATH_COMPONENT("path", NULL, "path");
@@ -129,7 +129,8 @@ int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size,
    do {
        int len = FFMIN(f->end - wptr, size);
        if (func) {
            if (func(src, wptr, len) <= 0)
            len = func(src, wptr, len);
            if (len <= 0)
                break;
        } else {
            memcpy(wptr, src, len);
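For context, the func parameter of av_fifo_generic_write() is a pull callback: it is asked to produce up to len bytes directly into the FIFO and returns how many bytes it actually delivered (zero or a negative value ends the loop), which the fix above now honours for short reads. A sketch of a caller feeding a FIFO from a plain FILE*:

    #include <stdio.h>
    #include <libavutil/fifo.h>

    /* callback: copy up to len bytes from the FILE* passed as opaque into dst */
    static int read_cb(void *opaque, void *dst, int len)
    {
        return (int)fread(dst, 1, len, (FILE *)opaque);
    }

    /* returns the number of bytes actually stored in the FIFO */
    static int fill_fifo_from_file(AVFifoBuffer *fifo, FILE *f, int bytes)
    {
        return av_fifo_generic_write(fifo, f, bytes, read_cb);
    }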
@@ -238,8 +238,8 @@ void avpriv_request_sample(void *avc,
#if HAVE_LIBC_MSVCRT
#include <crtversion.h>
#if defined(_VC_CRT_MAJOR_VERSION) && _VC_CRT_MAJOR_VERSION < 14
#pragma comment(linker, "/include:"EXTERN_PREFIX"avpriv_strtod")
#pragma comment(linker, "/include:"EXTERN_PREFIX"avpriv_snprintf")
#pragma comment(linker, "/include:" EXTERN_PREFIX "avpriv_strtod")
#pragma comment(linker, "/include:" EXTERN_PREFIX "avpriv_snprintf")
#endif

#define avpriv_open ff_open
@@ -309,7 +309,7 @@ AVClassCategory av_default_get_category(void *ptr);

/**
 * Format a line of log the same way as the default callback.
 * @param line buffer to receive the formated line
 * @param line buffer to receive the formatted line
 * @param line_size size of the buffer
 * @param print_prefix used to store whether the prefix must be printed;
 *                     must point to a persistent integer initially set to 1
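A rough sketch of a custom log callback built on av_log_format_line() (buffer size arbitrary; the persistent print_prefix integer is the one the parameter documentation above refers to):

    #include <stdarg.h>
    #include <stdio.h>
    #include <libavutil/log.h>

    static void my_log_cb(void *ptr, int level, const char *fmt, va_list vl)
    {
        static int print_prefix = 1; /* persistent across calls, as documented */
        char line[1024];

        if (level > av_log_get_level())
            return;
        av_log_format_line(ptr, level, fmt, vl, line, sizeof(line), &print_prefix);
        fputs(line, stderr);
    }

    /* installed with: av_log_set_callback(my_log_cb); */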
@@ -213,7 +213,7 @@
 * In some cases it may be more convenient to put all options into an
 * AVDictionary and call av_opt_set_dict() on it. A specific case of this
 * are the format/codec open functions in lavf/lavc which take a dictionary
 * filled with option as a parameter. This allows to set some options
 * filled with option as a parameter. This makes it possible to set some options
 * that cannot be set otherwise, since e.g. the input file format is not known
 * before the file is actually opened.
 */
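A sketch of the dictionary variant on a libswresample context; the option names follow libswresample's AVOption table (a slice of which appears further down in this diff), and any key that is not recognised is simply left in the dictionary:

    #include <libavutil/dict.h>
    #include <libavutil/opt.h>
    #include <libswresample/swresample.h>

    static SwrContext *make_resampler(void)
    {
        AVDictionary *opts = NULL;
        SwrContext *swr = swr_alloc();
        if (!swr)
            return NULL;

        av_dict_set(&opts, "in_channel_layout",  "stereo", 0);
        av_dict_set(&opts, "out_channel_layout", "stereo", 0);
        av_dict_set(&opts, "in_sample_fmt",  "s16",  0);
        av_dict_set(&opts, "out_sample_fmt", "fltp", 0);
        av_dict_set(&opts, "in_sample_rate",  "44100", 0);
        av_dict_set(&opts, "out_sample_rate", "48000", 0);

        if (av_opt_set_dict(swr, &opts) < 0 || swr_init(swr) < 0)
            swr_free(&swr); /* frees and sets swr to NULL on failure */

        av_dict_free(&opts);
        return swr;
    }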
@@ -530,7 +530,7 @@ enum AVColorRange {
 * Illustration showing the location of the first (top left) chroma sample of the
 * image, the left shows only luma, the right
 * shows the location of the chroma sample, the 2 could be imagined to overlay
 * each other but are drawn seperately due to limitations of ASCII
 * each other but are drawn separately due to limitations of ASCII
 *
 * 1st 2nd 1st 2nd horizontal luma sample positions
 *  v   v   v   v
@@ -76,6 +76,7 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
#include "config.h"
#include "libavutil/avutil.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

@@ -997,7 +998,7 @@ void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
    int i;
    const int count= FFMAX(mbHeight * QPStride, mbWidth);
    for(i=0; i<(count>>2); i++){
        ((uint32_t*)c->nonBQPTable)[i] = ((const uint32_t*)QP_store)[i] & 0x3F3F3F3F;
        AV_WN32(c->nonBQPTable + (i<<2), AV_RN32(QP_store + (i<<2)) & 0x3F3F3F3F);
    }
    for(i<<=2; i<count; i++){
        c->nonBQPTable[i] = QP_store[i] & 0x3F;
@@ -34,6 +34,7 @@ $(SUBDIR)x86/%.o: $(SUBDIR)x86/%$(YASMD).asm
LIBOBJS := $(OBJS) $(SUBDIR)%.h.o $(TESTOBJS)
$(LIBOBJS) $(LIBOBJS:.o=.s) $(LIBOBJS:.o=.i): CPPFLAGS += -DHAVE_AV_CONFIG_H
$(TESTOBJS) $(TESTOBJS:.o=.i): CPPFLAGS += -DTEST
$(TESTOBJS) $(TESTOBJS:.o=.i): CFLAGS += -Umain

$(SUBDIR)$(LIBNAME): $(OBJS)
	$(RM) $@
@@ -49,8 +49,8 @@ static const AVOption options[]={
    {"in_sample_fmt" , "set input sample format" , OFFSET( in_sample_fmt ), AV_OPT_TYPE_SAMPLE_FMT , {.i64=AV_SAMPLE_FMT_NONE}, -1 , INT_MAX, PARAM},
    {"osf" , "set output sample format" , OFFSET(out_sample_fmt ), AV_OPT_TYPE_SAMPLE_FMT , {.i64=AV_SAMPLE_FMT_NONE}, -1 , INT_MAX, PARAM},
    {"out_sample_fmt" , "set output sample format" , OFFSET(out_sample_fmt ), AV_OPT_TYPE_SAMPLE_FMT , {.i64=AV_SAMPLE_FMT_NONE}, -1 , INT_MAX, PARAM},
    {"tsf" , "set internal sample format" , OFFSET(int_sample_fmt ), AV_OPT_TYPE_SAMPLE_FMT , {.i64=AV_SAMPLE_FMT_NONE}, -1 , INT_MAX, PARAM},
    {"internal_sample_fmt" , "set internal sample format" , OFFSET(int_sample_fmt ), AV_OPT_TYPE_SAMPLE_FMT , {.i64=AV_SAMPLE_FMT_NONE}, -1 , INT_MAX, PARAM},
    {"tsf" , "set internal sample format" , OFFSET(user_int_sample_fmt), AV_OPT_TYPE_SAMPLE_FMT , {.i64=AV_SAMPLE_FMT_NONE}, -1 , INT_MAX, PARAM},
    {"internal_sample_fmt" , "set internal sample format" , OFFSET(user_int_sample_fmt), AV_OPT_TYPE_SAMPLE_FMT , {.i64=AV_SAMPLE_FMT_NONE}, -1 , INT_MAX, PARAM},
    {"icl" , "set input channel layout" , OFFSET(user_in_ch_layout ), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=0 }, 0 , INT64_MAX , PARAM, "channel_layout"},
    {"in_channel_layout" , "set input channel layout" , OFFSET(user_in_ch_layout ), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=0 }, 0 , INT64_MAX , PARAM, "channel_layout"},
    {"ocl" , "set output channel layout" , OFFSET(user_out_ch_layout), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=0 }, 0 , INT64_MAX , PARAM, "channel_layout"},
@@ -67,7 +67,17 @@ static void destroy(struct ResampleContext * *c){
}

static int flush(struct SwrContext *s){
    s->delayed_samples_fixup = soxr_delay((soxr_t)s->resample);

    soxr_process((soxr_t)s->resample, NULL, 0, NULL, NULL, 0, NULL);

    {
        float f;
        size_t idone, odone;
        soxr_process((soxr_t)s->resample, &f, 0, &idone, &f, 0, &odone);
        s->delayed_samples_fixup -= soxr_delay((soxr_t)s->resample);
    }

    return 0;
}

@@ -87,18 +97,34 @@ static int process(
}

static int64_t get_delay(struct SwrContext *s, int64_t base){
    double delay_s = soxr_delay((soxr_t)s->resample) / s->out_sample_rate;
    double delayed_samples = soxr_delay((soxr_t)s->resample);
    double delay_s;

    if (s->flushed)
        delayed_samples += s->delayed_samples_fixup;

    delay_s = delayed_samples / s->out_sample_rate;

    return (int64_t)(delay_s * base + .5);
}

static int invert_initial_buffer(struct ResampleContext *c, AudioData *dst, const AudioData *src,
                                 int in_count, int *out_idx, int *out_sz)
{
                                 int in_count, int *out_idx, int *out_sz){
    return 0;
}

static int64_t get_out_samples(struct SwrContext *s, int in_samples){
    double out_samples = (double)s->out_sample_rate / s->in_sample_rate * in_samples;
    double delayed_samples = soxr_delay((soxr_t)s->resample);

    if (s->flushed)
        delayed_samples += s->delayed_samples_fixup;

    return (int64_t)(out_samples + delayed_samples + 1 + .5);
}

struct Resampler const swri_soxr_resampler={
    create, destroy, process, flush, NULL /* set_compensation */, get_delay,
    invert_initial_buffer,
    invert_initial_buffer, get_out_samples
};
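The estimate in get_out_samples() is just the rate ratio applied to the input count plus whatever the resampler still has buffered, rounded up. With hypothetical numbers, 1024 input samples going from 44100 Hz to 48000 Hz with 16.3 samples of pending delay bounds the output at 1132 samples; a standalone version of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* upper bound on output samples: rate-converted input plus pending delay */
    static int64_t estimate_out_samples(int in_samples, int in_rate, int out_rate,
                                        double delayed_samples)
    {
        double out = (double)out_rate / in_rate * in_samples;
        return (int64_t)(out + delayed_samples + 1 + .5);
    }

    int main(void)
    {
        printf("%lld\n", (long long)estimate_out_samples(1024, 44100, 48000, 16.3));
        return 0; /* prints 1132 */
    }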
@@ -173,6 +173,8 @@ av_cold int swr_init(struct SwrContext *s){
    s-> in_ch_layout = s-> user_in_ch_layout;
    s->out_ch_layout = s->user_out_ch_layout;

    s->int_sample_fmt= s->user_int_sample_fmt;

    if(av_get_channel_layout_nb_channels(s-> in_ch_layout) > SWR_CH_MAX) {
        av_log(s, AV_LOG_WARNING, "Input channel layout 0x%"PRIx64" is invalid or unsupported.\n", s-> in_ch_layout);
        s->in_ch_layout = 0;
@@ -119,6 +119,7 @@ struct SwrContext {
    int user_used_ch_count; ///< User set used channel count
    int64_t user_in_ch_layout; ///< User set input channel layout
    int64_t user_out_ch_layout; ///< User set output channel layout
    enum AVSampleFormat user_int_sample_fmt; ///< User set internal sample format

    struct DitherContext dither;

@@ -157,6 +158,7 @@ struct SwrContext {
    int64_t outpts; ///< output PTS
    int64_t firstpts; ///< first PTS
    int drop_output; ///< number of output samples to drop
    double delayed_samples_fixup; ///< soxr 0.1.1: needed to fixup delayed_samples after flush has been called.

    struct AudioConvert *in_convert; ///< input conversion context
    struct AudioConvert *out_convert; ///< output conversion context
@@ -51,6 +51,7 @@ void (*rgb16to15)(const uint8_t *src, uint8_t *dst, int src_size);
void (*rgb15to16)(const uint8_t *src, uint8_t *dst, int src_size);
void (*rgb15to32)(const uint8_t *src, uint8_t *dst, int src_size);

void (*shuffle_bytes_0321)(const uint8_t *src, uint8_t *dst, int src_size);
void (*shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, int src_size);

void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc,

@@ -333,7 +334,6 @@ void shuffle_bytes_ ## a ## b ## c ## d(const uint8_t *src, \
    } \
}

DEFINE_SHUFFLE_BYTES(0, 3, 2, 1)
DEFINE_SHUFFLE_BYTES(1, 2, 3, 0)
DEFINE_SHUFFLE_BYTES(3, 0, 1, 2)
DEFINE_SHUFFLE_BYTES(3, 2, 1, 0)

@@ -50,6 +50,7 @@ extern void (*rgb24to15)(const uint8_t *src, uint8_t *dst, int src_size);
extern void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, int src_size);
extern void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size);

extern void (*shuffle_bytes_0321)(const uint8_t *src, uint8_t *dst, int src_size);
extern void (*shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, int src_size);

void rgb64tobgr48_nobswap(const uint8_t *src, uint8_t *dst, int src_size);

@@ -71,7 +72,6 @@ void rgb15tobgr15(const uint8_t *src, uint8_t *dst, int src_size);
void rgb12tobgr12(const uint8_t *src, uint8_t *dst, int src_size);
void rgb12to15(const uint8_t *src, uint8_t *dst, int src_size);

void shuffle_bytes_0321(const uint8_t *src, uint8_t *dst, int src_size);
void shuffle_bytes_1230(const uint8_t *src, uint8_t *dst, int src_size);
void shuffle_bytes_3012(const uint8_t *src, uint8_t *dst, int src_size);
void shuffle_bytes_3210(const uint8_t *src, uint8_t *dst, int src_size);
@@ -322,12 +322,26 @@ static inline void shuffle_bytes_2103_c(const uint8_t *src, uint8_t *dst,
    uint8_t *d = dst - idx;

    for (; idx < 15; idx += 4) {
        register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
        register unsigned v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
        v &= 0xff00ff;
        *(uint32_t *)&d[idx] = (v >> 16) + g + (v << 16);
    }
}

static inline void shuffle_bytes_0321_c(const uint8_t *src, uint8_t *dst,
                                        int src_size)
{
    int idx = 15 - src_size;
    const uint8_t *s = src - idx;
    uint8_t *d = dst - idx;

    for (; idx < 15; idx += 4) {
        register unsigned v = *(const uint32_t *)&s[idx], g = v & 0x00ff00ff;
        v &= 0xff00ff00;
        *(uint32_t *)&d[idx] = (v >> 16) + g + (v << 16);
    }
}

static inline void rgb24tobgr24_c(const uint8_t *src, uint8_t *dst, int src_size)
{
    unsigned i;
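The int-to-unsigned change in these shuffle loops matters because shifting a 32-bit signed value left by 16 is undefined behaviour once the sign bit gets involved, while unsigned arithmetic simply wraps modulo 2^32, which is what the byte swap relies on. A standalone sketch of the 2103 shuffle on a single pixel:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* swap bytes 0 and 2 of a 32-bit pixel (e.g. RGBA <-> BGRA) */
    static uint32_t shuffle_2103(uint32_t v)
    {
        uint32_t g = v & 0xff00ff00u; /* bytes 1 and 3 stay where they are */
        v &= 0x00ff00ffu;             /* isolate bytes 0 and 2 */
        return (v >> 16) | g | (v << 16);
    }

    int main(void)
    {
        uint8_t px[4] = { 0x11, 0x22, 0x33, 0x44 }, out[4];
        uint32_t v;
        memcpy(&v, px, 4);
        v = shuffle_2103(v);
        memcpy(out, &v, 4);
        printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
        return 0; /* prints "33 22 11 44" on a little-endian host */
    }

On big-endian hosts the same word operation swaps bytes 1 and 3 instead, which is why the rgb2rgb_init_c() hunk right below swaps the 0321/2103 assignments under HAVE_BIGENDIAN.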
@@ -929,7 +943,13 @@ static av_cold void rgb2rgb_init_c(void)
    rgb24to15 = rgb24to15_c;
    rgb24to16 = rgb24to16_c;
    rgb24tobgr24 = rgb24tobgr24_c;
#if HAVE_BIGENDIAN
    shuffle_bytes_0321 = shuffle_bytes_2103_c;
    shuffle_bytes_2103 = shuffle_bytes_0321_c;
#else
    shuffle_bytes_0321 = shuffle_bytes_0321_c;
    shuffle_bytes_2103 = shuffle_bytes_2103_c;
#endif
    rgb32tobgr16 = rgb32tobgr16_c;
    rgb32tobgr15 = rgb32tobgr15_c;
    yv12toyuy2 = yv12toyuy2_c;
@@ -1243,6 +1243,11 @@ static rgbConvFn findRgbConvFn(SwsContext *c)
    if ((dstFormat == AV_PIX_FMT_RGB32_1 || dstFormat == AV_PIX_FMT_BGR32_1) && !isRGBA32(srcFormat) && ALT32_CORR<0)
        return NULL;

    // Maintain symmetry between endianness
    if (c->flags & SWS_BITEXACT)
        if ((dstFormat == AV_PIX_FMT_RGB32 || dstFormat == AV_PIX_FMT_BGR32 ) && !isRGBA32(srcFormat) && ALT32_CORR>0)
            return NULL;

    return conv;
}
@@ -1487,9 +1487,9 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,

    /* Allocate pixbufs (we use dynamic allocation because otherwise we would
     * need to allocate several megabytes to handle all possible cases) */
    FF_ALLOC_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
    FF_ALLOC_OR_GOTO(c, c->chrUPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
    FF_ALLOC_OR_GOTO(c, c->chrVPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
    FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
    FF_ALLOCZ_OR_GOTO(c, c->chrUPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
    FF_ALLOCZ_OR_GOTO(c, c->chrVPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
    if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat))
        FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
    /* Note we need at least one pixel more at the end because of the MMX code
@@ -1090,7 +1090,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst,
        : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one)
        : "memory");
    for (; idx<15; idx+=4) {
        register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
        register unsigned v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
        v &= 0xff00ff;
        *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16);
    }

@@ -1905,7 +1905,7 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
        "cmp %3, %%"REG_a" \n\t"
        " jb 1b \n\t"
        ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
        : "memory", "%"REG_a""
        : "memory", XMM_CLOBBERS("xmm0", "xmm1", "xmm2",) "%"REG_a
    );
#else
    __asm__(

@@ -1943,7 +1943,9 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
        src2 += src2Stride;
    }
    __asm__(
#if !COMPILE_TEMPLATE_SSE2
        EMMS" \n\t"
#endif
        SFENCE" \n\t"
        ::: "memory"
    );

@@ -1971,7 +1973,9 @@ static void RENAME(deinterleaveBytes)(const uint8_t *src, uint8_t *dst1, uint8_t
        dst2 += dst2Stride;
    }
    __asm__(
#if !COMPILE_TEMPLATE_SSE2
        EMMS" \n\t"
#endif
        SFENCE" \n\t"
        ::: "memory"
    );