Compare commits

44 Commits:

- d005e2ecce
- 57bdb3f3dd
- 5e34dded10
- 45ca270ec9
- ca2ccd85d7
- 437f6fb488
- f913da3e15
- ed9c6529f0
- aa40bbb492
- 8e276fc96a
- 4a4e30a6d8
- 1c733a440a
- 5c3bc127ca
- be94d15a03
- 9c57328b81
- 6952f6f39b
- 6359be6751
- beb55b3981
- 80aec733ad
- 77bb6b5bcc
- f68395f7fc
- eefb6b654d
- d18d48def6
- 8df77c3758
- 08f56b846c
- f903147f2d
- 9a840d5e17
- 9e43d92d6a
- e13e928baa
- d3bfb66a66
- 17a6ca7d31
- 8a20224059
- 29ee8b72c4
- 25864cf562
- f74206cb40
- 148d9cd122
- bc259185cb
- 3b6bde3b3d
- 4f187f0af1
- 10c2d22ba1
- 35738e5898
- 59d98fc050
- 60bfa9154d
- 9794727ccd
@@ -1,6 +1,12 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.
 
+version 2.3.3:
+- h264: fix grayscale only decoding with weighted prediction
+- mjpegdec: support AV_PIX_FMT_YUV420P16 with upscale_h
+- proresenc_ks: fix buffer overflow
+- matroskadec: fix crash
+
 version 2.3.2:
 - snow: fix null pointer dereference
 - huffyucdec: fix overread
configure (1 change, vendored)

@@ -4528,6 +4528,7 @@ EOF
 fi
 
 check_ldflags -Wl,--as-needed
+check_ldflags -Wl,-z,noexecstack
 
 if check_func dlopen; then
     ldl=
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER = 2.3.2
+PROJECT_NUMBER = 2.3.4
 
 # With the PROJECT_LOGO tag one can specify a logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
@@ -491,7 +491,7 @@ aeval=val(ch)/2:c=same
 @item
 Invert phase of the second channel:
 @example
-eval=val(0)|-val(1)
+aeval=val(0)|-val(1)
 @end example
 @end itemize
 

@@ -9318,7 +9318,7 @@ Default value is "all", which will cycle through the list of all tests.
 
 Some examples:
 @example
-testsrc=t=dc_luma
+mptestsrc=t=dc_luma
 @end example
 
 will generate a "dc_luma" test pattern.
ffmpeg.c (11 changes)

@@ -1799,18 +1799,10 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         for (i = 0; i < nb_filtergraphs; i++)
             if (ist_in_filtergraph(filtergraphs[i], ist)) {
                 FilterGraph *fg = filtergraphs[i];
-                int j;
                 if (configure_filtergraph(fg) < 0) {
                     av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                     exit_program(1);
                 }
-                for (j = 0; j < fg->nb_outputs; j++) {
-                    OutputStream *ost = fg->outputs[j]->ost;
-                    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-                        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
-                        av_buffersink_set_frame_size(ost->filter->filter,
-                                                     ost->enc_ctx->frame_size);
-                }
             }
         }
 

@@ -3419,7 +3411,7 @@ static int process_input(int file_index)
         }
 
         /* add the stream-global side data to the first packet */
-        if (ist->nb_packets == 1)
+        if (ist->nb_packets == 1) {
             if (ist->st->nb_side_data)
                 av_packet_split_side_data(&pkt);
             for (i = 0; i < ist->st->nb_side_data; i++) {

@@ -3435,6 +3427,7 @@ static int process_input(int file_index)
 
                 memcpy(dst_data, src_sd->data, src_sd->size);
             }
+        }
 
         if (pkt.dts != AV_NOPTS_VALUE)
             pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
@@ -919,6 +919,16 @@ int configure_filtergraph(FilterGraph *fg)
     }
 
     fg->reconfiguration = 1;
 
+    for (i = 0; i < fg->nb_outputs; i++) {
+        OutputStream *ost = fg->outputs[i]->ost;
+        if (ost &&
+            ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+            !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+            av_buffersink_set_frame_size(ost->filter->filter,
+                                         ost->enc_ctx->frame_size);
+    }
+
     return 0;
 }
 
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
     int size;
     union {
         uint64_t u64;
-        uint8_t u8[8];
+        uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
     } tmp;
 
     tmp.u64 = av_be2ne64(state);

@@ -166,7 +166,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
     int err;
     union {
         uint64_t u64;
-        uint8_t u8[8];
+        uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
     } tmp = { av_be2ne64(state) };
     AC3HeaderInfo hdr, *phdr = &hdr;
     GetBitContext gbc;
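Both parser changes above follow the same pattern: the 8-byte scratch header is handed to the bitstream reader, which may read ahead of the bytes it actually consumes, so the scratch buffer is over-allocated by FF_INPUT_BUFFER_PADDING_SIZE. A minimal stand-alone sketch of the idea (the struct, names and READ_AHEAD value are illustrative, not FFmpeg's):

```c
#include <stdint.h>
#include <string.h>

/* Illustrative only: READ_AHEAD stands in for FF_INPUT_BUFFER_PADDING_SIZE. */
#define READ_AHEAD 8

typedef struct {
    uint8_t buf[8 + READ_AHEAD]; /* 8 payload bytes plus zeroed padding */
    int     size;                /* number of valid payload bytes (<= 8) */
} PaddedHeader;

/* Copy up to 8 header bytes into a padded scratch buffer.  A bit reader that
 * prefetches whole words can then safely run over the payload: the extra
 * loads land in the zeroed padding instead of out-of-bounds memory. */
static void padded_header_init(PaddedHeader *h, const uint8_t *src, int size)
{
    memset(h->buf, 0, sizeof(h->buf));
    h->size = size < 8 ? size : 8;
    memcpy(h->buf, src, h->size);
}
```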
@@ -263,7 +263,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
                 energy_cpl = energy[blk][CPL_CH][bnd];
                 energy_ch = energy[blk][ch][bnd];
                 blk1 = blk+1;
-                while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
+                while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
                     if (s->blocks[blk1].cpl_in_use) {
                         energy_cpl += energy[blk1][CPL_CH][bnd];
                         energy_ch += energy[blk1][ch][bnd];
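The one-line change above is purely about evaluation order: with the bounds test first, the `&&` short-circuits before `s->blocks[blk1]` is indexed, so the loop can no longer read one element past the array on its final iteration. A generic sketch of the same pattern (names are made up for illustration):

```c
#include <stddef.h>

#define NUM_BLOCKS 6

struct block {
    int new_coords; /* set when this block starts a new coefficient run */
};

/* Count how many consecutive blocks after 'start' continue the current run.
 * The bounds check must be the left operand of &&: testing
 * blocks[i].new_coords first would dereference blocks[NUM_BLOCKS] once the
 * index walks off the end of the array. */
static size_t run_length(const struct block *blocks, size_t start)
{
    size_t i = start;
    while (i < NUM_BLOCKS && !blocks[i].new_coords)
        i++;
    return i - start;
}
```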
@@ -32,6 +32,10 @@
 #include "cabac.h"
 #include "config.h"
 
+#ifndef UNCHECKED_BITSTREAM_READER
+#define UNCHECKED_BITSTREAM_READER !CONFIG_SAFE_BITSTREAM_READER
+#endif
+
 #if ARCH_AARCH64
 # include "aarch64/cabac.h"
 #endif
@@ -248,7 +248,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
 
 #define STARTCODE_TEST \
     if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
-        if (src[i + 2] != 3) { \
+        if (src[i + 2] != 3 && src[i + 2] != 0) { \
             /* startcode, so we must be past the end */ \
             length = i; \
         } \

@@ -321,7 +321,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
         if (src[si + 2] > 3) {
             dst[di++] = src[si++];
             dst[di++] = src[si++];
-        } else if (src[si] == 0 && src[si + 1] == 0) {
+        } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
             if (src[si + 2] == 3) { // escape
                 dst[di++] = 0;
                 dst[di++] = 0;
@@ -420,10 +420,12 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
             int weight1 = 64 - weight0;
             luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
                             height, 5, weight0, weight1, 0);
-            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
-                              chroma_height, 5, weight0, weight1, 0);
-            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
-                              chroma_height, 5, weight0, weight1, 0);
+            if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+                chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
+                                  chroma_height, 5, weight0, weight1, 0);
+                chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
+                                  chroma_height, 5, weight0, weight1, 0);
+            }
         } else {
             luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
                             h->luma_log2_weight_denom,

@@ -431,18 +433,20 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
                             h->luma_weight[refn1][1][0],
                             h->luma_weight[refn0][0][1] +
                             h->luma_weight[refn1][1][1]);
-            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
-                              h->chroma_log2_weight_denom,
-                              h->chroma_weight[refn0][0][0][0],
-                              h->chroma_weight[refn1][1][0][0],
-                              h->chroma_weight[refn0][0][0][1] +
-                              h->chroma_weight[refn1][1][0][1]);
-            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
-                              h->chroma_log2_weight_denom,
-                              h->chroma_weight[refn0][0][1][0],
-                              h->chroma_weight[refn1][1][1][0],
-                              h->chroma_weight[refn0][0][1][1] +
-                              h->chroma_weight[refn1][1][1][1]);
+            if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+                chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
+                                  h->chroma_log2_weight_denom,
+                                  h->chroma_weight[refn0][0][0][0],
+                                  h->chroma_weight[refn1][1][0][0],
+                                  h->chroma_weight[refn0][0][0][1] +
+                                  h->chroma_weight[refn1][1][0][1]);
+                chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
+                                  h->chroma_log2_weight_denom,
+                                  h->chroma_weight[refn0][0][1][0],
+                                  h->chroma_weight[refn1][1][1][0],
+                                  h->chroma_weight[refn0][0][1][1] +
+                                  h->chroma_weight[refn1][1][1][1]);
+            }
         }
     } else {
         int list = list1 ? 1 : 0;

@@ -456,15 +460,17 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
                         h->luma_log2_weight_denom,
                         h->luma_weight[refn][list][0],
                         h->luma_weight[refn][list][1]);
-        if (h->use_weight_chroma) {
-            chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
-                             h->chroma_log2_weight_denom,
-                             h->chroma_weight[refn][list][0][0],
-                             h->chroma_weight[refn][list][0][1]);
-            chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
-                             h->chroma_log2_weight_denom,
-                             h->chroma_weight[refn][list][1][0],
-                             h->chroma_weight[refn][list][1][1]);
+        if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+            if (h->use_weight_chroma) {
+                chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
+                                 h->chroma_log2_weight_denom,
+                                 h->chroma_weight[refn][list][0][0],
+                                 h->chroma_weight[refn][list][0][1]);
+                chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
+                                 h->chroma_log2_weight_denom,
+                                 h->chroma_weight[refn][list][1][0],
+                                 h->chroma_weight[refn][list][1][1]);
+            }
         }
     }
 }
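All three hunks above wrap the chroma weighting calls in the same guard. `CONFIG_GRAY` is a 0/1 build-time constant, so when grayscale-only decoding is not compiled in the condition folds to always-true and the chroma path is emitted unconditionally; when it is compiled in, the `CODEC_FLAG_GRAY` runtime flag decides whether chroma work is skipped. A stripped-down sketch of the pattern (the names below are placeholders, not the H.264 code):

```c
/* CONFIG_FEATURE plays the role of CONFIG_GRAY: a macro the build system
 * always defines to 0 or 1. */
#ifndef CONFIG_FEATURE
#define CONFIG_FEATURE 1
#endif

#define FLAG_GRAY_ONLY 0x2000 /* stand-in for the runtime "gray" flag */

static void decode_macroblock(unsigned flags,
                              void (*do_luma)(void),
                              void (*do_chroma)(void))
{
    do_luma();
    /* When CONFIG_FEATURE is 0, "!CONFIG_FEATURE || ..." is a compile-time
     * truth, so the flag test and the branch vanish from that build. */
    if (!CONFIG_FEATURE || !(flags & FLAG_GRAY_ONLY))
        do_chroma();
}
```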
@@ -269,6 +269,11 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s,
             x += stride;
         }
 
+        if (x >= w) {
+            av_log(NULL, AV_LOG_ERROR, "run overflow\n");
+            return;
+        }
+
         /* decode run termination value */
         Rb = R(last, x);
         RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
@@ -96,8 +96,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
     if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
         return ret;
 
-    WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
-                             (const WebRtc_UWord16*) buf, &s->decoder, 1);
+    WebRtcIlbcfix_DecodeImpl((int16_t *) frame->data[0], (const uint16_t *) buf, &s->decoder, 1);
 
     *got_frame_ptr = 1;
 

@@ -170,7 +169,7 @@ static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
         return ret;
 
-    WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);
+    WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);
 
     avpkt->size = s->encoder.no_of_bytes;
     *got_packet_ptr = 1;
@@ -244,7 +244,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
 
 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 {
-    int len, nb_components, i, width, height, pix_fmt_id, ret;
+    int len, nb_components, i, width, height, bits, pix_fmt_id, ret;
     int h_count[MAX_COMPONENTS];
     int v_count[MAX_COMPONENTS];
 

@@ -254,11 +254,11 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
     /* XXX: verify len field validity */
     len = get_bits(&s->gb, 16);
     s->avctx->bits_per_raw_sample =
-    s->bits = get_bits(&s->gb, 8);
+    bits = get_bits(&s->gb, 8);
 
     if (s->pegasus_rct)
-        s->bits = 9;
-    if (s->bits == 9 && !s->pegasus_rct)
+        bits = 9;
+    if (bits == 9 && !s->pegasus_rct)
         s->rct = 1; // FIXME ugly
 
     if(s->lossless && s->avctx->lowres){

@@ -291,7 +291,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
             return AVERROR_INVALIDDATA;
         }
     }
-    if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
+    if (s->ls && !(bits <= 8 || nb_components == 1)) {
         avpriv_report_missing_feature(s->avctx,
                                       "JPEG-LS that is not <= 8 "
                                       "bits/component or 16-bit gray");

@@ -337,11 +337,13 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 
     /* if different size, realloc/alloc picture */
     if (   width != s->width || height != s->height
+        || bits != s->bits
         || memcmp(s->h_count, h_count, sizeof(h_count))
         || memcmp(s->v_count, v_count, sizeof(v_count))) {
 
         s->width = width;
         s->height = height;
+        s->bits = bits;
         memcpy(s->h_count, h_count, sizeof(h_count));
         memcpy(s->v_count, v_count, sizeof(v_count));
         s->interlaced = 0;

@@ -1894,6 +1896,7 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     int start_code;
     int i, index;
     int ret = 0;
+    int is16bit;
 
     av_dict_free(&s->exif_metadata);
     av_freep(&s->stereo3d);

@@ -2072,6 +2075,9 @@ fail:
     s->got_picture = 0;
     return ret;
 the_end:
+
+    is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step_minus1;
+
     if (s->upscale_h) {
         int p;
         av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||

@@ -2081,6 +2087,7 @@ the_end:
                    avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
                    avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
                    avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
+                   avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
                    avctx->pix_fmt == AV_PIX_FMT_GBRAP
                    );
         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
@@ -2091,9 +2098,16 @@ the_end:
                 continue;
             if (p==1 || p==2)
                 w >>= hshift;
+            av_assert0(w > 0);
             for (i = 0; i < s->chroma_height; i++) {
-                for (index = w - 1; index; index--)
-                    line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
+                if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
+                else line[w - 1] = line[(w - 1) / 2];
+                for (index = w - 2; index > 0; index--) {
+                    if (is16bit)
+                        ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
+                    else
+                        line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
+                }
                 line += s->linesize[p];
             }
         }
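The rewritten loop above performs the horizontal upscale in place and now handles 16-bit samples. The key detail is the right-to-left order: each output pixel is built from source samples stored in the first half of the same row, and walking from the end of the row guarantees those sources have not been overwritten yet. An 8-bit-only sketch of the same scheme (illustrative, not the decoder code itself):

```c
#include <stdint.h>

/* Expand one row from (w + 1) / 2 decoded samples to w pixels, in place.
 * line[] initially holds the samples in its first half; afterwards every
 * entry is filled.  Writing from index w - 1 downwards means line[index / 2]
 * and line[(index + 1) / 2] are still original samples when they are read. */
static void upscale_row_in_place(uint8_t *line, int w)
{
    int index;

    line[w - 1] = line[(w - 1) / 2];   /* last pixel has no right neighbour */
    for (index = w - 2; index > 0; index--)
        line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
    /* line[0] already holds the first sample */
}
```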
@@ -2118,7 +2132,7 @@ the_end:
         for (i = s->height - 1; i; i--) {
             uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i / 2 * s->linesize[p]];
             uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) / 2 * s->linesize[p]];
-            if (src1 == src2) {
+            if (src1 == src2 || i == s->height - 1) {
                 memcpy(dst, src1, w);
             } else {
                 for (index = 0; index < w; index++)
@@ -1436,6 +1436,9 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
 {
     int i, err = 0;
 
+    if (!s->context_initialized)
+        return AVERROR(EINVAL);
+
     if (s->slice_context_count > 1) {
         for (i = 0; i < s->slice_context_count; i++) {
             free_duplicate_context(s->thread_context[i]);

@@ -1465,8 +1468,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
     s->mb_height = (s->height + 15) / 16;
 
     if ((s->width || s->height) &&
-        av_image_check_size(s->width, s->height, 0, s->avctx))
-        return AVERROR_INVALIDDATA;
+        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
+        goto fail;
 
     if ((err = init_context_frame(s)))
         goto fail;

@@ -1482,7 +1485,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
     }
 
     for (i = 0; i < nb_slices; i++) {
-        if (init_duplicate_context(s->thread_context[i]) < 0)
+        if ((err = init_duplicate_context(s->thread_context[i])) < 0)
             goto fail;
         s->thread_context[i]->start_mb_y =
             (s->mb_height * (i) + nb_slices / 2) / nb_slices;
@@ -471,7 +471,6 @@ static void put_alpha_run(PutBitContext *pb, int run)
 
 // todo alpha quantisation for high quants
 static int encode_alpha_plane(ProresContext *ctx, PutBitContext *pb,
-                              const uint16_t *src, int linesize,
                               int mbs_per_slice, uint16_t *blocks,
                               int quant)
 {

@@ -566,11 +565,16 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
             get_alpha_data(ctx, src, linesize, xp, yp,
                            pwidth, avctx->height / ctx->pictures_per_frame,
                            ctx->blocks[0], mbs_per_slice, ctx->alpha_bits);
-            sizes[i] = encode_alpha_plane(ctx, pb, src, linesize,
+            sizes[i] = encode_alpha_plane(ctx, pb,
                                           mbs_per_slice, ctx->blocks[0],
                                           quant);
         }
         total_size += sizes[i];
+        if (put_bits_left(pb) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Serious underevaluation of"
+                                        "required buffer size");
+            return AVERROR_BUFFER_TOO_SMALL;
+        }
     }
     return total_size;
 }

@@ -941,9 +945,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
     avctx->coded_frame->key_frame = 1;
 
-    pkt_size = ctx->frame_size_upper_bound + FF_MIN_BUFFER_SIZE;
+    pkt_size = ctx->frame_size_upper_bound;
 
-    if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
+    if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size + FF_MIN_BUFFER_SIZE)) < 0)
         return ret;
 
     orig_buf = pkt->data;

@@ -1020,7 +1024,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
             slice_hdr = buf;
             buf += slice_hdr_size - 1;
             init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
-            encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
+            ret = encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
+            if (ret < 0)
+                return ret;
 
             bytestream_put_byte(&slice_hdr, q);
             slice_size = slice_hdr_size + sizes[ctx->num_planes - 1];

@@ -1202,8 +1208,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
         ctx->bits_per_mb = ls * 8;
         if (ctx->chroma_factor == CFACTOR_Y444)
             ctx->bits_per_mb += ls * 4;
-        if (ctx->num_planes == 4)
-            ctx->bits_per_mb += ls * 4;
     }
 
     ctx->frame_size_upper_bound = ctx->pictures_per_frame *

@@ -1212,6 +1216,14 @@ static av_cold int encode_init(AVCodecContext *avctx)
                                   (mps * ctx->bits_per_mb) / 8)
                                   + 200;
 
+    if (ctx->alpha_bits) {
+        // alpha plane is run-coded and might run over bit budget
+        ctx->frame_size_upper_bound += ctx->pictures_per_frame *
+                                       ctx->slices_per_picture *
+            /* num pixels per slice */ (ctx->mbs_per_slice * 256 *
+            /* bits per pixel */       (1 + ctx->alpha_bits + 1) + 7 >> 3);
+    }
+
     avctx->codec_tag = ctx->profile_info->tag;
 
     av_log(avctx, AV_LOG_DEBUG,
|
|||||||
if(v){
|
if(v){
|
||||||
v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
|
v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
|
||||||
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
|
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
|
||||||
|
if ((uint16_t)v != v) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
|
||||||
|
v = 1;
|
||||||
|
}
|
||||||
xc->x=x;
|
xc->x=x;
|
||||||
(xc++)->coeff= v;
|
(xc++)->coeff= v;
|
||||||
}
|
}
|
||||||
@@ -669,6 +672,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
|
|||||||
else run= INT_MAX;
|
else run= INT_MAX;
|
||||||
v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
|
v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
|
||||||
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
|
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
|
||||||
|
if ((uint16_t)v != v) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
|
||||||
|
v = 1;
|
||||||
|
}
|
||||||
|
|
||||||
xc->x=x;
|
xc->x=x;
|
||||||
(xc++)->coeff= v;
|
(xc++)->coeff= v;
|
||||||
|
@@ -374,6 +374,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
     case AV_PIX_FMT_GBRP12BE:
     case AV_PIX_FMT_GBRP14LE:
     case AV_PIX_FMT_GBRP14BE:
+    case AV_PIX_FMT_GBRP16LE:
+    case AV_PIX_FMT_GBRP16BE:
         w_align = 16; //FIXME assume 16 pixel per macroblock
         h_align = 16 * 2; // interlaced needs 2 macroblocks height
         break;
@@ -5484,7 +5484,7 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
     if (ret < 0)
         return ret;
 
-    if (!s->current_picture.f->data[0]) {
+    if (!s->current_picture.f || !s->current_picture.f->data[0]) {
         av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
         return -1;
     }
@@ -253,6 +253,10 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
     return sign ? ~ret : ret;
 
 error:
+    ret = get_bits_left(gb);
+    if (ret <= 0) {
+        av_log(ctx->avctx, AV_LOG_ERROR, "Too few bits (%d) left\n", ret);
+    }
     *last = 1;
     return 0;
 }
@@ -2876,10 +2876,11 @@ static int wavpack_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
             return AVERROR(ENOMEM);
     }
 
-    if ((ret = ff_alloc_packet2(avctx, avpkt, s->block_samples * avctx->channels * 8)) < 0)
+    buf_size = s->block_samples * avctx->channels * 8
+             + 200 /* for headers */;
+    if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size)) < 0)
         return ret;
     buf = avpkt->data;
-    buf_size = avpkt->size;
 
     for (s->ch_offset = 0; s->ch_offset < avctx->channels;) {
         set_samplerate(s);
@@ -1028,7 +1028,7 @@ static int apply_color_indexing_transform(WebPContext *s)
     ImageContext *img;
     ImageContext *pal;
     int i, x, y;
-    uint8_t *p, *pi;
+    uint8_t *p;
 
     img = &s->image[IMAGE_ROLE_ARGB];
     pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];

@@ -1066,11 +1066,11 @@ static int apply_color_indexing_transform(WebPContext *s)
                 p = GET_PIXEL(img->frame, x, y);
                 i = p[2];
                 if (i >= pal->frame->width) {
-                    av_log(s->avctx, AV_LOG_ERROR, "invalid palette index %d\n", i);
-                    return AVERROR_INVALIDDATA;
+                    AV_WB32(p, 0x00000000);
+                } else {
+                    const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
+                    AV_COPY32(p, pi);
                 }
-                pi = GET_PIXEL(pal->frame, i, 0);
-                AV_COPY32(p, pi);
             }
         }
 
@@ -20,10 +20,10 @@
 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 ;******************************************************************************
 
-%if ARCH_X86_64
-
 %include "libavutil/x86/x86util.asm"
 
+%if ARCH_X86_64
+
 SECTION_RODATA
 
 cextern pb_3
@@ -55,8 +55,10 @@ static int ape_tag_read_field(AVFormatContext *s)
         av_log(s, AV_LOG_WARNING, "Invalid APE tag key '%s'.\n", key);
         return -1;
     }
-    if (size >= UINT_MAX)
-        return -1;
+    if (size > INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
+        av_log(s, AV_LOG_ERROR, "APE tag size too large.\n");
+        return AVERROR_INVALIDDATA;
+    }
     if (flags & APE_TAG_FLAG_IS_BINARY) {
         uint8_t filename[1024];
         enum AVCodecID id;
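The stricter bound above keeps `size + FF_INPUT_BUFFER_PADDING_SIZE` representable in a signed 32-bit size, so a later allocation that appends padding cannot wrap around. A small stand-alone sketch of that check (PADDING is an assumed placeholder value, not the real constant):

```c
#include <stdint.h>
#include <stdlib.h>

#define PADDING 32 /* stand-in for FF_INPUT_BUFFER_PADDING_SIZE */

/* Allocate room for a tag value plus zeroed read-ahead padding.  Rejecting
 * size > INT32_MAX - PADDING up front keeps size + PADDING representable,
 * so the allocation request can never silently wrap to a tiny buffer. */
static uint8_t *alloc_tag_value(uint32_t size)
{
    if (size > INT32_MAX - PADDING)
        return NULL;
    return calloc(size + PADDING, 1);
}
```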
@@ -33,13 +33,15 @@ static int mpeg4video_probe(AVProbeData *probe_packet)
 
     for(i=0; i<probe_packet->buf_size; i++){
         temp_buffer = (temp_buffer<<8) + probe_packet->buf[i];
-        if ((temp_buffer & 0xffffff00) != 0x100)
+        if (temp_buffer & 0xfffffe00)
+            continue;
+        if (temp_buffer < 2)
             continue;
 
         if (temp_buffer == VOP_START_CODE) VOP++;
         else if (temp_buffer == VISUAL_OBJECT_START_CODE) VISO++;
-        else if (temp_buffer < 0x120) VO++;
-        else if (temp_buffer < 0x130) VOL++;
+        else if (temp_buffer >= 0x100 && temp_buffer < 0x120) VO++;
+        else if (temp_buffer >= 0x120 && temp_buffer < 0x130) VOL++;
         else if ( !(0x1AF < temp_buffer && temp_buffer < 0x1B7)
                && !(0x1B9 < temp_buffer && temp_buffer < 0x1C4)) res++;
     }
@@ -2161,8 +2161,10 @@ static int matroska_read_header(AVFormatContext *s)
                                (AVRational) { 1, 1000000000 },
                                chapters[i].start, chapters[i].end,
                                chapters[i].title);
-            av_dict_set(&chapters[i].chapter->metadata,
-                        "title", chapters[i].title, 0);
+            if (chapters[i].chapter) {
+                av_dict_set(&chapters[i].chapter->metadata,
+                            "title", chapters[i].title, 0);
+            }
             max_start = chapters[i].start;
         }
 
@@ -783,7 +783,7 @@ static int vobsub_read_header(AVFormatContext *s)
 
             while (*p == ' ')
                 p++;
-            av_log(s, AV_LOG_DEBUG, "IDX stream[%d] name=%s\n", st->id, p);
+            av_log(s, AV_LOG_DEBUG, "IDX stream[%d] name=%s\n", stream_id, p);
             av_strlcpy(alt, p, sizeof(alt));
             header_parsed = 1;
 
@@ -506,6 +506,7 @@ static int analyze(const uint8_t *buf, int size, int packet_size, int *index)
     int stat[TS_MAX_PACKET_SIZE];
     int i;
     int best_score = 0;
+    int best_score2 = 0;
 
     memset(stat, 0, packet_size * sizeof(*stat));
 

@@ -517,11 +518,13 @@ static int analyze(const uint8_t *buf, int size, int packet_size, int *index)
                 best_score = stat[x];
                 if (index)
                     *index = x;
+            } else if (stat[x] > best_score2) {
+                best_score2 = stat[x];
             }
         }
     }
 
-    return best_score;
+    return best_score - best_score2;
 }
 
 /* autodetect fec presence. Must have at least 1024 bytes */
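With `best_score2` tracked, `analyze()` no longer reports how often the best candidate hit a sync byte, but by how much it beat the runner-up: a stream that syncs almost as often at a competing packet stride (say 188 versus 204 bytes) now yields a margin near zero instead of a falsely confident score. A self-contained sketch of the idea (not the demuxer code itself):

```c
/* Return the margin between the best and second-best sync counts.  An
 * ambiguous probe, where two candidate strides score nearly the same,
 * produces a small margin and therefore a weak detection result. */
static int sync_score_margin(const int *stat, int n)
{
    int best = 0, second = 0;
    for (int i = 0; i < n; i++) {
        if (stat[i] > best) {
            second = best;
            best   = stat[i];
        } else if (stat[i] > second) {
            second = stat[i];
        }
    }
    return best - second;
}
```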
@@ -817,7 +817,6 @@ retry:
     return psize;
 fail:
     av_free_packet(pkt);
-    av_free(pkt);
     return AVERROR(ENOMEM);
 }
 
@@ -209,11 +209,15 @@ int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc, int flags)
 void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc,
                        const AVCodecTag *tags, int for_asf, int ignore_extradata)
 {
+    int keep_height = enc->extradata_size >= 9 &&
+                      !memcmp(enc->extradata + enc->extradata_size - 9, "BottomUp", 9);
+    int extradata_size = enc->extradata_size - 9*keep_height;
+
     /* size */
-    avio_wl32(pb, 40 + (ignore_extradata ? 0 : enc->extradata_size));
+    avio_wl32(pb, 40 + (ignore_extradata ? 0 :extradata_size));
     avio_wl32(pb, enc->width);
     //We always store RGB TopDown
-    avio_wl32(pb, enc->codec_tag ? enc->height : -enc->height);
+    avio_wl32(pb, enc->codec_tag || keep_height ? enc->height : -enc->height);
     /* planes */
     avio_wl16(pb, 1);
     /* depth */

@@ -227,9 +231,9 @@ void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc,
     avio_wl32(pb, 0);
 
     if (!ignore_extradata) {
-        avio_write(pb, enc->extradata, enc->extradata_size);
+        avio_write(pb, enc->extradata, extradata_size);
 
-        if (!for_asf && enc->extradata_size & 1)
+        if (!for_asf && extradata_size & 1)
             avio_w8(pb, 0);
     }
 }
@@ -684,7 +684,7 @@ static int seg_write_packet(AVFormatContext *s, AVPacket *pkt)
         end_pts = seg->segment_count < seg->nb_times ?
                   seg->times[seg->segment_count] : INT64_MAX;
     } else if (seg->frames) {
-        start_frame = seg->segment_count <= seg->nb_frames ?
+        start_frame = seg->segment_count < seg->nb_frames ?
                       seg->frames[seg->segment_count] : INT_MAX;
     } else {
         if (seg->use_clocktime) {
@@ -289,6 +289,7 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
             const int bmp_fmt = avio_r8(pb);
             const int width = avio_rl16(pb);
             const int height = avio_rl16(pb);
+            int pix_fmt;
 
             len -= 2+1+2+2;
 

@@ -353,17 +354,21 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 avpriv_set_pts_info(vst, 64, 256, swf->frame_rate);
                 st = vst;
             }
-            st->codec->width = width;
-            st->codec->height = height;
 
             if ((res = av_new_packet(pkt, out_len - colormapsize * colormapbpp)) < 0)
                 goto bitmap_end;
+            if (!st->codec->width && !st->codec->height) {
+                st->codec->width = width;
+                st->codec->height = height;
+            } else {
+                ff_add_param_change(pkt, 0, 0, 0, width, height);
+            }
             pkt->pos = pos;
             pkt->stream_index = st->index;
 
             switch (bmp_fmt) {
             case 3:
-                st->codec->pix_fmt = AV_PIX_FMT_PAL8;
+                pix_fmt = AV_PIX_FMT_PAL8;
                 for (i = 0; i < colormapsize; i++)
                     if (alpha_bmp) colormap[i] = buf[3]<<24 | AV_RB24(buf + 4*i);
                     else colormap[i] = 0xffU <<24 | AV_RB24(buf + 3*i);

@@ -375,14 +380,20 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 memcpy(pal, colormap, AVPALETTE_SIZE);
                 break;
             case 4:
-                st->codec->pix_fmt = AV_PIX_FMT_RGB555;
+                pix_fmt = AV_PIX_FMT_RGB555;
                 break;
             case 5:
-                st->codec->pix_fmt = alpha_bmp ? AV_PIX_FMT_ARGB : AV_PIX_FMT_0RGB;
+                pix_fmt = alpha_bmp ? AV_PIX_FMT_ARGB : AV_PIX_FMT_0RGB;
                 break;
             default:
                 av_assert0(0);
             }
+            if (st->codec->pix_fmt != AV_PIX_FMT_NONE && st->codec->pix_fmt != pix_fmt) {
+                av_log(s, AV_LOG_ERROR, "pixel format change unsupported\n");
+                res = AVERROR_PATCHWELCOME;
+                goto bitmap_end;
+            }
+            st->codec->pix_fmt = pix_fmt;
 
             if (linesize * height > pkt->size) {
                 res = AVERROR_INVALIDDATA;
@@ -631,7 +631,7 @@ int av_opt_set_channel_layout(void *obj, const char *name, int64_t cl, int searc
                "The value set by option '%s' is not a channel layout.\n", o->name);
         return AVERROR(EINVAL);
     }
-    *(int *)(((int64_t *)target_obj) + o->offset) = cl;
+    *(int64_t *)(((uint8_t *)target_obj) + o->offset) = cl;
     return 0;
 }
 
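The corrected store above fixes two mistakes at once: the option's offset is a byte count, so it has to be added to a byte-sized pointer, and the destination field is an int64_t, not an int. A minimal sketch of the safe pattern (the struct and helper are illustrative):

```c
#include <stdint.h>
#include <stddef.h>

typedef struct {
    int     flags;
    int64_t channel_layout;
} ExampleContext;

/* Write a 64-bit value at a byte offset inside an arbitrary struct.  Casting
 * to uint8_t* first keeps the offset in bytes; adding it to an int64_t*
 * would scale the offset by sizeof(int64_t) and clobber the wrong field. */
static void set_i64_at_offset(void *obj, size_t byte_offset, int64_t value)
{
    *(int64_t *)((uint8_t *)obj + byte_offset) = value;
}

/* usage:
 *   ExampleContext ctx = {0};
 *   set_i64_at_offset(&ctx, offsetof(ExampleContext, channel_layout), 0x3F);
 */
```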
@@ -45,7 +45,7 @@
         "cpuid \n\t" \
         "xchg %%"REG_b", %%"REG_S \
         : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
-        : "0" (index))
+        : "0" (index), "2"(0))
 
 #define xgetbv(index, eax, edx) \
     __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
@@ -33,6 +33,10 @@
 #include "libavutil/intreadwrite.h"
 #include "libavutil/timer.h"
 
+#ifndef AV_READ_TIME
+#define AV_READ_TIME(x) 0
+#endif
+
 #if HAVE_UNISTD_H
 #include <unistd.h> /* for getopt */
 #endif