Compare commits

73 Commits

SHA1:
5a0a156e88
884094deba
db20610c49
15a736483e
c8b90c7cd5
97978b7ae8
3f2efac0fe
b9058f58bd
47faf347a0
3d1e4b7ca5
a2bea0df0e
483b2016bb
271f5d68b9
dee327b0e7
e438fd3be9
3d380ffde9
84b100396e
7855083443
ca7f64e0ac
2b26f8c6bf
6e3697b985
c93501687c
bb6f466794
08a319549a
d1da1c8384
8ded3738d1
a2186a8054
a92b73da99
ab38b39059
5dde8ba59e
b841869477
3488e9e269
5a8d78ab7f
dea7f1c62e
c7027ae738
971b13752d
d0ed672484
751e684aae
803ca5c349
252002aec1
d805a51713
45dd7df83b
da82be0cc0
b8b77aefe8
30147f14d4
d6a705d778
e24b33cd68
6f7fd2f589
f974c54909
71b3235cea
dcd1acce1a
5c502e5d41
fa45feefad
9f3135b30b
7ba102d008
3e65caf5bc
af7cbdf470
414b377462
86d4d4b011
14f31df2cc
d04e78805a
25c67b2165
fd8af75109
93d720b040
720e2d4143
9195ef6f65
833dce3818
a8b6721bed
1fe734f4d3
311e58e478
970109deaf
eff0bf7def
6559bb893f
@@ -65,7 +65,7 @@ struct SwsContext *sws_opts;
AVDictionary *swr_opts;
AVDictionary *format_opts, *codec_opts, *resample_opts;
const int this_year = 2013;
const int this_year = 2014;
static FILE *report_file;
configure
@@ -1075,6 +1075,26 @@ require_pkg_config(){
add_extralibs $(get_safe ${pkg}_libs)
}

require_libfreetype(){
log require_libfreetype "$@"
pkg="freetype2"
check_cmd $pkg_config --exists --print-errors $pkg \
|| die "ERROR: $pkg not found"
pkg_cflags=$($pkg_config --cflags $pkg)
pkg_libs=$($pkg_config --libs $pkg)
{
echo "#include <ft2build.h>"
echo "#include FT_FREETYPE_H"
echo "long check_func(void) { return (long) FT_Init_FreeType; }"
echo "int main(void) { return 0; }"
} | check_ld "cc" $pkg_cflags $pkg_libs \
&& set_safe ${pkg}_cflags $pkg_cflags \
&& set_safe ${pkg}_libs $pkg_libs \
|| die "ERROR: $pkg not found"
add_cflags $(get_safe ${pkg}_cflags)
add_extralibs $(get_safe ${pkg}_libs)
}

hostcc_o(){
eval printf '%s\\n' $HOSTCC_O
}

@@ -2636,7 +2656,9 @@ probe_cc(){
unset _depflags _DEPCMD _DEPFLAGS
_flags_filter=echo

if $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
if $_cc --version 2>&1 | grep -q '^GNU assembler'; then
true # no-op to avoid reading stdin in following checks
elif $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
_type=llvm_gcc
gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
_ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"

@@ -3902,7 +3924,7 @@ enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaa
enabled libfdk_aac && require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac
flite_libs="-lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal -lflite_cmu_us_kal16 -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish -lflite_cmulex -lflite"
enabled libflite && require2 libflite "flite/flite.h" flite_init $flite_libs
enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
enabled libfreetype && require_libfreetype
enabled libgsm && { for gsm_hdr in "gsm.h" "gsm/gsm.h"; do
check_lib "${gsm_hdr}" gsm_create -lgsm && break;
done || die "ERROR: libgsm not found"; }

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 1.2.4
PROJECT_NUMBER = 1.2.5
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

@@ -749,7 +749,7 @@ Set maximum frame size, or duration of a frame in milliseconds. The
argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
frame sizes achieve lower latency but less quality at a given bitrate.
Sizes greater than 20ms are only interesting at fairly low bitrates.
The default of FFmpeg is 10ms, but is 20ms in @command{opusenc}.
The default is 20ms.

@item packet_loss (@emph{expect-loss})
Set expected packet loss percentage. The default is 0.
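The 20 ms default documented above corresponds to the `frame_duration` AVOption default moving from 10.0 to 20.0 in the libopus encoder change later in this comparison. A minimal sketch of passing the option explicitly when opening the encoder; the surrounding setup values (48 kHz stereo, s16, 96 kb/s) and the helper name are illustrative assumptions, not part of the patch set:

```c
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

/* Hypothetical helper: open the libopus encoder with frame_duration set
 * explicitly; "20" simply spells out the new default. Assumes
 * avcodec_register_all() has already been called. */
static int open_libopus_20ms(AVCodecContext **out_ctx)
{
    AVCodec *codec = avcodec_find_encoder_by_name("libopus");
    AVCodecContext *ctx;
    AVDictionary *opts = NULL;
    int ret;

    if (!codec)
        return AVERROR_ENCODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    ctx->sample_rate    = 48000;               /* illustrative settings */
    ctx->channels       = 2;
    ctx->channel_layout = AV_CH_LAYOUT_STEREO;
    ctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    ctx->bit_rate       = 96000;

    av_dict_set(&opts, "frame_duration", "20", 0); /* milliseconds */
    ret = avcodec_open2(ctx, codec, &opts);        /* applies private options */
    av_dict_free(&opts);
    if (ret < 0) {
        avcodec_close(ctx);
        av_freep(&ctx);
        return ret;
    }
    *out_ctx = ctx;
    return 0;
}
```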
ffmpeg.c
@@ -1910,7 +1910,10 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
ist->st->codec->sample_rate;
break;
case AVMEDIA_TYPE_VIDEO:
if (pkt->duration) {
if (ist->framerate.num) {
int64_t next_dts = av_rescale_q(ist->next_dts, AV_TIME_BASE_Q, av_inv_q(ist->framerate));
ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), AV_TIME_BASE_Q);
} else if (pkt->duration) {
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
} else if(ist->st->codec->time_base.num != 0) {
int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;

@@ -2213,6 +2216,8 @@ static int transcode_init(void)
codec->time_base = icodec->time_base;
}

if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if(ost->frame_rate.num)
codec->time_base = av_inv_q(ost->frame_rate);

@@ -328,6 +328,14 @@ static AVLFG random_state;
static FILE *logfile = NULL;

static void htmlstrip(char *s) {
while (s && *s) {
s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
if (*s)
*s++ = '?';
}
}

static int64_t ffm_read_write_index(int fd)
{
uint8_t buf[8];

@@ -1887,6 +1895,7 @@ static int http_parse_request(HTTPContext *c)
send_error:
c->http_error = 404;
q = c->buffer;
htmlstrip(msg);
snprintf(q, c->buffer_size,
"HTTP/1.0 404 Not Found\r\n"
"Content-type: text/html\r\n"
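The new htmlstrip() helper above replaces every character outside the set [0-9A-Za-z,. ] with '?', so request-derived text can no longer inject markup into the 404 body. A standalone illustration (the helper body is copied from the hunk above; the sample string is made up):

```c
#include <stdio.h>
#include <string.h>

/* Same logic as the htmlstrip() helper added above. */
static void htmlstrip(char *s) {
    while (s && *s) {
        s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
        if (*s)
            *s++ = '?';
    }
}

int main(void) {
    char msg[] = "<b>File not found</b>";
    htmlstrip(msg);
    printf("%s\n", msg);   /* prints "?b?File not found??b?" */
    return 0;
}
```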
@@ -429,6 +429,7 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
#define DECAY_SLOPE 0.05f
/// Number of frequency bands that can be addressed by the parameter index, b(k)
static const int NR_PAR_BANDS[] = { 20, 34 };
static const int NR_IPDOPD_BANDS[] = { 11, 17 };
/// Number of frequency bands that can be addressed by the sub subband index, k
static const int NR_BANDS[] = { 71, 91 };
/// Start frequency band for the all-pass filter decay slope

@@ -823,7 +824,8 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
h12 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][1];
h21 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][2];
h22 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][3];
if (!PS_BASELINE && ps->enable_ipdopd && b < ps->nr_ipdopd_par) {
if (!PS_BASELINE && ps->enable_ipdopd && b < NR_IPDOPD_BANDS[is34]) {
//The spec say says to only run this smoother when enable_ipdopd
//is set but the reference decoder appears to run it constantly
float h11i, h12i, h21i, h22i;

@@ -112,7 +112,7 @@ static void hscroll(AVCodecContext *avctx)
AnsiContext *s = avctx->priv_data;
int i;
if (s->y < avctx->height - s->font_height) {
if (s->y <= avctx->height - 2*s->font_height) {
s->y += s->font_height;
return;
}

@@ -165,7 +165,7 @@ static void draw_char(AVCodecContext *avctx, int c)
ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
s->frame.linesize[0], s->font, s->font_height, c, fg, bg);
s->x += FONT_WIDTH;
if (s->x >= avctx->width) {
if (s->x > avctx->width - FONT_WIDTH) {
s->x = 0;
hscroll(avctx);
}

@@ -239,6 +239,8 @@ static int execute_code(AVCodecContext * avctx, int c)
default:
av_log_ask_for_sample(avctx, "unsupported screen mode\n");
}
s->x = av_clip(s->x, 0, width - FONT_WIDTH);
s->y = av_clip(s->y, 0, height - s->font_height);
if (width != avctx->width || height != avctx->height) {
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);

@@ -335,6 +337,8 @@ static int execute_code(AVCodecContext * avctx, int c)
av_log_ask_for_sample(avctx, "unsupported escape code\n");
break;
}
s->x = av_clip(s->x, 0, avctx->width - FONT_WIDTH);
s->y = av_clip(s->y, 0, avctx->height - s->font_height);
return 0;
}

@@ -41,10 +41,10 @@ function ff_scalarproduct_int16_neon, export=1
vpadd.s32 d16, d0, d1
vpadd.s32 d17, d2, d3
vpadd.s32 d10, d4, d5
vpadd.s32 d11, d6, d7
vpadd.s32 d18, d4, d5
vpadd.s32 d19, d6, d7
vpadd.s32 d0, d16, d17
vpadd.s32 d1, d10, d11
vpadd.s32 d1, d18, d19
vpadd.s32 d2, d0, d1
vpaddl.s32 d3, d2
vmov.32 r0, d3[0]

@@ -81,10 +81,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
vpadd.s32 d16, d0, d1
vpadd.s32 d17, d2, d3
vpadd.s32 d10, d4, d5
vpadd.s32 d11, d6, d7
vpadd.s32 d18, d4, d5
vpadd.s32 d19, d6, d7
vpadd.s32 d0, d16, d17
vpadd.s32 d1, d10, d11
vpadd.s32 d1, d18, d19
vpadd.s32 d2, d0, d1
vpaddl.s32 d3, d2
vmov.32 r0, d3[0]

@@ -286,7 +286,7 @@ int av_packet_split_side_data(AVPacket *pkt){
for (i=0; ; i++){
size= AV_RB32(p);
av_assert0(size<=INT_MAX && p - pkt->data >= size);
pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
pkt->side_data[i].data = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
pkt->side_data[i].size = size;
pkt->side_data[i].type = p[4]&127;
if (!pkt->side_data[i].data)

@@ -117,6 +117,7 @@ typedef struct BinkContext {
int version; ///< internal Bink file version
int has_alpha;
int swap_planes;
unsigned frame_num;

Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types
Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type

@@ -1207,6 +1208,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
if (c->version >= 'i')
skip_bits_long(&gb, 32);

c->frame_num++;

for (plane = 0; plane < 3; plane++) {
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);

@@ -1215,7 +1218,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
return ret;
} else {
if ((ret = binkb_decode_plane(c, &gb, plane_idx,
!avctx->frame_number, !!plane)) < 0)
c->frame_num == 1, !!plane)) < 0)
return ret;
}
if (get_bits_count(&gb) >= bits_count)

@@ -1339,6 +1342,13 @@ static av_cold int decode_end(AVCodecContext *avctx)
return 0;
}

static void flush(AVCodecContext *avctx)
{
BinkContext * const c = avctx->priv_data;

c->frame_num = 0;
}

AVCodec ff_bink_decoder = {
.name = "binkvideo",
.type = AVMEDIA_TYPE_VIDEO,

@@ -1348,5 +1358,6 @@ AVCodec ff_bink_decoder = {
.close = decode_end,
.decode = decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("Bink video"),
.flush = flush,
.capabilities = CODEC_CAP_DR1,
};
@@ -305,7 +305,15 @@ int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
GET_DATA(buf[j].bits, bits, i, bits_wrap, bits_size);\
if (!(condition))\
continue;\
if (buf[j].bits > 3*nb_bits || buf[j].bits>32) {\
av_log(NULL, AV_LOG_ERROR, "Too long VLC in init_vlc\n");\
return -1;\
}\
GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size);\
if (buf[j].code >= (1LL<<buf[j].bits)) {\
av_log(NULL, AV_LOG_ERROR, "Invalid code in init_vlc\n");\
return -1;\
}\
if (flags & INIT_VLC_LE)\
buf[j].code = bitswap_32(buf[j].code);\
else\

@@ -175,7 +175,13 @@ static int decode_frame(AVCodecContext *avctx, void *data,
case C93_4X4_FROM_PREV:
for (j = 0; j < 8; j += 4) {
for (i = 0; i < 8; i += 4) {
offset = bytestream2_get_le16(&gb);
int offset = bytestream2_get_le16(&gb);
int from_x = offset % WIDTH;
int from_y = offset / WIDTH;
if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&
(FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {
return AVERROR_INVALIDDATA;
}
if ((ret = copy_block(avctx, &out[j*stride+i],
copy_from, offset, 4, stride)) < 0)
return ret;

@@ -305,7 +305,7 @@ STOP_TIMER("get_cabac_bypass")
for(i=0; i<SIZE; i++){
START_TIMER
if( (r[i]&1) != get_cabac(&c, state) )
if( (r[i]&1) != get_cabac_noinline(&c, state) )
av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
STOP_TIMER("get_cabac")
}

@@ -71,6 +71,11 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
case 4: // motion compensation
x = (*mv) >> 4; if(x & 8) x = 8 - x;
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
if (i < -x || avctx->width - i - 4 < x ||
j < -y || avctx->height - j - 4 < y) {
av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
return AVERROR_INVALIDDATA;
}
tmp2 += x + y*stride;
case 0: // skip
case 5: // skip in method 12

@@ -128,6 +133,11 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
case 0x80: // motion compensation
x = (*mv) >> 4; if(x & 8) x = 8 - x;
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
if (i + 2*(k & 1) < -x || avctx->width - i - 2*(k & 1) - 2 < x ||
j + (k & 2) < -y || avctx->height - j - (k & 2) - 2 < y) {
av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
return AVERROR_INVALIDDATA;
}
tmp2 += x + y*stride;
case 0x00: // skip
tmp[d + 0 ] = tmp2[0];

@@ -255,6 +255,11 @@ static int decode_frame(AVCodecContext *avctx,
calc_quant_matrix(s, buf[13]);
buf += 16;

if (width < 16 || height < 16) {
av_log(avctx, AV_LOG_ERROR, "Dimensions too small\n");
return AVERROR_INVALIDDATA;
}

if (avctx->width != width || avctx->height != height) {
if((width * height)/2048*7 > buf_end-buf)
return AVERROR_INVALIDDATA;

@@ -762,6 +762,17 @@ void ff_er_frame_start(ERContext *s)
s->error_occurred = 0;
}

static int er_supported(ERContext *s)
{
if(s->avctx->hwaccel ||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
!s->cur_pic ||
s->cur_pic->field_picture
)
return 0;
return 1;
}

/**
* Add a slice.
* @param endx x component of the last macroblock, can be -1

@@ -828,7 +839,7 @@ void ff_er_add_slice(ERContext *s, int startx, int starty,
s->error_status_table[start_xy] |= VP_START;

if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
s->avctx->skip_top * s->mb_width < start_i) {
er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];

prev_status &= ~ VP_START;

@@ -851,9 +862,7 @@ void ff_er_frame_end(ERContext *s)
* though it should not crash if enabled. */
if (!s->avctx->err_recognition || s->error_count == 0 ||
s->avctx->lowres ||
s->avctx->hwaccel ||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
!s->cur_pic || s->cur_pic->field_picture ||
!er_supported(s) ||
s->error_count == 3 * s->mb_width *
(s->avctx->skip_top + s->avctx->skip_bottom)) {
return;
@@ -374,7 +374,7 @@ static void bl_intrp(EVRCContext *e, float *ex, float delay)
int offset, i, coef_idx;
int16_t t;
offset = lrintf(fabs(delay));
offset = lrintf(delay);
t = (offset - delay + 0.5) * 8.0 + 0.5;
if (t == 8) {

@@ -640,7 +640,7 @@ static void postfilter(EVRCContext *e, float *in, const float *coeff,
/* Short term postfilter */
synthesis_filter(temp, wcoef2, e->postfilter_iir, length, out);
memcpy(e->postfilter_residual,
memmove(e->postfilter_residual,
e->postfilter_residual + length, ACB_SIZE * sizeof(float));
}

@@ -714,7 +714,7 @@ static void frame_erasure(EVRCContext *e, float *samples)
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
}
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
if (e->bitrate != RATE_QUANT && e->avg_acb_gain < 0.4) {
f = 0.1 * e->avg_fcb_gain;

@@ -814,7 +814,7 @@ static int evrc_decode_frame(AVCodecContext *avctx, void *data,
interpolate_delay(idelay, delay, e->prev_pitch_delay, i);
acb_excitation(e, e->pitch + ACB_SIZE, e->avg_acb_gain, idelay, subframe_size);
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
}
}

@@ -872,7 +872,7 @@ static int evrc_decode_frame(AVCodecContext *avctx, void *data,
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
}
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
synthesis_filter(e->pitch + ACB_SIZE, ilpc,
e->synthesis, subframe_size, tmp);

@@ -583,47 +583,32 @@ static int read_header(FFV1Context *f)
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 9) {
} else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 10) {
} else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else {
} else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
}
} else if (f->colorspace == 1) {

@@ -647,6 +632,10 @@ static int read_header(FFV1Context *f)
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
return AVERROR(ENOSYS);
}
if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}

av_dlog(f->avctx, "%d %d %d\n",
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
@@ -1782,11 +1782,6 @@ int ff_h264_frame_start(H264Context *h)
h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
}

/* Some macroblocks can be accessed before they're available in case
* of lost slices, MBAFF or threading. */
memset(h->slice_table, -1,
(h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));

// s->decode = (h->flags & CODEC_FLAG_PSNR) || !s->encoding ||
// h->cur_pic.f.reference /* || h->contains_intra */ || 1;

@@ -2584,6 +2579,7 @@ static void flush_change(H264Context *h)
h->sync= 0;
h->list_count = 0;
h->current_slice = 0;
h->mmco_reset = 1;
}

/* forget old pics after a seek */

@@ -3114,7 +3110,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
unsigned int pps_id;
int num_ref_idx_active_override_flag, ret;
unsigned int slice_type, tmp, i, j;
int default_ref_list_done = 0;
int last_pic_structure, last_pic_droppable;
int must_reinit;
int needs_reinit = 0;

@@ -3154,12 +3149,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->slice_type_fixed = 0;

slice_type = golomb_to_pict_type[slice_type];
if (slice_type == AV_PICTURE_TYPE_I ||
(h0->current_slice != 0 &&
slice_type == h0->last_slice_type &&
!memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
default_ref_list_done = 1;
}
h->slice_type = slice_type;
h->slice_type_nos = slice_type & 3;

@@ -3219,7 +3208,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|| 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
|| h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)));
|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
|| h->mb_width != h->sps.mb_width
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
));
if (h0->avctx->pix_fmt != get_pixel_format(h0, 0))
must_reinit = 1;

@@ -3337,7 +3329,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
} else {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
if (h->frame_num != h->prev_frame_num) {
int unwrap_prev_frame_num = h->prev_frame_num;
int max_frame_num = 1 << h->sps.log2_max_frame_num;

@@ -3364,7 +3356,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);

/* Mark old field/frame as completed */
if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
if (h0->cur_pic_ptr->owner2 == h0) {
ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
last_pic_structure == PICT_BOTTOM_FIELD);
}

@@ -3373,7 +3365,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
if (last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}

@@ -3383,7 +3375,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
* different frame_nums. Consider this field first in
* pair. Throw away previous field except for reference
* purposes. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
if (last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}

@@ -3419,7 +3411,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
}
}

while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
while (h->frame_num != h->prev_frame_num && !h0->first_field &&
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",

@@ -3498,6 +3490,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
} else {
release_unused_pictures(h, 0);
}
/* Some macroblocks can be accessed before they're available in case
* of lost slices, MBAFF or threading. */
if (FIELD_PICTURE) {
for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
} else {
memset(h->slice_table, -1,
(h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
}
}
if (h != h0 && (ret = clone_slice(h, h0)) < 0)
return ret;

@@ -3590,9 +3591,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->list_count = 0;
h->ref_count[0] = h->ref_count[1] = 0;
}

if (!default_ref_list_done)
if (slice_type != AV_PICTURE_TYPE_I &&
(h0->current_slice == 0 ||
slice_type != h0->last_slice_type ||
memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
ff_h264_fill_default_ref_list(h);
}

if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
ff_h264_decode_ref_pic_list_reordering(h) < 0) {

@@ -549,9 +549,15 @@ static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, in
if(prefix<15){
level_code = (prefix<<suffix_length) + get_bits(gb, suffix_length);
}else{
level_code = (15<<suffix_length) + get_bits(gb, prefix-3);
if(prefix>=16)
level_code = 15<<suffix_length;
if (prefix>=16) {
if(prefix > 25+3){
av_log(h->avctx, AV_LOG_ERROR, "Invalid level prefix\n");
return AVERROR_INVALIDDATA;
}
level_code += (1<<(prefix-3))-4096;
}
level_code += get_bits(gb, prefix-3);
}
mask= -(level_code&1);
level_code= (((2+level_code)>>1) ^ mask) - mask;
@@ -543,7 +543,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
if(!pic){
if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg]
|| h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
av_log(h->avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
av_log(h->avctx, h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG, "mmco: unref short failure\n");
err = AVERROR_INVALIDDATA;
}
continue;

@@ -586,6 +586,9 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
if (h->long_ref[mmco[i].long_arg] != h->cur_pic_ptr) {
remove_long(h, mmco[i].long_arg, 0);
if (remove_short(h, h->cur_pic_ptr->frame_num, 0)) {
av_log(h->avctx, AV_LOG_ERROR, "mmco: cannot assign current picture to short and long at the same time\n");
}

h->long_ref[ mmco[i].long_arg ]= h->cur_pic_ptr;
h->long_ref[ mmco[i].long_arg ]->long_ref=1;

@@ -680,7 +683,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
print_short_term(h);
print_long_term(h);
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
h->cur_pic_ptr->sync |= 1;
if(!h->avctx->has_b_frames)
h->sync = 2;

@@ -693,7 +696,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
int first_slice)
{
int i, ret;
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = mmco_temp;
int mmco_index = 0;
if (h->nal_unit_type == NAL_IDR_SLICE){ // FIXME fields

@@ -759,6 +762,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
}

if (first_slice && mmco_index != -1) {
memcpy(h->mmco, mmco_temp, sizeof(h->mmco));
h->mmco_index = mmco_index;
} else if (!first_slice && mmco_index >= 0 &&
(mmco_index != h->mmco_index ||

@@ -142,6 +142,8 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, int RI
ret = ret >> 1;
}

if(FFABS(ret) > 0xFFFF)
return -0x10000;
/* update state */
state->A[Q] += FFABS(ret) - RItype;
ret *= state->twonear;

@@ -42,6 +42,7 @@
#include <stdlib.h>

#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

@@ -491,6 +492,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
unsigned int max_basesize = FFALIGN(avctx->width, 4) *
FFALIGN(avctx->height, 4);
unsigned int max_decomp_size;
int subsample_h, subsample_v;

avcodec_get_frame_defaults(&c->pic);
if (avctx->extradata_size < 8) {

@@ -517,6 +519,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
max_decomp_size = max_basesize * 2;
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 4:2:2.\n");
if (avctx->width % 4) {
return AVERROR_INVALIDDATA;
}
break;
case IMGTYPE_RGB24:
c->decomp_size = basesize * 3;

@@ -547,6 +552,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}

av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &subsample_h, &subsample_v);
if (avctx->width % (1<<subsample_h) || avctx->height % (1<<subsample_v)) {
return AVERROR_INVALIDDATA;
}

/* Detect compression method */
c->compression = (int8_t)avctx->extradata[5];
switch (avctx->codec_id) {

@@ -380,7 +380,7 @@ static const AVOption libopus_options[] = {
{ "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
{ "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
{ "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 10.0 }, 2.5, 60.0, FLAGS },
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 60.0, FLAGS },
{ "packet_loss", "Expected packet loss percentage", OFFSET(packet_loss), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, FLAGS },
{ "vbr", "Variable bit rate mode", OFFSET(vbr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "vbr" },
{ "off", "Use constant bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "vbr" },

@@ -812,7 +812,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
}
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
if( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
|| !ref_picture[0]){
ref_picture = s->current_picture_ptr->f.data;
}

@@ -826,8 +827,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
for(i=0; i<2; i++){
uint8_t ** ref2picture;

if(s->picture_structure == s->field_select[dir][i] + 1
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
if((s->picture_structure == s->field_select[dir][i] + 1
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]){
ref2picture= ref_picture;
}else{
ref2picture = s->current_picture_ptr->f.data;

@@ -856,6 +857,9 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
pix_op = s->dsp.avg_pixels_tab;
}
}else{
if (!ref_picture[0]) {
ref_picture = s->current_picture_ptr->f.data;
}
for(i=0; i<2; i++){
mpeg_motion(s, dest_y, dest_cb, dest_cr,
s->picture_structure != i+1,
@@ -58,7 +58,7 @@ enum MSV1Mode{
};

#define SKIP_PREFIX 0x8400
#define SKIPS_MAX 0x0FFF
#define SKIPS_MAX 0x03FF
#define MKRGB555(in, off) ((in[off] << 10) | (in[off + 1] << 5) | (in[off + 2]))

static const int remap[16] = { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 };

@@ -424,7 +424,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
void *tmp_ptr;
s->max_framesize = 8192; // should hopefully be enough for the first header
tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
s->max_framesize);
s->max_framesize + FF_INPUT_BUFFER_PADDING_SIZE);
if (!tmp_ptr) {
av_log(avctx, AV_LOG_ERROR, "error allocating bitstream buffer\n");
return AVERROR(ENOMEM);

@@ -437,7 +437,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
buf_size = FFMIN(buf_size, s->max_framesize - s->bitstream_size);
input_buf_size = buf_size;

if (s->bitstream_index + s->bitstream_size + buf_size >
if (s->bitstream_index + s->bitstream_size + buf_size + FF_INPUT_BUFFER_PADDING_SIZE >
s->allocated_bitstream_size) {
memmove(s->bitstream, &s->bitstream[s->bitstream_index],
s->bitstream_size);

@@ -790,8 +790,8 @@ static int svq3_decode_slice_header(AVCodecContext *avctx)
header ^ s->watermark_key);
}
if (length > 0) {
memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
&h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
&h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
}
skip_bits_long(&h->gb, 0);
}

@@ -1647,10 +1647,17 @@ static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame)
if (!side_metadata)
goto end;
end = side_metadata + size;
if (size && end[-1])
return AVERROR_INVALIDDATA;
while (side_metadata < end) {
const uint8_t *key = side_metadata;
const uint8_t *val = side_metadata + strlen(key) + 1;
int ret = av_dict_set(ff_frame_get_metadatap(frame), key, val, 0);
int ret;

if (val >= end)
return AVERROR_INVALIDDATA;

ret = av_dict_set(ff_frame_get_metadatap(frame), key, val, 0);
if (ret < 0)
break;
side_metadata = val + strlen(val) + 1;

@@ -1969,6 +1976,16 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
int did_split = av_packet_split_side_data(&tmp);
//apply_param_change(avctx, &tmp);

if (did_split) {
/* FFMIN() prevents overflow in case the packet wasn't allocated with
* proper padding.
* If the side data is smaller than the buffer padding size, the
* remaining bytes should have already been filled with zeros by the
* original packet allocation anyway. */
memset(tmp.data + tmp.size, 0,
FFMIN(avpkt->size - tmp.size, FF_INPUT_BUFFER_PADDING_SIZE));
}

pkt_recoded = tmp;
ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
if (ret < 0) {

@@ -1572,7 +1572,8 @@ static int decode_packet(AVCodecContext *avctx, void *data,
(frame_size = show_bits(gb, s->log2_frame_size)) &&
frame_size <= remaining_bits(s, gb)) {
save_bits(s, gb, frame_size, 0);
s->packet_done = !decode_frame(s, data, got_frame_ptr);
if (!s->packet_loss)
s->packet_done = !decode_frame(s, data, got_frame_ptr);
} else if (!s->len_prefix
&& s->num_saved_bits > get_bits_count(&s->gb)) {
/** when the frames do not have a length prefix, we don't know

@@ -244,6 +244,11 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s, int x, int y,
curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x;

if (prev_palette_plane == palette_plane && FFABS(curframe_index - prevframe_index) < pixel_count) {
return ;
}

while (pixel_count &&
curframe_index < s->frame_size &&
prevframe_index < s->frame_size) {

@@ -757,6 +757,10 @@ static int v4l2_set_parameters(AVFormatContext *s1)
standard.index = i;
if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
ret = AVERROR(errno);
if (ret == AVERROR(EINVAL)) {
tpf = &streamparm.parm.capture.timeperframe;
break;
}
av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
return ret;
}

@@ -117,9 +117,9 @@ void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
(*links)[idx] = NULL;

(*count)++;
for (i = idx+1; i < *count; i++)
if (*links[i])
(*(unsigned *)((uint8_t *) *links[i] + padidx_off))++;
for (i = idx + 1; i < *count; i++)
if ((*links)[i])
(*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
}

int avfilter_link(AVFilterContext *src, unsigned srcpad,
@@ -48,7 +48,6 @@
#include "video.h"

#include <ft2build.h>
#include <freetype/config/ftheader.h>
#include FT_FREETYPE_H
#include FT_GLYPH_H
#if CONFIG_FONTCONFIG

@@ -457,7 +457,7 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size,
avio_wl16(pb, 40 + enc->extradata_size); /* size */

/* BITMAPINFOHEADER header */
ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 1);
ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 1, 0);
}
end_header(pb, hpos);
}

@@ -639,6 +639,8 @@ static int avi_read_header(AVFormatContext *s)
st->codec->codec_tag = tag1;
st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1);
st->need_parsing = AVSTREAM_PARSE_HEADERS; // This is needed to get the pict type which is necessary for generating correct pts.
if (st->codec->codec_tag == MKTAG('V', 'S', 'S', 'H'))
st->need_parsing = AVSTREAM_PARSE_FULL;

if(st->codec->codec_tag==0 && st->codec->height > 0 && st->codec->extradata_size < 1U<<30){
st->codec->extradata_size+= 9;

@@ -291,7 +291,7 @@ static int avi_write_header(AVFormatContext *s)
// are not (yet) supported.
if (stream->codec_id != AV_CODEC_ID_XSUB) break;
case AVMEDIA_TYPE_VIDEO:
ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0);
ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0, 0);
break;
case AVMEDIA_TYPE_AUDIO:
if ((ret = ff_put_wav_header(pb, stream)) < 0) {

@@ -1781,7 +1781,8 @@ static int matroska_read_header(AVFormatContext *s)
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
1000000000, track->default_duration, 30000);
#if FF_API_R_FRAME_RATE
st->r_frame_rate = st->avg_frame_rate;
if (st->avg_frame_rate.num < st->avg_frame_rate.den * 1000L)
st->r_frame_rate = st->avg_frame_rate;
#endif
}

@@ -1811,6 +1812,7 @@ static int matroska_read_header(AVFormatContext *s)
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->sample_rate = track->audio.out_samplerate;
st->codec->channels = track->audio.channels;
if (!st->codec->bits_per_coded_sample)
st->codec->bits_per_coded_sample = track->audio.bitdepth;
if (st->codec->codec_id != AV_CODEC_ID_AAC)
st->need_parsing = AVSTREAM_PARSE_HEADERS;

@@ -512,7 +512,7 @@ static int mkv_write_codecprivate(AVFormatContext *s, AVIOContext *pb, AVCodecCo
ret = AVERROR(EINVAL);
}

ff_put_bmp_header(dyn_cp, codec, ff_codec_bmp_tags, 0);
ff_put_bmp_header(dyn_cp, codec, ff_codec_bmp_tags, 0, 0);
}

} else if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {

@@ -567,7 +567,7 @@ static int mkv_write_tracks(AVFormatContext *s)
continue;
}

if (!bit_depth)
if (!bit_depth && codec->codec_id != AV_CODEC_ID_ADPCM_G726)
bit_depth = av_get_bytes_per_sample(codec->sample_fmt) << 3;
if (!bit_depth)
bit_depth = codec->bits_per_coded_sample;

@@ -1679,6 +1679,8 @@ static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
if (!entries)
{
sc->keyframe_absent = 1;
if (!st->need_parsing)
st->need_parsing = AVSTREAM_PARSE_HEADERS;
return 0;
}
if (entries >= UINT_MAX / sizeof(int))

@@ -283,6 +283,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
AVStream *st = s->streams[0];
int64_t ret = av_index_search_timestamp(st, timestamp, flags);
int i, j;
int dir = (flags&AVSEEK_FLAG_BACKWARD) ? -1 : 1;

if (mp3->is_cbr && st->duration > 0 && mp3->header_filesize > s->data_offset) {
int64_t filesize = avio_size(s->pb);

@@ -312,7 +313,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,

#define MIN_VALID 3
for(i=0; i<4096; i++) {
int64_t pos = ie->pos + i;
int64_t pos = ie->pos + i*dir;
for(j=0; j<MIN_VALID; j++) {
ret = check(s, pos);
if(ret < 0)

@@ -325,7 +326,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
if(j!=MIN_VALID)
i=0;

ret = avio_seek(s->pb, ie->pos + i, SEEK_SET);
ret = avio_seek(s->pb, ie->pos + i*dir, SEEK_SET);
if (ret < 0)
return ret;
ff_update_cur_dts(s, st, ie->timestamp);

@@ -1856,6 +1856,8 @@ static int mxf_read_header(AVFormatContext *s)
MXFContext *mxf = s->priv_data;
KLVPacket klv;
int64_t essence_offset = 0;
int64_t last_pos = -1;
uint64_t last_pos_index = 1;
int ret;

mxf->last_forward_tell = INT64_MAX;

@@ -1871,7 +1873,12 @@ static int mxf_read_header(AVFormatContext *s)

while (!url_feof(s->pb)) {
const MXFMetadataReadTableEntry *metadata;

if (avio_tell(s->pb) == last_pos) {
av_log(mxf->fc, AV_LOG_ERROR, "MXF structure loop detected\n");
return AVERROR_INVALIDDATA;
}
if ((1ULL<<61) % last_pos_index++ == 0)
last_pos = avio_tell(s->pb);
if (klv_read_packet(&klv, s->pb) < 0) {
/* EOF - seek to previous partition or stop */
if(mxf_parse_handle_partition_or_eof(mxf) <= 0)

@@ -584,8 +584,15 @@ static int write_index(NUTContext *nut, AVIOContext *bc) {
int64_t last_pts= -1;
int j, k;
for (j=0; j<nut->sp_count; j++) {
int flag = (nus->keyframe_pts[j] != AV_NOPTS_VALUE) ^ (j+1 == nut->sp_count);
int flag;
int n = 0;

if (j && nus->keyframe_pts[j] == nus->keyframe_pts[j-1]) {
av_log(nut->avf, AV_LOG_WARNING, "Multiple keyframes with same PTS\n");
nus->keyframe_pts[j] = AV_NOPTS_VALUE;
}

flag = (nus->keyframe_pts[j] != AV_NOPTS_VALUE) ^ (j+1 == nut->sp_count);
for (; j<nut->sp_count && (nus->keyframe_pts[j] != AV_NOPTS_VALUE) == flag; j++)
n++;
@@ -786,6 +786,11 @@ static int64_t ogg_read_timestamp(AVFormatContext *s, int stream_index,
&& !ogg_packet(s, &i, &pstart, &psize, pos_arg)) {
if (i == stream_index) {
struct ogg_stream *os = ogg->streams + stream_index;
// Dont trust the last timestamps of a ogm video
if ( (os->flags & OGG_FLAG_EOS)
&& !(os->flags & OGG_FLAG_BOS)
&& os->codec == &ff_ogm_video_codec)
continue;
pts = ogg_calc_pts(s, i, NULL);
ogg_validate_keyframe(s, i, pstart, psize);
if (os->pflags & AV_PKT_FLAG_KEY) {

@@ -569,9 +569,9 @@ int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
}

/* BITMAPINFOHEADER header */
void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf)
void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf, int ignore_extradata)
{
avio_wl32(pb, 40 + enc->extradata_size); /* size */
avio_wl32(pb, 40 + (ignore_extradata ? 0 : enc->extradata_size)); /* size */
avio_wl32(pb, enc->width);
//We always store RGB TopDown
avio_wl32(pb, enc->codec_tag ? enc->height : -enc->height);

@@ -586,10 +586,12 @@ void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc, const AVCodecTag *t
avio_wl32(pb, 0);
avio_wl32(pb, 0);

avio_write(pb, enc->extradata, enc->extradata_size);
if (!ignore_extradata) {
avio_write(pb, enc->extradata, enc->extradata_size);

if (!for_asf && enc->extradata_size & 1)
avio_w8(pb, 0);
if (!for_asf && enc->extradata_size & 1)
avio_w8(pb, 0);
}
}

void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale)

@@ -46,7 +46,7 @@ void ff_end_tag(AVIOContext *pb, int64_t start);
*/
int ff_get_bmp_header(AVIOContext *pb, AVStream *st, unsigned *esize);

void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf);
void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf, int ignore_extradata);
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc);
enum AVCodecID ff_wav_codec_get_id(unsigned int tag, int bps);
int ff_get_wav_header(AVIOContext *pb, AVCodecContext *codec, int size);

@@ -26,15 +26,15 @@
typedef struct ThpDemuxContext {
int version;
int first_frame;
int first_framesz;
int last_frame;
unsigned first_frame;
unsigned first_framesz;
unsigned last_frame;
int compoff;
int framecnt;
unsigned framecnt;
AVRational fps;
int frame;
int next_frame;
int next_framesz;
unsigned frame;
int64_t next_frame;
unsigned next_framesz;
int video_stream_index;
int audio_stream_index;
int compcount;

@@ -158,7 +158,7 @@ static int thp_read_packet(AVFormatContext *s,
avio_seek(pb, thp->next_frame, SEEK_SET);

/* Locate the next frame and read out its size. */
thp->next_frame += thp->next_framesz;
thp->next_frame += FFMAX(thp->next_framesz, 1);
thp->next_framesz = avio_rb32(pb);

avio_rb32(pb); /* Previous total size. */

@@ -1165,12 +1165,14 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
if (pkt->dts != AV_NOPTS_VALUE) {
// got DTS from the stream, update reference timestamp
st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
} else if (st->reference_dts != AV_NOPTS_VALUE) {
// compute DTS based on reference timestamp
pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
}

if (st->reference_dts != AV_NOPTS_VALUE && pkt->pts == AV_NOPTS_VALUE)
pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;

if (pc->dts_sync_point > 0)
st->reference_dts = pkt->dts; // new reference
}

@@ -2843,9 +2845,10 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
goto find_stream_info_err;
}

read_size += pkt->size;

st = ic->streams[pkt->stream_index];
if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
read_size += pkt->size;

if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
/* check for non-increasing dts */
if (st->info->fps_last_dts != AV_NOPTS_VALUE &&

@@ -132,6 +132,11 @@ static int vqf_read_header(AVFormatContext *s)
rate_flag = AV_RB32(comm_chunk + 8);
avio_skip(s->pb, len-12);

if (st->codec->channels <= 0) {
av_log(s, AV_LOG_ERROR, "Invalid number of channels\n");
return AVERROR_INVALIDDATA;
}

st->codec->bit_rate = read_bitrate*1000;
break;
case MKTAG('D','S','I','Z'): // size of compressed data

@@ -368,8 +368,15 @@ break_loop:

avio_seek(pb, data_ofs, SEEK_SET);

if (!sample_count && st->codec->channels && av_get_bits_per_sample(st->codec->codec_id) && wav->data_end <= avio_size(pb))
sample_count = (data_size<<3) / (st->codec->channels * (uint64_t)av_get_bits_per_sample(st->codec->codec_id));
if (!sample_count || av_get_exact_bits_per_sample(st->codec->codec_id) > 0)
if ( st->codec->channels
&& data_size
&& av_get_bits_per_sample(st->codec->codec_id)
&& wav->data_end <= avio_size(pb))
sample_count = (data_size << 3)
/
(st->codec->channels * (uint64_t)av_get_bits_per_sample(st->codec->codec_id));

if (sample_count)
st->duration = sample_count;
@@ -227,9 +227,50 @@ static void finish_chunk(AVFormatContext *s)
write_index(s);
}

static void put_videoinfoheader2(AVIOContext *pb, AVStream *st)
{
AVRational dar = av_mul_q(st->sample_aspect_ratio, (AVRational){st->codec->width, st->codec->height});
unsigned int num, den;
av_reduce(&num, &den, dar.num, dar.den, 0xFFFFFFFF);

/* VIDEOINFOHEADER2 */
avio_wl32(pb, 0);
avio_wl32(pb, 0);
avio_wl32(pb, st->codec->width);
avio_wl32(pb, st->codec->height);

avio_wl32(pb, 0);
avio_wl32(pb, 0);
avio_wl32(pb, 0);
avio_wl32(pb, 0);

avio_wl32(pb, st->codec->bit_rate);
avio_wl32(pb, 0);
avio_wl64(pb, st->avg_frame_rate.num && st->avg_frame_rate.den ? INT64_C(10000000) / av_q2d(st->avg_frame_rate) : 0);
avio_wl32(pb, 0);
avio_wl32(pb, 0);

avio_wl32(pb, num);
avio_wl32(pb, den);
avio_wl32(pb, 0);
avio_wl32(pb, 0);

ff_put_bmp_header(pb, st->codec, ff_codec_bmp_tags, 0, 1);

if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* MPEG2VIDEOINFO */
avio_wl32(pb, 0);
avio_wl32(pb, st->codec->extradata_size);
avio_wl32(pb, -1);
avio_wl32(pb, -1);
avio_wl32(pb, 0);
avio_write(pb, st->codec->extradata, st->codec->extradata_size);
avio_wl64(pb, 0);
}
}

static int write_stream_codec_info(AVFormatContext *s, AVStream *st)
{
WtvContext *wctx = s->priv_data;
const ff_asf_guid *g, *media_type, *format_type;
AVIOContext *pb = s->pb;
int64_t hdr_pos_start;

@@ -261,13 +302,7 @@ static int write_stream_codec_info(AVFormatContext *s, AVStream *st)

hdr_pos_start = avio_tell(pb);
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
if (wctx->first_video_flag) {
write_pad(pb, 216); //The size is sensitive.
wctx->first_video_flag = 0;
} else {
write_pad(pb, 72); // aspect ratio
ff_put_bmp_header(pb, st->codec, ff_codec_bmp_tags, 0);
}
put_videoinfoheader2(pb, st);
} else {
ff_put_wav_header(pb, st->codec);
}

@@ -101,6 +101,9 @@ static int use_color = -1;

static void colored_fputs(int level, const char *str)
{
if (!*str)
return;

if (use_color < 0) {
#if HAVE_SETCONSOLETEXTATTRIBUTE
CONSOLE_SCREEN_BUFFER_INFO con_info;

@@ -249,7 +249,7 @@ int av_set_string3(void *obj, const char *name, const char *val, int alloc, cons

int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
{
int ret;
int ret = 0;
void *dst, *target_obj;
const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
if (!o || !target_obj)

@@ -1107,9 +1107,10 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
dst_stride <<= 1;

if (INLINE_MMXEXT(cpu_flags) && c->srcBpc == 8 && c->dstBpc <= 14) {
c->canMMXEXTBeUsed = (dstW >= srcW && (dstW & 31) == 0 &&
(srcW & 15) == 0) ? 1 : 0;
if (!c->canMMXEXTBeUsed && dstW >= srcW && (srcW & 15) == 0
c->canMMXEXTBeUsed = dstW >= srcW && (dstW & 31) == 0 &&
c->chrDstW >= c->chrSrcW &&
(srcW & 15) == 0;
if (!c->canMMXEXTBeUsed && dstW >= srcW && c->chrDstW >= c->chrSrcW && (srcW & 15) == 0

&& (flags & SWS_FAST_BILINEAR)) {
if (flags & SWS_PRINT_INFO)
@@ -1,3 +1,3 @@
345516d3a03fd239c62e5e7257c9f4a2 *./tests/data/lavf/lavf.wtv
ea7ac962126198092454f36cc3af3433 *./tests/data/lavf/lavf.wtv
413696 ./tests/data/lavf/lavf.wtv
./tests/data/lavf/lavf.wtv CRC=0xcc2dc628
@@ -1,48 +1,48 @@
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st:-1 flags:0 ts:-1.000000
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st:-1 flags:1 ts: 1.894167
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret: 0 st: 0 flags:0 ts: 0.788334
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret: 0 st: 0 flags:1 ts:-0.317499
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret:-1 st: 1 flags:0 ts: 2.576668
ret: 0 st: 1 flags:1 ts: 1.470835
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret: 0 st:-1 flags:0 ts: 0.365002
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st:-1 flags:1 ts:-0.740831
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret:-1 st: 0 flags:0 ts: 2.153336
ret: 0 st: 0 flags:1 ts: 1.047503
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret: 0 st: 1 flags:0 ts:-0.058330
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st: 1 flags:1 ts: 2.835837
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret:-1 st:-1 flags:0 ts: 1.730004
ret: 0 st:-1 flags:1 ts: 0.624171
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st: 0 flags:0 ts:-0.481662
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st: 0 flags:1 ts: 2.412505
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret:-1 st: 1 flags:0 ts: 1.306672
ret: 0 st: 1 flags:1 ts: 0.200839
ret: 0 st: 1 flags:1 dts: 0.224195 pts: 0.224195 pos: 112904 size: 209
ret: 0 st: 1 flags:1 dts: 0.224195 pts: 0.224195 pos: 112848 size: 209
ret: 0 st:-1 flags:0 ts:-0.904994
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st:-1 flags:1 ts: 1.989173
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret: 0 st: 0 flags:0 ts: 0.883340
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret: 0 st: 0 flags:1 ts:-0.222493
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret:-1 st: 1 flags:0 ts: 2.671674
ret: 0 st: 1 flags:1 ts: 1.565841
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294744 size: 209
ret: 0 st: 1 flags:1 dts: 0.694399 pts: 0.694399 pos: 294688 size: 209
ret: 0 st:-1 flags:0 ts: 0.460008
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208
ret: 0 st:-1 flags:1 ts:-0.645825
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26344 size: 208
ret: 0 st: 1 flags:1 dts:-0.010907 pts:-0.010907 pos: 26288 size: 208