Compare commits
109 Commits
bf0d2ee92c
53239e50ed
0746f5035e
88cc4a405a
5537faaf19
d8a8b3948c
917ce36a20
2e5579bbb4
2cde388aea
77e8bd8ce6
6155d5d98b
7ed19bd337
97fb0b2109
e32e1e3a4f
4e11780b85
3183c20781
3193f4d3f2
265ad094a8
b9d09fb8c9
a3cd26aa33
c803985132
ec7c1cd733
c4d250abca
d125abfa74
e29761b1c8
5c4dc00c3a
baa1738f99
1a396d1ee0
2bb09b714a
c5c39132b4
c4b5b3309c
2ee4b48c9b
1077380513
a15ceebb6f
7a3ff7fb81
e75bb490ca
f8bb156fa1
e407615129
313492c0bd
4f53eaaafa
1177e047f1
66342aab73
17b2ad6f43
19640e99df
4b70e6e6e5
1d9a93682d
51c580102d
25b73ae265
b55be8b266
fc6a00d20d
c0dc649e81
ce167a49ba
4e6f1df657
bec350611e
895ddf1a80
e48f2847ab
71c63e799f
45eb2a0d49
019b4b0650
e3654b25cf
b837d5bf83
8ae0d702a1
e032e647dd
eb9041403d
0051174c70
61c966ef30
772f50c1f3
0eb8786eac
b2b359f124
0ad8d75133
7fd11fbeeb
470fd8e64e
f74f4a5401
c47cdf837c
91ef250713
3670942fae
fa4604d805
03fbb6ff3d
36cfee3adc
2dca276dcb
0ee5f46fd1
b1b69baa01
f0526bc21e
0afe061f28
e6093f5b85
8323f09442
68d83bc3a6
0c9d465e98
a3dca10470
eca1e3dcc8
7c3a3d47cf
bd9c755b22
4c246c65bf
b0d3873085
b902eab45a
79d86b844f
3d8c51d699
c7dc73a6c3
4f2299b6b5
004cdd8b15
cd5c78c804
ab17d11310
d0ed336d8a
56e07e4caf
e61a7c9b45
56976346d9
e5b878b37f
d5b20daeb0
a9602c6cfb
configure
@@ -1539,6 +1539,7 @@ HAVE_LIST="
alsa_asoundlib_h
altivec_h
arpa_inet_h
as_object_arch
asm_mod_q
asm_types_h
atomic_cas_ptr

@@ -4154,6 +4155,11 @@ EOF

check_inline_asm asm_mod_q '"add r0, %Q0, %R0" :: "r"((long long)0)'

# llvm's integrated assembler supports .object_arch from llvm 3.5
[ "$objformat" = elf ] && check_as <<EOF && enable as_object_arch
.object_arch armv4
EOF

[ $target_os != win32 ] && enabled_all armv6t2 shared !pic && enable_weak_pic

elif enabled mips; then

@@ -4640,7 +4646,7 @@ enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio
if enabled libcdio; then
check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio ||
check_lib2 "cdio/paranoia/cdda.h cdio/paranoia/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio ||
die "ERROR: libcdio-paranoia not found"
die "ERROR: No usable libcdio/cdparanoia found"
fi

check_lib X11/Xlib.h XOpenDisplay -lX11 && enable xlib
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.2.12
PROJECT_NUMBER = 2.2.14

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

@@ -298,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
@code{concat}} protocol designed specifically for that, with examples in the
documentation.

A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
video by merely concatenating the files containing them.

Hence you may concatenate your multimedia files by first transcoding them to

@@ -71,7 +71,7 @@ the HTTP server (configured through the @option{Port} option), and
configuration file.

Each feed is associated to a file which is stored on disk. This stored
file is used to allow to send pre-recorded data to a player as fast as
file is used to send pre-recorded data to a player as fast as
possible when new content is added in real-time to the stream.

A "live-stream" or "stream" is a resource published by

@@ -3143,7 +3143,7 @@ Set number overlapping pixels for each block. Each block is of size
@code{16x16}. Since the filter can be slow, you may want to reduce this value,
at the cost of a less effective filter and the risk of various artefacts.

If the overlapping value doesn't allow to process the whole input width or
If the overlapping value doesn't permit processing the whole input width or
height, a warning will be displayed and according borders won't be denoised.

Default value is @code{15}.

@@ -23,7 +23,7 @@ Reduce buffering.

@item probesize @var{integer} (@emph{input})
Set probing size in bytes, i.e. the size of the data to analyze to get
stream information. A higher value will allow to detect more
stream information. A higher value will enable detecting more
information in case it is dispersed into the stream, but will increase
latency. Must be an integer not lesser than 32. It is 5000000 by default.

@@ -63,7 +63,7 @@ Default is 0.

@item analyzeduration @var{integer} (@emph{input})
Specify how many microseconds are analyzed to probe the input. A
higher value will allow to detect more accurate information, but will
higher value will enable detecting more accurate information, but will
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.

@item cryptokey @var{hexadecimal string} (@emph{input})
@@ -1,7 +1,7 @@
@chapter Input Devices
@c man begin INPUT DEVICES

Input devices are configured elements in FFmpeg which allow to access
Input devices are configured elements in FFmpeg which enable accessing
the data coming from a multimedia device attached to your system.

When you configure your FFmpeg build, all the supported input devices

@@ -982,8 +982,8 @@ Set raise error timeout, expressed in microseconds.
This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.

@item listen_timeout=@var{microseconds}
Set listen timeout, expressed in microseconds.
@item listen_timeout=@var{milliseconds}
Set listen timeout, expressed in milliseconds.
@end table

The following example shows how to setup a listening TCP connection

@@ -841,7 +841,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
Return 1.0 if @var{x} is NAN, 0.0 otherwise.

@item ld(var)
Allow to load the value of the internal variable with number
Load the value of the internal variable with number
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
The function returns the loaded value.

@@ -909,7 +909,7 @@ Compute the square root of @var{expr}. This is equivalent to
Compute expression @code{1/(1 + exp(4*x))}.

@item st(var, expr)
Allow to store the value of the expression @var{expr} in an internal
Store the value of the expression @var{expr} in an internal
variable. @var{var} specifies the number of the variable where to
store the value, and it is a value ranging from 0 to 9. The function
returns the value stored in the internal variable.
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int line = 0, ret;
int line, ret;
const int width = avctx->width;
AVFrame *pic = data;
uint16_t *y, *u, *v;
const uint8_t *line_end, *src = avpkt->data;
int stride = avctx->width * 8 / 3;

if (width == 1) {
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
if (width <= 1 || avctx->height <= 0) {
av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
return AVERROR_INVALIDDATA;
}

@@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;

y = (uint16_t *)pic->data[0];
u = (uint16_t *)pic->data[1];
v = (uint16_t *)pic->data[2];
line_end = avpkt->data + stride;
for (line = 0; line < avctx->height; line++) {
uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
int x;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);

while (line++ < avctx->height) {
while (1) {
uint32_t t = AV_RL32(src);
for (x = 0; x < width; x += 6) {
uint32_t t;

if (width - x < 6 || line_end - src < 16) {
y = y_temp;
u = u_temp;
v = v_temp;
}

if (line_end - src < 4)
break;

t = AV_RL32(src);
src += 4;
*u++ = t << 6 & 0xFFC0;
*y++ = t >> 4 & 0xFFC0;
*v++ = t >> 14 & 0xFFC0;

if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
if (line_end - src < 4)
break;
}

t = AV_RL32(src);
src += 4;
*y++ = t << 6 & 0xFFC0;
*u++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;
if (src >= line_end - 2) {
if (!(width & 1)) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);

if (line_end - src < 4)
break;
}

t = AV_RL32(src);
src += 4;

@@ -113,15 +113,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
*y++ = t >> 4 & 0xFFC0;
*u++ = t >> 14 & 0xFFC0;

if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
if (line_end - src < 4)
break;
}

t = AV_RL32(src);
src += 4;

@@ -129,18 +122,21 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
*v++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;

if (src >= line_end - 2) {
if (width & 1) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
if (width - x < 6)
break;
}
}

if (x < width) {
y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
memcpy(y, y_temp, sizeof(*y) * (width - x));
memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
}

line_end += stride;
src = line_end - stride;
}

*got_frame = 1;
@@ -78,9 +78,13 @@ static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
if(x < width && y < height) {
/* build average over 2 pixels */
luma = (src[(x + 0 + y * p->linesize[0])] +
src[(x + 1 + y * p->linesize[0])]) / 2;
if (x + 1 < width) {
/* build average over 2 pixels */
luma = (src[(x + 0 + y * p->linesize[0])] +
src[(x + 1 + y * p->linesize[0])]) / 2;
} else {
luma = src[(x + y * p->linesize[0])];
}
/* write blocks as linear data now so they are suitable for elbg */
dest[0] = luma;
}

@@ -317,7 +321,9 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
} else {
/* fill up mc_meta_charset with data until lifetime exceeds */
if (c->mc_frame_counter < c->mc_lifetime) {
*p = *pict;
ret = av_frame_ref(p, pict);
if (ret < 0)
return ret;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);

@@ -334,8 +340,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
req_size = 0;
/* any frames to encode? */
if (c->mc_lifetime) {
req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
return ret;
buf = pkt->data;

@@ -352,6 +358,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
/* advance pointers */
buf += charset_size;
charset += charset_size;
req_size += charset_size;
}

/* write x frames to buf */
@@ -165,7 +165,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
PutBitContext pb;
AACEncContext *s = avctx->priv_data;

init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
put_bits(&pb, 5, 2); //object type - AAC-LC
put_bits(&pb, 4, s->samplerate_index); //sample rate index
put_bits(&pb, 4, s->channels);

@@ -78,6 +78,7 @@ av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon;
c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon;
c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon;
if (chroma_format_idc <= 1)
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;

c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16_neon;
@@ -541,7 +541,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
case AV_CODEC_ID_ADPCM_IMA_QT:
{
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size * 8);
init_put_bits(&pb, dst, pkt_size);

for (ch = 0; ch < avctx->channels; ch++) {
ADPCMChannelStatus *status = &c->status[ch];

@@ -571,7 +571,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
case AV_CODEC_ID_ADPCM_SWF:
{
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size * 8);
init_put_bits(&pb, dst, pkt_size);

n = frame->nb_samples - 1;

@@ -437,8 +437,8 @@ static av_cold int aic_decode_init(AVCodecContext *avctx)
ctx->mb_width = FFALIGN(avctx->width, 16) >> 4;
ctx->mb_height = FFALIGN(avctx->height, 16) >> 4;

ctx->num_x_slices = 16;
ctx->slice_width = ctx->mb_width / 16;
ctx->num_x_slices = (ctx->mb_width + 15) >> 4;
ctx->slice_width = 16;
for (i = 1; i < 32; i++) {
if (!(ctx->mb_width % i) && (ctx->mb_width / i < 32)) {
ctx->slice_width = ctx->mb_width / i;
@@ -23,9 +23,10 @@
#include "libavutil/arm/asm.S"

function ff_prefetch_arm, export=1
1:
subs r2, r2, #1
pld [r0]
add r0, r0, r1
bne X(ff_prefetch_arm)
bne 1b
bx lr
endfunc
@@ -63,7 +63,7 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
int ret;

if (src_size < avctx->width * avctx->height * 9L / 8) {
if (src_size < avctx->width * avctx->height * 9LL / 8) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}

@@ -106,7 +106,7 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
uint8_t *Y1, *Y2, *U, *V;
int ret;

if (src_size < avctx->width * avctx->height * 3L / 2) {
if (src_size < avctx->width * avctx->height * 3LL / 2) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}

@@ -143,7 +143,7 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
uint8_t *Y, *U, *V;
int ret;

if (src_size < avctx->width * avctx->height * 3L) {
if (src_size < avctx->width * avctx->height * 3LL) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
@@ -145,6 +145,11 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
break;
} else if (level != 0) {
i += run;
if (i > 63) {
av_log(s->avctx, AV_LOG_ERROR,
"ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
j = scantable[i];
level = (level*quant_matrix[j]) >> 4;
level = (level-1)|1;

@@ -159,6 +164,11 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);

i += run;
if (i > 63) {
av_log(s->avctx, AV_LOG_ERROR,
"ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
j = scantable[i];
if (level < 0) {
level = -level;

@@ -170,10 +180,6 @@ static inline int decode_block_intra(MadContext *s, int16_t * block)
level = (level-1)|1;
}
}
if (i > 63) {
av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
return -1;
}

block[j] = level;
}
@@ -251,7 +251,7 @@ static void put_line(uint8_t *dst, int size, int width, const int *runs)
PutBitContext pb;
int run, mode = ~0, pix_left = width, run_idx = 0;

init_put_bits(&pb, dst, size * 8);
init_put_bits(&pb, dst, size);
while (pix_left > 0) {
run = runs[run_idx++];
mode = ~mode;

@@ -287,7 +287,7 @@ static int write_header(FlashSV2Context * s, uint8_t * buf, int buf_size)
if (buf_size < 5)
return -1;

init_put_bits(&pb, buf, buf_size * 8);
init_put_bits(&pb, buf, buf_size);

put_bits(&pb, 4, (s->block_width >> 4) - 1);
put_bits(&pb, 12, s->image_width);

@@ -151,7 +151,7 @@ static int encode_bitstream(FlashSVContext *s, const AVFrame *p, uint8_t *buf,
int buf_pos, res;
int pred_blocks = 0;

init_put_bits(&pb, buf, buf_size * 8);
init_put_bits(&pb, buf, buf_size);

put_bits(&pb, 4, block_width / 16 - 1);
put_bits(&pb, 12, s->image_width);
@@ -105,7 +105,7 @@ static int gif_image_write_image(AVCodecContext *avctx,
/* skip common columns */
while (x_start < x_end) {
int same_column = 1;
for (y = y_start; y < y_end; y++) {
for (y = y_start; y <= y_end; y++) {
if (ref[y*ref_linesize + x_start] != buf[y*linesize + x_start]) {
same_column = 0;
break;

@@ -117,7 +117,7 @@ static int gif_image_write_image(AVCodecContext *avctx,
}
while (x_end > x_start) {
int same_column = 1;
for (y = y_start; y < y_end; y++) {
for (y = y_start; y <= y_end; y++) {
if (ref[y*ref_linesize + x_end] != buf[y*linesize + x_end]) {
same_column = 0;
break;
@@ -1709,8 +1709,9 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
memset(h->pps_buffers, 0, sizeof(h->pps_buffers));

h->rbsp_buffer[0] = NULL;
h->rbsp_buffer[1] = NULL;
h->avctx = avctx;
h->rbsp_buffer[0] = NULL;
h->rbsp_buffer[1] = NULL;
h->rbsp_buffer_size[0] = 0;
h->rbsp_buffer_size[1] = 0;
h->context_initialized = 0;

@@ -1884,8 +1885,11 @@ static int decode_update_thread_context(AVCodecContext *dst,

h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
unref_picture(h, &h->cur_pic);
if (h1->cur_pic.f.buf[0] && (ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
return ret;
if (h1->cur_pic.f.buf[0]) {
ret = ref_picture(h, &h->cur_pic, &h1->cur_pic);
if (ret < 0)
return ret;
}

h->workaround_bugs = h1->workaround_bugs;
h->low_delay = h1->low_delay;

@@ -3506,6 +3510,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
int must_reinit;
int needs_reinit = 0;
int field_pic_flag, bottom_field_flag;
int first_slice = h == h0 && !h0->current_slice;
int frame_num, picture_structure, droppable;
PPS *pps;

h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;

@@ -3570,18 +3577,27 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h0->au_pps_id, pps_id);
return AVERROR_INVALIDDATA;
}
h->pps = *h0->pps_buffers[pps_id];

if (!h0->sps_buffers[h->pps.sps_id]) {
pps = h0->pps_buffers[pps_id];

if (!h0->sps_buffers[pps->sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n",
h->pps.sps_id);
return AVERROR_INVALIDDATA;
}
if (first_slice)
h->pps = *h0->pps_buffers[pps_id];

if (h->pps.sps_id != h->sps.sps_id ||
h->pps.sps_id != h->current_sps_id ||
h0->sps_buffers[h->pps.sps_id]->new) {
if (pps->sps_id != h->sps.sps_id ||
pps->sps_id != h->current_sps_id ||
h0->sps_buffers[pps->sps_id]->new) {

if (!first_slice) {
av_log(h->avctx, AV_LOG_ERROR,
"SPS changed in the middle of the frame\n");
return AVERROR_INVALIDDATA;
}

h->sps = *h0->sps_buffers[h->pps.sps_id];

@@ -3611,13 +3627,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|| 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
|| h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
|| h->mb_width != h->sps.mb_width
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
));
if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0)))
must_reinit = 1;

if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
must_reinit = 1;

h->mb_width = h->sps.mb_width;
h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
h->mb_num = h->mb_width * h->mb_height;

@@ -3694,44 +3712,48 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
}
}

if (h == h0 && h->dequant_coeff_pps != pps_id) {
if (first_slice && h->dequant_coeff_pps != pps_id) {
h->dequant_coeff_pps = pps_id;
init_dequant_tables(h);
}

h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
if (!first_slice) {
if (h0->frame_num != frame_num) {
av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
h0->frame_num, frame_num);
return AVERROR_INVALIDDATA;
}
}

h->mb_mbaff = 0;
h->mb_aff_frame = 0;
last_pic_structure = h0->picture_structure;
last_pic_droppable = h0->droppable;
h->droppable = h->nal_ref_idc == 0;
droppable = h->nal_ref_idc == 0;
if (h->sps.frame_mbs_only_flag) {
h->picture_structure = PICT_FRAME;
picture_structure = PICT_FRAME;
} else {
if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
return -1;
}
field_pic_flag = get_bits1(&h->gb);

if (field_pic_flag) {
bottom_field_flag = get_bits1(&h->gb);
h->picture_structure = PICT_TOP_FIELD + bottom_field_flag;
picture_structure = PICT_TOP_FIELD + bottom_field_flag;
} else {
h->picture_structure = PICT_FRAME;
picture_structure = PICT_FRAME;
h->mb_aff_frame = h->sps.mb_aff;
}
}
h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;

if (h0->current_slice != 0) {
if (last_pic_structure != h->picture_structure ||
last_pic_droppable != h->droppable) {
if (h0->current_slice) {
if (last_pic_structure != picture_structure ||
last_pic_droppable != droppable) {
av_log(h->avctx, AV_LOG_ERROR,
"Changing field mode (%d -> %d) between slices is not allowed\n",
last_pic_structure, h->picture_structure);
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_INVALIDDATA;
} else if (!h0->cur_pic_ptr) {
av_log(h->avctx, AV_LOG_ERROR,

@@ -3739,7 +3761,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h0->current_slice + 1);
return AVERROR_INVALIDDATA;
}
} else {
}

h->picture_structure = picture_structure;
h->droppable = droppable;
h->frame_num = frame_num;
h->mb_field_decoding_flag = picture_structure != PICT_FRAME;

if (h0->current_slice == 0) {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
if (h->frame_num != h->prev_frame_num) {

@@ -4945,8 +4974,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
continue;

again:
if ( !(avctx->active_thread_type & FF_THREAD_FRAME)
|| nals_needed >= nal_index)
if ( (!(avctx->active_thread_type & FF_THREAD_FRAME) || nals_needed >= nal_index)
&& !h->current_slice)
h->au_pps_id = -1;
/* Ignore per frame NAL unit type during extradata
* parsing. Decoding slices is not possible in codec init
@@ -1280,7 +1280,7 @@ void ff_h264_init_cabac_states(H264Context *h) {
}

static int decode_cabac_field_decoding_flag(H264Context *h) {
const long mbb_xy = h->mb_xy - 2L*h->mb_stride;
const int mbb_xy = h->mb_xy - 2*h->mb_stride;

unsigned long ctx = 0;

@@ -1712,7 +1712,7 @@ decode_cabac_residual_internal(H264Context *h, int16_t *block,
\
if( coeff_abs >= 15 ) { \
int j = 0; \
while(get_cabac_bypass( CC ) && j<30) { \
while (get_cabac_bypass(CC) && j < 30) { \
j++; \
} \
\

@@ -383,7 +383,8 @@ int ff_h264_decode_seq_parameter_set(H264Context *h)
"Different chroma and luma bit depth");
goto fail;
}
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U) {
if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) {
av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
sps->bit_depth_luma, sps->bit_depth_chroma);
goto fail;
@@ -787,11 +787,30 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
sps->log2_max_trafo_size = log2_diff_max_min_transform_block_size +
sps->log2_min_tb_size;

if (sps->log2_min_tb_size >= sps->log2_min_cb_size) {
if (sps->log2_min_cb_size < 3 || sps->log2_min_cb_size > 30) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_min_cb_size", sps->log2_min_cb_size);
ret = AVERROR_INVALIDDATA;
goto err;
}

if (sps->log2_diff_max_min_coding_block_size > 30) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_diff_max_min_coding_block_size", sps->log2_diff_max_min_coding_block_size);
ret = AVERROR_INVALIDDATA;
goto err;
}

if (sps->log2_min_tb_size >= sps->log2_min_cb_size || sps->log2_min_tb_size < 2) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid value for log2_min_tb_size");
ret = AVERROR_INVALIDDATA;
goto err;
}

if (log2_diff_max_min_transform_block_size < 0 || log2_diff_max_min_transform_block_size > 30) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid value %d for log2_diff_max_min_transform_block_size", log2_diff_max_min_transform_block_size);
ret = AVERROR_INVALIDDATA;
goto err;
}

sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb);
sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb);
@@ -83,7 +83,12 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
if (level == 127) {
break;
} else if (level != 0) {
i += run;
i += run;
if (i > 63) {
av_log(a->avctx, AV_LOG_ERROR,
"ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
return AVERROR_INVALIDDATA;
}
j = scantable[i];
level = (level * qscale * quant_matrix[j]) >> 3;
level = (level ^ SHOW_SBITS(re, &a->gb, 1)) - SHOW_SBITS(re, &a->gb, 1);

@@ -93,8 +98,13 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
run = SHOW_UBITS(re, &a->gb, 6)+1; LAST_SKIP_BITS(re, &a->gb, 6);
UPDATE_CACHE(re, &a->gb);
level = SHOW_SBITS(re, &a->gb, 10); SKIP_BITS(re, &a->gb, 10);
i += run;
j = scantable[i];
i += run;
if (i > 63) {
av_log(a->avctx, AV_LOG_ERROR,
"ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
return AVERROR_INVALIDDATA;
}
j = scantable[i];
if (level < 0) {
level = -level;
level = (level * qscale * quant_matrix[j]) >> 3;

@@ -105,10 +115,6 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
level = (level - 1) | 1;
}
}
if (i > 63) {
av_log(a->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
return AVERROR_INVALIDDATA;
}

block[j] = level;
}
@@ -89,7 +89,7 @@ static void ff_acelp_interpolatef_mips(float *out, const float *in,
"addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \n\t"
"madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"

: [v] "=&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
: [v] "+&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
[p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
[in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
[fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
@@ -495,9 +495,12 @@ unk_pixfmt:
}
if (s->ls) {
s->upscale_h = s->upscale_v = 0;
if (s->nb_components > 1)
if (s->nb_components == 3) {
s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
else if (s->bits <= 8)
} else if (s->nb_components != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
return AVERROR_PATCHWELCOME;
} else if (s->bits <= 8)
s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
else
s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;

@@ -1180,13 +1183,18 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,

if (s->interlaced && s->bottom_field)
block_offset += linesize[c] >> 1;
ptr = data[c] + block_offset;
if ( 8*(h * mb_x + x) < s->width
&& 8*(v * mb_y + y) < s->height) {
ptr = data[c] + block_offset;
} else
ptr = NULL;
if (!s->progressive) {
if (copy_mb)
mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
linesize[c], s->avctx->lowres);
if (copy_mb) {
if (ptr)
mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
linesize[c], s->avctx->lowres);

else {
} else {
s->dsp.clear_block(s->block);
if (decode_block(s, s->block, i,
s->dc_index[i], s->ac_index[i],

@@ -1195,9 +1203,11 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
"error y=%d x=%d\n", mb_y, mb_x);
return AVERROR_INVALIDDATA;
}
s->dsp.idct_put(ptr, linesize[c], s->block);
if (s->bits & 7)
shift_output(s, ptr, linesize[c]);
if (ptr) {
s->dsp.idct_put(ptr, linesize[c], s->block);
if (s->bits & 7)
shift_output(s, ptr, linesize[c]);
}
}
} else {
int block_idx = s->block_stride[c] * (v * mb_y + y) +

@@ -1835,6 +1845,10 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
put_bits(&pb, 8, x);
if (x == 0xFF) {
x = src[b++];
if (x & 0x80) {
av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
x &= 0x7f;
}
put_bits(&pb, 7, x);
bit_count--;
}
@@ -925,7 +925,7 @@ extern const uint8_t ff_aic_dc_scale_table[32];
extern const uint8_t ff_h263_chroma_qscale_table[32];

/* rv10.c */
void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
int ff_rv_decode_dc(MpegEncContext *s, int n);
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number);

@@ -348,18 +348,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
switch(avctx->codec_id) {
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
break;
case AV_CODEC_ID_MPEG4:
case AV_CODEC_ID_MSMPEG4V1:
case AV_CODEC_ID_MSMPEG4V2:
case AV_CODEC_ID_MSMPEG4V3:
if (avctx->rc_max_rate >= 15000000) {
avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
} else if(avctx->rc_max_rate >= 2000000) {
avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
} else if(avctx->rc_max_rate >= 384000) {
avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
} else
avctx->rc_buffer_size = 40;
avctx->rc_buffer_size *= 16384;

@@ -3583,8 +3583,11 @@ static int encode_picture(MpegEncContext *s, int picture_number)
ff_msmpeg4_encode_picture_header(s, picture_number);
else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
ff_mpeg4_encode_picture_header(s, picture_number);
else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
ff_rv10_encode_picture_header(s, picture_number);
else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
ret = ff_rv10_encode_picture_header(s, picture_number);
if (ret < 0)
return ret;
}
else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
ff_rv20_encode_picture_header(s, picture_number);
else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
@@ -301,7 +301,7 @@ static void encode_block(NellyMoserEncodeContext *s, unsigned char *output, int

apply_mdct(s);

init_put_bits(&pb, output, output_size * 8);
init_put_bits(&pb, output, output_size);

i = 0;
for (band = 0; band < NELLY_BANDS; band++) {

@@ -91,7 +91,6 @@ static const AVOption avcodec_options[] = {
{"hex", "hex motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_HEX }, INT_MIN, INT_MAX, V|E, "me_method" },
{"umh", "umh motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_UMH }, INT_MIN, INT_MAX, V|E, "me_method" },
{"iter", "iter motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_ITER }, INT_MIN, INT_MAX, V|E, "me_method" },
{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},

@@ -304,7 +304,7 @@ static int encode_slice_plane(AVCodecContext *avctx, int mb_count,
}

blocks_per_slice = mb_count << (2 - chroma);
init_put_bits(&pb, buf, buf_size << 3);
init_put_bits(&pb, buf, buf_size);

encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
encode_ac_coeffs(avctx, &pb, blocks, blocks_per_slice, qmat);
@@ -1058,7 +1058,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
slice_hdr = pkt->data + (slice_hdr - start);
tmp = pkt->data + (tmp - start);
}
init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)));
ret = encode_slice(avctx, pic, &pb, sizes, x, y, q,
mbs_per_slice);
if (ret < 0)

@@ -960,6 +960,8 @@ static av_cold int roq_encode_init(AVCodecContext *avctx)

av_lfg_init(&enc->randctx, 1);

enc->avctx = avctx;

enc->framesSinceKeyframe = 0;
if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
@@ -28,7 +28,7 @@
#include "mpegvideo.h"
#include "put_bits.h"

void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
{
int full_frame= 0;

@@ -48,12 +48,18 @@ void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
/* if multiple packets per frame are sent, the position at which
to display the macroblocks is coded here */
if(!full_frame){
if (s->mb_width * s->mb_height >= (1U << 12)) {
avpriv_report_missing_feature(s->avctx, "Encoding frames with %d (>= 4096) macroblocks",
s->mb_width * s->mb_height);
return AVERROR(ENOSYS);
}
put_bits(&s->pb, 6, 0); /* mb_x */
put_bits(&s->pb, 6, 0); /* mb_y */
put_bits(&s->pb, 12, s->mb_width * s->mb_height);
}

put_bits(&s->pb, 3, 0); /* ignored */
return 0;
}

FF_MPV_GENERIC_CLASS(rv10)

@@ -81,7 +81,7 @@ static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
return ret;

o = avpkt->data;
init_put_bits(&pb, o, buf_size * 8);
init_put_bits(&pb, o, buf_size);
put_bits(&pb, 16, buf_size - AES3_HEADER_LEN);
put_bits(&pb, 2, (avctx->channels - 2) >> 1); // number of channels
put_bits(&pb, 8, 0); // channel ID
@@ -153,7 +153,7 @@ static int decode_q_branch(SnowContext *s, int level, int x, int y){
int l = left->color[0];
int cb= left->color[1];
int cr= left->color[2];
int ref = 0;
unsigned ref = 0;
int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));
@@ -499,6 +499,14 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
int i, ret;
uint32_t *pal;

// make sure there is no aliasing in the following switch
if (s->bpp >= 100 || s->bppcount >= 10) {
av_log(s->avctx, AV_LOG_ERROR,
"Unsupported image parameters: bpp=%d, bppcount=%d\n",
s->bpp, s->bppcount);
return AVERROR_INVALIDDATA;
}

switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
case 11:
if (!s->palette_is_set) {

@@ -628,13 +636,6 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
s->bpp = -1;
}
}
if (s->bpp > 64U) {
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, %d components)\n",
s->bpp, count);
s->bpp = 0;
return AVERROR_INVALIDDATA;
}
break;
case TIFF_SAMPLES_PER_PIXEL:
if (count != 1) {

@@ -926,6 +927,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
}
}
end:
if (s->bpp > 64U) {
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, %d components)\n",
s->bpp, count);
s->bpp = 0;
return AVERROR_INVALIDDATA;
}
bytestream2_seek(&s->gb, start, SEEK_SET);
return 0;
}
@@ -375,7 +375,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
case AV_PIX_FMT_YUVJ411P:
case AV_PIX_FMT_UYYVYY411:
w_align = 32;
h_align = 8;
h_align = 16 * 2;
break;
case AV_PIX_FMT_YUV410P:
if (s->codec_id == AV_CODEC_ID_SVQ1) {
@@ -278,7 +278,8 @@ static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)

// retain segmentation map if it doesn't update
if (s->segmentation.enabled && !s->segmentation.update_map &&
!s->intraonly && !s->keyframe) {
!s->intraonly && !s->keyframe && !s->errorres &&
ctx->active_thread_type != FF_THREAD_FRAME) {
memcpy(f->segmentation_map, s->frames[LAST_FRAME].segmentation_map, sz);
}

@@ -1344,16 +1345,29 @@ static void decode_mode(AVCodecContext *ctx)
vp56_rac_get_prob_branchy(&s->c,
s->prob.segpred[s->above_segpred_ctx[col] +
s->left_segpred_ctx[row7]]))) {
int pred = 8, x;
uint8_t *refsegmap = s->frames[LAST_FRAME].segmentation_map;
if (!s->errorres) {
int pred = 8, x;
uint8_t *refsegmap = s->frames[LAST_FRAME].segmentation_map;

if (!s->last_uses_2pass)
ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0);
for (y = 0; y < h4; y++)
for (x = 0; x < w4; x++)
pred = FFMIN(pred, refsegmap[(y + row) * 8 * s->sb_cols + x + col]);
av_assert1(pred < 8);
b->seg_id = pred;
if (!s->last_uses_2pass)
ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0);
for (y = 0; y < h4; y++) {
int idx_base = (y + row) * 8 * s->sb_cols + col;
for (x = 0; x < w4; x++)
pred = FFMIN(pred, refsegmap[idx_base + x]);
if (!s->segmentation.update_map && ctx->active_thread_type == FF_THREAD_FRAME) {
// FIXME maybe retain reference to previous frame as
// segmap reference instead of copying the whole map
// into a new buffer
memcpy(&s->frames[CUR_FRAME].segmentation_map[idx_base],
&refsegmap[idx_base], w4);
}
}
av_assert1(pred < 8);
b->seg_id = pred;
} else {
b->seg_id = 0;
}

memset(&s->above_segpred_ctx[col], 1, w4);
memset(&s->left_segpred_ctx[row7], 1, h4);

@@ -3742,7 +3756,7 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
if ((res = av_frame_ref(frame, s->refs[ref].f)) < 0)
return res;
*got_frame = 1;
return 0;
return pkt->size;
}
data += res;
size -= res;

@@ -3952,7 +3966,7 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
*got_frame = 1;
}

return 0;
return pkt->size;
}

static void vp9_decode_flush(AVCodecContext *ctx)
@@ -690,6 +690,11 @@ static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
length = offset + get_bits(&s->gb, extra_bits) + 1;
}
prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
if (prefix_code > 39) {
av_log(s->avctx, AV_LOG_ERROR,
"distance prefix code too large: %d\n", prefix_code);
return AVERROR_INVALIDDATA;
}
if (prefix_code < 4) {
distance = prefix_code + 1;
} else {

@@ -1078,7 +1083,7 @@ static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
unsigned int data_size, int is_alpha_chunk)
{
WebPContext *s = avctx->priv_data;
int w, h, ret, i;
int w, h, ret, i, used;

if (!is_alpha_chunk) {
s->lossless = 1;

@@ -1128,8 +1133,16 @@ static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
/* parse transformations */
s->nb_transforms = 0;
s->reduced_width = 0;
used = 0;
while (get_bits1(&s->gb)) {
enum TransformType transform = get_bits(&s->gb, 2);
if (used & (1 << transform)) {
av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
transform);
ret = AVERROR_INVALIDDATA;
goto free_and_return;
}
used |= (1 << transform);
s->transforms[s->nb_transforms++] = transform;
switch (transform) {
case PREDICTOR_TRANSFORM:
@@ -132,8 +132,8 @@ static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
FIRMUL (ff_mlp_firorder_6, 0x14 )
FIRMUL (ff_mlp_firorder_5, 0x10 )
FIRMUL (ff_mlp_firorder_4, 0x0c )
FIRMULREG(ff_mlp_firorder_3, 0x08,10)
FIRMULREG(ff_mlp_firorder_2, 0x04, 9)
FIRMUL (ff_mlp_firorder_3, 0x08 )
FIRMUL (ff_mlp_firorder_2, 0x04 )
FIRMULREG(ff_mlp_firorder_1, 0x00, 8)
LABEL_MANGLE(ff_mlp_firorder_0)":\n\t"
"jmp *%6 \n\t"

@@ -162,8 +162,6 @@ static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
: /* 4*/"r"((x86_reg)mask), /* 5*/"r"(firjump),
/* 6*/"r"(iirjump) , /* 7*/"c"(filter_shift)
, /* 8*/"r"((int64_t)coeff[0])
, /* 9*/"r"((int64_t)coeff[1])
, /*10*/"r"((int64_t)coeff[2])
: "rax", "rdx", "rsi"
#else /* ARCH_X86_32 */
/* 3*/"+m"(blocksize)
@@ -410,11 +410,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
int hi_ver, lo_ver, ret;

/* parse header */
if (len < 1)
return AVERROR_INVALIDDATA;
c->flags = buf[0];
buf++; len--;
if (c->flags & ZMBV_KEYFRAME) {
void *decode_intra = NULL;
c->decode_intra= NULL;

if (len < 6)
return AVERROR_INVALIDDATA;
hi_ver = buf[0];
lo_ver = buf[1];
c->comp = buf[2];
@@ -41,6 +41,11 @@ static int adx_read_packet(AVFormatContext *s, AVPacket *pkt)
AVCodecContext *avctx = s->streams[0]->codec;
int ret, size;

if (avctx->channels <= 0) {
av_log(s, AV_LOG_ERROR, "invalid number of channels %d\n", avctx->channels);
return AVERROR_INVALIDDATA;
}

size = BLOCK_SIZE * avctx->channels;

pkt->pos = avio_tell(s->pb);

@@ -1472,7 +1472,7 @@ static int asf_build_simple_index(AVFormatContext *s, int stream_index)
ff_asf_guid g;
ASFContext *asf = s->priv_data;
int64_t current_pos = avio_tell(s->pb);
int ret = 0;
int64_t ret;

if((ret = avio_seek(s->pb, asf->data_object_offset + asf->data_object_size, SEEK_SET)) < 0) {
return ret;

@@ -1542,7 +1542,7 @@ static int asf_read_seek(AVFormatContext *s, int stream_index,

/* Try using the protocol's read_seek if available */
if (s->pb) {
int ret = avio_seek_time(s->pb, stream_index, pts, flags);
int64_t ret = avio_seek_time(s->pb, stream_index, pts, flags);
if (ret >= 0)
asf_reset_header(s);
if (ret != AVERROR(ENOSYS))
@@ -119,8 +119,12 @@ static int write_header(AVFormatContext *s)
{
AVCodecContext *enc = s->streams[0]->codec;

enc->codec_id = AV_CODEC_ID_G729;
enc->channels = 1;
if ((enc->codec_id != AV_CODEC_ID_G729) || enc->channels != 1) {
av_log(s, AV_LOG_ERROR,
"only codec g729 with 1 channel is supported by this format\n");
return AVERROR(EINVAL);
}

enc->bits_per_coded_sample = 16;
enc->block_align = (enc->bits_per_coded_sample * enc->channels) >> 3;

@@ -133,6 +137,9 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt)
GetBitContext gb;
int i;

if (pkt->size != 10)
return AVERROR(EINVAL);

avio_wl16(pb, SYNC_WORD);
avio_wl16(pb, 8 * 10);
@@ -79,6 +79,7 @@ static int ffm_read_data(AVFormatContext *s,
FFMContext *ffm = s->priv_data;
AVIOContext *pb = s->pb;
int len, fill_size, size1, frame_offset, id;
int64_t last_pos = -1;

size1 = size;
while (size > 0) {

@@ -98,9 +99,11 @@ static int ffm_read_data(AVFormatContext *s,
avio_seek(pb, tell, SEEK_SET);
}
id = avio_rb16(pb); /* PACKET_ID */
if (id != PACKET_ID)
if (id != PACKET_ID) {
if (ffm_resync(s, id) < 0)
return -1;
last_pos = avio_tell(pb);
}
fill_size = avio_rb16(pb);
ffm->dts = avio_rb64(pb);
frame_offset = avio_rb16(pb);

@@ -114,7 +117,9 @@ static int ffm_read_data(AVFormatContext *s,
if (!frame_offset) {
/* This packet has no frame headers in it */
if (avio_tell(pb) >= ffm->packet_size * 3LL) {
avio_seek(pb, -ffm->packet_size * 2LL, SEEK_CUR);
int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos);
seekback = FFMAX(seekback, 0);
avio_seek(pb, -seekback, SEEK_CUR);
goto retry_read;
}
/* This is bad, we cannot find a valid frame header */

@@ -291,6 +296,11 @@ static int ffm2_read_header(AVFormatContext *s)
case MKBETAG('S', 'T', 'V', 'I'):
codec->time_base.num = avio_rb32(pb);
codec->time_base.den = avio_rb32(pb);
if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
codec->time_base.num, codec->time_base.den);
goto fail;
}
codec->width = avio_rb16(pb);
codec->height = avio_rb16(pb);
codec->gop_size = avio_rb16(pb);

@@ -346,7 +356,7 @@ static int ffm2_read_header(AVFormatContext *s)
}

/* get until end of block reached */
while ((avio_tell(pb) % ffm->packet_size) != 0)
while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
avio_r8(pb);

/* init packet demux */

@@ -415,6 +425,11 @@ static int ffm_read_header(AVFormatContext *s)
case AVMEDIA_TYPE_VIDEO:
codec->time_base.num = avio_rb32(pb);
codec->time_base.den = avio_rb32(pb);
if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
codec->time_base.num, codec->time_base.den);
goto fail;
}
codec->width = avio_rb16(pb);
codec->height = avio_rb16(pb);
codec->gop_size = avio_rb16(pb);

@@ -473,7 +488,7 @@ static int ffm_read_header(AVFormatContext *s)
}

/* get until end of block reached */
while ((avio_tell(pb) % ffm->packet_size) != 0)
while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
avio_r8(pb);

/* init packet demux */
@@ -488,7 +488,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
avio_w8(pb, FLV_TAG_TYPE_VIDEO);

flags = enc->codec_tag;
if (flags == 0) {
if (flags <= 0 || flags > 15) {
av_log(s, AV_LOG_ERROR,
"Video codec '%s' is not compatible with FLV\n",
avcodec_get_name(enc->codec_id));

@@ -556,7 +556,7 @@ static int gxf_packet(AVFormatContext *s, AVPacket *pkt) {
}

static int gxf_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
int res = 0;
int64_t res = 0;
uint64_t pos;
uint64_t maxlen = 100 * 1024 * 1024;
AVStream *st = s->streams[0];

@@ -359,7 +359,7 @@ static int idcin_read_seek(AVFormatContext *s, int stream_index,
IdcinDemuxContext *idcin = s->priv_data;

if (idcin->first_pkt_pos > 0) {
int ret = avio_seek(s->pb, idcin->first_pkt_pos, SEEK_SET);
int64_t ret = avio_seek(s->pb, idcin->first_pkt_pos, SEEK_SET);
if (ret < 0)
return ret;
ff_update_cur_dts(s, s->streams[idcin->video_stream_index], 0);
@@ -1907,8 +1907,8 @@ static int matroska_read_header(AVFormatContext *s)
|
||||
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
|
||||
1000000000, track->default_duration, 30000);
|
||||
#if FF_API_R_FRAME_RATE
|
||||
if ( st->avg_frame_rate.num < st->avg_frame_rate.den * 1000L
|
||||
&& st->avg_frame_rate.num > st->avg_frame_rate.den * 5L)
|
||||
if ( st->avg_frame_rate.num < st->avg_frame_rate.den * 1000LL
|
||||
&& st->avg_frame_rate.num > st->avg_frame_rate.den * 5LL)
|
||||
st->r_frame_rate = st->avg_frame_rate;
|
||||
#endif
|
||||
}
|
||||
@@ -2822,7 +2822,7 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
                               int64_t timestamp, int flags)
 {
 MatroskaDemuxContext *matroska = s->priv_data;
-MatroskaTrack *tracks = matroska->tracks.elem;
+MatroskaTrack *tracks = NULL;
 AVStream *st = s->streams[stream_index];
 int i, index, index_sub, index_min;
@@ -2851,6 +2851,7 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
     goto err;

 index_min = index;
+tracks = matroska->tracks.elem;
 for (i=0; i < matroska->tracks.nb_elem; i++) {
     tracks[i].audio.pkt_cnt = 0;
     tracks[i].audio.sub_packet_cnt = 0;
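Deferring the tracks pointer looks cosmetic, but matroska->tracks is an EbmlList whose element array can still be grown (and therefore reallocated) by the parsing the seek path performs before this loop, so a pointer taken at function entry may no longer be valid when it is finally used. Presumably that is the motivation here; in any case the general rule is worth a sketch (the list type and helpers below are illustrative, not the demuxer's real structures):

    #include <stdlib.h>

    /* A growable list: fetch the element pointer only after the last call
     * that may realloc the backing array. */
    typedef struct { int *elem; int nb_elem; } IntList;

    static int list_push(IntList *l, int v)
    {
        int *p = realloc(l->elem, (l->nb_elem + 1) * sizeof(*p));
        if (!p) return -1;
        p[l->nb_elem++] = v;
        l->elem = p;          /* any earlier pointer into l->elem is now stale */
        return 0;
    }

    static void reset_all(IntList *l)
    {
        int *items = l->elem; /* taken after the array can no longer move */
        for (int i = 0; i < l->nb_elem; i++)
            items[i] = 0;
    }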
@@ -2316,7 +2316,7 @@ static int mov_open_dref(AVIOContext **pb, const char *src, MOVDref *ref,
 /* try relative path, we do not try the absolute because it can leak information about our
    system to an attacker */
 if (ref->nlvl_to > 0 && ref->nlvl_from > 0) {
-    char filename[1024];
+    char filename[1025];
     const char *src_path;
     int i, l;
@@ -2342,10 +2342,12 @@ static int mov_open_dref(AVIOContext **pb, const char *src, MOVDref *ref,
 filename[src_path - src] = 0;

 for (i = 1; i < ref->nlvl_from; i++)
-    av_strlcat(filename, "../", 1024);
+    av_strlcat(filename, "../", sizeof(filename));

-av_strlcat(filename, ref->path + l + 1, 1024);
+av_strlcat(filename, ref->path + l + 1, sizeof(filename));

+if (strlen(filename) + 1 == sizeof(filename))
+    return AVERROR(ENOENT);
 if (!avio_open2(pb, filename, AVIO_FLAG_READ, int_cb, NULL))
     return 0;
 }
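The mov_open_dref() change is a classic bounded-concatenation pattern: pass the real buffer capacity (sizeof on the array) rather than a hard-coded constant, and treat a result that exactly fills the buffer as truncation. av_strlcat() always NUL-terminates and returns the length it tried to produce, so truncation can also be detected from its return value; a hedged sketch with an illustrative helper:

    #include "libavutil/avstring.h"

    /* Build a bounded relative path; refuse a truncated result rather than
     * risk opening the wrong file. */
    static int build_relative(char *buf, size_t bufsize, const char *path, int levels)
    {
        buf[0] = '\0';
        for (int i = 0; i < levels; i++)
            av_strlcat(buf, "../", bufsize);
        if (av_strlcat(buf, path, bufsize) >= bufsize)
            return -1;   /* result did not fit */
        return 0;
    }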
@@ -57,7 +57,7 @@ typedef struct {

 static inline int64_t bs_get_v(const uint8_t **bs)
 {
-    int64_t v = 0;
+    uint64_t v = 0;
     int br = 0;
     int c;
@@ -91,7 +91,7 @@ static int mpc8_probe(AVProbeData *p)
 size = bs_get_v(&bs);
 if (size < 2)
     return 0;
-if (bs + size - 2 >= bs_end)
+if (size >= bs_end - bs + 2)
     return AVPROBE_SCORE_EXTENSION - 1; // seems to be valid MPC but no header yet
 if (header_found) {
     if (size < 11 || size > 28)
@@ -108,7 +108,7 @@ static int mpc8_probe(AVProbeData *p)

 static inline int64_t gb_get_v(GetBitContext *gb)
 {
-    int64_t v = 0;
+    uint64_t v = 0;
     int bits = 0;
     while(get_bits1(gb) && bits < 64-7){
         v <<= 7;
@@ -223,6 +223,10 @@ static int mpc8_read_header(AVFormatContext *s)
 while(!url_feof(pb)){
     pos = avio_tell(pb);
     mpc8_get_chunk_header(pb, &tag, &size);
+    if (size < 0) {
+        av_log(s, AV_LOG_ERROR, "Invalid chunk length\n");
+        return AVERROR_INVALIDDATA;
+    }
     if(tag == TAG_STREAMHDR)
         break;
     mpc8_handle_chunk(s, tag, pos, size);
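The probe change rewrites "bs + size - 2 >= bs_end" as "size >= bs_end - bs + 2". The two are algebraically equivalent, but the first form adds an attacker-controlled size to a pointer, which can wrap and is undefined behaviour as soon as the sum leaves the buffer; comparing the size against the number of bytes actually remaining keeps the arithmetic well defined. A minimal sketch of the safe shape:

    #include <stdint.h>

    /* Never form 'ptr + untrusted_size' first; compare against the bytes left. */
    static int fits_in_buffer(const uint8_t *p, const uint8_t *end, int64_t size)
    {
        return size >= 0 && size <= end - p;
    }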
@@ -371,7 +371,7 @@ static int mv_read_packet(AVFormatContext *avctx, AVPacket *pkt)
 AVStream *st = avctx->streams[mv->stream_index];
 const AVIndexEntry *index;
 int frame = mv->frame[mv->stream_index];
-int ret;
+int64_t ret;
 uint64_t pos;

 if (frame < st->nb_index_entries) {
@@ -172,7 +172,7 @@ static int nprobe(AVFormatContext *s, uint8_t *enc_header, unsigned size,
 taglen  = AV_RB32(&enc_header[pos + 32]);
 datalen = AV_RB32(&enc_header[pos + 36]) >> 4;

-pos += 44L + taglen;
+pos += 44LL + taglen;

 if (pos + (((uint64_t)datalen) << 4) > size)
     return -1;
@@ -462,7 +462,7 @@ static int oma_read_seek(struct AVFormatContext *s,
                          int stream_index, int64_t timestamp, int flags)
 {
 OMAContext *oc = s->priv_data;
-int err = ff_pcm_read_seek(s, stream_index, timestamp, flags);
+int64_t err = ff_pcm_read_seek(s, stream_index, timestamp, flags);

 if (!oc->encrypted)
     return err;
@@ -44,6 +44,10 @@ typedef struct {

 /* in ms */
 #define BUFFER_DURATION 0
+/* the header needs at most 7 + 4 + 12 B */
+#define MAX_HEADER_SIZE (7 + 4 + 12)
+/* UINT16_MAX is the maximal chunk size */
+#define MAX_PACKET_SIZE (UINT16_MAX - MAX_HEADER_SIZE)


 static void put_str(AVIOContext *s, const char *tag)
@@ -391,6 +395,10 @@ static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int
 /* Well, I spent some time finding the meaning of these bits. I am
    not sure I understood everything, but it works !! */
 #if 1
+if (size > MAX_PACKET_SIZE) {
+    av_log(s, AV_LOG_ERROR, "Muxing packets larger than 64 kB (%d) is not supported\n", size);
+    return AVERROR_PATCHWELCOME;
+}
 write_packet_header(s, stream, size + 7 + (size >= 0x4000)*4, key_frame);
 /* bit 7: '1' if final packet of a frame converted in several packets */
 avio_w8(pb, 0x81);
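The RealMedia muxer change rejects oversized packets up front because the chunk length written into the stream is a 16-bit field, so a packet plus its (at most 7 + 4 + 12 byte) header must fit in UINT16_MAX bytes; writing anything larger would silently corrupt the file. A sketch of the bound, reusing the macros from the hunk above (the helper name is illustrative):

    #include <stdint.h>

    #define MAX_HEADER_SIZE (7 + 4 + 12)                   /* worst-case packet header */
    #define MAX_PACKET_SIZE (UINT16_MAX - MAX_HEADER_SIZE) /* 16-bit length field      */

    static int rm_packet_size_ok(int size)
    {
        return size > 0 && size <= MAX_PACKET_SIZE;
    }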
@@ -317,7 +317,7 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
 int err;

 size = avio_rl32(s->pb) - 4;
-if (!size || size + 4L > frame_size) {
+if (!size || size + 4LL > frame_size) {
     av_log(s, AV_LOG_ERROR, "Invalid audio part size\n");
     return AVERROR_INVALIDDATA;
 }
@@ -184,6 +184,8 @@ static int thp_read_packet(AVFormatContext *s,
     pkt->stream_index = thp->video_stream_index;
 } else {
     ret = av_get_packet(pb, pkt, thp->audiosize);
+    if (ret < 0)
+        return ret;
     if (ret != thp->audiosize) {
         av_free_packet(pkt);
         return AVERROR(EIO);
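av_get_packet() returns either the number of bytes read or a negative AVERROR code; testing only "ret != thp->audiosize" folds every genuine I/O error into EIO, while the added early return keeps the original cause. Roughly (the helper below is a sketch, the field name audiosize mirrors the hunk):

    /* Distinguish a real read error from a short read. */
    static int read_audio_chunk(AVIOContext *pb, AVPacket *pkt, int audiosize)
    {
        int ret = av_get_packet(pb, pkt, audiosize);
        if (ret < 0)
            return ret;            /* propagate the original AVERROR code    */
        if (ret != audiosize) {
            av_free_packet(pkt);   /* short read: discard the partial packet */
            return AVERROR(EIO);
        }
        return 0;
    }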
@@ -118,8 +118,10 @@ static int tta_read_header(AVFormatContext *s)
 ffio_init_checksum(s->pb, tta_check_crc, UINT32_MAX);
 for (i = 0; i < c->totalframes; i++) {
     uint32_t size = avio_rl32(s->pb);
-    av_add_index_entry(st, framepos, i * c->frame_size, size, 0,
-                       AVINDEX_KEYFRAME);
+    int r;
+    if ((r = av_add_index_entry(st, framepos, i * c->frame_size, size, 0,
+                                AVINDEX_KEYFRAME)) < 0)
+        return r;
     framepos += size;
 }
 crc = ffio_get_checksum(s->pb) ^ UINT32_MAX;
@@ -153,6 +155,11 @@ static int tta_read_packet(AVFormatContext *s, AVPacket *pkt)
 if (c->currentframe >= c->totalframes)
     return AVERROR_EOF;

+if (st->nb_index_entries < c->totalframes) {
+    av_log(s, AV_LOG_ERROR, "Index entry disappeared\n");
+    return AVERROR_INVALIDDATA;
+}
+
 size = st->index_entries[c->currentframe].size;

 ret = av_get_packet(s->pb, pkt, size);
@@ -2847,8 +2847,8 @@ static int get_std_framerate(int i)
  * And there are "variable" fps files this needs to detect as well. */
 static int tb_unreliable(AVCodecContext *c)
 {
-    if (c->time_base.den >= 101L  * c->time_base.num ||
-        c->time_base.den <  5L    * c->time_base.num ||
+    if (c->time_base.den >= 101LL * c->time_base.num ||
+        c->time_base.den <  5LL   * c->time_base.num ||
 //      c->codec_tag == AV_RL32("DIVX") ||
 //      c->codec_tag == AV_RL32("XVID") ||
         c->codec_tag == AV_RL32("mp4v") ||
@@ -265,7 +265,7 @@ static int vqf_read_seek(AVFormatContext *s,
 {
 VqfContext *c = s->priv_data;
 AVStream *st;
-int ret;
+int64_t ret;
 int64_t pos;

 st = s->streams[stream_index];
@@ -43,11 +43,17 @@
 #elif HAVE_ARMV5TE
         .arch armv5te
 #endif
+#if HAVE_AS_OBJECT_ARCH
+ELF     .object_arch armv4
+#endif

 #if HAVE_NEON
         .fpu neon
+ELF     .eabi_attribute 10, 0 @ suppress Tag_FP_arch
+ELF     .eabi_attribute 12, 0 @ suppress Tag_Advanced_SIMD_arch
 #elif HAVE_VFP
         .fpu vfp
+ELF     .eabi_attribute 10, 0 @ suppress Tag_FP_arch
 #endif

         .syntax unified
@@ -75,8 +75,8 @@ static int read_number(const AVOption *o, void *dst, double *num, int *den, int6
 {
 switch (o->type) {
 case AV_OPT_TYPE_FLAGS:     *intnum = *(unsigned int*)dst;return 0;
-case AV_OPT_TYPE_PIXEL_FMT:
-case AV_OPT_TYPE_SAMPLE_FMT:
+case AV_OPT_TYPE_PIXEL_FMT: *intnum = *(enum AVPixelFormat *)dst;return 0;
+case AV_OPT_TYPE_SAMPLE_FMT:*intnum = *(enum AVSampleFormat*)dst;return 0;
 case AV_OPT_TYPE_INT:       *intnum = *(int *)dst;return 0;
 case AV_OPT_TYPE_CHANNEL_LAYOUT:
 case AV_OPT_TYPE_DURATION:
@@ -110,9 +110,9 @@ static int write_number(void *obj, const AVOption *o, void *dst, double num, int
 }

 switch (o->type) {
+case AV_OPT_TYPE_PIXEL_FMT: *(enum AVPixelFormat *)dst = llrint(num/den) * intnum; break;
+case AV_OPT_TYPE_SAMPLE_FMT:*(enum AVSampleFormat*)dst = llrint(num/den) * intnum; break;
 case AV_OPT_TYPE_FLAGS:
-case AV_OPT_TYPE_PIXEL_FMT:
-case AV_OPT_TYPE_SAMPLE_FMT:
 case AV_OPT_TYPE_INT:       *(int *)dst= llrint(num/den)*intnum; break;
 case AV_OPT_TYPE_DURATION:
 case AV_OPT_TYPE_CHANNEL_LAYOUT:
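The read_number()/write_number() hunks stop funnelling pixel- and sample-format options through the plain int case. The option field is declared as enum AVPixelFormat or enum AVSampleFormat, and C leaves an enum's underlying type to the implementation, so accessing it through an int pointer is only safe when the sizes happen to match; going through a pointer of the declared type avoids relying on that. A compact illustration (the struct and helpers are made up, not libavutil API):

    #include "libavutil/pixfmt.h"

    typedef struct { enum AVPixelFormat pix_fmt; } Ctx; /* illustrative option target */

    static void set_pix_fmt(void *dst, long long value)
    {
        /* write through the declared type, not through *(int *)dst */
        *(enum AVPixelFormat *)dst = (enum AVPixelFormat)value;
    }

    static long long get_pix_fmt(const void *dst)
    {
        return *(const enum AVPixelFormat *)dst;
    }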
@@ -84,14 +84,14 @@ int swri_dither_init(SwrContext *s, enum AVSampleFormat out_fmt, enum AVSampleFo
 in_fmt  = av_get_packed_sample_fmt( in_fmt);

 if(in_fmt == AV_SAMPLE_FMT_FLT || in_fmt == AV_SAMPLE_FMT_DBL){
-    if(out_fmt == AV_SAMPLE_FMT_S32) scale = 1.0/(1L<<31);
-    if(out_fmt == AV_SAMPLE_FMT_S16) scale = 1.0/(1L<<15);
-    if(out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1.0/(1L<< 7);
+    if(out_fmt == AV_SAMPLE_FMT_S32) scale = 1.0/(1LL<<31);
+    if(out_fmt == AV_SAMPLE_FMT_S16) scale = 1.0/(1LL<<15);
+    if(out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1.0/(1LL<< 7);
 }
 if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_S32 && (s->dither.output_sample_bits&31)) scale = 1;
-if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_S16) scale = 1L<<16;
-if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1L<<24;
-if(in_fmt == AV_SAMPLE_FMT_S16 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1L<<8;
+if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_S16) scale = 1<<16;
+if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1<<24;
+if(in_fmt == AV_SAMPLE_FMT_S16 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1<<8;

 scale *= s->dither.scale;
@@ -618,14 +618,25 @@ static av_cold int initFilter(int16_t **outFilter, int32_t **filterPos,
 }

 if ((*filterPos)[i] + filterSize > srcW) {
-    int shift = (*filterPos)[i] + filterSize - srcW;
-    // move filter coefficients right to compensate for filterPos
-    for (j = filterSize - 2; j >= 0; j--) {
-        int right = FFMIN(j + shift, filterSize - 1);
-        filter[i * filterSize + right] += filter[i * filterSize + j];
-        filter[i * filterSize + j] = 0;
-    }
-    (*filterPos)[i]= srcW - filterSize;
+    int shift = (*filterPos)[i] + FFMIN(filterSize - srcW, 0);
+    int64_t acc = 0;
+
+    for (j = filterSize - 1; j >= 0; j--) {
+        if ((*filterPos)[i] + j >= srcW) {
+            acc += filter[i * filterSize + j];
+            filter[i * filterSize + j] = 0;
+        }
+    }
+
+    for (j = filterSize - 1; j >= 0; j--) {
+        if (j < shift) {
+            filter[i * filterSize + j] = 0;
+        } else {
+            filter[i * filterSize + j] = filter[i * filterSize + j - shift];
+        }
+    }
+
+    (*filterPos)[i]-= shift;
+    filter[i * filterSize + srcW - 1 - (*filterPos)[i]] += acc;
 }
 }
@@ -1328,7 +1339,7 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
 c->chrDstW = FF_CEIL_RSHIFT(dstW, c->chrDstHSubSample);
 c->chrDstH = FF_CEIL_RSHIFT(dstH, c->chrDstVSubSample);

-FF_ALLOC_OR_GOTO(c, c->formatConvBuffer, FFALIGN(srcW*2+78, 16) * 2, fail);
+FF_ALLOCZ_OR_GOTO(c, c->formatConvBuffer, FFALIGN(srcW*2+78, 16) * 2, fail);

 /* unscaled special cases */
 if (unscaled && !usesHFilter && !usesVFilter &&
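The initFilter() rewrite changes how horizontal filters are clamped at the right edge of the source: instead of forcing filterPos to srcW - filterSize (which goes negative and mis-weights the taps once the filter is wider than the picture), the new code folds every coefficient that would read past srcW into an accumulator, shifts the remaining taps, and finally adds the accumulated weight onto the tap that samples the last valid pixel, so the filter's total weight is preserved. A self-contained sketch of the folding part only (names, types and the simplified handling are illustrative, not the scaler's):

    #include <stdint.h>

    /* Clamp a filter whose window [pos, pos+len) sticks out past src_w:
     * zero the out-of-range taps, remember their summed weight, and add it
     * to the tap that reads the last valid pixel. Assumes 0 <= pos < src_w;
     * the real patch additionally shifts the taps when len > src_w. */
    static void clamp_filter_right(int32_t *coef, int len, int pos, int src_w)
    {
        if (pos + len <= src_w)
            return;                      /* nothing sticks out */
        int64_t acc = 0;
        for (int j = len - 1; j >= 0; j--) {
            if (pos + j >= src_w) {
                acc += coef[j];          /* weight that would read past the edge */
                coef[j] = 0;
            }
        }
        coef[src_w - 1 - pos] += acc;    /* keep the total weight unchanged */
    }

The related sws_init_context() hunk switches the format conversion buffer from FF_ALLOC_OR_GOTO to FF_ALLOCZ_OR_GOTO so the buffer starts out zero-initialized.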
@@ -35,15 +35,15 @@
 0, 330, 330, 1, 4295, 0xf71b0b38, S=1, 1024, 0xf351799f
 0, 340, 340, 1, 2044, 0x5adcb93b, S=1, 1024, 0xf351799f
 0, 350, 350, 1, 3212, 0xcf79eeed, S=1, 1024, 0xf351799f
-0, 360, 360, 1, 2281, 0x68464d30, S=1, 1024, 0xf351799f
+0, 360, 360, 1, 2292, 0xb4386334, S=1, 1024, 0xf351799f
 0, 370, 370, 1, 3633, 0x0010992f, S=1, 1024, 0xf351799f
 0, 380, 380, 1, 3552, 0x23697490, S=1, 1024, 0xf351799f
 0, 390, 390, 1, 3690, 0x62afdbb8, S=1, 1024, 0xf351799f
-0, 400, 400, 1, 1558, 0x7a13e53b, S=1, 1024, 0xf351799f
-0, 410, 410, 1, 940, 0xb1b6cba2, S=1, 1024, 0xf351799f
+0, 400, 400, 1, 1559, 0x5baef54a, S=1, 1024, 0xf351799f
+0, 410, 410, 1, 954, 0xca75ca79, S=1, 1024, 0xf351799f
 0, 420, 420, 1, 273, 0x3687799b, S=1, 1024, 0xf351799f
 0, 430, 430, 1, 930, 0x29f3b0c4, S=1, 1024, 0xf351799f
-0, 440, 440, 1, 271, 0xe7af807c, S=1, 1024, 0xf351799f
+0, 440, 440, 1, 271, 0x305e8094, S=1, 1024, 0xf351799f
 0, 450, 450, 1, 196, 0xf5ab51ee, S=1, 1024, 0xf351799f
 0, 460, 460, 1, 4299, 0x67ec0d55, S=1, 1024, 0xf351799f
 0, 470, 470, 1, 4895, 0xb394406c, S=1, 1024, 0xf351799f
@@ -56,7 +56,7 @@
 0, 540, 540, 1, 5179, 0x860fc6a1, S=1, 1024, 0xf351799f
 0, 550, 550, 1, 5046, 0xce9183d3, S=1, 1024, 0xf351799f
 0, 560, 560, 1, 5140, 0xa6d7b9af, S=1, 1024, 0xf351799f
-0, 570, 570, 1, 4289, 0xb415f717, S=1, 1024, 0xf351799f
+0, 570, 570, 1, 4301, 0x03b6ef3f, S=1, 1024, 0xf351799f
 0, 580, 580, 1, 5079, 0xa8d59e01, S=1, 1024, 0xf351799f
 0, 590, 590, 1, 5284, 0xea34e3b3, S=1, 1024, 0xf351799f
 0, 600, 600, 1, 5426, 0x556a15cd, S=1, 1024, 0xf351799f
@@ -35,15 +35,15 @@
 0, 330, 330, 1, 4295, 0xc1850a80, S=1, 1024, 0xcfc8799f
 0, 340, 340, 1, 2044, 0x0440c072, S=1, 1024, 0xcfc8799f
 0, 350, 350, 1, 3212, 0xe91af08f, S=1, 1024, 0xcfc8799f
-0, 360, 360, 1, 2281, 0x6a414aa1, S=1, 1024, 0xcfc8799f
+0, 360, 360, 1, 2292, 0x6765633e, S=1, 1024, 0xcfc8799f
 0, 370, 370, 1, 3633, 0xac779aa3, S=1, 1024, 0xcfc8799f
 0, 380, 380, 1, 3552, 0xed2c75b2, S=1, 1024, 0xcfc8799f
 0, 390, 390, 1, 3690, 0x2020dd0d, S=1, 1024, 0xcfc8799f
-0, 400, 400, 1, 1558, 0x2c14e4b2, S=1, 1024, 0xcfc8799f
-0, 410, 410, 1, 940, 0x4927cd90, S=1, 1024, 0xcfc8799f
+0, 400, 400, 1, 1559, 0x596ef330, S=1, 1024, 0xcfc8799f
+0, 410, 410, 1, 954, 0xac12c9c5, S=1, 1024, 0xcfc8799f
 0, 420, 420, 1, 273, 0x138c7831, S=1, 1024, 0xcfc8799f
 0, 430, 430, 1, 930, 0xf1c3ae3f, S=1, 1024, 0xcfc8799f
-0, 440, 440, 1, 271, 0x6d338044, S=1, 1024, 0xcfc8799f
+0, 440, 440, 1, 271, 0x921a80af, S=1, 1024, 0xcfc8799f
 0, 450, 450, 1, 196, 0xa5de5322, S=1, 1024, 0xcfc8799f
 0, 460, 460, 1, 4299, 0x5bac0d86, S=1, 1024, 0xcfc8799f
 0, 470, 470, 1, 4895, 0xc43639a6, S=1, 1024, 0xcfc8799f
@@ -56,7 +56,7 @@
 0, 540, 540, 1, 5179, 0x97aac3a1, S=1, 1024, 0xcfc8799f
 0, 550, 550, 1, 5046, 0x836a80cd, S=1, 1024, 0xcfc8799f
 0, 560, 560, 1, 5140, 0xa725c1e7, S=1, 1024, 0xcfc8799f
-0, 570, 570, 1, 4289, 0x7b3afbc0, S=1, 1024, 0xcfc8799f
+0, 570, 570, 1, 4301, 0x0203f239, S=1, 1024, 0xcfc8799f
 0, 580, 580, 1, 5079, 0xb2e7a2de, S=1, 1024, 0xcfc8799f
 0, 590, 590, 1, 5284, 0xb757dfe1, S=1, 1024, 0xcfc8799f
 0, 600, 600, 1, 5426, 0xf9f11e57, S=1, 1024, 0xcfc8799f