Compare commits
136 Commits
Author | SHA1 | Date
---|---|---
| | 377fabc9e6 |
| | 41eda87048 |
| | e6ac11e417 |
| | 2cac35086c |
| | af343f5cdd |
| | 391e0fc6c9 |
| | caeca53a09 |
| | 760929117d |
| | acada70ffb |
| | 4f91c45644 |
| | e4831bb9a6 |
| | db5b454c3d |
| | 301761792a |
| | 440e98574b |
| | 604d72aa0d |
| | 03ddc26066 |
| | 801eff785a |
| | b59ee5dcf1 |
| | e163d884ef |
| | 56cc629a64 |
| | 685321e4bd |
| | 3f1a58db6f |
| | 597d709eb4 |
| | dd0c5e0fa9 |
| | ad02537746 |
| | 3bc9cfe66e |
| | 910c1f2352 |
| | 55065315ca |
| | 8081879655 |
| | a39c6bf1b8 |
| | 884a9b0d29 |
| | 4457e6137d |
| | 08d9fd611e |
| | 5fa739e685 |
| | b143844ea0 |
| | 10ff052c60 |
| | 4ede95e69c |
| | ce8910d861 |
| | 3d0c9c9af6 |
| | f3f22f183f |
| | bfbff1c748 |
| | 7fd7950174 |
| | 700fb8c8dd |
| | 9f80712454 |
| | fe9cbf582b |
| | 642d758a2d |
| | aa45b90804 |
| | 549b8083d6 |
| | ec6719f655 |
| | 11ecd8574a |
| | 5754176b5b |
| | fb3189ce8b |
| | 8168a7cec9 |
| | 562d6fd5b5 |
| | dd14723602 |
| | 9474c93028 |
| | 7e070cf202 |
| | 1b48a426a9 |
| | e3e369f696 |
| | 6996a2f796 |
| | 05f5a2eb62 |
| | 4a636a5e43 |
| | 44da556815 |
| | aa097b4d5f |
| | 8148833193 |
| | 3c0f84402b |
| | 601fa56582 |
| | c0df6a24ce |
| | 2d63f9b4ef |
| | 4c849c6991 |
| | 42c3a3719b |
| | 7a0ff7566b |
| | 10c244cc89 |
| | 99008ba366 |
| | a81c1ea2eb |
| | 0892a6340f |
| | d3e2f35f7a |
| | e39fc137ae |
| | a2ae183a38 |
| | 80b8dc30dc |
| | 7b91e52eb9 |
| | e28814e0e1 |
| | d6e250abfc |
| | 61ece41372 |
| | b6c5848a1f |
| | b6ba39f931 |
| | 77d43bf42d |
| | 899d95efe1 |
| | 8812b5f164 |
| | f31170d4e7 |
| | 0173a7966b |
| | a60eb6ef12 |
| | 8582e6e9a3 |
| | 9a5e81235e |
| | c497d71a02 |
| | 0054d70f23 |
| | b102d5d97d |
| | 858c3158b5 |
| | 5e87fa347c |
| | 6a441ee78e |
| | 316589e1db |
| | 35bf5f7966 |
| | 89409be50c |
| | a4bf9033c3 |
| | 8502b4aef6 |
| | 03e404740e |
| | 688da036b1 |
| | c761e144f6 |
| | b3e5c8de6a |
| | ee6c1670df |
| | 9e4a68a76c |
| | 25594f0018 |
| | a85c3fff37 |
| | 0f5840b51a |
| | 1285fe5530 |
| | 0aefcb6aa8 |
| | 64bc5f3bf7 |
| | b61e311b0e |
| | ee66a7198e |
| | 50336dc4f1 |
| | 269dbc5359 |
| | 850298ef25 |
| | 628b82294a |
| | 75d8cccf0e |
| | d87997b56f |
| | b15e85d820 |
| | 654b24f68a |
| | 2f2fd8c6d1 |
| | c5f7c755cf |
| | b581580bd1 |
| | 3313f31f01 |
| | c71c77e56f |
| | 08c81f7365 |
| | 50073e2395 |
| | 3fc967f6c7 |
| | 26ac878cc2 |
Doxyfile (2)

```diff
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER = 0.8.11
+PROJECT_NUMBER = 0.8.13
 
 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
 # base path where the generated documentation will be put.
```

```diff
@@ -57,7 +57,7 @@ AVFormatContext *avformat_opts;
 struct SwsContext *sws_opts;
 AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts;
 
-static const int this_year = 2011;
+static const int this_year = 2013;
 
 void init_opts(void)
 {
```
configure (4, vendored)

```diff
@@ -1057,6 +1057,7 @@ HAVE_LIST="
 dlfcn_h
 dlopen
 dos_paths
+dxva_h
 ebp_available
 ebx_available
 exp2
@@ -2378,7 +2379,7 @@ check_host_cflags -std=c99
 check_host_cflags -Wall
 
 case "$arch" in
-alpha|ia64|mips|parisc|sparc)
+alpha|ia64|mips|parisc|ppc|sparc)
 spic=$shared
 ;;
 x86)
@@ -2859,6 +2860,7 @@ check_func_headers windows.h MapViewOfFile
 check_func_headers windows.h VirtualAlloc
 
 check_header dlfcn.h
+check_header dxva.h
 check_header dxva2api.h
 check_header libcrystalhd/libcrystalhd_if.h
 check_header malloc.h
```
```diff
@@ -299,6 +299,10 @@ prefix is ``ffmpeg2pass''. The complete file name will be
 @file{PREFIX-N.log}, where N is a number specific to the output
 stream.
 
+Note that this option is overwritten by a local option of the same name
+when using @code{-vcodec libx264}. That option maps to the x264 option stats
+which has a different syntax.
+
 @item -newvideo
 Add a new video stream to the current output stream.
 
```
ffmpeg.c (12)

```diff
@@ -313,6 +313,7 @@ typedef struct AVOutputStream {
 #endif
 
 int sws_flags;
+char *forced_key_frames;
 } AVOutputStream;
 
 static AVOutputStream **output_streams_for_file[MAX_FILES] = { NULL };
@@ -2336,6 +2337,9 @@ static int transcode(AVFormatContext **output_files,
 "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n");
 }
 
+if (ost->forced_key_frames)
+parse_forced_key_frames(ost->forced_key_frames, ost, codec);
+
 #if CONFIG_AVFILTER
 if (configure_video_filters(ist, ost)) {
 fprintf(stderr, "Error opening filters!\n");
@@ -2857,6 +2861,7 @@ static int transcode(AVFormatContext **output_files,
 av_freep(&ost->st->codec->subtitle_header);
 av_free(ost->resample_frame.data[0]);
 av_free(ost->forced_kf_pts);
+av_free(ost->forced_key_frames);
 if (ost->video_resample)
 sws_freeContext(ost->img_resample_ctx);
 if (ost->resample)
@@ -3655,8 +3660,10 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
 }
 }
 
-if (forced_key_frames)
-parse_forced_key_frames(forced_key_frames, ost, video_enc);
+if (forced_key_frames) {
+ost->forced_key_frames = forced_key_frames;
+forced_key_frames = NULL;
+}
 }
 if (video_language) {
 av_dict_set(&st->metadata, "language", video_language, 0);
@@ -3666,7 +3673,6 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
 /* reset some key parameters */
 video_disable = 0;
 av_freep(&video_codec_name);
-av_freep(&forced_key_frames);
 video_stream_copy = 0;
 frame_pix_fmt = PIX_FMT_NONE;
 }
```
```diff
@@ -44,7 +44,7 @@ typedef struct EightSvxContext {
 /* buffer used to store the whole audio decoded/interleaved chunk,
  * which is sent with the first packet */
 uint8_t *samples;
-size_t samples_size;
+int64_t samples_size;
 int samples_idx;
 } EightSvxContext;
 
```
```diff
@@ -568,6 +568,11 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
 output_scale_factor = 1.0;
 }
 
+if (avctx->channels > MAX_CHANNELS) {
+av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
+return AVERROR_INVALIDDATA;
+}
+
 AAC_INIT_VLC_STATIC( 0, 304);
 AAC_INIT_VLC_STATIC( 1, 270);
 AAC_INIT_VLC_STATIC( 2, 550);
@@ -1694,7 +1699,7 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
 int w, filt, m, i;
 int bottom, top, order, start, end, size, inc;
 float lpc[TNS_MAX_ORDER];
-float tmp[TNS_MAX_ORDER];
+float tmp[TNS_MAX_ORDER + 1];
 
 for (w = 0; w < ics->num_windows; w++) {
 bottom = ics->num_swb;
```
```diff
@@ -1183,14 +1183,15 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
 {
 int i, n;
 const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
+const int step = 128 >> div;
 float *v;
 for (i = 0; i < 32; i++) {
-if (*v_off < 128 >> div) {
+if (*v_off < step) {
 int saved_samples = (1280 - 128) >> div;
 memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
-*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - (128 >> div);
+*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
 } else {
-*v_off -= 128 >> div;
+*v_off -= step;
 }
 v = v0 + *v_off;
 if (div) {
```
```diff
@@ -778,9 +778,13 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
 {
 ADPCMContext *c = avctx->priv_data;
+unsigned int min_channels = 1;
 unsigned int max_channels = 2;
 
 switch(avctx->codec->id) {
+case CODEC_ID_ADPCM_EA:
+min_channels = 2;
+break;
 case CODEC_ID_ADPCM_EA_R1:
 case CODEC_ID_ADPCM_EA_R2:
 case CODEC_ID_ADPCM_EA_R3:
@@ -788,8 +792,10 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
 max_channels = 6;
 break;
 }
-if(avctx->channels > max_channels){
-return -1;
+
+if (avctx->channels < min_channels || avctx->channels > max_channels) {
+av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
+return AVERROR(EINVAL);
 }
 
 switch(avctx->codec->id) {
```
```diff
@@ -664,10 +664,9 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
 alac->numchannels = alac->avctx->channels;
 
 /* initialize from the extradata */
-if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) {
-av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n",
-ALAC_EXTRADATA_SIZE);
-return -1;
+if (alac->avctx->extradata_size < ALAC_EXTRADATA_SIZE) {
+av_log(avctx, AV_LOG_ERROR, "alac: extradata is too small\n");
+return AVERROR_INVALIDDATA;
 }
 if (alac_set_info(alac)) {
 av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n");
```
```diff
@@ -551,12 +551,15 @@ static void get_block_sizes(ALSDecContext *ctx, unsigned int *div_blocks,
 
 /** Read the block data for a constant block
  */
-static void read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
+static int read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 {
 ALSSpecificConfig *sconf = &ctx->sconf;
 AVCodecContext *avctx = ctx->avctx;
 GetBitContext *gb = &ctx->gb;
 
+if (bd->block_length <= 0)
+return -1;
+
 *bd->raw_samples = 0;
 *bd->const_block = get_bits1(gb); // 1 = constant value, 0 = zero block (silence)
 bd->js_blocks = get_bits1(gb);
@@ -571,6 +574,8 @@ static void read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 
 // ensure constant block decoding by reusing this field
 *bd->const_block = 1;
+
+return 0;
 }
 
 
@@ -650,6 +655,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 for (k = 1; k < sub_blocks; k++)
 s[k] = s[k - 1] + decode_rice(gb, 0);
 }
+for (k = 1; k < sub_blocks; k++)
+if (s[k] > 32) {
+av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
+return AVERROR_INVALIDDATA;
+}
 
 if (get_bits1(gb))
 *bd->shift_lsbs = get_bits(gb, 4) + 1;
@@ -662,6 +672,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
 2, sconf->max_order + 1));
 *bd->opt_order = get_bits(gb, opt_order_length);
+if (*bd->opt_order > sconf->max_order) {
+*bd->opt_order = sconf->max_order;
+av_log(avctx, AV_LOG_ERROR, "Predictor order too large!\n");
+return AVERROR_INVALIDDATA;
+}
 } else {
 *bd->opt_order = sconf->max_order;
 }
@@ -694,6 +709,10 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 int rice_param = parcor_rice_table[sconf->coef_table][k][1];
 int offset = parcor_rice_table[sconf->coef_table][k][0];
 quant_cof[k] = decode_rice(gb, rice_param) + offset;
+if (quant_cof[k] < -64 || quant_cof[k] > 63) {
+av_log(avctx, AV_LOG_ERROR, "quant_cof %d is out of range\n", quant_cof[k]);
+return AVERROR_INVALIDDATA;
+}
 }
 
 // read coefficients 20 to 126
@@ -726,7 +745,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 bd->ltp_gain[0] = decode_rice(gb, 1) << 3;
 bd->ltp_gain[1] = decode_rice(gb, 2) << 3;
 
-r = get_unary(gb, 0, 4);
+r = get_unary(gb, 0, 3);
 c = get_bits(gb, 2);
 bd->ltp_gain[2] = ltp_gain_values[r][c];
 
@@ -755,7 +774,6 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 int delta[8];
 unsigned int k [8];
 unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
-unsigned int i = start;
 
 // read most significant bits
 unsigned int high;
@@ -766,29 +784,30 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 
 current_res = bd->raw_samples + start;
 
-for (sb = 0; sb < sub_blocks; sb++, i = 0) {
+for (sb = 0; sb < sub_blocks; sb++) {
+unsigned int sb_len = sb_length - (sb ? 0 : start);
 
 k [sb] = s[sb] > b ? s[sb] - b : 0;
 delta[sb] = 5 - s[sb] + k[sb];
 
-ff_bgmc_decode(gb, sb_length, current_res,
+ff_bgmc_decode(gb, sb_len, current_res,
 delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);
 
-current_res += sb_length;
+current_res += sb_len;
 }
 
 ff_bgmc_decode_end(gb);
 
 
 // read least significant bits and tails
-i = start;
 current_res = bd->raw_samples + start;
 
-for (sb = 0; sb < sub_blocks; sb++, i = 0) {
+for (sb = 0; sb < sub_blocks; sb++, start = 0) {
 unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
 unsigned int cur_k = k[sb];
 unsigned int cur_s = s[sb];
 
-for (; i < sb_length; i++) {
+for (; start < sb_length; start++) {
 int32_t res = *current_res;
 
 if (res == cur_tail_code) {
@@ -956,7 +975,8 @@ static int read_block(ALSDecContext *ctx, ALSBlockData *bd)
 if (read_var_block_data(ctx, bd))
 return -1;
 } else {
-read_const_block_data(ctx, bd);
+if (read_const_block_data(ctx, bd) < 0)
+return -1;
 }
 
 return 0;
```
```diff
@@ -366,7 +366,7 @@ int ff_ass_split_override_codes(const ASSCodesCallbacks *callbacks, void *priv,
 char new_line[2];
 int text_len = 0;
 
-while (*buf) {
+while (buf && *buf) {
 if (text && callbacks->text &&
 (sscanf(buf, "\\%1[nN]", new_line) == 1 ||
 !strncmp(buf, "{\\", 2))) {
```
```diff
@@ -160,6 +160,7 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
 AvsContext *const avs = avctx->priv_data;
 avctx->pix_fmt = PIX_FMT_PAL8;
 avcodec_get_frame_defaults(&avs->picture);
+avcodec_set_dimensions(avctx, 318, 198);
 return 0;
 }
 
```
```diff
@@ -85,9 +85,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
 frame_len_bits = 11;
 }
 
-if (avctx->channels > MAX_CHANNELS) {
-av_log(avctx, AV_LOG_ERROR, "too many channels: %d\n", avctx->channels);
-return -1;
+if (avctx->channels < 1 || avctx->channels > MAX_CHANNELS) {
+av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", avctx->channels);
+return AVERROR_INVALIDDATA;
 }
 
 if (avctx->extradata && avctx->extradata_size > 0)
```
```diff
@@ -219,9 +219,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
 if(comp == BMP_RLE4 || comp == BMP_RLE8)
 memset(p->data[0], 0, avctx->height * p->linesize[0]);
 
-if(depth == 4 || depth == 8)
-memset(p->data[1], 0, 1024);
-
 if(height > 0){
 ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
 linesize = -p->linesize[0];
@@ -232,6 +229,9 @@ static int bmp_decode_frame(AVCodecContext *avctx,
 
 if(avctx->pix_fmt == PIX_FMT_PAL8){
 int colors = 1 << depth;
+
+memset(p->data[1], 0, 1024);
+
 if(ihsize >= 36){
 int t;
 buf = buf0 + 46;
```
```diff
@@ -26,6 +26,10 @@
 #include "libavutil/common.h"
 #include "libavutil/intreadwrite.h"
 
+typedef struct {
+const uint8_t *buffer, *buffer_end;
+} GetByteContext;
+
 #define DEF_T(type, name, bytes, read, write) \
 static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
 (*b) += bytes;\
@@ -34,6 +38,18 @@ static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
 static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
 write(*b, value);\
 (*b) += bytes;\
+}\
+static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
+{\
+if (g->buffer_end - g->buffer < bytes)\
+return 0;\
+return bytestream_get_ ## name(&g->buffer);\
+}\
+static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
+{\
+if (g->buffer_end - g->buffer < bytes)\
+return 0;\
+return read(g->buffer);\
 }
 
 #define DEF(name, bytes, read, write) \
@@ -55,6 +71,34 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
 #undef DEF64
 #undef DEF_T
 
+static av_always_inline void bytestream2_init(GetByteContext *g,
+const uint8_t *buf, int buf_size)
+{
+g->buffer = buf;
+g->buffer_end = buf + buf_size;
+}
+
+static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
+{
+return g->buffer_end - g->buffer;
+}
+
+static av_always_inline void bytestream2_skip(GetByteContext *g,
+unsigned int size)
+{
+g->buffer += FFMIN(g->buffer_end - g->buffer, size);
+}
+
+static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
+uint8_t *dst,
+unsigned int size)
+{
+int size2 = FFMIN(g->buffer_end - g->buffer, size);
+memcpy(dst, g->buffer, size2);
+g->buffer += size2;
+return size2;
+}
+
 static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
 {
 memcpy(dst, *b, size);
```
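The hunks above introduce a bounds-checked byte reader (GetByteContext plus the bytestream2_* helpers). As a rough illustration of the reading pattern it enables, here is a small self-contained C sketch; the struct and helpers are re-declared in simplified form (without the av_always_inline and macro machinery), so while the names mirror the diff, the code below is only an approximation for illustration, not the libavcodec implementation.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the GetByteContext API shown in the diff above. */
typedef struct {
    const uint8_t *buffer, *buffer_end;
} GetByteContext;

static void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
{
    g->buffer     = buf;
    g->buffer_end = buf + buf_size;
}

static unsigned int bytestream2_get_bytes_left(GetByteContext *g)
{
    return g->buffer_end - g->buffer;
}

/* Reads return 0 instead of running past the end of the packet. */
static unsigned int bytestream2_get_byte(GetByteContext *g)
{
    if (g->buffer_end - g->buffer < 1)
        return 0;
    return *g->buffer++;
}

int main(void)
{
    const uint8_t packet[4] = { 0x01, 0x02, 0x03, 0x04 };
    GetByteContext gb;

    bytestream2_init(&gb, packet, sizeof(packet));
    while (bytestream2_get_bytes_left(&gb) > 0)
        printf("%u\n", bytestream2_get_byte(&gb));

    /* Further reads are safe: they return 0 rather than overreading. */
    printf("past end: %u\n", bytestream2_get_byte(&gb));
    return 0;
}
```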
```diff
@@ -609,12 +609,21 @@ static int decode_pic(AVSContext *h) {
 static int decode_seq_header(AVSContext *h) {
 MpegEncContext *s = &h->s;
 int frame_rate_code;
+int width, height;
 
 h->profile = get_bits(&s->gb,8);
 h->level = get_bits(&s->gb,8);
 skip_bits1(&s->gb); //progressive sequence
-s->width = get_bits(&s->gb,14);
-s->height = get_bits(&s->gb,14);
+
+width = get_bits(&s->gb, 14);
+height = get_bits(&s->gb, 14);
+if ((s->width || s->height) && (s->width != width || s->height != height)) {
+av_log_missing_feature(s, "Width/height changing in CAVS is", 0);
+return AVERROR_PATCHWELCOME;
+}
+s->width = width;
+s->height = height;
+
 skip_bits(&s->gb,2); //chroma format
 skip_bits(&s->gb,3); //sample_precision
 h->aspect_ratio = get_bits(&s->gb,4);
```
```diff
@@ -280,6 +280,10 @@ static int cdg_decode_frame(AVCodecContext *avctx,
 av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
 return AVERROR(EINVAL);
 }
+if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
+av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
+return AVERROR(EINVAL);
+}
 
 ret = avctx->reget_buffer(avctx, &cc->frame);
 if (ret) {
```
```diff
@@ -133,9 +133,8 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
 out2 -= val * old_out2;
 out3 -= val * old_out3;
 
-old_out3 = out[-5];
-
 for (i = 5; i <= filter_length; i += 2) {
+old_out3 = out[-i];
 val = filter_coeffs[i-1];
 
 out0 -= val * old_out3;
@@ -154,7 +153,6 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
 
 FFSWAP(float, old_out0, old_out2);
 old_out1 = old_out3;
-old_out3 = out[-i-2];
 }
 
 tmp0 = out0;
```
```diff
@@ -23,6 +23,8 @@
 #include "avcodec.h"
+#include "libavutil/intreadwrite.h"
 #include "bytestream.h"
 
+#include "libavutil/imgutils.h"
 #include "libavutil/lzo.h" // for av_memcpy_backptr
 
 typedef struct DfaContext {
@@ -35,9 +37,13 @@ typedef struct DfaContext {
 static av_cold int dfa_decode_init(AVCodecContext *avctx)
 {
 DfaContext *s = avctx->priv_data;
+int ret;
 
 avctx->pix_fmt = PIX_FMT_PAL8;
 
+if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
+return ret;
+
 s->frame_buf = av_mallocz(avctx->width * avctx->height + AV_LZO_OUTPUT_PADDING);
 if (!s->frame_buf)
 return AVERROR(ENOMEM);
@@ -153,8 +159,7 @@ static int decode_dds1(uint8_t *frame, int width, int height,
 bitbuf = bytestream_get_le16(&src);
 mask = 1;
 }
 if (src_end - src < 2 || frame_end - frame < 2)
 return -1;
-
 if (bitbuf & mask) {
 v = bytestream_get_le16(&src);
 offset = (v & 0x1FFF) << 2;
@@ -168,8 +173,13 @@ static int decode_dds1(uint8_t *frame, int width, int height,
 frame += 2;
 }
 } else if (bitbuf & (mask << 1)) {
-frame += bytestream_get_le16(&src) * 2;
+v = bytestream_get_le16(&src)*2;
+if (frame - frame_end < v)
+return AVERROR_INVALIDDATA;
+frame += v;
 } else {
+if (frame_end - frame < width + 3)
+return AVERROR_INVALIDDATA;
 frame[0] = frame[1] =
 frame[width] = frame[width + 1] = *src++;
 frame += 2;
@@ -231,6 +241,7 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
 const uint8_t *frame_end = frame + width * height;
 uint8_t *line_ptr;
 int count, i, v, lines, segments;
+int y = 0;
 
 lines = bytestream_get_le16(&src);
 if (lines > height || src >= src_end)
@@ -239,10 +250,12 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
 while (lines--) {
 segments = bytestream_get_le16(&src);
 while ((segments & 0xC000) == 0xC000) {
+unsigned skip_lines = -(int16_t)segments;
 unsigned delta = -((int16_t)segments * width);
-if (frame_end - frame <= delta)
+if (frame_end - frame <= delta || y + lines + skip_lines > height)
 return -1;
 frame += delta;
+y += skip_lines;
 segments = bytestream_get_le16(&src);
 }
 if (segments & 0x8000) {
@@ -251,6 +264,7 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
 }
 line_ptr = frame;
 frame += width;
+y++;
 while (segments--) {
 if (src_end - src < 2)
 return -1;
```
```diff
@@ -169,6 +169,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
 int in, out = 0;
 int predictor[2];
 int channel_number = 0;
+int stereo = s->channels - 1;
 short *output_samples = data;
 int shift[2];
 unsigned char byte;
@@ -177,6 +178,9 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
 if (!buf_size)
 return 0;
 
+if (stereo && (buf_size & 1))
+buf_size--;
+
 // almost every DPCM variant expands one byte of data into two
 if(*data_size/2 < buf_size)
 return -1;
@@ -295,7 +299,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
 }
 
 *data_size = out * sizeof(short);
-return buf_size;
+return avpkt->size;
 }
 
 #define DPCM_DECODER(id, name, long_name_) \
```
```diff
@@ -25,7 +25,14 @@
 
 #define _WIN32_WINNT 0x0600
 #define COBJMACROS
 
+#include "config.h"
+
 #include "dxva2.h"
+#if HAVE_DXVA_H
+#include <dxva.h>
+#endif
 
 #include "avcodec.h"
 #include "mpegvideo.h"
 
```
```diff
@@ -249,7 +249,7 @@ static int decode_frame(AVCodecContext *avctx,
 int chunk_type;
 int inter;
 
-if (buf_size < 17) {
+if (buf_size < 26) {
 av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
 *data_size = 0;
 return -1;
```
```diff
@@ -59,12 +59,15 @@ static av_cold int tqi_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static void tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
+static int tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
 {
 int n;
 s->dsp.clear_blocks(block[0]);
 for (n=0; n<6; n++)
-ff_mpeg1_decode_block_intra(s, block[n], n);
+if (ff_mpeg1_decode_block_intra(s, block[n], n) < 0)
+return -1;
+
+return 0;
 }
 
 static inline void tqi_idct_put(TqiContext *t, DCTELEM (*block)[64])
@@ -136,7 +139,8 @@ static int tqi_decode_frame(AVCodecContext *avctx,
 for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++)
 for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++)
 {
-tqi_decode_mb(s, t->block);
+if (tqi_decode_mb(s, t->block) < 0)
+break;
 tqi_idct_put(t, t->block);
 }
 
```
```diff
@@ -937,14 +937,16 @@ static int encode_residual_ch(FlacEncodeContext *s, int ch)
 omethod == ORDER_METHOD_8LEVEL) {
 int levels = 1 << omethod;
 uint32_t bits[1 << ORDER_METHOD_8LEVEL];
-int order;
+int order = -1;
 int opt_index = levels-1;
 opt_order = max_order-1;
 bits[opt_index] = UINT32_MAX;
 for (i = levels-1; i >= 0; i--) {
+int last_order = order;
 order = min_order + (((max_order-min_order+1) * (i+1)) / levels)-1;
-if (order < 0)
-order = 0;
+order = av_clip(order, min_order - 1, max_order - 1);
+if (order == last_order)
+continue;
 encode_residual_lpc(res, smp, n, order+1, coefs[order], shift[order]);
 bits[i] = find_subframe_rice_params(s, sub, order+1);
 if (bits[i] < bits[opt_index]) {
```
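For reference, a small standalone C sketch of the level-based order search that the hunk above modifies: each level maps to a candidate LPC order, the candidate is clipped, and duplicates are skipped instead of being re-evaluated. The values of min_order, max_order and levels are illustrative assumptions, and av_clip is replaced by a local clip() helper so the program compiles on its own.

```c
#include <stdio.h>

/* Local stand-in for FFmpeg's av_clip(). */
static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    /* Illustrative assumptions, not values taken from the encoder. */
    int min_order = 1, max_order = 8, levels = 8;
    int order = -1;

    for (int i = levels - 1; i >= 0; i--) {
        int last_order = order;

        /* Candidate order for this level, as computed in the hunk above. */
        order = min_order + (((max_order - min_order + 1) * (i + 1)) / levels) - 1;
        order = clip(order, min_order - 1, max_order - 1);

        if (order == last_order) {
            printf("level %d: order %d (duplicate, skipped)\n", i, order);
            continue;
        }
        printf("level %d: try order %d\n", i, order);
    }
    return 0;
}
```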
```diff
@@ -98,7 +98,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
 }
 }
 
-int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
+int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
 {
 int x, y, wrap, a, c, pred_dc;
 int16_t *dc_val;
@@ -226,7 +226,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
 }
 }
 
-void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
+void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
 {
 int x, y, wrap, a, c, pred_dc, scale, i;
 int16_t *dc_val, *ac_val, *ac_val1;
@@ -313,8 +313,8 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
 ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
 }
 
-int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
-int *px, int *py)
+int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
+int *px, int *py)
 {
 int wrap;
 int16_t *A, *B, *C, (*mot_val)[2];
```
```diff
@@ -38,16 +38,16 @@
 extern const AVRational ff_h263_pixel_aspect[16];
 extern const uint8_t ff_h263_cbpy_tab[16][2];
 
-extern const uint8_t cbpc_b_tab[4][2];
+extern const uint8_t ff_cbpc_b_tab[4][2];
 
-extern const uint8_t mvtab[33][2];
+extern const uint8_t ff_mvtab[33][2];
 
 extern const uint8_t ff_h263_intra_MCBPC_code[9];
 extern const uint8_t ff_h263_intra_MCBPC_bits[9];
 
 extern const uint8_t ff_h263_inter_MCBPC_code[28];
 extern const uint8_t ff_h263_inter_MCBPC_bits[28];
-extern const uint8_t h263_mbtype_b_tab[15][2];
+extern const uint8_t ff_h263_mbtype_b_tab[15][2];
 
 extern VLC ff_h263_intra_MCBPC_vlc;
 extern VLC ff_h263_inter_MCBPC_vlc;
@@ -55,41 +55,41 @@ extern VLC ff_h263_cbpy_vlc;
 
 extern RLTable ff_h263_rl_inter;
 
-extern RLTable rl_intra_aic;
+extern RLTable ff_rl_intra_aic;
 
-extern const uint16_t h263_format[8][2];
-extern const uint8_t modified_quant_tab[2][32];
+extern const uint16_t ff_h263_format[8][2];
+extern const uint8_t ff_modified_quant_tab[2][32];
 extern uint16_t ff_mba_max[6];
 extern uint8_t ff_mba_length[7];
 
 extern uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
 
 
-int h263_decode_motion(MpegEncContext * s, int pred, int f_code);
+int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code);
 av_const int ff_h263_aspect_to_info(AVRational aspect);
 int ff_h263_decode_init(AVCodecContext *avctx);
 int ff_h263_decode_frame(AVCodecContext *avctx,
 void *data, int *data_size,
 AVPacket *avpkt);
 int ff_h263_decode_end(AVCodecContext *avctx);
-void h263_encode_mb(MpegEncContext *s,
-DCTELEM block[6][64],
-int motion_x, int motion_y);
-void h263_encode_picture_header(MpegEncContext *s, int picture_number);
-void h263_encode_gob_header(MpegEncContext * s, int mb_line);
-int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
-int *px, int *py);
-void h263_encode_init(MpegEncContext *s);
-void h263_decode_init_vlc(MpegEncContext *s);
-int h263_decode_picture_header(MpegEncContext *s);
+void ff_h263_encode_mb(MpegEncContext *s,
+DCTELEM block[6][64],
+int motion_x, int motion_y);
+void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number);
+void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line);
+int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
+int *px, int *py);
+void ff_h263_encode_init(MpegEncContext *s);
+void ff_h263_decode_init_vlc(MpegEncContext *s);
+int ff_h263_decode_picture_header(MpegEncContext *s);
 int ff_h263_decode_gob_header(MpegEncContext *s);
 void ff_h263_update_motion_val(MpegEncContext * s);
 void ff_h263_loop_filter(MpegEncContext * s);
 int ff_h263_decode_mba(MpegEncContext *s);
 void ff_h263_encode_mba(MpegEncContext *s);
 void ff_init_qscale_tab(MpegEncContext *s);
-int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
-void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
+int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
+void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
 
 
 /**
@@ -119,7 +119,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
 int l, bit_size, code;
 
 if (val == 0) {
-return mvtab[0][1];
+return ff_mvtab[0][1];
 } else {
 bit_size = f_code - 1;
 /* modulo encoding */
@@ -128,7 +128,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
 val--;
 code = (val >> bit_size) + 1;
 
-return mvtab[code][1] + 1 + bit_size;
+return ff_mvtab[code][1] + 1 + bit_size;
 }
 }
 
```
```diff
@@ -57,7 +57,7 @@ const uint8_t ff_h263_inter_MCBPC_bits[28] = {
 11, 13, 13, 13,/* inter4Q*/
 };
 
-const uint8_t h263_mbtype_b_tab[15][2] = {
+const uint8_t ff_h263_mbtype_b_tab[15][2] = {
 {1, 1},
 {3, 3},
 {1, 5},
@@ -75,7 +75,7 @@ const uint8_t h263_mbtype_b_tab[15][2] = {
 {1, 8},
 };
 
-const uint8_t cbpc_b_tab[4][2] = {
+const uint8_t ff_cbpc_b_tab[4][2] = {
 {0, 1},
 {2, 2},
 {7, 3},
@@ -88,7 +88,7 @@ const uint8_t ff_h263_cbpy_tab[16][2] =
 {2,5}, {3,6}, {5,4}, {10,4}, {4,4}, {8,4}, {6,4}, {3,2}
 };
 
-const uint8_t mvtab[33][2] =
+const uint8_t ff_mvtab[33][2] =
 {
 {1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
 {11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10},
@@ -98,7 +98,7 @@ const uint8_t mvtab[33][2] =
 };
 
 /* third non intra table */
-const uint16_t inter_vlc[103][2] = {
+const uint16_t ff_inter_vlc[103][2] = {
 { 0x2, 2 },{ 0xf, 4 },{ 0x15, 6 },{ 0x17, 7 },
 { 0x1f, 8 },{ 0x25, 9 },{ 0x24, 9 },{ 0x21, 10 },
 { 0x20, 10 },{ 0x7, 11 },{ 0x6, 11 },{ 0x20, 11 },
@@ -127,7 +127,7 @@ const uint16_t inter_vlc[103][2] = {
 { 0x5e, 12 },{ 0x5f, 12 },{ 0x3, 7 },
 };
 
-const int8_t inter_level[102] = {
+const int8_t ff_inter_level[102] = {
 1, 2, 3, 4, 5, 6, 7, 8,
 9, 10, 11, 12, 1, 2, 3, 4,
 5, 6, 1, 2, 3, 4, 1, 2,
@@ -143,7 +143,7 @@ const int8_t inter_level[102] = {
 1, 1, 1, 1, 1, 1,
 };
 
-const int8_t inter_run[102] = {
+const int8_t ff_inter_run[102] = {
 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 1, 1, 1, 1,
 1, 1, 2, 2, 2, 2, 3, 3,
@@ -162,9 +162,9 @@ const int8_t inter_run[102] = {
 RLTable ff_h263_rl_inter = {
 102,
 58,
-inter_vlc,
-inter_run,
-inter_level,
+ff_inter_vlc,
+ff_inter_run,
+ff_inter_level,
 };
 
 static const uint16_t intra_vlc_aic[103][2] = {
@@ -228,7 +228,7 @@ static const int8_t intra_level_aic[102] = {
 1, 1, 1, 1, 1, 1,
 };
 
-RLTable rl_intra_aic = {
+RLTable ff_rl_intra_aic = {
 102,
 58,
 intra_vlc_aic,
@@ -236,7 +236,7 @@ RLTable rl_intra_aic = {
 intra_level_aic,
 };
 
-const uint16_t h263_format[8][2] = {
+const uint16_t ff_h263_format[8][2] = {
 { 0, 0 },
 { 128, 96 },
 { 176, 144 },
@@ -250,7 +250,7 @@ const uint8_t ff_aic_dc_scale_table[32]={
 0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62
 };
 
-const uint8_t modified_quant_tab[2][32]={
+const uint8_t ff_modified_quant_tab[2][32]={
 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 {
 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28
```
```diff
@@ -111,7 +111,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
 if (MPV_common_init(s) < 0)
 return -1;
 
-h263_decode_init_vlc(s);
+ff_h263_decode_init_vlc(s);
 
 return 0;
 }
@@ -429,7 +429,7 @@ retry:
 } else if (CONFIG_FLV_DECODER && s->h263_flv) {
 ret = ff_flv_decode_picture_header(s);
 } else {
-ret = h263_decode_picture_header(s);
+ret = ff_h263_decode_picture_header(s);
 }
 
 if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);
@@ -438,6 +438,13 @@ retry:
 if (ret < 0){
 av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
 return -1;
+} else if ((s->width != avctx->coded_width ||
+s->height != avctx->coded_height ||
+(s->width + 15) >> 4 != s->mb_width ||
+(s->height + 15) >> 4 != s->mb_height) &&
+(HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))) {
+av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
+return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
 }
 
 avctx->has_b_frames= !s->low_delay;
```
```diff
@@ -2617,12 +2617,18 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
 else
 s->height= 16*s->mb_height - (4>>CHROMA444)*FFMIN(h->sps.crop_bottom, (8<<CHROMA444)-1);
 
+if (FFALIGN(s->avctx->width, 16) == s->width &&
+FFALIGN(s->avctx->height, 16) == s->height) {
+s->width = s->avctx->width;
+s->height = s->avctx->height;
+}
+
 if (s->context_initialized
 && ( s->width != s->avctx->width || s->height != s->avctx->height
 || av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
-if(h != h0) {
+if(h != h0 || (HAVE_THREADS && h->s.avctx->active_thread_type & FF_THREAD_FRAME)) {
 av_log_missing_feature(s->avctx, "Width/height changing with threads is", 0);
-return -1; // width / height changed during parallelized decoding
+return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
 }
 free_tables(h, 0);
 flush_dpb(s->avctx);
@@ -2895,8 +2901,13 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
 
 if(num_ref_idx_active_override_flag){
 h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
-if(h->slice_type_nos==AV_PICTURE_TYPE_B)
+if (h->ref_count[0] < 1)
+return AVERROR_INVALIDDATA;
+if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
 h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
+if (h->ref_count[1] < 1)
+return AVERROR_INVALIDDATA;
+}
 }
 
 if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
@@ -3545,7 +3556,9 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
 
 return 0;
 }else{
-ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
+ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
+s->mb_x - 1, s->mb_y,
+(AC_END|DC_END|MV_END)&part_mask);
 
 return -1;
 }
@@ -3707,7 +3720,11 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
 break;
 }
 
-if(buf_index+3 >= buf_size) break;
-
+if (buf_index + 3 >= buf_size) {
+buf_index = buf_size;
+break;
+}
+
 buf_index+=3;
 if(buf_index >= next_avc) continue;
@@ -3833,6 +3850,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
 hx->inter_gb_ptr= &hx->inter_gb;
 
 if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
+&& s->current_picture_ptr
 && s->context_initialized
 && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
 && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
@@ -3848,9 +3866,16 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
 init_get_bits(&s->gb, ptr, bit_length);
 ff_h264_decode_seq_parameter_set(h);
 
-if (s->flags& CODEC_FLAG_LOW_DELAY ||
-(h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames))
-s->low_delay=1;
+if (s->flags & CODEC_FLAG_LOW_DELAY ||
+(h->sps.bitstream_restriction_flag &&
+!h->sps.num_reorder_frames)) {
+if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
+av_log(avctx, AV_LOG_WARNING, "Delayed frames seen "
+"reenabling low delay requires a codec "
+"flush.\n");
+else
+s->low_delay = 1;
+}
 
 if(avctx->has_b_frames < 2)
 avctx->has_b_frames= !s->low_delay;
```
```diff
@@ -37,6 +37,9 @@
 //#undef NDEBUG
 #include <assert.h>
 
+#define MAX_LOG2_MAX_FRAME_NUM (12 + 4)
+#define MIN_LOG2_MAX_FRAME_NUM 4
+
 static const AVRational pixel_aspect[17]={
 {0, 1},
 {1, 1},
@@ -311,7 +314,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
 MpegEncContext * const s = &h->s;
 int profile_idc, level_idc, constraint_set_flags = 0;
 unsigned int sps_id;
-int i;
+int i, log2_max_frame_num_minus4;
 SPS *sps;
 
 profile_idc= get_bits(&s->gb, 8);
@@ -340,14 +343,18 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
 memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
 sps->scaling_matrix_present = 0;
 
-if(sps->profile_idc >= 100){ //high profile
+if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
+sps->profile_idc == 122 || sps->profile_idc == 244 ||
+sps->profile_idc == 44 || sps->profile_idc == 83 ||
+sps->profile_idc == 86 || sps->profile_idc == 118 ||
+sps->profile_idc == 128 || sps->profile_idc == 144) {
 sps->chroma_format_idc= get_ue_golomb_31(&s->gb);
 if (sps->chroma_format_idc > 3U) {
 av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc %d is illegal\n", sps->chroma_format_idc);
 goto fail;
-}
-if(sps->chroma_format_idc == 3)
+} else if(sps->chroma_format_idc == 3) {
 sps->residual_color_transform_flag = get_bits1(&s->gb);
+}
 sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
 sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
 if (sps->bit_depth_luma > 12U || sps->bit_depth_chroma > 12U) {
@@ -363,7 +370,16 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
 sps->bit_depth_chroma = 8;
 }
 
-sps->log2_max_frame_num= get_ue_golomb(&s->gb) + 4;
+log2_max_frame_num_minus4 = get_ue_golomb(&s->gb);
+if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 ||
+log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) {
+av_log(h->s.avctx, AV_LOG_ERROR,
+"log2_max_frame_num_minus4 out of range (0-12): %d\n",
+log2_max_frame_num_minus4);
+return AVERROR_INVALIDDATA;
+}
+sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4;
 
 sps->poc_type= get_ue_golomb_31(&s->gb);
 
 if(sps->poc_type == 0){ //FIXME #define
@@ -490,6 +506,9 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
 if(pps_id >= MAX_PPS_COUNT) {
 av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id);
 return -1;
+} else if (h->sps.bit_depth_luma > 10) {
+av_log(h->s.avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma);
+return AVERROR_PATCHWELCOME;
 }
 
 pps= av_mallocz(sizeof(PPS));
```
```diff
@@ -28,6 +28,7 @@
 * huffyuv codec for libavcodec.
 */
 
+#include "libavutil/avassert.h"
 #include "avcodec.h"
 #include "get_bits.h"
 #include "put_bits.h"
@@ -283,12 +284,13 @@ static void generate_joint_tables(HYuvContext *s){
 for(i=y=0; y<256; y++){
 int len0 = s->len[0][y];
 int limit = VLC_BITS - len0;
-if(limit <= 0)
+if(limit <= 0 || !len0)
 continue;
 for(u=0; u<256; u++){
 int len1 = s->len[p][u];
-if(len1 > limit)
+if (len1 > limit || !len1)
 continue;
+av_assert0(i < (1 << VLC_BITS));
 len[i] = len0 + len1;
 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
 symbols[i] = (y<<8) + u;
@@ -310,18 +312,19 @@ static void generate_joint_tables(HYuvContext *s){
 for(i=0, g=-16; g<16; g++){
 int len0 = s->len[p0][g&255];
 int limit0 = VLC_BITS - len0;
-if(limit0 < 2)
+if (limit0 < 2 || !len0)
 continue;
 for(b=-16; b<16; b++){
 int len1 = s->len[p1][b&255];
 int limit1 = limit0 - len1;
-if(limit1 < 1)
+if (limit1 < 1 || !len1)
 continue;
 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
 for(r=-16; r<16; r++){
 int len2 = s->len[2][r&255];
-if(len2 > limit1)
+if (len2 > limit1 || !len2)
 continue;
 av_assert0(i < (1 << VLC_BITS));
 len[i] = len0 + len1 + len2;
 bits[i] = (code << len2) + s->bits[2][r&255];
 if(s->decorrelate){
@@ -345,6 +348,7 @@ static void generate_joint_tables(HYuvContext *s){
 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
 GetBitContext gb;
 int i;
+int ret;
 
 init_get_bits(&gb, src, length*8);
 
@@ -355,7 +359,8 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
 return -1;
 }
 free_vlc(&s->vlc[i]);
-init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
+if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0)) < 0)
+return ret;
 }
 
 generate_joint_tables(s);
@@ -367,6 +372,7 @@ static int read_old_huffman_tables(HYuvContext *s){
 #if 1
 GetBitContext gb;
 int i;
+int ret;
 
 init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
 if(read_len_table(s->len[0], &gb)<0)
@@ -387,7 +393,8 @@ static int read_old_huffman_tables(HYuvContext *s){
 
 for(i=0; i<3; i++){
 free_vlc(&s->vlc[i]);
-init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
+if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0)) < 0)
+return ret;
 }
 
 generate_joint_tables(s);
```
```diff
@@ -176,7 +176,13 @@ static int extract_header(AVCodecContext *const avctx,
 const uint8_t *buf;
 unsigned buf_size;
 IffContext *s = avctx->priv_data;
-int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
+int palette_size;
+
+if (avctx->extradata_size < 2) {
+av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
+return AVERROR_INVALIDDATA;
+}
+palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
 
 if (avpkt) {
 int image_size;
@@ -192,8 +198,6 @@ static int extract_header(AVCodecContext *const avctx,
 return AVERROR_INVALIDDATA;
 }
 } else {
-if (avctx->extradata_size < 2)
-return AVERROR_INVALIDDATA;
 buf = avctx->extradata;
 buf_size = bytestream_get_be16(&buf);
 if (buf_size <= 1 || palette_size < 0) {
@@ -281,7 +285,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
 int err;
 
 if (avctx->bits_per_coded_sample <= 8) {
-int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
+int palette_size;
+
+if (avctx->extradata_size >= 2)
+palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
+else
+palette_size = 0;
 avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
 (avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
 } else if (avctx->bits_per_coded_sample <= 32) {
```
```diff
@@ -625,7 +625,8 @@ static enum PixelFormat avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
 /* find exact color match with smallest size */
 dst_pix_fmt = PIX_FMT_NONE;
 min_dist = 0x7fffffff;
-for(i = 0;i < PIX_FMT_NB; i++) {
+/* test only the first 64 pixel formats to avoid undefined behaviour */
+for (i = 0; i < 64; i++) {
 if (pix_fmt_mask & (1ULL << i)) {
 loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
 if (loss == 0) {
```
```diff
@@ -76,6 +76,8 @@ typedef struct {
 int is_scalable;
 uint32_t lock_word;
 IVIPicConfig pic_conf;
+
+int gop_invalid;
 } IVI5DecContext;
 
 
@@ -219,6 +221,10 @@ static int decode_gop_header(IVI5DecContext *ctx, AVCodecContext *avctx)
 }
 
 if (band->blk_size == 8) {
+if(quant_mat >= 5){
+av_log(avctx, AV_LOG_ERROR, "quant_mat %d too large!\n", quant_mat);
+return -1;
+}
 band->intra_base = &ivi5_base_quant_8x8_intra[quant_mat][0];
 band->inter_base = &ivi5_base_quant_8x8_inter[quant_mat][0];
 band->intra_scale = &ivi5_scale_quant_8x8_intra[quant_mat][0];
@@ -335,8 +341,12 @@ static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
 ctx->frame_num = get_bits(&ctx->gb, 8);
 
 if (ctx->frame_type == FRAMETYPE_INTRA) {
-if (decode_gop_header(ctx, avctx))
-return -1;
+ctx->gop_invalid = 1;
+if (decode_gop_header(ctx, avctx)) {
+av_log(avctx, AV_LOG_ERROR, "Invalid GOP header, skipping frames.\n");
+return AVERROR_INVALIDDATA;
+}
+ctx->gop_invalid = 0;
 }
 
 if (ctx->frame_type != FRAMETYPE_NULL) {
@@ -453,6 +463,16 @@ static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
 ref_mb = tile->ref_mbs;
 offs = tile->ypos * band->pitch + tile->xpos;
 
+if (!ref_mb &&
+((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv))
+return AVERROR_INVALIDDATA;
+
+if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
+av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches parameters %d\n",
+tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
+return AVERROR_INVALIDDATA;
+}
+
 /* scale factor for motion vectors */
 mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
 mv_x = mv_y = 0;
@@ -603,8 +623,10 @@ static int decode_band(IVI5DecContext *ctx, int plane_num,
 
 tile->is_empty = get_bits1(&ctx->gb);
 if (tile->is_empty) {
-ff_ivi_process_empty_tile(avctx, band, tile,
+result = ff_ivi_process_empty_tile(avctx, band, tile,
 (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
+if (result < 0)
+break;
 } else {
 tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
 
@@ -751,6 +773,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 "Error while decoding picture header: %d\n", result);
 return -1;
 }
+if (ctx->gop_invalid)
+return AVERROR_INVALIDDATA;
 
 if (ctx->gop_flags & IVI5_IS_PROTECTED) {
 av_log(avctx, AV_LOG_ERROR, "Password-protected clip!\n");
```
```diff
@@ -65,8 +65,8 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
 s->pb_frame = get_bits1(&s->gb);
 
 if (format < 6) {
-s->width = h263_format[format][0];
-s->height = h263_format[format][1];
+s->width = ff_h263_format[format][0];
+s->height = ff_h263_format[format][1];
 s->avctx->sample_aspect_ratio.num = 12;
 s->avctx->sample_aspect_ratio.den = 11;
 } else {
@@ -77,7 +77,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
 }
 if(get_bits(&s->gb, 2))
 av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
-s->loop_filter = get_bits1(&s->gb);
+s->loop_filter = get_bits1(&s->gb) * !s->avctx->lowres;
 if(get_bits1(&s->gb))
 av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
 if(get_bits1(&s->gb))
```
```diff
@@ -100,7 +100,7 @@ static VLC cbpc_b_vlc;
 /* init vlcs */
 
 /* XXX: find a better solution to handle static init */
-void h263_decode_init_vlc(MpegEncContext *s)
+void ff_h263_decode_init_vlc(MpegEncContext *s)
 {
 static int done = 0;
 
@@ -117,18 +117,18 @@ void h263_decode_init_vlc(MpegEncContext *s)
 &ff_h263_cbpy_tab[0][1], 2, 1,
 &ff_h263_cbpy_tab[0][0], 2, 1, 64);
 INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
-&mvtab[0][1], 2, 1,
-&mvtab[0][0], 2, 1, 538);
+&ff_mvtab[0][1], 2, 1,
+&ff_mvtab[0][0], 2, 1, 538);
 init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
-init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
+init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
 INIT_VLC_RL(ff_h263_rl_inter, 554);
-INIT_VLC_RL(rl_intra_aic, 554);
+INIT_VLC_RL(ff_rl_intra_aic, 554);
 INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
-&h263_mbtype_b_tab[0][1], 2, 1,
-&h263_mbtype_b_tab[0][0], 2, 1, 80);
+&ff_h263_mbtype_b_tab[0][1], 2, 1,
+&ff_h263_mbtype_b_tab[0][0], 2, 1, 80);
 INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
-&cbpc_b_tab[0][1], 2, 1,
-&cbpc_b_tab[0][0], 2, 1, 8);
+&ff_cbpc_b_tab[0][1], 2, 1,
+&ff_cbpc_b_tab[0][0], 2, 1, 8);
 }
 }
 
@@ -268,7 +268,7 @@ int ff_h263_resync(MpegEncContext *s){
 return -1;
 }
 
-int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
+int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
 {
 int code, val, sign, shift, l;
 code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
@@ -379,16 +379,16 @@ static void preview_obmc(MpegEncContext *s){
 if ((cbpc & 16) == 0) {
 s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
 /* 16x16 motion prediction */
-mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
 if (s->umvplus)
 mx = h263p_decode_umotion(s, pred_x);
 else
-mx = h263_decode_motion(s, pred_x, 1);
+mx = ff_h263_decode_motion(s, pred_x, 1);
 
 if (s->umvplus)
 my = h263p_decode_umotion(s, pred_y);
 else
-my = h263_decode_motion(s, pred_y, 1);
+my = ff_h263_decode_motion(s, pred_y, 1);
 
 mot_val[0 ]= mot_val[2 ]=
 mot_val[0+stride]= mot_val[2+stride]= mx;
@@ -397,16 +397,16 @@ static void preview_obmc(MpegEncContext *s){
 } else {
 s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
 for(i=0;i<4;i++) {
-mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
+mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
 if (s->umvplus)
 mx = h263p_decode_umotion(s, pred_x);
 else
-mx = h263_decode_motion(s, pred_x, 1);
+mx = ff_h263_decode_motion(s, pred_x, 1);
 
 if (s->umvplus)
 my = h263p_decode_umotion(s, pred_y);
 else
-my = h263_decode_motion(s, pred_y, 1);
+my = ff_h263_decode_motion(s, pred_y, 1);
 if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
 skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
 mot_val[0] = mx;
@@ -430,7 +430,7 @@ static void h263_decode_dquant(MpegEncContext *s){
 
 if(s->modified_quant){
 if(get_bits1(&s->gb))
-s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
+s->qscale= ff_modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
 else
 s->qscale= get_bits(&s->gb, 5);
 }else
@@ -448,7 +448,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
 
 scan_table = s->intra_scantable.permutated;
 if (s->h263_aic && s->mb_intra) {
-rl = &rl_intra_aic;
+rl = &ff_rl_intra_aic;
 i = 0;
 if (s->ac_pred) {
 if (s->h263_aic_dir)
@@ -537,7 +537,7 @@ retry:
 if (i >= 64){
 if(s->alt_inter_vlc && rl == &ff_h263_rl_inter && !s->mb_intra){
 //Looks like a hack but no, it's the way it is supposed to work ...
-rl = &rl_intra_aic;
+rl = &ff_rl_intra_aic;
 i = 0;
 s->gb= gb;
 s->dsp.clear_block(block);
@@ -554,7 +554,7 @@ retry:
 }
 not_coded:
 if (s->mb_intra && s->h263_aic) {
-h263_pred_acdc(s, block, n);
+ff_h263_pred_acdc(s, block, n);
 i = 63;
 }
 s->block_last_index[n] = i;
@@ -653,11 +653,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
 s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
 /* 16x16 motion prediction */
 s->mv_type = MV_TYPE_16X16;
-h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
 if (s->umvplus)
 mx = h263p_decode_umotion(s, pred_x);
 else
-mx = h263_decode_motion(s, pred_x, 1);
+mx = ff_h263_decode_motion(s, pred_x, 1);
 
 if (mx >= 0xffff)
 return -1;
@@ -665,7 +665,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
 
 if (s->umvplus)
 my = h263p_decode_umotion(s, pred_y);
 else
-my = h263_decode_motion(s, pred_y, 1);
+my = ff_h263_decode_motion(s, pred_y, 1);
 
 if (my >= 0xffff)
 return -1;
@@ -678,18 +678,18 @@ int ff_h263_decode_mb(MpegEncContext *s,
 s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
 s->mv_type = MV_TYPE_8X8;
 for(i=0;i<4;i++) {
-mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
+mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
 if (s->umvplus)
 mx = h263p_decode_umotion(s, pred_x);
 else
-mx = h263_decode_motion(s, pred_x, 1);
+mx = ff_h263_decode_motion(s, pred_x, 1);
 if (mx >= 0xffff)
 return -1;
 
 if (s->umvplus)
 my = h263p_decode_umotion(s, pred_y);
 else
-my = h263_decode_motion(s, pred_y, 1);
+my = ff_h263_decode_motion(s, pred_y, 1);
 if (my >= 0xffff)
 return -1;
 s->mv[0][i][0] = mx;
@@ -761,11 +761,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
 //FIXME UMV
 
 if(USES_LIST(mb_type, 0)){
-int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
+int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &mx, &my);
 s->mv_dir = MV_DIR_FORWARD;
 
-mx = h263_decode_motion(s, mx, 1);
-my = h263_decode_motion(s, my, 1);
+mx = ff_h263_decode_motion(s, mx, 1);
+my = ff_h263_decode_motion(s, my, 1);
 
 s->mv[0][0][0] = mx;
 s->mv[0][0][1] = my;
@@ -774,11 +774,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
 }
 
 if(USES_LIST(mb_type, 1)){
-int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
+int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &mx, &my);
 s->mv_dir |= MV_DIR_BACKWARD;
 
-mx = h263_decode_motion(s, mx, 1);
-my = h263_decode_motion(s, my, 1);
+mx = ff_h263_decode_motion(s, mx, 1);
+my = ff_h263_decode_motion(s, my, 1);
 
 s->mv[1][0][0] = mx;
 s->mv[1][0][1] = my;
@@ -829,8 +829,8 @@ intra:
 }
 
 while(pb_mv_count--){
-h263_decode_motion(s, 0, 1);
-h263_decode_motion(s, 0, 1);
+ff_h263_decode_motion(s, 0, 1);
+ff_h263_decode_motion(s, 0, 1);
 }
 
 /* decode each block */
@@ -864,7 +864,7 @@ end:
 }
 
 /* most is hardcoded. should extend to handle all h263 streams */
-int h263_decode_picture_header(MpegEncContext *s)
+int ff_h263_decode_picture_header(MpegEncContext *s)
 {
 int format, width, height, i;
 uint32_t startcode;
@@ -916,8 +916,8 @@ int h263_decode_picture_header(MpegEncContext *s)
 if (format != 7 && format != 6) {
 s->h263_plus = 0;
 /* H.263v1 */
-width = h263_format[format][0];
-height = h263_format[format][1];
+width = ff_h263_format[format][0];
+height = ff_h263_format[format][1];
 if (!width)
 return -1;
 
@@ -961,6 +961,8 @@ int h263_decode_picture_header(MpegEncContext *s)
 s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */
 s->loop_filter= get_bits1(&s->gb);
 s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter;
+if(s->avctx->lowres)
+s->loop_filter = 0;
 
 s->h263_slice_structured= get_bits1(&s->gb);
 if (get_bits1(&s->gb) != 0) {
```
|
||||
@@ -1024,8 +1026,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
|
||||
}
|
||||
} else {
|
||||
width = h263_format[format][0];
|
||||
height = h263_format[format][1];
|
||||
width = ff_h263_format[format][0];
|
||||
height = ff_h263_format[format][1];
|
||||
s->avctx->sample_aspect_ratio= (AVRational){12,11};
|
||||
}
|
||||
if ((width == 0) || (height == 0))
|
||||
|
@@ -102,7 +102,7 @@ av_const int ff_h263_aspect_to_info(AVRational aspect){
return FF_ASPECT_EXTENDED;
}

void h263_encode_picture_header(MpegEncContext * s, int picture_number)
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
{
int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
int best_clock_code=1;
@@ -141,7 +141,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
put_bits(&s->pb, 1, 0); /* camera off */
put_bits(&s->pb, 1, 0); /* freeze picture release off */

format = ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height);
format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
if (!s->h263_plus) {
/* H.263v1 */
put_bits(&s->pb, 3, format);
@@ -247,7 +247,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
/**
 * Encode a group of blocks header.
 */
void h263_encode_gob_header(MpegEncContext * s, int mb_line)
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
{
put_bits(&s->pb, 17, 1); /* GBSC */

@@ -333,7 +333,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
} else {
i = 0;
if (s->h263_aic && s->mb_intra)
rl = &rl_intra_aic;
rl = &ff_rl_intra_aic;

if(s->alt_inter_vlc && !s->mb_intra){
int aic_vlc_bits=0;
@@ -353,14 +353,14 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
if(level<0) level= -level;

code = get_rl_index(rl, last, run, level);
aic_code = get_rl_index(&rl_intra_aic, last, run, level);
aic_code = get_rl_index(&ff_rl_intra_aic, last, run, level);
inter_vlc_bits += rl->table_vlc[code][1]+1;
aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1;
aic_vlc_bits += ff_rl_intra_aic.table_vlc[aic_code][1]+1;

if (code == rl->n) {
inter_vlc_bits += 1+6+8-1;
}
if (aic_code == rl_intra_aic.n) {
if (aic_code == ff_rl_intra_aic.n) {
aic_vlc_bits += 1+6+8-1;
wrong_pos += run + 1;
}else
@@ -370,7 +370,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
}
i = 0;
if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63)
rl = &rl_intra_aic;
rl = &ff_rl_intra_aic;
}
}

@@ -454,9 +454,9 @@ static void h263p_encode_umotion(MpegEncContext * s, int val)
}
}

void h263_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
void ff_h263_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y)
{
int cbpc, cbpy, i, cbp, pred_x, pred_y;
int16_t pred_dc;
@@ -500,7 +500,7 @@ void h263_encode_mb(MpegEncContext * s,
}

/* motion vectors: 16x16 mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x,
@@ -527,7 +527,7 @@ void h263_encode_mb(MpegEncContext * s,

for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
@@ -561,7 +561,7 @@ void h263_encode_mb(MpegEncContext * s,
if(i<4) scale= s->y_dc_scale;
else scale= s->c_dc_scale;

pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
pred_dc = ff_h263_pred_dc(s, i, &dc_ptr[i]);
level -= pred_dc;
/* Quant */
if (level >= 0)
@@ -662,7 +662,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
if (val == 0) {
/* zero vector */
code = 0;
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
} else {
bit_size = f_code - 1;
range = 1 << bit_size;
@@ -677,7 +677,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
code = (val >> bit_size) + 1;
bits = val & (range - 1);

put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
if (bit_size > 0) {
put_bits(&s->pb, bit_size, bits);
}
@@ -693,7 +693,7 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
for(mv=-MAX_MV; mv<=MAX_MV; mv++){
int len;

if(mv==0) len= mvtab[0][1];
if(mv==0) len= ff_mvtab[0][1];
else{
int val, bit_size, code;

@@ -705,9 +705,9 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
val--;
code = (val >> bit_size) + 1;
if(code<33){
len= mvtab[code][1] + 1 + bit_size;
len= ff_mvtab[code][1] + 1 + bit_size;
}else{
len= mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
len= ff_mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
}
}

@@ -769,7 +769,7 @@ static void init_uni_h263_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_t
}
}

void h263_encode_init(MpegEncContext *s)
void ff_h263_encode_init(MpegEncContext *s)
{
static int done = 0;

@@ -777,9 +777,9 @@ void h263_encode_init(MpegEncContext *s)
done = 1;

init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);

init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
init_uni_h263_rl_tab(&ff_rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
init_uni_h263_rl_tab(&ff_h263_rl_inter , NULL, uni_h263_inter_rl_len);

init_mv_penalty_and_fcode(s);
@@ -123,6 +123,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
if (huff_tab->tab_sel == 7) {
/* custom huffman table (explicitly encoded) */
new_huff.num_rows = get_bits(gb, 4);
if (!new_huff.num_rows) {
av_log(avctx, AV_LOG_ERROR, "Empty custom Huffman table!\n");
return AVERROR_INVALIDDATA;
}

for (i = 0; i < new_huff.num_rows; i++)
new_huff.xbits[i] = get_bits(gb, 4);
@@ -136,9 +140,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
result = ff_ivi_create_huff_from_desc(&huff_tab->cust_desc,
&huff_tab->cust_tab, 0);
if (result) {
huff_tab->cust_desc.num_rows = 0; // reset faulty description
av_log(avctx, AV_LOG_ERROR,
"Error while initializing custom vlc table!\n");
return -1;
return result;
}
}
huff_tab->tab = &huff_tab->cust_tab;
@@ -207,14 +212,15 @@ int av_cold ff_ivi_init_planes(IVIPlaneDesc *planes, const IVIPicConfig *cfg)
band->width = b_width;
band->height = b_height;
band->pitch = width_aligned;
band->bufs[0] = av_malloc(buf_size);
band->bufs[1] = av_malloc(buf_size);
band->aheight = height_aligned;
band->bufs[0] = av_mallocz(buf_size);
band->bufs[1] = av_mallocz(buf_size);
if (!band->bufs[0] || !band->bufs[1])
return AVERROR(ENOMEM);

/* allocate the 3rd band buffer for scalability mode */
if (cfg->luma_bands > 1) {
band->bufs[2] = av_malloc(buf_size);
band->bufs[2] = av_mallocz(buf_size);
if (!band->bufs[2])
return AVERROR(ENOMEM);
}
@@ -377,6 +383,21 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
mv_x >>= 1;
mv_y >>= 1; /* convert halfpel vectors into fullpel ones */
}
if (mb->type) {
int dmv_x, dmv_y, cx, cy;

dmv_x = mb->mv_x >> band->is_halfpel;
dmv_y = mb->mv_y >> band->is_halfpel;
cx = mb->mv_x & band->is_halfpel;
cy = mb->mv_y & band->is_halfpel;

if ( mb->xpos + dmv_x < 0
|| mb->xpos + dmv_x + band->mb_size + cx > band->pitch
|| mb->ypos + dmv_y < 0
|| mb->ypos + dmv_y + band->mb_size + cy > band->aheight) {
return AVERROR_INVALIDDATA;
}
}
}

for (blk = 0; blk < num_blocks; blk++) {
@@ -389,6 +410,11 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
}

if (cbp & 1) { /* block coded ? */
if (!band->scan) {
av_log(NULL, AV_LOG_ERROR, "Scan pattern is not set.\n");
return AVERROR_INVALIDDATA;
}

scan_pos = -1;
memset(trvec, 0, num_coeffs*sizeof(trvec[0])); /* zero transform vector */
memset(col_flags, 0, sizeof(col_flags)); /* zero column flags */
@@ -469,7 +495,7 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
return 0;
}

void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
int ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
IVITile *tile, int32_t mv_scale)
{
int x, y, need_mc, mbn, blk, num_blocks, mv_x, mv_y, mc_type;
@@ -480,6 +506,13 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
void (*mc_no_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch,
int mc_type);

if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches "
"parameters %d in ivi_process_empty_tile()\n",
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
return AVERROR_INVALIDDATA;
}

offs = tile->ypos * band->pitch + tile->xpos;
mb = tile->mbs;
ref_mb = tile->ref_mbs;
@@ -560,6 +593,8 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
dst += band->pitch;
}
}

return 0;
}
@@ -132,6 +132,7 @@ typedef struct {
int band_num; ///< band number
int width;
int height;
int aheight; ///< aligned band height
const uint8_t *data_ptr; ///< ptr to the first byte of the band data
int data_size; ///< size of the band data
int16_t *buf; ///< pointer to the output buffer for this band
@@ -324,7 +325,7 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile);
 * @param[in] tile pointer to the tile descriptor
 * @param[in] mv_scale scaling factor for motion vectors
 */
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
int ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
IVITile *tile, int32_t mv_scale);

/**
@@ -143,6 +143,10 @@ static int decode_frame(AVCodecContext *avctx,
buf += 5;

if (video_size) {
if(video_size < 0) {
av_log(avctx, AV_LOG_ERROR, "video size %d invalid\n", video_size);
return AVERROR_INVALIDDATA;
}
if (avctx->reget_buffer(avctx, &s->frame) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
@@ -33,6 +33,7 @@
#define KMVC_KEYFRAME 0x80
#define KMVC_PALETTE 0x40
#define KMVC_METHOD 0x0F
#define MAX_PALSIZE 256

/*
 * Decoder context
@@ -43,7 +44,7 @@ typedef struct KmvcContext {

int setpal;
int palsize;
uint32_t pal[256];
uint32_t pal[MAX_PALSIZE];
uint8_t *cur, *prev;
uint8_t *frm0, *frm1;
} KmvcContext;
@@ -414,6 +415,10 @@ static av_cold int decode_init(AVCodecContext * avctx)
c->palsize = 127;
} else {
c->palsize = AV_RL16(avctx->extradata + 10);
if (c->palsize >= MAX_PALSIZE) {
av_log(avctx, AV_LOG_ERROR, "KMVC palette too large\n");
return AVERROR_INVALIDDATA;
}
}

if (avctx->extradata_size == 1036) { // palette in extradata
@@ -322,6 +322,11 @@ static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
output_zeros:
if (l->zeros_rem) {
count = FFMIN(l->zeros_rem, width - i);
if (end - dst < count) {
av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
return AVERROR_INVALIDDATA;
}

memset(dst, 0, count);
l->zeros_rem -= count;
dst += count;
@@ -55,6 +55,11 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
int w4 = (avctx->width + 3) & ~3;
int h4 = (avctx->height + 3) & ~3;

if(avctx->extradata_size < 2){
av_log(avctx, AV_LOG_ERROR, "extradata too small\n");
return AVERROR_INVALIDDATA;
}

motionpixels_tableinit();
mp->avctx = avctx;
dsputil_init(&mp->dsp, avctx);
@@ -191,10 +196,13 @@ static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
p = mp_get_yuv_from_rgb(mp, x - 1, y);
} else {
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
p.y = av_clip(p.y, 0, 31);
if ((x & 3) == 0) {
if ((y & 3) == 0) {
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
p.v = av_clip(p.v, -32, 31);
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
p.u = av_clip(p.u, -32, 31);
mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
} else {
p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
@@ -218,9 +226,12 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
p = mp_get_yuv_from_rgb(mp, 0, y);
} else {
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
p.y = av_clip(p.y, 0, 31);
if ((y & 3) == 0) {
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
p.v = av_clip(p.v, -32, 31);
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
p.u = av_clip(p.u, -32, 31);
}
mp->vpt[y] = p;
mp_set_rgb_from_yuv(mp, 0, y, &p);
@@ -138,7 +138,8 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
c->frames = 1 << (get_bits(&gb, 3) * 2);

avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->channels = channels;

if(vlc_initialized) return 0;
av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");
@@ -1151,6 +1151,7 @@ typedef struct Mpeg1Context {
int save_width, save_height, save_progressive_seq;
AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator
int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
int extradata_decoded;
} Mpeg1Context;

static av_cold int mpeg_decode_init(AVCodecContext *avctx)
@@ -2315,8 +2316,10 @@ static int mpeg_decode_frame(AVCodecContext *avctx,

s->slice_count= 0;

if(avctx->extradata && !avctx->frame_number)
if (avctx->extradata && !s->extradata_decoded) {
decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size);
s->extradata_decoded = 1;
}

return decode_chunks(avctx, picture, data_size, buf, buf_size);
}
@@ -651,13 +651,13 @@ try_again:
if ((cbpc & 16) == 0) {
/* 16x16 motion prediction */

h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if(!s->mcsel){
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;

my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
@@ -675,12 +675,12 @@ try_again:
int i;
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
int16_t *mot_val= ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;

my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
mot_val[0] = mx;
@@ -1245,14 +1245,14 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->field_select[0][0]= get_bits1(&s->gb);
s->field_select[0][1]= get_bits1(&s->gb);

h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

for(i=0; i<2; i++){
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;

my = h263_decode_motion(s, pred_y/2, s->f_code);
my = ff_h263_decode_motion(s, pred_y/2, s->f_code);
if (my >= 0xffff)
return -1;

@@ -1263,13 +1263,13 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);

if (mx >= 0xffff)
return -1;

my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);

if (my >= 0xffff)
return -1;
@@ -1280,12 +1280,12 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;

my = h263_decode_motion(s, pred_y, s->f_code);
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
s->mv[0][i][0] = mx;
@@ -1381,8 +1381,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(USES_LIST(mb_type, 0)){
s->mv_dir = MV_DIR_FORWARD;

mx = h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
my = h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->mv[0][0][0] = mx;
s->last_mv[0][1][1]= s->last_mv[0][0][1]= s->mv[0][0][1] = my;
}
@@ -1390,8 +1390,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(USES_LIST(mb_type, 1)){
s->mv_dir |= MV_DIR_BACKWARD;

mx = h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
my = h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
s->last_mv[1][1][0]= s->last_mv[1][0][0]= s->mv[1][0][0] = mx;
s->last_mv[1][1][1]= s->last_mv[1][0][1]= s->mv[1][0][1] = my;
}
@@ -1402,8 +1402,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;

for(i=0; i<2; i++){
mx = h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
my = h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
mx = ff_h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
my = ff_h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0] = mx;
s->last_mv[0][i][1]= (s->mv[0][i][1] = my)*2;
}
@@ -1413,8 +1413,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->mv_dir |= MV_DIR_BACKWARD;

for(i=0; i<2; i++){
mx = h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
my = h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
mx = ff_h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
my = ff_h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
s->last_mv[1][i][0]= s->mv[1][i][0] = mx;
s->last_mv[1][i][1]= (s->mv[1][i][1] = my)*2;
}
@@ -1426,8 +1426,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
if(IS_SKIP(mb_type))
mx=my=0;
else{
mx = h263_decode_motion(s, 0, 1);
my = h263_decode_motion(s, 0, 1);
mx = ff_h263_decode_motion(s, 0, 1);
my = ff_h263_decode_motion(s, 0, 1);
}

s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
@@ -727,7 +727,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}

/* motion vectors: 16x16 mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, s->f_code);
@@ -751,7 +751,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}

/* motion vectors: 16x8 interlaced mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
pred_y /=2;

put_bits(&s->pb, 1, s->field_select[0][0]);
@@ -779,7 +779,7 @@ void mpeg4_encode_mb(MpegEncContext * s,

for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
@@ -210,7 +210,7 @@ static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
else
g->long_end = 4; /* 8000 Hz */

g->short_start = 2 + (s->sample_rate_index != 8);
g->short_start = 3;
} else {
g->long_end = 0;
g->short_start = 0;
@@ -725,7 +725,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
0, 0, 0,
ref_picture, pix_op, qpix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
}else if(!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) && s->mspel && s->codec_id == CODEC_ID_WMV2){
} else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
s->mspel && s->codec_id == CODEC_ID_WMV2) {
ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
@@ -582,7 +582,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
break;
case CODEC_ID_H263:
if (!CONFIG_H263_ENCODER) return -1;
if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height) == 8) {
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height) == 8) {
av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
return -1;
}
@@ -708,7 +708,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
ff_h261_encode_init(s);
if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
h263_encode_init(s);
ff_h263_encode_init(s);
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
ff_msmpeg4_encode_init(s);
if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
@@ -1768,7 +1768,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
case CODEC_ID_RV10:
case CODEC_ID_RV20:
if (CONFIG_H263_ENCODER)
h263_encode_mb(s, s->block, motion_x, motion_y);
ff_h263_encode_mb(s, s->block, motion_x, motion_y);
break;
case CODEC_ID_MJPEG:
if (CONFIG_MJPEG_ENCODER)
@@ -2200,7 +2200,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
case CODEC_ID_H263:
case CODEC_ID_H263P:
if (CONFIG_H263_ENCODER)
h263_encode_gob_header(s, mb_y);
ff_h263_encode_gob_header(s, mb_y);
break;
}

@@ -2950,7 +2950,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
ff_flv_encode_picture_header(s, picture_number);
else if (CONFIG_H263_ENCODER)
h263_encode_picture_header(s, picture_number);
ff_h263_encode_picture_header(s, picture_number);
break;
case FMT_MPEG1:
if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
@@ -511,7 +511,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
if (val == 0) {
/* zero vector */
code = 0;
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
} else {
bit_size = s->f_code - 1;
range = 1 << bit_size;
@@ -530,7 +530,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
code = (val >> bit_size) + 1;
bits = val & (range - 1);

put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
if (bit_size > 0) {
put_bits(&s->pb, bit_size, bits);
}
@@ -579,7 +579,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,

s->misc_bits += get_bits_diff(s);

h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
msmpeg4v2_encode_motion(s, motion_x - pred_x);
msmpeg4v2_encode_motion(s, motion_y - pred_y);
}else{
@@ -590,7 +590,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
s->misc_bits += get_bits_diff(s);

/* motion vector */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_msmpeg4_encode_motion(s, motion_x - pred_x,
motion_y - pred_y);
}
@@ -1138,7 +1138,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
cbp|= cbpy<<2;
if(s->msmpeg4_version==1 || (cbp&3) != 3) cbp^= 0x3C;

h263_pred_motion(s, 0, 0, &mx, &my);
ff_h263_pred_motion(s, 0, 0, &mx, &my);
mx= msmpeg4v2_decode_motion(s, mx, 1);
my= msmpeg4v2_decode_motion(s, my, 1);

@@ -1224,7 +1224,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
s->rl_table_index = decode012(&s->gb);
s->rl_chroma_table_index = s->rl_table_index;
}
h263_pred_motion(s, 0, 0, &mx, &my);
ff_h263_pred_motion(s, 0, 0, &mx, &my);
if (ff_msmpeg4_decode_motion(s, &mx, &my) < 0)
return -1;
s->mv_dir = MV_DIR_FORWARD;
@@ -1320,8 +1320,8 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
&v2_mb_type[0][1], 2, 1,
&v2_mb_type[0][0], 2, 1, 128);
INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33,
&mvtab[0][1], 2, 1,
&mvtab[0][0], 2, 1, 538);
&ff_mvtab[0][1], 2, 1,
&ff_mvtab[0][0], 2, 1, 538);

INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128,
&wmv2_inter_table[0][0][1], 8, 4,
@@ -592,9 +592,9 @@ static const int8_t table4_run[168] = {
29, 30, 31, 32, 33, 34, 35, 36,
};

extern const uint16_t inter_vlc[103][2];
extern const int8_t inter_level[102];
extern const int8_t inter_run[102];
extern const uint16_t ff_inter_vlc[103][2];
extern const int8_t ff_inter_level[102];
extern const int8_t ff_inter_run[102];

extern const uint16_t ff_mpeg4_intra_vlc[103][2];
extern const int8_t ff_mpeg4_intra_level[102];
@@ -647,9 +647,9 @@ RLTable rl_table[NB_RL_TABLES] = {
{
102,
58,
inter_vlc,
inter_run,
inter_level,
ff_inter_vlc,
ff_inter_run,
ff_inter_level,
},
};
@@ -191,9 +191,10 @@ retry:
}
if (c->codec_frameheader) {
int w, h, q, res;
if (buf_size < 12) {
if (buf_size < RTJPEG_HEADER_SIZE || buf[4] != RTJPEG_HEADER_SIZE ||
buf[5] != RTJPEG_FILE_VERSION) {
av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n");
return -1;
return AVERROR_INVALIDDATA;
}
w = AV_RL16(&buf[6]);
h = AV_RL16(&buf[8]);
@@ -207,8 +208,8 @@ retry:
size_change = 1;
goto retry;
}
buf = &buf[12];
buf_size -= 12;
buf = &buf[RTJPEG_HEADER_SIZE];
buf_size -= RTJPEG_HEADER_SIZE;
}

if ((size_change || keyframe) && c->pic.data[0])
@@ -148,7 +148,7 @@ static void add_paeth_prediction_c(uint8_t *dst, uint8_t *src, uint8_t *top, int
if(bpp >= 2) g = dst[1];\
if(bpp >= 3) b = dst[2];\
if(bpp >= 4) a = dst[3];\
for(; i < size; i+=bpp) {\
for(; i <= size - bpp; i+=bpp) {\
dst[i+0] = r = op(r, src[i+0], last[i+0]);\
if(bpp == 1) continue;\
dst[i+1] = g = op(g, src[i+1], last[i+1]);\
@@ -164,13 +164,9 @@ static void add_paeth_prediction_c(uint8_t *dst, uint8_t *src, uint8_t *top, int
else if(bpp == 2) UNROLL1(2, op)\
else if(bpp == 3) UNROLL1(3, op)\
else if(bpp == 4) UNROLL1(4, op)\
else {\
for (; i < size; i += bpp) {\
int j;\
for (j = 0; j < bpp; j++)\
dst[i+j] = op(dst[i+j-bpp], src[i+j], last[i+j]);\
}\
}
for (; i < size; i++) {\
dst[i] = op(dst[i-bpp], src[i], last[i]);\
}\

/* NOTE: 'dst' can be equal to 'last' */
static void png_filter_row(PNGDecContext *s, uint8_t *dst, int filter_type,
@@ -469,11 +465,12 @@ static int decode_frame(AVCodecContext *avctx,
avctx->pix_fmt = PIX_FMT_RGB48BE;
} else if (s->bit_depth == 1) {
avctx->pix_fmt = PIX_FMT_MONOBLACK;
} else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
} else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_PALETTE) {
avctx->pix_fmt = PIX_FMT_PAL8;
} else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
avctx->pix_fmt = PIX_FMT_GRAY8A;
avctx->pix_fmt = PIX_FMT_Y400A;
} else {
goto fail;
}
@@ -881,9 +881,13 @@ static void synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int l
break;

case 30:
if (BITS_LEFT(length,gb) >= 4)
samples[0] = type30_dequant[qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1)];
else
if (BITS_LEFT(length,gb) >= 4) {
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1);
if (index < FF_ARRAY_ELEMS(type30_dequant)) {
samples[0] = type30_dequant[index];
} else
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
} else
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);

run = 1;
@@ -897,8 +901,12 @@ static void synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int l
type34_predictor = samples[0];
type34_first = 0;
} else {
samples[0] = type34_delta[qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1)] / type34_div + type34_predictor;
type34_predictor = samples[0];
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1);
if (index < FF_ARRAY_ELEMS(type34_delta)) {
samples[0] = type34_delta[index] / type34_div + type34_predictor;
type34_predictor = samples[0];
} else
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
}
} else {
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
@@ -1230,6 +1238,11 @@ static void qdm2_decode_super_block (QDM2Context *q)
for (i = 0; packet_bytes > 0; i++) {
int j;

if (i>=FF_ARRAY_ELEMS(q->sub_packet_list_A)) {
SAMPLES_NEEDED_2("too many packet bytes");
return;
}

q->sub_packet_list_A[i].next = NULL;

if (i > 0) {
@@ -157,6 +157,12 @@ static av_cold int roq_decode_init(AVCodecContext *avctx)
RoqContext *s = avctx->priv_data;

s->avctx = avctx;

if (avctx->width%16 || avctx->height%16) {
av_log_ask_for_sample(avctx, "dimensions not being a multiple of 16 are unsupported\n");
return AVERROR_PATCHWELCOME;
}

s->width = avctx->width;
s->height = avctx->height;
avcodec_get_frame_defaults(&s->frames[0]);
@@ -25,6 +25,9 @@
#include <stdint.h>
#include "dsputil.h"

#define RTJPEG_FILE_VERSION 0
#define RTJPEG_HEADER_SIZE 12

typedef struct {
int w, h;
DSPContext *dsp;
@@ -498,7 +498,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
if (MPV_common_init(s) < 0)
return -1;

h263_decode_init_vlc(s);
ff_h263_decode_init_vlc(s);

/* init rv vlc */
if (!done) {
@@ -1280,6 +1280,14 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int

if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
if(s->width != r->si.width || s->height != r->si.height){

if (HAVE_THREADS &&
(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
av_log_missing_feature(s->avctx, "Width/height changing with "
"frame threading is", 0);
return AVERROR_PATCHWELCOME;
}

av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
MPV_common_end(s);
s->width = r->si.width;
@@ -1455,15 +1463,20 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
if(get_slice_offset(avctx, slices_hdr, 0) < 0 ||
get_slice_offset(avctx, slices_hdr, 0) > buf_size){
av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
return -1;
return AVERROR_INVALIDDATA;
}
init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), (buf_size-get_slice_offset(avctx, slices_hdr, 0))*8);
if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
return -1;
return AVERROR_INVALIDDATA;
}
if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B)
return -1;
if ((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) &&
si.type == AV_PICTURE_TYPE_B) {
av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
"reference data.\n");
return AVERROR_INVALIDDATA;
}

if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
@@ -123,7 +123,7 @@ static const uint8_t rv34_quant_to_vlc_set[2][31] = {

/**
 * table for obtaining the quantizer difference
 * @todo Use with modified_quant_tab from h263data.h.
 * @todo Use with ff_modified_quant_tab from h263data.h.
 */
static const uint8_t rv34_dquant_tab[2][32]={
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
@@ -645,7 +645,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
}
if(bits) { //decode 16-bit data
for(i = stereo; i >= 0; i--)
pred[i] = av_bswap16(get_bits(&gb, 16));
pred[i] = sign_extend(av_bswap16(get_bits(&gb, 16)), 16);
for(i = 0; i <= stereo; i++)
*samples++ = pred[i];
for(; i < unp_size / 2; i++) {
@@ -2299,7 +2299,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
s->m.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
h263_encode_init(&s->m); //mv_penalty
ff_h263_encode_init(&s->m); //mv_penalty

s->max_ref_frames = FFMAX(FFMIN(avctx->refs, MAX_REF_FRAMES), 1);
@@ -43,7 +43,7 @@
#undef NDEBUG
#include <assert.h>

extern const uint8_t mvtab[33][2];
extern const uint8_t ff_mvtab[33][2];

static VLC svq1_block_type;
static VLC svq1_motion_component;
@@ -768,8 +768,8 @@ static av_cold int svq1_decode_init(AVCodecContext *avctx)
&ff_svq1_block_type_vlc[0][0], 2, 1, 6);

INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
&mvtab[0][1], 2, 1,
&mvtab[0][0], 2, 1, 176);
&ff_mvtab[0][1], 2, 1,
&ff_mvtab[0][0], 2, 1, 176);

for (i = 0; i < 6; i++) {
static const uint8_t sizes[2][6] = {{14, 10, 14, 18, 16, 18}, {10, 10, 14, 14, 14, 16}};
@@ -406,7 +406,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
int mx, my, pred_x, pred_y, dxy;
int16_t *motion_ptr;

motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
motion_ptr= ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
for(i=0; i<6; i++)
init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);
@@ -496,7 +496,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
h263_encode_init(&s->m); //mv_penalty
ff_h263_encode_init(&s->m); //mv_penalty

return 0;
}
@@ -396,7 +396,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
break;
case TIFF_ROWSPERSTRIP:
if (type == TIFF_LONG && value == UINT_MAX)
value = s->avctx->height;
value = s->height;
if(value < 1){
av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
return -1;
@@ -305,6 +305,10 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,

strip_sizes = av_mallocz(sizeof(*strip_sizes) * strips);
strip_offsets = av_mallocz(sizeof(*strip_offsets) * strips);
if (!strip_sizes || !strip_offsets) {
ret = AVERROR(ENOMEM);
goto fail;
}

bytes_per_row = (((s->width - 1)/s->subsampling[0] + 1) * s->bpp
* s->subsampling[0] * s->subsampling[1] + 7) >> 3;
@@ -312,6 +316,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
yuv_line = av_malloc(bytes_per_row);
if (yuv_line == NULL){
av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
ret = AVERROR(ENOMEM);
goto fail;
}
}
@@ -324,6 +329,10 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,

zlen = bytes_per_row * s->rps;
zbuf = av_malloc(zlen);
if (!zbuf) {
ret = AVERROR(ENOMEM);
goto fail;
}
strip_offsets[0] = ptr - buf;
zn = 0;
for (j = 0; j < s->rps; j++) {
@@ -348,8 +357,13 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
} else
#endif
{
if(s->compr == TIFF_LZW)
if (s->compr == TIFF_LZW) {
s->lzws = av_malloc(ff_lzw_encode_state_size);
if (!s->lzws) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < s->height; i++) {
if (strip_sizes[i / s->rps] == 0) {
if(s->compr == TIFF_LZW){
@@ -520,6 +520,10 @@ hres,vres,i,i%vres (0 < i < 4)
}

#define APPLY_C_PREDICTOR() \
if(index > 1023){\
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
return; \
}\
predictor_pair = s->c_predictor_table[index]; \
horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) { \
@@ -537,6 +541,10 @@ hres,vres,i,i%vres (0 < i < 4)
index++;

#define APPLY_C_PREDICTOR_24() \
if(index > 1023){\
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
return; \
}\
predictor_pair = s->c_predictor_table[index]; \
horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) { \
@@ -555,6 +563,10 @@ hres,vres,i,i%vres (0 < i < 4)


#define APPLY_Y_PREDICTOR() \
if(index > 1023){\
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
return; \
}\
predictor_pair = s->y_predictor_table[index]; \
horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) { \
@@ -572,6 +584,10 @@ hres,vres,i,i%vres (0 < i < 4)
index++;

#define APPLY_Y_PREDICTOR_24() \
if(index > 1023){\
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
return; \
}\
predictor_pair = s->y_predictor_table[index]; \
horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) { \
@@ -3840,9 +3840,11 @@ AVCodec ff_vc1_decoder = {
vc1_decode_frame,
CODEC_CAP_DR1 | CODEC_CAP_DELAY,
NULL,
.flush = ff_mpeg_flush,
.long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,
.profiles = NULL_IF_CONFIG_SMALL(profiles)
.profiles = NULL_IF_CONFIG_SMALL(profiles),
.flush = ff_mpeg_flush,
};

#if CONFIG_WMV3_DECODER
@@ -3857,9 +3859,11 @@ AVCodec ff_wmv3_decoder = {
vc1_decode_frame,
CODEC_CAP_DR1 | CODEC_CAP_DELAY,
NULL,
.flush = ff_mpeg_flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,
.profiles = NULL_IF_CONFIG_SMALL(profiles)
.profiles = NULL_IF_CONFIG_SMALL(profiles),
.flush = ff_mpeg_flush,
};
#endif
@@ -117,7 +117,8 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
return 0;
}

void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
int ff_vorbis_ready_floor1_list(AVCodecContext *avccontext,
vorbis_floor1_entry *list, int values)
{
int i;
list[0].sort = 0;
@@ -141,6 +142,11 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
for (i = 0; i < values - 1; i++) {
int j;
for (j = i + 1; j < values; j++) {
if (list[i].x == list[j].x) {
av_log(avccontext, AV_LOG_ERROR,
"Duplicate value found in floor 1 X coordinates\n");
return AVERROR_INVALIDDATA;
}
if (list[list[i].sort].x > list[list[j].sort].x) {
int tmp = list[i].sort;
list[i].sort = list[j].sort;
@@ -148,6 +154,7 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
}
}
}
return 0;
}

static inline void render_line_unrolled(intptr_t x, int y, int x1,
@@ -36,7 +36,8 @@ typedef struct {
uint16_t high;
} vorbis_floor1_entry;

void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values);
int ff_vorbis_ready_floor1_list(AVCodecContext *avccontext,
vorbis_floor1_entry *list, int values);
unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n); // x^(1/n)
int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num);
void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values,
@@ -559,7 +559,11 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
}

// Precalculate order of x coordinates - needed for decode
ff_vorbis_ready_floor1_list(floor_setup->data.t1.list, floor_setup->data.t1.x_list_dim);
if (ff_vorbis_ready_floor1_list(vc->avccontext,
floor_setup->data.t1.list,
floor_setup->data.t1.x_list_dim)) {
return AVERROR_INVALIDDATA;
}
} else if (floor_setup->floor_type == 0) {
unsigned max_codebook_dim = 0;
@@ -155,7 +155,7 @@ static int cb_lookup_vals(int lookup, int dimentions, int entries)
return 0;
}

static void ready_codebook(vorbis_enc_codebook *cb)
static int ready_codebook(vorbis_enc_codebook *cb)
{
int i;

@@ -167,6 +167,8 @@ static void ready_codebook(vorbis_enc_codebook *cb)
int vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
cb->dimentions = av_malloc(sizeof(float) * cb->nentries * cb->ndimentions);
cb->pow2 = av_mallocz(sizeof(float) * cb->nentries);
if (!cb->dimentions || !cb->pow2)
return AVERROR(ENOMEM);
for (i = 0; i < cb->nentries; i++) {
float last = 0;
int j;
@@ -187,13 +189,16 @@ static void ready_codebook(vorbis_enc_codebook *cb)
cb->pow2[i] /= 2.;
}
}
return 0;
}

static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
static int ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
{
int i;
assert(rc->type == 2);
rc->maxes = av_mallocz(sizeof(float[2]) * rc->classifications);
if (!rc->maxes)
return AVERROR(ENOMEM);
for (i = 0; i < rc->classifications; i++) {
int j;
vorbis_enc_codebook * cb;
@@ -223,15 +228,16 @@ static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
rc->maxes[i][0] += 0.8;
rc->maxes[i][1] += 0.8;
}
return 0;
}

static void create_vorbis_context(vorbis_enc_context *venc,
AVCodecContext *avccontext)
static int create_vorbis_context(vorbis_enc_context *venc,
AVCodecContext *avccontext)
{
vorbis_enc_floor *fc;
vorbis_enc_residue *rc;
vorbis_enc_mapping *mc;
int i, book;
int i, book, ret;

venc->channels = avccontext->channels;
venc->sample_rate = avccontext->sample_rate;
@@ -239,6 +245,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,

venc->ncodebooks = FF_ARRAY_ELEMS(cvectors);
venc->codebooks = av_malloc(sizeof(vorbis_enc_codebook) * venc->ncodebooks);
if (!venc->codebooks)
return AVERROR(ENOMEM);

// codebook 0..14 - floor1 book, values 0..255
// codebook 15 residue masterbook
@@ -255,27 +263,36 @@ static void create_vorbis_context(vorbis_enc_context *venc,

cb->lens = av_malloc(sizeof(uint8_t) * cb->nentries);
cb->codewords = av_malloc(sizeof(uint32_t) * cb->nentries);
if (!cb->lens || !cb->codewords)
return AVERROR(ENOMEM);
memcpy(cb->lens, cvectors[book].clens, cvectors[book].len);
memset(cb->lens + cvectors[book].len, 0, cb->nentries - cvectors[book].len);

if (cb->lookup) {
vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
cb->quantlist = av_malloc(sizeof(int) * vals);
if (!cb->quantlist)
return AVERROR(ENOMEM);
for (i = 0; i < vals; i++)
cb->quantlist[i] = cvectors[book].quant[i];
} else {
cb->quantlist = NULL;
}
ready_codebook(cb);
if ((ret = ready_codebook(cb)) < 0)
return ret;
}

venc->nfloors = 1;
venc->floors = av_malloc(sizeof(vorbis_enc_floor) * venc->nfloors);
if (!venc->floors)
return AVERROR(ENOMEM);

// just 1 floor
fc = &venc->floors[0];
fc->partitions = NUM_FLOOR_PARTITIONS;
fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions);
if (!fc->partition_to_class)
return AVERROR(ENOMEM);
fc->nclasses = 0;
for (i = 0; i < fc->partitions; i++) {
static const int a[] = {0, 1, 2, 2, 3, 3, 4, 4};
@@ -284,6 +301,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
}
fc->nclasses++;
fc->classes = av_malloc(sizeof(vorbis_enc_floor_class) * fc->nclasses);
if (!fc->classes)
return AVERROR(ENOMEM);
for (i = 0; i < fc->nclasses; i++) {
vorbis_enc_floor_class * c = &fc->classes[i];
int j, books;
@@ -292,6 +311,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
c->masterbook = floor_classes[i].masterbook;
books = (1 << c->subclass);
c->books = av_malloc(sizeof(int) * books);
if (!c->books)
return AVERROR(ENOMEM);
for (j = 0; j < books; j++)
c->books[j] = floor_classes[i].nbooks[j];
}
@@ -303,6 +324,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
fc->values += fc->classes[fc->partition_to_class[i]].dim;

fc->list = av_malloc(sizeof(vorbis_floor1_entry) * fc->values);
if (!fc->list)
return AVERROR(ENOMEM);
fc->list[0].x = 0;
fc->list[1].x = 1 << fc->rangebits;
for (i = 2; i < fc->values; i++) {
@@ -313,10 +336,13 @@ static void create_vorbis_context(vorbis_enc_context *venc,
};
fc->list[i].x = a[i - 2];
}
ff_vorbis_ready_floor1_list(fc->list, fc->values);
if (ff_vorbis_ready_floor1_list(avccontext, fc->list, fc->values))
return AVERROR(EINVAL);

venc->nresidues = 1;
venc->residues = av_malloc(sizeof(vorbis_enc_residue) * venc->nresidues);
if (!venc->residues)
return AVERROR(ENOMEM);

// single residue
rc = &venc->residues[0];
@@ -327,6 +353,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
rc->classifications = 10;
rc->classbook = 15;
rc->books = av_malloc(sizeof(*rc->books) * rc->classifications);
if (!rc->books)
return AVERROR(ENOMEM);
{
static const int8_t a[10][8] = {
{ -1, -1, -1, -1, -1, -1, -1, -1, },
@@ -342,19 +370,26 @@ static void create_vorbis_context(vorbis_enc_context *venc,
};
memcpy(rc->books, a, sizeof a);
}
ready_residue(rc, venc);
if ((ret = ready_residue(rc, venc)) < 0)
return ret;

venc->nmappings = 1;
venc->mappings = av_malloc(sizeof(vorbis_enc_mapping) * venc->nmappings);
if (!venc->mappings)
return AVERROR(ENOMEM);

// single mapping
mc = &venc->mappings[0];
mc->submaps = 1;
mc->mux = av_malloc(sizeof(int) * venc->channels);
if (!mc->mux)
return AVERROR(ENOMEM);
for (i = 0; i < venc->channels; i++)
mc->mux[i] = 0;
mc->floor = av_malloc(sizeof(int) * mc->submaps);
mc->residue = av_malloc(sizeof(int) * mc->submaps);
if (!mc->floor || !mc->residue)
return AVERROR(ENOMEM);
for (i = 0; i < mc->submaps; i++) {
mc->floor[i] = 0;
mc->residue[i] = 0;
@@ -362,6 +397,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
mc->coupling_steps = venc->channels == 2 ? 1 : 0;
mc->magnitude = av_malloc(sizeof(int) * mc->coupling_steps);
mc->angle = av_malloc(sizeof(int) * mc->coupling_steps);
if (!mc->magnitude || !mc->angle)
return AVERROR(ENOMEM);
if (mc->coupling_steps) {
mc->magnitude[0] = 0;
mc->angle[0] = 1;
@@ -369,6 +406,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,

venc->nmodes = 1;
venc->modes = av_malloc(sizeof(vorbis_enc_mode) * venc->nmodes);
if (!venc->modes)
return AVERROR(ENOMEM);

// single mode
venc->modes[0].blockflag = 0;
@@ -379,12 +418,18 @@ static void create_vorbis_context(vorbis_enc_context *venc,
venc->samples = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]));
venc->floor = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
venc->coeffs = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
if (!venc->saved || !venc->samples || !venc->floor || !venc->coeffs)
return AVERROR(ENOMEM);

venc->win[0] = ff_vorbis_vwin[venc->log2_blocksize[0] - 6];
venc->win[1] = ff_vorbis_vwin[venc->log2_blocksize[1] - 6];

ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0);
ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0);
if ((ret = ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0)) < 0)
return ret;
if ((ret = ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0)) < 0)
return ret;

return 0;
}

static void put_float(PutBitContext *pb, float f)
@@ -647,6 +692,8 @@ static int put_main_header(vorbis_enc_context *venc, uint8_t **out)

len = hlens[0] + hlens[1] + hlens[2];
p = *out = av_mallocz(64 + len + len/255);
if (!p)
return AVERROR(ENOMEM);

*p++ = 2;
p += av_xiphlacing(p, hlens[0]);
@@ -952,33 +999,6 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a
return 1;
}

static av_cold int vorbis_encode_init(AVCodecContext *avccontext)
{
vorbis_enc_context *venc = avccontext->priv_data;

if (avccontext->channels != 2) {
av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n");
return -1;
}

create_vorbis_context(venc, avccontext);

if (avccontext->flags & CODEC_FLAG_QSCALE)
venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.;
else
venc->quality = 0.03;
venc->quality *= venc->quality;

avccontext->extradata_size = put_main_header(venc, (uint8_t**)&avccontext->extradata);

avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1);

avccontext->coded_frame = avcodec_alloc_frame();
avccontext->coded_frame->key_frame = 1;

return 0;
}

static int vorbis_encode_frame(AVCodecContext *avccontext,
unsigned char *packets,
int buf_size, void *data)
@@ -1102,6 +1122,43 @@ static av_cold int vorbis_encode_close(AVCodecContext *avccontext)
return 0 ;
}

static av_cold int vorbis_encode_init(AVCodecContext *avccontext)
{
vorbis_enc_context *venc = avccontext->priv_data;
|
||||
int ret;
|
||||
|
||||
if (avccontext->channels != 2) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((ret = create_vorbis_context(venc, avccontext)) < 0)
|
||||
goto error;
|
||||
|
||||
if (avccontext->flags & CODEC_FLAG_QSCALE)
|
||||
venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.;
|
||||
else
|
||||
venc->quality = 0.03;
|
||||
venc->quality *= venc->quality;
|
||||
|
||||
if ((ret = put_main_header(venc, (uint8_t**)&avccontext->extradata)) < 0)
|
||||
goto error;
|
||||
avccontext->extradata_size = ret;
|
||||
|
||||
avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1);
|
||||
|
||||
avccontext->coded_frame = avcodec_alloc_frame();
|
||||
if (!avccontext->coded_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
error:
|
||||
vorbis_encode_close(avccontext);
|
||||
return ret;
|
||||
}
|
||||
|
||||
AVCodec ff_vorbis_encoder = {
|
||||
"vorbis",
|
||||
AVMEDIA_TYPE_AUDIO,
|
||||
|
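The hunks above all follow one pattern: every allocation in the encoder setup path is checked, failures return an error code, and the relocated vorbis_encode_init() funnels all failures through a single error: label that calls the existing close function. A minimal standalone sketch of that shape, with illustrative names and types rather than FFmpeg's:

    #include <stdlib.h>

    typedef struct SketchContext {
        int   *table;
        float *samples;
    } SketchContext;

    /* Free everything the context may own; safe on a half-built context. */
    static void sketch_close(SketchContext *ctx)
    {
        free(ctx->table);
        free(ctx->samples);
        ctx->table   = NULL;
        ctx->samples = NULL;
    }

    /* Check every allocation and route all failures through one cleanup path. */
    static int sketch_init(SketchContext *ctx, int nentries)
    {
        int ret = 0;

        ctx->table = malloc(sizeof(*ctx->table) * nentries);
        if (!ctx->table) {
            ret = -1;                /* stand-in for AVERROR(ENOMEM) */
            goto error;
        }

        ctx->samples = malloc(sizeof(*ctx->samples) * nentries);
        if (!ctx->samples) {
            ret = -1;
            goto error;
        }

        return 0;

    error:
        sketch_close(ctx);           /* close() must tolerate partial state */
        return ret;
    }

The key property is that the cleanup function only frees what was actually set, so the error path can reuse it no matter how far initialization got.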
@@ -47,18 +47,18 @@ static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
{
    vp56_rac_gets(c, 8);
    if(vp56_rac_gets(c, 5) > 5)
-       return 0;
+       return AVERROR_INVALIDDATA;
    vp56_rac_gets(c, 2);
    if (vp56_rac_get(c)) {
        av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
-       return 0;
+       return AVERROR_PATCHWELCOME;
    }
    rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */
    cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */
    if (!rows || !cols) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n",
               cols << 4, rows << 4);
-       return 0;
+       return AVERROR_INVALIDDATA;
    }
    vp56_rac_gets(c, 8); /* number of displayed macroblock rows */
    vp56_rac_gets(c, 8); /* number of displayed macroblock cols */
@@ -67,11 +67,11 @@ static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
            16*cols != s->avctx->coded_width ||
            16*rows != s->avctx->coded_height) {
            avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
-           return 2;
+           return VP56_SIZE_CHANGE;
        }
    } else if (!s->macroblocks)
-       return 0;
-   return 1;
+       return AVERROR_INVALIDDATA;
+   return 0;
}

static void vp5_parse_vector_adjustment(VP56Context *s, VP56mv *vect)
@@ -511,10 +511,16 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
        s->modelp = &s->models[is_alpha];

        res = s->parse_header(s, buf, remaining_buf_size, &golden_frame);
-       if (!res)
-           return -1;
+       if (res < 0) {
+           int i;
+           for (i = 0; i < 4; i++) {
+               if (s->frames[i].data[0])
+                   avctx->release_buffer(avctx, &s->frames[i]);
+           }
+           return res;
+       }

-       if (res == 2) {
+       if (res == VP56_SIZE_CHANGE) {
            int i;
            for (i = 0; i < 4; i++) {
                if (s->frames[i].data[0])
@@ -533,7 +539,7 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
            return -1;
        }

-       if (res == 2)
+       if (res == VP56_SIZE_CHANGE)
            if (vp56_size_changed(avctx)) {
                avctx->release_buffer(avctx, p);
                return -1;
@@ -38,6 +38,8 @@ typedef struct {
    int16_t y;
} DECLARE_ALIGNED(4, , VP56mv);

+#define VP56_SIZE_CHANGE 1
+
typedef void (*VP56ParseVectorAdjustment)(VP56Context *s,
                                          VP56mv *vect);
typedef void (*VP56Filter)(VP56Context *s, uint8_t *dst, uint8_t *src,
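VP56_SIZE_CHANGE replaces the bare return 2 / res == 2 convention, and the parse-header functions now return negative AVERROR codes instead of 0 on failure. A small sketch of the resulting convention, with hypothetical names standing in for the VP5/VP6 parsers and for the AVERROR macros:

    #include <errno.h>

    #define SIZE_CHANGE 1   /* positive, non-zero: success, but dimensions changed */

    /* Hypothetical header parser: <0 on error, SIZE_CHANGE when the coded size
     * differs from the configured size, 0 on plain success. */
    static int parse_header_sketch(int rows, int cols, int cur_w, int cur_h)
    {
        if (!rows || !cols)
            return -EINVAL;            /* stand-in for AVERROR_INVALIDDATA */
        if (16 * cols != cur_w || 16 * rows != cur_h)
            return SIZE_CHANGE;
        return 0;
    }

    static int decode_frame_sketch(int rows, int cols, int cur_w, int cur_h)
    {
        int res = parse_header_sketch(rows, cols, cur_w, cur_h);

        if (res < 0)
            return res;                /* propagate the error code unchanged */
        if (res == SIZE_CHANGE) {
            /* reallocate per-frame state for the new dimensions here */
        }
        return 0;
    }

Using a named constant instead of the magic value 2 also lets the compiler catch typos that a bare integer comparison would not.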
@@ -50,7 +50,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
    int vrt_shift = 0;
    int sub_version;
    int rows, cols;
-   int res = 1;
+   int res = 0;
    int separated_coeff = buf[0] & 1;

    s->framep[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
@@ -59,11 +59,11 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
    if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
        sub_version = buf[1] >> 3;
        if (sub_version > 8)
-           return 0;
+           return AVERROR_INVALIDDATA;
        s->filter_header = buf[1] & 0x06;
        if (buf[1] & 1) {
-           av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
-           return 0;
+           av_log_missing_feature(s->avctx, "Interlacing", 0);
+           return AVERROR_PATCHWELCOME;
        }
        if (separated_coeff || !s->filter_header) {
            coeff_offset = AV_RB16(buf+2) - 2;
@@ -77,7 +77,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
        /* buf[5] is number of displayed macroblock cols */
        if (!rows || !cols) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4);
-           return 0;
+           return AVERROR_INVALIDDATA;
        }

        if (!s->macroblocks || /* first frame */
@@ -88,7 +88,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
                s->avctx->width -= s->avctx->extradata[0] >> 4;
                s->avctx->height -= s->avctx->extradata[0] & 0x0F;
            }
-           res = 2;
+           res = VP56_SIZE_CHANGE;
        }

        ff_vp56_init_range_decoder(c, buf+6, buf_size-6);
@@ -100,7 +100,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
        s->sub_version = sub_version;
    } else {
        if (!s->sub_version || !s->avctx->coded_width || !s->avctx->coded_height)
-           return 0;
+           return AVERROR_INVALIDDATA;

        if (separated_coeff || !s->filter_header) {
            coeff_offset = AV_RB16(buf+1) - 2;
@@ -144,7 +144,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
    if (buf_size < 0) {
        if (s->framep[VP56_FRAME_CURRENT]->key_frame)
            avcodec_set_dimensions(s->avctx, 0, 0);
-       return 0;
+       return AVERROR_INVALIDDATA;
    }
    if (s->use_huffman) {
        s->parse_coeff = vp6_parse_coeff_huffman;
@@ -274,6 +274,7 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
        memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
        memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
        memset(&s->segmentation, 0, sizeof(s->segmentation));
+       memset(&s->lf_delta, 0, sizeof(s->lf_delta));
    }

    if (!s->macroblocks_base || /* first frame */
@@ -159,6 +159,12 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
        return -1;
    }

+   if (s->width & (s->vector_width - 1) ||
+       s->height & (s->vector_height - 1)) {
+       av_log(avctx, AV_LOG_ERROR, "Image size not multiple of block size\n");
+       return AVERROR_INVALIDDATA;
+   }
+
    /* allocate codebooks */
    s->codebook_size = MAX_CODEBOOK_SIZE;
    s->codebook = av_malloc(s->codebook_size);
@@ -521,6 +527,11 @@ static void vqa_decode_chunk(VqaContext *s)
        chunk_size = AV_RB32(&s->buf[cbp0_chunk + 4]);
        cbp0_chunk += CHUNK_PREAMBLE_SIZE;

+       if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
+           av_log(s->avctx, AV_LOG_ERROR, "cbp0 chunk too large (0x%X bytes)\n", chunk_size);
+           return AVERROR_INVALIDDATA;
+       }
+
        /* accumulate partial codebook */
        memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index],
               &s->buf[cbp0_chunk], chunk_size);
@@ -544,6 +555,11 @@ static void vqa_decode_chunk(VqaContext *s)
        chunk_size = AV_RB32(&s->buf[cbpz_chunk + 4]);
        cbpz_chunk += CHUNK_PREAMBLE_SIZE;

+       if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
+           av_log(s->avctx, AV_LOG_ERROR, "cbpz chunk too large (0x%X bytes)\n", chunk_size);
+           return AVERROR_INVALIDDATA;
+       }
+
        /* accumulate partial codebook */
        memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index],
               &s->buf[cbpz_chunk], chunk_size);
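Both the cbp0 and cbpz chunks are now rejected when they would overflow the fixed codebook accumulation buffer. The guard has to account for data already buffered, so it compares against the space remaining rather than the total size. A standalone sketch of that check, with an illustrative buffer size and names:

    #include <stdint.h>
    #include <string.h>

    #define ACCUM_BUF_SIZE 4096        /* illustrative, not the real codebook size */

    typedef struct Accumulator {
        uint8_t buf[ACCUM_BUF_SIZE];
        size_t  index;                 /* bytes already accumulated */
    } Accumulator;

    /* Append one chunk, refusing anything that does not fit in the space left. */
    static int accumulate_chunk(Accumulator *a, const uint8_t *chunk, size_t chunk_size)
    {
        if (chunk_size > ACCUM_BUF_SIZE - a->index)
            return -1;                 /* chunk too large: would overflow buf */

        memcpy(a->buf + a->index, chunk, chunk_size);
        a->index += chunk_size;
        return 0;
    }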
@@ -326,6 +326,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
        return AVERROR_INVALIDDATA;
    }

+   if (s->avctx->sample_rate <= 0) {
+       av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
+       return AVERROR_INVALIDDATA;
+   }
+
    s->num_channels = avctx->channels;

    if (s->num_channels < 0) {
@@ -1158,7 +1163,12 @@ static int decode_subframe(WMAProDecodeCtx *s)
        int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
        for (i = 0; i < s->channels_for_cur_subframe; i++) {
            int c = s->channel_indexes_for_cur_subframe[i];
-           s->channel[c].num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
+           int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
+           if (num_vec_coeffs > WMAPRO_BLOCK_MAX_SIZE) {
+               av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs);
+               return AVERROR_INVALIDDATA;
+           }
+           s->channel[c].num_vec_coeffs = num_vec_coeffs;
        }
    } else {
        for (i = 0; i < s->channels_for_cur_subframe; i++) {
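num_vec_coeffs is now read into a local variable, range checked, and only then stored into the channel state, so later loops never see an out-of-range count. A sketch of the same read-validate-commit order with stand-in names; the raw value is assumed to come from a small fixed-width bitstream field such as get_bits() returns:

    #define MAX_COEFFS 4096            /* stand-in for WMAPRO_BLOCK_MAX_SIZE */

    typedef struct Channel { int num_vec_coeffs; } Channel;

    static int set_num_vec_coeffs(Channel *ch, unsigned raw_bits)
    {
        unsigned num_vec_coeffs = raw_bits << 2;   /* the field is coded in units of 4 */

        if (num_vec_coeffs > MAX_COEFFS)
            return -1;                             /* reject before touching state */

        ch->num_vec_coeffs = (int)num_vec_coeffs;  /* commit only validated values */
        return 0;
    }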
@@ -171,7 +171,7 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
                       wmv2_inter_table[w->cbp_table_index][cbp + 64][0]);

        /* motion vector */
-       h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+       ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
        ff_msmpeg4_encode_motion(s, motion_x - pred_x,
                                 motion_y - pred_y);
    } else {
@@ -70,6 +70,11 @@ static int decode_frame(AVCodecContext *avctx,
    int prev_y = 0, prev_u = 0, prev_v = 0;
    uint8_t *rbuf;

+   if(buf_size<=8) {
+       av_log(avctx, AV_LOG_ERROR, "buf_size %d is too small\n", buf_size);
+       return AVERROR_INVALIDDATA;
+   }
+
    rbuf = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if(!rbuf){
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
@@ -511,6 +511,10 @@ static int xan_decode_frame(AVCodecContext *avctx,
        int i;
        tag = bytestream_get_le32(&buf);
        size = bytestream_get_be32(&buf);
+       if(size < 0) {
+           av_log(avctx, AV_LOG_ERROR, "Invalid tag size %d\n", size);
+           return AVERROR_INVALIDDATA;
+       }
        size = FFMIN(size, buf_end - buf);
        switch (tag) {
        case PALT_TAG:
@@ -90,6 +90,11 @@ static av_cold int yop_decode_init(AVCodecContext *avctx)
        return -1;
    }

+   if (!avctx->extradata) {
+       av_log(avctx, AV_LOG_ERROR, "extradata missing\n");
+       return AVERROR_INVALIDDATA;
+   }
+
    avctx->pix_fmt = PIX_FMT_PAL8;

    avcodec_get_frame_defaults(&s->frame);
@@ -200,6 +205,11 @@ static int yop_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

+   if (avpkt->size < 4 + 3*s->num_pal_colors) {
+       av_log(avctx, AV_LOG_ERROR, "packet of size %d too small\n", avpkt->size);
+       return AVERROR_INVALIDDATA;
+   }
+
    ret = avctx->get_buffer(avctx, &s->frame);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
@@ -215,6 +225,10 @@ static int yop_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
    s->low_nibble = NULL;

    is_odd_frame = avpkt->data[0];
+   if(is_odd_frame>1){
+       av_log(avctx, AV_LOG_ERROR, "frame is too odd %d\n", is_odd_frame);
+       return AVERROR_INVALIDDATA;
+   }
    firstcolor = s->first_color[is_odd_frame];
    palette = (uint32_t *)s->frame.data[1];

@@ -45,7 +45,11 @@ AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b)
    AVFilterFormats *ret;
    unsigned i, j, k = 0;

-   if (a == b) return a;
+   if (a == b)
+       return a;
+
+   if (a == b)
+       return a;

    ret = av_mallocz(sizeof(AVFilterFormats));

@@ -299,6 +299,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    PadContext *pad = inlink->dst->priv;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
+   AVFilterBufferRef *for_next_filter;
    int plane;

    for (plane = 0; plane < 4 && outpicref->data[plane]; plane++) {
@@ -335,12 +336,14 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
    outpicref->video->w = pad->w;
    outpicref->video->h = pad->h;

-   avfilter_start_frame(inlink->dst->outputs[0], outpicref);
+   for_next_filter = avfilter_ref_buffer(outpicref, ~0);
+   avfilter_start_frame(inlink->dst->outputs[0], for_next_filter);
}

static void end_frame(AVFilterLink *link)
{
    avfilter_end_frame(link->dst->outputs[0]);
+   avfilter_unref_buffer(link->dst->outputs[0]->out_buf);
    avfilter_unref_buffer(link->cur_buf);
}

@@ -195,6 +195,11 @@ static int fourxm_read_header(AVFormatContext *s,
            ret= -1;
            goto fail;
        }
+       if(!fourxm->tracks[current_track].adpcm && fourxm->tracks[current_track].bits<8){
+           av_log(s, AV_LOG_ERROR, "bits unspecified for non ADPCM\n");
+           ret = AVERROR_INVALIDDATA;
+           goto fail;
+       }
        i += 8 + size;

        /* allocate a new AVStream */
@@ -274,6 +274,9 @@ static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
            return AVERROR(ENOMEM);
        for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++)
            ape->seektable[i] = avio_rl32(pb);
+   }else{
+       av_log(s, AV_LOG_ERROR, "Missing seektable\n");
+       return -1;
    }

    ape->frames[0].pos = ape->firstframe;
@@ -1031,7 +1031,7 @@ resync:
            }
            ast->frame_offset += get_duration(ast, pkt->size);
        }
-       ast->remaining -= size;
+       ast->remaining -= err;
        if(!ast->remaining){
            avi->stream_index= -1;
            ast->packet_size= 0;
@@ -1043,7 +1043,7 @@ resync:
        }
        ast->seek_pos= 0;

-       return size;
+       return 0;
    }

    memset(d, -1, sizeof(int)*8);
@@ -470,12 +470,17 @@ static int ea_read_packet(AVFormatContext *s,

    while (!packet_read) {
        chunk_type = avio_rl32(pb);
-       chunk_size = (ea->big_endian ? avio_rb32(pb) : avio_rl32(pb)) - 8;
+       chunk_size = ea->big_endian ? avio_rb32(pb) : avio_rl32(pb);
+       if (chunk_size <= 8)
+           return AVERROR_INVALIDDATA;
+       chunk_size -= 8;

        switch (chunk_type) {
        /* audio data */
        case ISNh_TAG:
            /* header chunk also contains data; skip over the header portion*/
+           if (chunk_size < 32)
+               return AVERROR_INVALIDDATA;
            avio_skip(pb, 32);
            chunk_size -= 32;
        case ISNd_TAG:
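The chunk reader used to subtract the 8-byte preamble from the declared size before doing any validation, so a size smaller than 8 went negative or wrapped. The fix reads the raw size, rejects anything at or below 8, and only then subtracts. A compact sketch of that order of operations (names are illustrative):

    #include <stdint.h>

    #define CHUNK_PREAMBLE 8

    /* Returns the usable payload size, or -1 for a chunk whose declared size
     * cannot even cover its own preamble. */
    static int64_t usable_chunk_size(uint32_t declared_size)
    {
        if (declared_size <= CHUNK_PREAMBLE)
            return -1;                 /* subtracting first would underflow */
        return (int64_t)declared_size - CHUNK_PREAMBLE;
    }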
@@ -69,8 +69,7 @@ static int ogg_save(AVFormatContext *s)

    for (i = 0; i < ogg->nstreams; i++){
        struct ogg_stream *os = ogg->streams + i;
-       os->buf = av_malloc (os->bufsize);
-       memset (os->buf, 0, os->bufsize);
+       os->buf = av_mallocz (os->bufsize + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy (os->buf, ost->streams[i].buf, os->bufpos);
    }

@@ -161,13 +160,18 @@ static int ogg_new_stream(AVFormatContext *s, uint32_t serial, int new_avstream)
    AVStream *st;
    struct ogg_stream *os;

-   ogg->streams = av_realloc (ogg->streams,
-                              ogg->nstreams * sizeof (*ogg->streams));
+   os = av_realloc (ogg->streams, ogg->nstreams * sizeof (*ogg->streams));
+
+   if (!os)
+       return AVERROR(ENOMEM);
+
+   ogg->streams = os;
+
    memset (ogg->streams + idx, 0, sizeof (*ogg->streams));
    os = ogg->streams + idx;
    os->serial = serial;
    os->bufsize = DECODER_BUFFER_SIZE;
-   os->buf = av_malloc(os->bufsize);
+   os->buf = av_malloc(os->bufsize + FF_INPUT_BUFFER_PADDING_SIZE);
    os->header = -1;

    if (new_avstream) {
@@ -184,7 +188,7 @@ static int ogg_new_stream(AVFormatContext *s, uint32_t serial, int new_avstream)
static int ogg_new_buf(struct ogg *ogg, int idx)
{
    struct ogg_stream *os = ogg->streams + idx;
-   uint8_t *nb = av_malloc(os->bufsize);
+   uint8_t *nb = av_malloc(os->bufsize + FF_INPUT_BUFFER_PADDING_SIZE);
    int size = os->bufpos - os->pstart;
    if(os->buf){
        memcpy(nb, os->buf + os->pstart, size);
@@ -295,7 +299,9 @@ static int ogg_read_page(AVFormatContext *s, int *str)
    }

    if (os->bufsize - os->bufpos < size){
-       uint8_t *nb = av_malloc (os->bufsize *= 2);
+       uint8_t *nb = av_malloc ((os->bufsize *= 2) + FF_INPUT_BUFFER_PADDING_SIZE);
+       if (!nb)
+           return AVERROR(ENOMEM);
        memcpy (nb, os->buf, os->bufpos);
        av_free (os->buf);
        os->buf = nb;
@@ -309,6 +315,7 @@ static int ogg_read_page(AVFormatContext *s, int *str)
    os->granule = gp;
    os->flags = flags;

+   memset(os->buf + os->bufpos, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    if (str)
        *str = idx;

@@ -504,14 +511,28 @@ static int ogg_get_length(AVFormatContext *s)
    return 0;
}

-static int ogg_read_header(AVFormatContext *s, AVFormatParameters *ap)
+static int ogg_read_close(AVFormatContext *s)
{
    struct ogg *ogg = s->priv_data;
-   int ret, i;
+   int i;
+
+   for (i = 0; i < ogg->nstreams; i++) {
+       av_free(ogg->streams[i].buf);
+       av_free(ogg->streams[i].private);
+   }
+   av_free(ogg->streams);
+   return 0;
+}
+
+static int ogg_read_header(AVFormatContext *s)
+{
+   struct ogg *ogg = s->priv_data;
+   int i, ret;
    ogg->curidx = -1;
    //linear headers seek from start
-   ret = ogg_get_headers (s);
-   if (ret < 0){
+   ret = ogg_get_headers(s);
+   if (ret < 0) {
+       ogg_read_close(s);
        return ret;
    }

@@ -596,19 +617,6 @@ retry:
    return psize;
}

-static int ogg_read_close(AVFormatContext *s)
-{
-   struct ogg *ogg = s->priv_data;
-   int i;
-
-   for (i = 0; i < ogg->nstreams; i++){
-       av_free (ogg->streams[i].buf);
-       av_free (ogg->streams[i].private);
-   }
-   av_free (ogg->streams);
-   return 0;
-}
-
static int64_t ogg_read_timestamp(AVFormatContext *s, int stream_index,
                                  int64_t *pos_arg, int64_t pos_limit)
{
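Every Ogg stream buffer is now allocated with extra tail padding and the bytes after the payload are zeroed, so parsers that read slightly past the end of the packet stay inside allocated, initialized memory. A standalone sketch of that allocation pattern; INPUT_PADDING is an illustrative stand-in for FF_INPUT_BUFFER_PADDING_SIZE, and here the padding is cleared right at allocation time rather than after the payload is appended as the demuxer does:

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    #define INPUT_PADDING 16           /* stand-in for FF_INPUT_BUFFER_PADDING_SIZE */

    /* Allocate a demuxer buffer with zeroed tail padding. */
    static uint8_t *alloc_padded(size_t payload_size)
    {
        uint8_t *buf = malloc(payload_size + INPUT_PADDING);
        if (!buf)
            return NULL;
        /* the payload is filled by the caller; clear only the padding */
        memset(buf + payload_size, 0, INPUT_PADDING);
        return buf;
    }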
@@ -278,11 +278,11 @@ int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
                data++;
                break;
            }
-           if (data + size >= data_end || data + size < data)
+           if (size < 0 || size >= data_end - data)
                return -1;
            data += size;
            t = ff_amf_tag_size(data, data_end);
-           if (t < 0 || data + t >= data_end)
+           if (t < 0 || t >= data_end - data)
                return -1;
            data += t;
        }
@@ -311,7 +311,7 @@ int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
            int size = bytestream_get_be16(&data);
            if (!size)
                break;
-           if (data + size >= data_end || data + size < data)
+           if (size < 0 || size >= data_end - data)
                return -1;
            data += size;
            if (size == namelen && !memcmp(data-size, name, namelen)) {
@@ -332,7 +332,7 @@ int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
                return 0;
            }
            len = ff_amf_tag_size(data, data_end);
-           if (len < 0 || data + len >= data_end || data + len < data)
+           if (len < 0 || len >= data_end - data)
                return -1;
            data += len;
        }
@@ -362,7 +362,7 @@ static const char* rtmp_packet_type(int type)

static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *data_end)
{
-   int size;
+   unsigned int size;
    char buf[1024];

    if (data >= data_end)
@@ -381,7 +381,7 @@ static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *d
    } else {
        size = bytestream_get_be32(&data);
    }
-   size = FFMIN(size, 1023);
+   size = FFMIN(size, sizeof(buf) - 1);
    memcpy(buf, data, size);
    buf[size] = 0;
    av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf);
@@ -394,22 +394,21 @@ static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *d
    case AMF_DATA_TYPE_OBJECT:
        av_log(ctx, AV_LOG_DEBUG, " {\n");
        for (;;) {
-           int size = bytestream_get_be16(&data);
            int t;
-           memcpy(buf, data, size);
-           buf[size] = 0;
+           size = bytestream_get_be16(&data);
+           av_strlcpy(buf, data, FFMIN(sizeof(buf), size + 1));
            if (!size) {
                av_log(ctx, AV_LOG_DEBUG, " }\n");
                data++;
                break;
            }
-           if (data + size >= data_end || data + size < data)
+           if (size >= data_end - data)
                return;
            data += size;
            av_log(ctx, AV_LOG_DEBUG, " %s: ", buf);
            ff_amf_tag_contents(ctx, data, data_end);
            t = ff_amf_tag_size(data, data_end);
-           if (t < 0 || data + t >= data_end)
+           if (t < 0 || t >= data_end - data)
                return;
            data += t;
        }
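The AMF parsers replace checks of the form data + size >= data_end with size >= data_end - data: the old form first computes a pointer that may lie past the end of the buffer, which can wrap, while the new form only compares against the number of bytes actually remaining. A sketch of the safe form, with illustrative names:

    #include <stdint.h>

    /* Return nonzero if `size` bytes starting at `data` fit strictly inside
     * [data, data_end), without ever forming an out-of-range pointer. */
    static int fits_in_buffer(const uint8_t *data, const uint8_t *data_end, int64_t size)
    {
        if (size < 0)
            return 0;
        /* data_end - data is the remaining byte count; comparing counts cannot wrap */
        return size < data_end - data;
    }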
@@ -233,14 +233,16 @@ static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,

            int cur_len = start_off + len_off - off;
            int prev_len = out_len;
-           void *newbuf;
+           void *newmem;
+
            out_len += cur_len;
-           if(FFMIN(cur_len, len - off)<0)
+
+           if (FFMIN(cur_len, len - off) < 0)
                return -1;
-           newbuf = av_realloc(asf->buf, out_len);
-           if(!newbuf)
+           newmem = av_realloc(asf->buf, out_len);
+           if (!newmem)
                return -1;
-           asf->buf= newbuf;
+           asf->buf = newmem;
            memcpy(asf->buf + prev_len, buf + off,
                   FFMIN(cur_len, len - off));
            avio_skip(pb, cur_len);
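The hunk above keeps the reassembly buffer growth in a temporary pointer (renamed to newmem): av_realloc's result is assigned back to asf->buf only after the NULL check, so a failed reallocation never clobbers, or leaks, the old buffer. The same shape in plain C, with illustrative names:

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    typedef struct Reassembly {
        uint8_t *buf;
        size_t   len;
    } Reassembly;

    /* Grow the buffer and append `add_len` bytes; on failure the old buffer
     * is left untouched so the caller can still free or reuse it. */
    static int append_fragment(Reassembly *r, const uint8_t *add, size_t add_len)
    {
        uint8_t *newmem = realloc(r->buf, r->len + add_len);
        if (!newmem)
            return -1;                 /* r->buf is still valid here */

        r->buf = newmem;
        memcpy(r->buf + r->len, add, add_len);
        r->len += add_len;
        return 0;
    }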
@@ -1641,6 +1641,7 @@ int ff_rtsp_fetch_packet(AVFormatContext *s, AVPacket *pkt)
        rt->cur_transport_priv = NULL;
    }

+redo:
    if (rt->transport == RTSP_TRANSPORT_RTP) {
        int i;
        int64_t first_queue_time = 0;
@@ -1656,12 +1657,15 @@ int ff_rtsp_fetch_packet(AVFormatContext *s, AVPacket *pkt)
                first_queue_st = rt->rtsp_streams[i];
            }
        }
-       if (first_queue_time)
+       if (first_queue_time) {
            wait_end = first_queue_time + s->max_delay;
+       } else {
+           wait_end = 0;
+           first_queue_st = NULL;
+       }
    }

    /* read next RTP packet */
-redo:
    if (!rt->recvbuf) {
        rt->recvbuf = av_malloc(RECVBUF_SIZE);
        if (!rt->recvbuf)
@@ -498,8 +498,10 @@ static int swf_write_trailer(AVFormatContext *s)
        avio_wl32(pb, file_size);
        avio_seek(pb, swf->duration_pos, SEEK_SET);
        avio_wl16(pb, swf->video_frame_number);
+       if (swf->vframes_pos) {
        avio_seek(pb, swf->vframes_pos, SEEK_SET);
        avio_wl16(pb, swf->video_frame_number);
+       }
        avio_seek(pb, file_size, SEEK_SET);
    }
    return 0;
@@ -649,7 +649,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma
    }

    s->duration = s->start_time = AV_NOPTS_VALUE;
-   av_strlcpy(s->filename, filename, sizeof(s->filename));
+   av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));

    /* allocate private data */
    if (s->iformat->priv_data_size > 0) {
@@ -846,7 +846,10 @@ static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
        *pnum = st->codec->time_base.num;
        *pden = st->codec->time_base.den;
        if (pc && pc->repeat_pict) {
-           *pnum = (*pnum) * (1 + pc->repeat_pict);
+           if (*pnum > INT_MAX / (1 + pc->repeat_pict))
+               *pden /= 1 + pc->repeat_pict;
+           else
+               *pnum *= 1 + pc->repeat_pict;
        }
        //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
        //Thus if we have no parser in such case leave duration undefined.
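compute_frame_duration() now avoids overflowing the numerator: if multiplying *pnum by (1 + repeat_pict) would exceed INT_MAX, it divides the denominator instead, which keeps roughly the same ratio. A small sketch of that choice:

    #include <limits.h>

    /* Scale the fraction num/den by (1 + repeat) without overflowing int,
     * mirroring the compute_frame_duration() change above. */
    static void scale_duration(int *num, int *den, int repeat)
    {
        int factor = 1 + repeat;

        if (*num > INT_MAX / factor)
            *den /= factor;            /* same ratio, at some loss of precision */
        else
            *num *= factor;
    }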
@@ -155,9 +155,8 @@ static int yuv4_write_header(AVFormatContext *s)
        return AVERROR(EIO);

    if (s->streams[0]->codec->codec_id != CODEC_ID_RAWVIDEO) {
-       av_log(s, AV_LOG_ERROR,
-              "A non-rawvideo stream was selected, but yuv4mpeg only handles rawvideo streams\n");
-       return AVERROR(EINVAL);
+       av_log(s, AV_LOG_ERROR, "ERROR: Only rawvideo supported.\n");
+       return AVERROR_INVALIDDATA;
    }

    if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) {
@@ -353,7 +352,7 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int i;
    char header[MAX_FRAME_HEADER+1];
-   int packet_size, width, height;
+   int packet_size, width, height, ret;
    AVStream *st = s->streams[0];
    struct frame_attributes *s1 = s->priv_data;

@@ -364,18 +363,28 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
            break;
        }
    }
-   if (i == MAX_FRAME_HEADER) return -1;
-   if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) return -1;
+   if (s->pb->error)
+       return s->pb->error;
+   else if (s->pb->eof_reached)
+       return AVERROR_EOF;
+   else if (i == MAX_FRAME_HEADER)
+       return AVERROR_INVALIDDATA;
+
+   if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC)))
+       return AVERROR_INVALIDDATA;

    width = st->codec->width;
    height = st->codec->height;

    packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
    if (packet_size < 0)
-       return -1;
+       return packet_size;

-   if (av_get_packet(s->pb, pkt, packet_size) != packet_size)
-       return AVERROR(EIO);
+   ret = av_get_packet(s->pb, pkt, packet_size);
+   if (ret < 0)
+       return ret;
+   else if (ret != packet_size)
+       return s->pb->eof_reached ? AVERROR_EOF : AVERROR(EIO);

    if (s->streams[0]->codec->coded_frame) {
        s->streams[0]->codec->coded_frame->interlaced_frame = s1->interlaced_frame;
@@ -279,8 +279,8 @@ static int parse_primary(AVExpr **e, Parser *p)
    else if (strmatch(next, "eq" )) d->type = e_eq;
    else if (strmatch(next, "gte" )) d->type = e_gte;
    else if (strmatch(next, "gt" )) d->type = e_gt;
-   else if (strmatch(next, "lte" )) { AVExpr *tmp = d->param[1]; d->param[1] = d->param[0]; d->param[0] = tmp; d->type = e_gt; }
-   else if (strmatch(next, "lt" )) { AVExpr *tmp = d->param[1]; d->param[1] = d->param[0]; d->param[0] = tmp; d->type = e_gte; }
+   else if (strmatch(next, "lte" )) { AVExpr *tmp = d->param[1]; d->param[1] = d->param[0]; d->param[0] = tmp; d->type = e_gte; }
+   else if (strmatch(next, "lt" )) { AVExpr *tmp = d->param[1]; d->param[1] = d->param[0]; d->param[0] = tmp; d->type = e_gt; }
    else if (strmatch(next, "ld" )) d->type = e_ld;
    else if (strmatch(next, "isnan" )) d->type = e_isnan;
    else if (strmatch(next, "st" )) d->type = e_st;
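The eval fix restores the intended mapping: swapping the two operands turns a <= b into b >= a, so lte must map to e_gte and lt to e_gt. The previous code had the two mappings crossed, so lte(a,b) actually evaluated a < b. A tiny standalone illustration of the identity the fix relies on:

    #include <assert.h>

    /* With only >= and > available, <= and < are expressed by swapping operands:
     *   a <= b  is  b >= a   (lte -> e_gte with swapped params)
     *   a <  b  is  b >  a   (lt  -> e_gt  with swapped params)  */
    static int lte(double a, double b) { return b >= a; }
    static int lt (double a, double b) { return b >  a; }

    int main(void)
    {
        assert( lte(1.0, 1.0));   /* 1 <= 1 is true; the old crossed mapping gave 1 < 1 */
        assert(!lt (1.0, 1.0));
        assert( lt (1.0, 2.0));
        return 0;
    }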