Compare commits
175 Commits
f139838d64
0a224ab102
d39b183d8d
dc8054128a
001f4c7dc6
313ddbfe48
7f5bd6c72b
0be85fd80f
9f253ebb41
6242dae507
1749b0d74d
568e9062bd
5dbc75870f
c91a14638e
c00c380724
43625c5128
5effcfa767
1ee0cd1ad7
b594732475
ce15406e78
c9e95636a8
6e5c07f4c8
c999a8ed65
4d343a6f47
a81a6d9c80
48f0eeb2e5
d26e47bf6c
568a474a08
9a66cdbc16
ddb1149e25
f6778f58d4
e4e4d92641
de0ff4ce69
6548cb2578
f6257cf4b7
a15adb18fa
666bd5848a
d94256d36c
7bb97a61df
c65eadee5d
a43f4bd601
8f881885c2
26521d87ba
e1a4143793
b9482a6efd
88c3cc019c
9980e4df3b
d4f2786cda
2744fdbd9e
1fcc2c6091
74871ac70a
9cb7f6e54a
ed6aaf579d
e1b4614ab4
c3bf08d04c
12247a13e0
7503861b42
9def2f200e
7b676935ee
9550c63196
4a15240a27
a47b96bdd3
fb049da952
4a325ddeae
48ac765efe
522645e38f
e891ee4bf6
ef673211e7
eaeaeb265f
db315c796d
035dd77cbb
e3743869e9
ce14f00dea
627f4621f5
3e8434bcea
efd30c4d95
d7fddc97d4
feed0c6b6a
d0e53ecff7
1ca84aa162
d5f2382d03
416849f2e0
dd37038ac7
e410dd1792
ffdc41f039
ca7e97bdcf
4ae138cb12
003f7e3dd0
85eb76a23f
5186984ee9
b5331b979b
11f3173e1b
cd17195d1c
1128b10247
6a073aa7a7
073891e875
2e341bc99a
b7c8fff803
3f7e90cf0c
78d4f8cc56
de2656ec25
9686a2c2cf
b863979c0f
fecd7468fc
19da1a39e8
7e88df99e1
7f3f85544c
750f5baf30
a63f3f714c
1dd1ee00d5
4493af756b
e904e9b720
5f896773e0
b2dcac7141
40ccc81146
1c63d61372
2ad77c60ef
a1556d37b8
083a8a0037
71a939fee4
9dbd437da2
2510e1476e
0f839cff6b
abe3572878
0d30e2c6f2
a0473085f3
e537dc230b
19f4943d12
bf6d1a1ca7
424b6edd19
4f48417fe7
8e3dc37bc0
0312969b9e
62beae313a
8011a29fa8
fe710f2074
bba43a1ea0
f947e965be
5c365dc979
95a9d44dc3
27558bd87e
5ab9294a8d
cfd7d166e2
5bcd47cf63
0c60d5c59f
cd9bdc6395
b68470707b
7046ae5593
d19e3e19d6
04597e2595
d16653c3d4
183e0eb5b9
be0b3137d0
683213230e
ad0ee682b3
ba418ad400
6dcbbdc011
e43bd4fa58
25b4ed053f
e1f2a6a32b
6fc3287b9c
f43b6e2b1e
697a45d861
4c7879775e
a2c8db1b79
fc89f15497
e364f50718
571a4cf273
bafd38a352
350d06d63f
9f82cbf7c1
dcde8e1c90
569cb94869
0df7d7482c
b2f27d2926
35 Changelog

@@ -3,6 +3,41 @@ releases are sorted from youngest to oldest.
version next:

version 0.10.1
- Several security fixes, many bugfixes affecting many formats and
  codecs, the list below is not complete.

- swapuv filter

- Several bugs and crashes have been fixed in the following codecs: AAC,
  AC-3, ADPCM, AMR (both NB and WB), ATRAC3, CAVC, Cook, camstudio, DCA,
  DPCM, DSI CIN, DV, EA TGQ, FLAC, fraps, G.722 (both encoder and
  decoder), H.264, huvffyuv, BB JV decoder, Indeo 3, KGV1, LCL, the
  libx264 wrapper, MJPEG, mp3on4, Musepack, MPEG1/2, PNG, QDM2, Qt RLE,
  ROQ, RV10, RV30/RV34/RV40, shorten, smacker, subrip, SVQ3, TIFF,
  Truemotion2, TTA, VC1, VMware Screen codec, Vorbis, VP5, VP6, WMA,
  Westwood SNDx, XXAN.

- This release additionally updates the following codecs to the
  bytestream2 API, and therefore benefit from additional overflow
  checks: XXAN, ALG MM, TQG, SMC, Qt SMC, ROQ, PNG

- Several bugs and crashes have been fixed in the following formats:
  AIFF, ASF, DV, Matroska, NSV, MOV, MPEG-TS, Smacker, Sony OpenMG, RM,
  SWF.

- Libswscale has an potential overflow for large image size fixed.

- The following APIs have been added:

  avcodec_is_open()
  avformat_get_riff_video_tags()
  avformat_get_riff_audio_tags()

  Please see the file doc/APIchanges and the Doxygen documentation for
  further information.


version 0.10:
- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
  CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
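The bytestream2 migration mentioned in the Changelog is about replacing raw pointer arithmetic with a reader that tracks the end of the input buffer, so truncated packets cannot cause overreads. The following is a minimal self-contained sketch of that idea, not the actual libavcodec GetByteContext API; all names here are illustrative (the real migration is visible in the eatgq.c hunks further below).

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a bounds-checked byte reader in the spirit of bytestream2:
 * reads past the end of the buffer return 0 instead of walking off it. */
typedef struct ByteReader {
    const uint8_t *cur, *end;
} ByteReader;

static uint8_t reader_get_byte(ByteReader *r)
{
    return r->cur < r->end ? *r->cur++ : 0;
}

static unsigned reader_get_le16(ByteReader *r)
{
    unsigned lo = reader_get_byte(r);
    return lo | (reader_get_byte(r) << 8);
}

int main(void)
{
    const uint8_t pkt[3] = { 0x01, 0x02, 0x03 };    /* deliberately short */
    ByteReader r = { pkt, pkt + sizeof(pkt) };

    unsigned a = reader_get_le16(&r);  /* 0x0201 */
    unsigned b = reader_get_le16(&r);  /* only one byte left: 0x0003, no overread */
    printf("%04x %04x\n", a, b);
    return 0;
}
```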
2 Doxyfile

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.10
PROJECT_NUMBER = 0.10.2

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -806,7 +806,7 @@ int opt_codecs(const char *opt, const char *arg)
if (p2 && strcmp(p->name, p2->name) == 0) {
if (p->decode)
decode = 1;
if (p->encode)
if (p->encode || p->encode2)
encode = 1;
cap |= p->capabilities;
}
@@ -22,6 +22,19 @@ API changes, most recent first:
muxers supporting it (av_write_frame makes sure it is called
only for muxers with this flag).

2012-03-04 - xxxxxxx - lavu 51.22.1 - error.h
Add AVERROR_UNKNOWN

2012-02-29 - xxxxxxx - lavf 53.21.0
Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags().

2012-02-29 - xxxxxxx - lavu 51.22.0 - intfloat.h
Add a new installed header libavutil/intfloat.h with int/float punning
functions.

2012-02-17 - xxxxxxx - lavc 53.35.0
Add avcodec_is_open() function.

2012-01-15 - lavc 53.34.0
New audio encoding API:
b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
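The intfloat.h entry above adds int/float punning helpers to libavutil. Below is a self-contained sketch of the union-based technique those helpers are built on; the function names in this sketch are illustrative, not the FFmpeg API.

```c
#include <stdint.h>
#include <stdio.h>

/* Union-based int/float punning: reinterpret the bits of a 32-bit integer
 * as an IEEE-754 single and vice versa, without violating strict aliasing. */
union intfloat32 { uint32_t i; float f; };

static float int2float(uint32_t i)
{
    union intfloat32 v;
    v.i = i;
    return v.f;
}

static uint32_t float2int(float f)
{
    union intfloat32 v;
    v.f = f;
    return v.i;
}

int main(void)
{
    printf("%f\n", int2float(0x3f800000));   /* 1.000000 */
    printf("0x%08x\n", float2int(-2.0f));    /* 0xc0000000 */
    return 0;
}
```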
@@ -2549,6 +2549,9 @@ For example:
will create two separate outputs from the same input, one cropped and
one padded.

@section swapuv
Swap U & V plane.

@section thumbnail
Select the most representative frame in a given sequence of consecutive frames.
7 ffmpeg.c

@@ -2626,14 +2626,16 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
break;
}
/* two pass mode */
if (codec->codec_id != CODEC_ID_H264 &&
(codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
char logfilename[1024];
FILE *f;

snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
i);
if (!strcmp(ost->enc->name, "libx264")) {
av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
} else {
if (codec->flags & CODEC_FLAG_PASS2) {
char *logbuffer;
size_t logbuffer_size;

@@ -2655,6 +2657,7 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
}
}
}
}
if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
/* maximum video buffer size is 8-bytes per pixel, plus DPX header size (1664)*/
int size = codec->width * codec->height;
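The ffmpeg.c hunk above stops excluding H.264 from the generic two-pass path and instead routes the pass log to libx264 through its private "stats" option. A minimal sketch of building such an options dictionary (the "stats" key and log file naming come from the diff; the surrounding helper is illustrative only):

```c
#include <libavutil/dict.h>

/* Illustrative only: build the options dictionary that would be passed to
 * avcodec_open2() for a libx264 encoder in two-pass mode. */
static AVDictionary *make_pass_opts(const char *logfilename)
{
    AVDictionary *opts = NULL;
    /* AV_DICT_DONT_OVERWRITE keeps any value the user already set. */
    av_dict_set(&opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
    return opts;
}
```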
@@ -594,7 +594,7 @@ OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
flacdec.o flacdata.o flac.o \
mpegaudiodata.o vorbis_data.o
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o timecode.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
@@ -826,10 +826,10 @@ static int decode_band_types(AACContext *ac, enum BandType band_type[120],
av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
return -1;
}
while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1 && get_bits_left(gb) >= bits)
do {
sect_len_incr = get_bits(gb, bits);
sect_end += sect_len_incr;
sect_end += sect_len_incr;
if (get_bits_left(gb) < 0 || sect_len_incr == (1 << bits) - 1) {
if (get_bits_left(gb) < 0) {
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
return -1;
}

@@ -839,6 +839,7 @@ static int decode_band_types(AACContext *ac, enum BandType band_type[120],
sect_end, ics->max_sfb);
return -1;
}
} while (sect_len_incr == (1 << bits) - 1);
for (; k < sect_end; k++) {
band_type [idx] = sect_band_type;
band_type_run_end[idx++] = sect_end;
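The AAC hunk above rewrites the section-length loop as a do/while so that the overread check runs before each escape-extended increment is consumed. Schematically, escape-extended length decoding looks like the standalone sketch below (this is an illustration of the pattern, not the decoder's code; get_value is replaced by an array here).

```c
#include <stdio.h>

/* Escape-extended length decoding: a value equal to the escape code
 * (all ones in `bits` bits) means "add this and keep reading". */
static int read_section_length(const int *vals, int nvals, int bits)
{
    int esc = (1 << bits) - 1;
    int pos = 0, len = 0, v;

    do {
        if (pos >= nvals)      /* corresponds to get_bits_left() < 0 */
            return -1;         /* overread: report the error */
        v = vals[pos++];
        len += v;
    } while (v == esc);        /* stop at the first non-escape value */

    return len;
}

int main(void)
{
    int stream[] = { 31, 31, 5 };   /* two escapes, then a terminator */
    printf("%d\n", read_section_length(stream, 3, 5));  /* prints 67 */
    return 0;
}
```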
@@ -200,8 +200,8 @@ WINDOW_FUNC(long_start)
float *out = sce->ret;

dsp->vector_fmul(out, audio, lwindow, 1024);
memcpy(out + 1024, audio, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024 + 448, audio, swindow, 128);
memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128);
memset(out + 1024 + 576, 0, sizeof(out[0]) * 448);
}

@@ -487,10 +487,10 @@ static void deinterleave_input_samples(AACEncContext *s,
const float *sptr = samples + channel_map[ch];

/* copy last 1024 samples of previous frame to the start of the current frame */
memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][1024], 1024 * sizeof(s->planar_samples[0][0]));
memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048], 1024 * sizeof(s->planar_samples[0][0]));

/* deinterleave */
for (i = 1024; i < 1024 * 2; i++) {
for (i = 2048; i < 3072; i++) {
s->planar_samples[ch][i] = *sptr;
sptr += sinc;
}
@@ -134,7 +134,7 @@ int avpriv_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
(hdr->num_blocks * 256.0));
hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on;
}
hdr->channel_layout = ff_ac3_channel_layout_tab[hdr->channel_mode];
hdr->channel_layout = avpriv_ac3_channel_layout_tab[hdr->channel_mode];
if (hdr->lfe_on)
hdr->channel_layout |= AV_CH_LOW_FREQUENCY;
@@ -1383,7 +1383,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
avctx->request_channels < s->channels) {
s->out_channels = avctx->request_channels;
s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode];
s->channel_layout = avpriv_ac3_channel_layout_tab[s->output_mode];
}
avctx->channels = s->out_channels;
avctx->channel_layout = s->channel_layout;
@@ -109,7 +109,7 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
int snr_offset, int floor,
const uint8_t *bap_tab, uint8_t *bap)
{
int bin, band;
int bin, band, band_end;

/* special case, if snr offset is -960, set all bap's to zero */
if (snr_offset == -960) {

@@ -121,12 +121,14 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
band = ff_ac3_bin_to_band_tab[start];
do {
int m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
int band_end = FFMIN(ff_ac3_band_start_tab[band+1], end);
band_end = ff_ac3_band_start_tab[++band];
band_end = FFMIN(band_end, end);

for (; bin < band_end; bin++) {
int address = av_clip((psd[bin] - m) >> 5, 0, 63);
bap[bin] = bap_tab[address];
}
} while (end > ff_ac3_band_start_tab[band++]);
} while (end > band_end);
}

static void ac3_update_bap_counts_c(uint16_t mant_cnt[16], uint8_t *bap,
@@ -84,7 +84,7 @@ const uint8_t ff_ac3_channels_tab[8] = {
/**
* Map audio coding mode (acmod) to channel layout mask.
*/
const uint16_t ff_ac3_channel_layout_tab[8] = {
const uint16_t avpriv_ac3_channel_layout_tab[8] = {
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
@@ -33,7 +33,7 @@

extern const uint16_t ff_ac3_frame_size_tab[38][3];
extern const uint8_t ff_ac3_channels_tab[8];
extern const uint16_t ff_ac3_channel_layout_tab[8];
extern const uint16_t avpriv_ac3_channel_layout_tab[8];
extern const uint8_t ff_ac3_enc_channel_map[8][2][6];
extern const uint8_t ff_ac3_dec_channel_map[8][2][6];
extern const uint16_t ff_ac3_sample_rate_tab[3];
@@ -265,7 +265,8 @@ static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned c
return c->predictor;
}

static void xa_decode(short *out, const unsigned char *in,
static int xa_decode(AVCodecContext *avctx,
short *out, const unsigned char *in,
ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
int i, j;

@@ -277,6 +278,12 @@ static void xa_decode(short *out, const unsigned char *in,
shift = 12 - (in[4+i*2] & 15);
filter = in[4+i*2] >> 4;
if (filter > 4) {
av_log(avctx, AV_LOG_ERROR,
"Invalid XA-ADPCM filter %d (max. allowed is 4)\n",
filter);
return AVERROR_INVALIDDATA;
}
f0 = xa_adpcm_table[filter][0];
f1 = xa_adpcm_table[filter][1];

@@ -304,7 +311,12 @@ static void xa_decode(short *out, const unsigned char *in,
shift = 12 - (in[5+i*2] & 15);
filter = in[5+i*2] >> 4;
if (filter > 4) {
av_log(avctx, AV_LOG_ERROR,
"Invalid XA-ADPCM filter %d (max. allowed is 4)\n",
filter);
return AVERROR_INVALIDDATA;
}
f0 = xa_adpcm_table[filter][0];
f1 = xa_adpcm_table[filter][1];

@@ -328,6 +340,8 @@ static void xa_decode(short *out, const unsigned char *in,
left->sample2 = s_2;
}
}
return 0;
}

/**

@@ -699,7 +713,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = (int16_t)bytestream_get_le16(&src);
cs->step_index = *src++;
cs->step_index = av_clip(*src++, 0, 88);
src++;
*samples++ = cs->predictor;
}

@@ -722,8 +736,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
c->status[0].predictor = (int16_t)AV_RL16(src + 10);
c->status[1].predictor = (int16_t)AV_RL16(src + 12);
c->status[0].step_index = src[14];
c->status[1].step_index = src[15];
c->status[0].step_index = av_clip(src[14], 0, 88);
c->status[1].step_index = av_clip(src[15], 0, 88);
/* sign extend the predictors */
src += 16;
diff_channel = c->status[1].predictor;

@@ -763,7 +777,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = (int16_t)bytestream_get_le16(&src);
cs->step_index = *src++;
cs->step_index = av_clip(*src++, 0, 88);
src++;
}

@@ -815,8 +829,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
break;
case CODEC_ID_ADPCM_XA:
while (buf_size >= 128) {
xa_decode(samples, src, &c->status[0], &c->status[1],
avctx->channels);
if ((ret = xa_decode(avctx, samples, src, &c->status[0],
&c->status[1], avctx->channels)) < 0)
return ret;
src += 128;
samples += 28 * 8;
buf_size -= 128;

@@ -826,7 +841,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
src += 4; // skip sample count (already read)

for (i=0; i<=st; i++)
c->status[i].step_index = bytestream_get_le32(&src);
c->status[i].step_index = av_clip(bytestream_get_le32(&src), 0, 88);
for (i=0; i<=st; i++)
c->status[i].predictor = bytestream_get_le32(&src);

@@ -1043,11 +1058,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
case CODEC_ID_ADPCM_IMA_SMJPEG:
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) {
c->status[0].predictor = sign_extend(bytestream_get_le16(&src), 16);
c->status[0].step_index = bytestream_get_le16(&src);
c->status[0].step_index = av_clip(bytestream_get_le16(&src), 0, 88);
src += 4;
} else {
c->status[0].predictor = sign_extend(bytestream_get_be16(&src), 16);
c->status[0].step_index = bytestream_get_byte(&src);
c->status[0].step_index = av_clip(bytestream_get_byte(&src), 0, 88);
src += 1;
}
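The repeated av_clip(..., 0, 88) calls in the ADPCM hunks above exist because the IMA ADPCM step table has 89 entries, so a step index read from an untrusted bitstream must be clamped before it is used as a table index. A standalone equivalent of that clamp:

```c
#include <stdio.h>

/* Clamp an untrusted step index into the valid range of the 89-entry
 * IMA ADPCM step table (indices 0..88), mirroring av_clip(v, 0, 88). */
static int clip_step_index(int v)
{
    if (v < 0)  return 0;
    if (v > 88) return 88;
    return v;
}

int main(void)
{
    printf("%d %d %d\n", clip_step_index(-3), clip_step_index(42),
           clip_step_index(200));                 /* prints: 0 42 88 */
    return 0;
}
```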
@@ -200,6 +200,10 @@ static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
p->bad_frame_indicator = !get_bits1(&gb); // quality bit
skip_bits(&gb, 2); // two padding bits

if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
return NO_DATA;
}

if (mode < MODE_DTX)
ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
amr_unpacking_bitmaps_per_mode[mode]);

@@ -947,6 +951,10 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
buf_out = (float *)p->avframe.data[0];

p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
if (p->cur_frame_mode == NO_DATA) {
av_log(avctx, AV_LOG_ERROR, "Corrupt bitstream\n");
return AVERROR_INVALIDDATA;
}
if (p->cur_frame_mode == MODE_DTX) {
av_log_missing_feature(avctx, "dtx mode", 0);
av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n");
@@ -898,10 +898,10 @@ static float auto_correlation(float *diff_isf, float mean, int lag)
* Extrapolate a ISF vector to the 16kHz range (20th order LP)
* used at mode 6k60 LP filter for the high frequency band.
*
* @param[out] out Buffer for extrapolated isf
* @param[in] isf Input isf vector
* @param[out] isf Buffer for extrapolated isf; contains LP_ORDER
* values on input
*/
static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
static void extrapolate_isf(float isf[LP_ORDER_16k])
{
float diff_isf[LP_ORDER - 2], diff_mean;
float *diff_hi = diff_isf - LP_ORDER + 1; // diff array for extrapolated indexes

@@ -909,8 +909,7 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
float est, scale;
int i, i_max_corr;

memcpy(out, isf, (LP_ORDER - 1) * sizeof(float));
out[LP_ORDER_16k - 1] = isf[LP_ORDER - 1];
isf[LP_ORDER_16k - 1] = isf[LP_ORDER - 1];

/* Calculate the difference vector */
for (i = 0; i < LP_ORDER - 2; i++)

@@ -931,16 +930,16 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
i_max_corr++;

for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
out[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
isf[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
- isf[i - 2 - i_max_corr];

/* Calculate an estimate for ISF(18) and scale ISF based on the error */
est = 7965 + (out[2] - out[3] - out[4]) / 6.0;
scale = 0.5 * (FFMIN(est, 7600) - out[LP_ORDER - 2]) /
(out[LP_ORDER_16k - 2] - out[LP_ORDER - 2]);
est = 7965 + (isf[2] - isf[3] - isf[4]) / 6.0;
scale = 0.5 * (FFMIN(est, 7600) - isf[LP_ORDER - 2]) /
(isf[LP_ORDER_16k - 2] - isf[LP_ORDER - 2]);

for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
diff_hi[i] = scale * (out[i] - out[i - 1]);
diff_hi[i] = scale * (isf[i] - isf[i - 1]);

/* Stability insurance */
for (i = LP_ORDER; i < LP_ORDER_16k - 1; i++)

@@ -952,11 +951,11 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
}

for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
out[i] = out[i - 1] + diff_hi[i] * (1.0f / (1 << 15));
isf[i] = isf[i - 1] + diff_hi[i] * (1.0f / (1 << 15));

/* Scale the ISF vector for 16000 Hz */
for (i = 0; i < LP_ORDER_16k - 1; i++)
out[i] *= 0.8;
isf[i] *= 0.8;
}

/**

@@ -1003,7 +1002,7 @@ static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples,
ff_weighted_vector_sumf(e_isf, isf_past, isf, isfp_inter[subframe],
1.0 - isfp_inter[subframe], LP_ORDER);

extrapolate_isf(e_isf, e_isf);
extrapolate_isf(e_isf);

e_isf[LP_ORDER_16k - 1] *= 2.0;
ff_acelp_lsf2lspd(e_isp, e_isf, LP_ORDER_16k);

@@ -1095,23 +1094,27 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
buf_out = (float *)ctx->avframe.data[0];

header_size = decode_mime_header(ctx, buf);
if (ctx->fr_cur_mode > MODE_SID) {
av_log(avctx, AV_LOG_ERROR,
"Invalid mode %d\n", ctx->fr_cur_mode);
return AVERROR_INVALIDDATA;
}
expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;

if (buf_size < expected_fr_size) {
av_log(avctx, AV_LOG_ERROR,
"Frame too small (%d bytes). Truncated file?\n", buf_size);
*got_frame_ptr = 0;
return buf_size;
return AVERROR_INVALIDDATA;
}

if (!ctx->fr_quality || ctx->fr_cur_mode > MODE_SID)
av_log(avctx, AV_LOG_ERROR, "Encountered a bad or corrupted frame\n");

if (ctx->fr_cur_mode == MODE_SID) /* Comfort noise frame */
if (ctx->fr_cur_mode == MODE_SID) { /* Comfort noise frame */
av_log_missing_feature(avctx, "SID mode", 1);

if (ctx->fr_cur_mode >= MODE_SID)
return -1;
}

ff_amr_bit_reorder((uint16_t *) &ctx->frame, sizeof(AMRWBFrame),
buf + header_size, amr_bit_orderings_by_mode[ctx->fr_cur_mode]);
@@ -402,7 +402,7 @@ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent
for (k=0; k<coded_components; k++) {
sfIndx = get_bits(gb,6);
if(component_count>=64)
if (component_count >= 64)
return AVERROR_INVALIDDATA;
pComponent[component_count].pos = j * 64 + (get_bits(gb,6));
max_coded_values = SAMPLES_PER_FRAME - pComponent[component_count].pos;
@@ -4032,7 +4032,8 @@ AVCodecContext *avcodec_alloc_context2(enum AVMediaType);
/**
* Allocate an AVCodecContext and set its fields to default values. The
* resulting struct can be deallocated by simply calling av_free().
* resulting struct can be deallocated by calling avcodec_close() on it followed
* by av_free().
*
* @param codec if non-NULL, allocate private data and initialize defaults
* for the given codec. It is illegal to then call avcodec_open2()

@@ -4178,6 +4179,11 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
* @endcode
*
* @param avctx The context to initialize.
* @param codec The codec to open this context for. If a non-NULL codec has been
* previously passed to avcodec_alloc_context3() or
* avcodec_get_context_defaults3() for this context, then this
* parameter MUST be either NULL or equal to the previously passed
* codec.
* @param options A dictionary filled with AVCodecContext and codec-private options.
* On return this object will be filled with options that were not found.
*

@@ -4463,6 +4469,15 @@ int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const AVSubtitle *sub);

/**
* Close a given AVCodecContext and free all the data associated with it
* (but not the AVCodecContext itself).
*
* Calling this function on an AVCodecContext that hasn't been opened will free
* the codec-specific data allocated in avcodec_alloc_context3() /
* avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
* do nothing.
*/
int avcodec_close(AVCodecContext *avctx);

/**

@@ -4874,4 +4889,10 @@ const AVClass *avcodec_get_class(void);
*/
const AVClass *avcodec_get_frame_class(void);

/**
* @return a positive value if s is open (i.e. avcodec_open2() was called on it
* with no corresponding avcodec_close()), 0 otherwise.
*/
int avcodec_is_open(AVCodecContext *s);

#endif /* AVCODEC_AVCODEC_H */
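The avcodec.h hunks above document the new avcodec_is_open() and the clarified avcodec_close() semantics. A rough usage sketch against the 0.10-era API follows; the codec name is only an example and error handling is trimmed, so treat this as an assumption-laden sketch rather than reference code.

```c
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

static void open_close_example(void)
{
    avcodec_register_all();

    AVCodec *codec      = avcodec_find_decoder_by_name("aac");  /* example codec */
    AVCodecContext *ctx = avcodec_alloc_context3(codec);

    if (avcodec_open2(ctx, codec, NULL) >= 0) {
        /* ... decode ... */
    }

    if (avcodec_is_open(ctx))   /* non-zero only between open2() and close() */
        avcodec_close(ctx);

    avcodec_close(ctx);         /* per the new docs, a repeated call does nothing */
    av_free(ctx);               /* close() first, then free the struct itself */
}
```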
@@ -656,7 +656,8 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
if (buf_size == 0) {
if (!s->low_delay && h->DPB[0].f.data[0]) {
*data_size = sizeof(AVPicture);
*picture = *(AVFrame *) &h->DPB[0];
*picture = h->DPB[0].f;
memset(&h->DPB[0], 0, sizeof(h->DPB[0]));
}
return 0;
}
@@ -366,7 +366,7 @@ static void decode_gain_info(GetBitContext *gb, int *gaininfo)
* @param q pointer to the COOKContext
* @param quant_index_table pointer to the array
*/
static void decode_envelope(COOKContext *q, COOKSubpacket *p,
static int decode_envelope(COOKContext *q, COOKSubpacket *p,
int *quant_index_table)
{
int i, j, vlc_index;

@@ -388,7 +388,15 @@ static void decode_envelope(COOKContext *q, COOKSubpacket *p,
j = get_vlc2(&q->gb, q->envelope_quant_index[vlc_index - 1].table,
q->envelope_quant_index[vlc_index - 1].bits, 2);
quant_index_table[i] = quant_index_table[i - 1] + j - 12; // differential encoding
if (quant_index_table[i] > 63 || quant_index_table[i] < -63) {
av_log(q->avctx, AV_LOG_ERROR,
"Invalid quantizer %d at position %d, outside [-63, 63] range\n",
quant_index_table[i], i);
return AVERROR_INVALIDDATA;
}
}

return 0;
}

/**

@@ -507,7 +515,11 @@ static inline void expand_category(COOKContext *q, int *category,
{
int i;
for (i = 0; i < q->num_vectors; i++)
++category[category_index[i]];
{
int idx = category_index[i];
if (++category[idx] >= FF_ARRAY_ELEMS(dither_tab))
--category[idx];
}
}

/**

@@ -635,20 +647,24 @@ static void decode_vectors(COOKContext *q, COOKSubpacket *p, int *category,
* @param q pointer to the COOKContext
* @param mlt_buffer pointer to mlt coefficients
*/
static void mono_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer)
static int mono_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer)
{
int category_index[128];
int quant_index_table[102];
int category[128];
int res;

memset(&category, 0, sizeof(category));
memset(&category_index, 0, sizeof(category_index));

decode_envelope(q, p, quant_index_table);
if ((res = decode_envelope(q, p, quant_index_table)) < 0)
return res;
q->num_vectors = get_bits(&q->gb, p->log2_numvector_size);
categorize(q, p, quant_index_table, category, category_index);
expand_category(q, category, category_index);
decode_vectors(q, p, category, quant_index_table, mlt_buffer);

return 0;
}

@@ -798,10 +814,10 @@ static void decouple_float(COOKContext *q,
* @param mlt_buffer1 pointer to left channel mlt coefficients
* @param mlt_buffer2 pointer to right channel mlt coefficients
*/
static void joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
static int joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
float *mlt_buffer2)
{
int i, j;
int i, j, res;
int decouple_tab[SUBBAND_SIZE];
float *decode_buffer = q->decode_buffer_0;
int idx, cpl_tmp;

@@ -815,7 +831,8 @@ static void joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
memset(mlt_buffer1, 0, 1024 * sizeof(*mlt_buffer1));
memset(mlt_buffer2, 0, 1024 * sizeof(*mlt_buffer2));
decouple_info(q, p, decouple_tab);
mono_decode(q, p, decode_buffer);
if ((res = mono_decode(q, p, decode_buffer)) < 0)
return res;

/* The two channels are stored interleaved in decode_buffer. */
for (i = 0; i < p->js_subband_start; i++) {

@@ -832,11 +849,13 @@ static void joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
cpl_tmp = cplband[i];
idx -= decouple_tab[cpl_tmp];
cplscale = q->cplscales[p->js_vlc_bits - 2]; // choose decoupler table
f1 = cplscale[decouple_tab[cpl_tmp]];
f2 = cplscale[idx - 1];
f1 = cplscale[decouple_tab[cpl_tmp] + 1];
f2 = cplscale[idx];
q->decouple(q, p, i, f1, f2, decode_buffer, mlt_buffer1, mlt_buffer2);
idx = (1 << p->js_vlc_bits) - 1;
}

return 0;
}

/**

@@ -909,10 +928,11 @@ static inline void mlt_compensate_output(COOKContext *q, float *decode_buffer,
* @param inbuffer pointer to the inbuffer
* @param outbuffer pointer to the outbuffer
*/
static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
static int decode_subpacket(COOKContext *q, COOKSubpacket *p,
const uint8_t *inbuffer, float *outbuffer)
{
int sub_packet_size = p->size;
int res;
/* packet dump */
// for (i = 0; i < sub_packet_size ; i++)
// av_log(q->avctx, AV_LOG_ERROR, "%02x", inbuffer[i]);

@@ -921,13 +941,16 @@ static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
decode_bytes_and_gain(q, p, inbuffer, &p->gains1);

if (p->joint_stereo) {
joint_decode(q, p, q->decode_buffer_1, q->decode_buffer_2);
if ((res = joint_decode(q, p, q->decode_buffer_1, q->decode_buffer_2)) < 0)
return res;
} else {
mono_decode(q, p, q->decode_buffer_1);
if ((res = mono_decode(q, p, q->decode_buffer_1)) < 0)
return res;

if (p->num_channels == 2) {
decode_bytes_and_gain(q, p, inbuffer + sub_packet_size / 2, &p->gains2);
mono_decode(q, p, q->decode_buffer_2);
if ((res = mono_decode(q, p, q->decode_buffer_2)) < 0)
return res;
}
}

@@ -941,6 +964,8 @@ static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
else
mlt_compensate_output(q, q->decode_buffer_2, &p->gains2,
p->mono_previous_buffer2, outbuffer, p->ch_idx + 1);

return 0;
}

@@ -996,7 +1021,8 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
i, q->subpacket[i].size, q->subpacket[i].joint_stereo, offset,
avctx->block_align);

decode_subpacket(q, &q->subpacket[i], buf + offset, samples);
if ((ret = decode_subpacket(q, &q->subpacket[i], buf + offset, samples)) < 0)
return ret;
offset += q->subpacket[i].size;
chidx += q->subpacket[i].num_channels;
av_log(avctx, AV_LOG_DEBUG, "subpacket[%i] %i %i\n",

@@ -1078,6 +1104,10 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
q->sample_rate = avctx->sample_rate;
q->nb_channels = avctx->channels;
q->bit_rate = avctx->bit_rate;
if (!q->nb_channels) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
return AVERROR_INVALIDDATA;
}

/* Initialize RNG. */
av_lfg_init(&q->random_state, 0);
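decode_envelope() above now rejects differentially coded quantizer indices that drift outside [-63, 63]. The pattern, reduced to a standalone sketch (the -12 bias matches the COOK envelope code above; everything else here is illustrative):

```c
#include <stdio.h>

/* Differential decoding with range validation: each stored code is an
 * offset (biased by -12) applied to the previous result, and the running
 * value must stay within [-63, 63] or the stream is rejected. */
static int decode_diff(const int *codes, int n, int *out)
{
    out[0] = 0;                       /* initial quantizer */
    for (int i = 1; i < n; i++) {
        out[i] = out[i - 1] + codes[i] - 12;
        if (out[i] > 63 || out[i] < -63)
            return -1;                /* corrupt stream: bail out */
    }
    return 0;
}

int main(void)
{
    int codes[4] = { 0, 20, 5, 14 }, q[4];
    if (!decode_diff(codes, 4, q))
        printf("%d %d %d %d\n", q[0], q[1], q[2], q[3]);  /* 0 8 1 3 */
    return 0;
}
```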
@@ -36,8 +36,8 @@ static const int expbits_tab[8] = {
52,47,43,37,29,22,16,0,
};

static const float dither_tab[8] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.176777, 0.25, 0.707107,
static const float dither_tab[9] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.176777, 0.25, 0.707107, 1.0
};

static const float quant_centroid_tab[7][14] = {

@@ -510,23 +510,37 @@ static const int cplband[51] = {
19,
};

static const float cplscale2[3] = {
// The 1 and 0 at the beginning/end are to prevent overflows with
// bitstream-read indexes. E.g. if n_bits=5, we can access any
// index from [1, (1<<n_bits)] for the first decoupling coeff,
// and (1<<n_bits)-coeff1 as index for coeff2, i.e.:
// coeff1_idx = [1, 32], and coeff2_idx = [0, 31].
// These values aren't part of the tables in the original binary.

static const float cplscale2[5] = {
1,
0.953020632266998,0.70710676908493,0.302905440330505,
0,
};

static const float cplscale3[7] = {
static const float cplscale3[9] = {
1,
0.981279790401459,0.936997592449188,0.875934481620789,0.70710676908493,
0.482430040836334,0.349335819482803,0.192587479948997,
0,
};

static const float cplscale4[15] = {
static const float cplscale4[17] = {
1,
0.991486728191376,0.973249018192291,0.953020632266998,0.930133521556854,
0.903453230857849,0.870746195316315,0.826180458068848,0.70710676908493,
0.563405573368073,0.491732746362686,0.428686618804932,0.367221474647522,
0.302905440330505,0.229752898216248,0.130207896232605,
0,
};

static const float cplscale5[31] = {
static const float cplscale5[33] = {
1,
0.995926380157471,0.987517595291138,0.978726446628571,0.969505727291107,
0.95979779958725,0.949531257152557,0.938616216182709,0.926936149597168,
0.914336204528809,0.900602877140045,0.885426938533783,0.868331849575043,

@@ -535,9 +549,11 @@ static const float cplscale5[31] = {
0.464778542518616,0.434642940759659,0.404955863952637,0.375219136476517,
0.344963222742081,0.313672333955765,0.280692428350449,0.245068684220314,
0.205169528722763,0.157508864998817,0.0901700109243393,
0,
};

static const float cplscale6[63] = {
static const float cplscale6[65] = {
1,
0.998005926609039,0.993956744670868,0.989822506904602,0.985598564147949,
0.981279790401459,0.976860702037811,0.972335040569305,0.967696130275726,
0.962936460971832,0.958047747612000,0.953020632266998,0.947844684123993,

@@ -554,6 +570,7 @@ static const float cplscale6[63] = {
0.302905440330505,0.286608695983887,0.269728302955627,0.252119421958923,
0.233590632677078,0.213876649737358,0.192587479948997,0.169101938605309,
0.142307326197624,0.109772264957428,0.0631198287010193,
0,
};

static const float* const cplscales[5] = {
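The comment added to cookdata.h explains that each cplscale table is padded with a leading 1 and a trailing 0 so that any index a possibly corrupt bitstream can produce still lands inside the table. A toy illustration of the same padding trick, assuming a 2-bit field (so a 5-entry table, like the new cplscale2[5]); the in-between values are made up for the example:

```c
#include <stdio.h>

/* A 2-bit code yields index 1..4 for the first coefficient and 4-idx
 * (0..3) for the second, so the table is padded to 5 entries: sentinel
 * 1.0 in front, sentinel 0.0 at the back. */
static const float scale_tab[5] = { 1.0f, 0.9f, 0.7f, 0.3f, 0.0f };

int main(void)
{
    int n_bits = 2;
    for (int code = 0; code < (1 << n_bits); code++) {
        int idx1 = code + 1;                 /* 1 .. 1<<n_bits */
        int idx2 = (1 << n_bits) - idx1;     /* 0 .. (1<<n_bits)-1 */
        printf("f1=%g f2=%g\n", scale_tab[idx1], scale_tab[idx2]);
    }
    return 0;   /* every lookup stays inside the padded table */
}
```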
@@ -228,7 +228,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
av_log(avctx, AV_LOG_ERROR,
"CamStudio codec error: invalid depth %i bpp\n",
avctx->bits_per_coded_sample);
return 1;
return AVERROR_INVALIDDATA;
}
c->bpp = avctx->bits_per_coded_sample;
avcodec_get_frame_defaults(&c->pic);

@@ -242,7 +242,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
if (!c->decomp_buf) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return 1;
return AVERROR(ENOMEM);
}
return 0;
}
@@ -29,6 +29,7 @@
#include "libavutil/common.h"
#include "libavutil/intmath.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/audioconvert.h"
#include "avcodec.h"
#include "dsputil.h"

@@ -638,13 +639,20 @@ static int dca_parse_frame_header(DCAContext *s)
}

static inline int get_scale(GetBitContext *gb, int level, int value)
static inline int get_scale(GetBitContext *gb, int level, int value, int log2range)
{
if (level < 5) {
/* huffman encoded */
value += get_bitalloc(gb, &dca_scalefactor, level);
} else if (level < 8)
value = av_clip(value, 0, (1 << log2range) - 1);
} else if (level < 8) {
if (level + 1 > log2range) {
skip_bits(gb, level + 1 - log2range);
value = get_bits(gb, log2range);
} else {
value = get_bits(gb, level + 1);
}
}
return value;
}

@@ -717,28 +725,31 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)

for (j = base_channel; j < s->prim_channels; j++) {
const uint32_t *scale_table;
int scale_sum;
int scale_sum, log_size;

memset(s->scale_factor[j], 0,
s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2);

if (s->scalefactor_huffman[j] == 6)
if (s->scalefactor_huffman[j] == 6) {
scale_table = scale_factor_quant7;
else
log_size = 7;
} else {
scale_table = scale_factor_quant6;
log_size = 6;
}

/* When huffman coded, only the difference is encoded */
scale_sum = 0;

for (k = 0; k < s->subband_activity[j]; k++) {
if (k >= s->vq_start_subband[j] || s->bitalloc[j][k] > 0) {
scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum);
scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum, log_size);
s->scale_factor[j][k][0] = scale_table[scale_sum];
}

if (k < s->vq_start_subband[j] && s->transition_mode[j][k]) {
/* Get second scale factor */
scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum);
scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum, log_size);
s->scale_factor[j][k][1] = scale_table[scale_sum];
}
}

@@ -767,8 +778,7 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
* (is this valid as well for joint scales ???) */

for (k = s->subband_activity[j]; k < s->subband_activity[source_channel]; k++) {
scale = get_scale(&s->gb, s->joint_huff[j], 0);
scale += 64; /* bias */
scale = get_scale(&s->gb, s->joint_huff[j], 64 /* bias */, 7);
s->joint_scale_factor[j][k] = scale; /*joint_scale_table[scale]; */
}

@@ -789,6 +799,11 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
}
} else {
int am = s->amode & DCA_CHANNEL_MASK;
if (am >= FF_ARRAY_ELEMS(dca_default_coeffs)) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid channel mode %d\n", am);
return AVERROR_INVALIDDATA;
}
for (j = base_channel; j < s->prim_channels; j++) {
s->downmix_coef[j][0] = dca_default_coeffs[am][j][0];
s->downmix_coef[j][1] = dca_default_coeffs[am][j][1];

@@ -828,7 +843,8 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
}

/* Scale factor index */
s->lfe_scale_factor = scale_factor_quant7[get_bits(&s->gb, 8)];
skip_bits(&s->gb, 1);
s->lfe_scale_factor = scale_factor_quant7[get_bits(&s->gb, 7)];

/* Quantization step size * scale factor */
lfe_scale = 0.035 * s->lfe_scale_factor;
@@ -7528,7 +7528,7 @@ static const float dca_downmix_coeffs[65] = {
0.001412537544623, 0.001000000000000, 0.000501187233627, 0.000251188643151, 0.000000000000000,
};

static const uint8_t dca_default_coeffs[16][5][2] = {
static const uint8_t dca_default_coeffs[10][5][2] = {
{ { 13, 13 }, },
{ { 0, 64 }, { 64, 0 }, },
{ { 0, 64 }, { 64, 0 }, },
@@ -491,10 +491,16 @@ static inline void codeblock(DiracContext *s, SubBand *b,
}

if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
int quant = b->quant;
if (is_arith)
b->quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
else
b->quant += dirac_get_se_golomb(gb);
quant += dirac_get_se_golomb(gb);
if (quant < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
return;
}
b->quant = quant;
}

b->quant = FFMIN(b->quant, MAX_QUANT);

@@ -619,7 +625,7 @@ static void decode_component(DiracContext *s, int comp)
b->quant = svq3_get_ue_golomb(&s->gb);
align_get_bits(&s->gb);
b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
b->length = FFMIN(b->length, get_bits_left(&s->gb)/8);
b->length = FFMIN(b->length, FFMAX(get_bits_left(&s->gb)/8, 0));
skip_bits_long(&s->gb, b->length*8);
}
}

@@ -1172,7 +1178,7 @@ static void propagate_block_data(DiracBlock *block, int stride, int size)
* Dirac Specification ->
* 12. Block motion data syntax
*/
static void dirac_unpack_block_motion_data(DiracContext *s)
static int dirac_unpack_block_motion_data(DiracContext *s)
{
GetBitContext *gb = &s->gb;
uint8_t *sbsplit = s->sbsplit;

@@ -1192,7 +1198,9 @@ static void dirac_unpack_block_motion_data(DiracContext *s)
ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb)); /* svq3_get_ue_golomb(gb) is the length */
for (y = 0; y < s->sbheight; y++) {
for (x = 0; x < s->sbwidth; x++) {
int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
if (split > 2)
return -1;
sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
}
sbsplit += s->sbwidth;

@@ -1221,6 +1229,8 @@ static void dirac_unpack_block_motion_data(DiracContext *s)
propagate_block_data(block, s->blwidth, step);
}
}

return 0;
}

static int weight(int i, int blen, int offset)

@@ -1675,7 +1685,8 @@ static int dirac_decode_picture_header(DiracContext *s)
if (s->num_refs) {
if (dirac_unpack_prediction_parameters(s)) /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
return -1;
dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
if (dirac_unpack_block_motion_data(s)) /* [DIRAC_STD] 12. Block motion data syntax */
return -1;
}
if (dirac_unpack_idwt_params(s)) /* [DIRAC_STD] 11.3 Wavelet transform data */
return -1;
@@ -183,6 +183,11 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
int stereo = s->channels - 1;
int16_t *output_samples;

if (stereo && (buf_size & 1)) {
buf_size--;
buf_end--;
}

/* calculate output size */
switch(avctx->codec->id) {
case CODEC_ID_ROQ_DPCM:

@@ -320,7 +325,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;

return buf_size;
return avpkt->size;
}

#define DPCM_DECODER(id_, name_, long_name_) \
@@ -147,11 +147,11 @@ static int cin_decode_huffman(const unsigned char *src, int src_size, unsigned c
return dst_cur - dst;
}

static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
static int cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
{
uint16_t cmd;
int i, sz, offset, code;
unsigned char *dst_end = dst + dst_size;
unsigned char *dst_end = dst + dst_size, *dst_start = dst;
const unsigned char *src_end = src + src_size;

while (src < src_end && dst < dst_end) {

@@ -162,6 +162,8 @@ static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned cha
} else {
cmd = AV_RL16(src); src += 2;
offset = cmd >> 4;
if ((int) (dst - dst_start) < offset + 1)
return AVERROR_INVALIDDATA;
sz = (cmd & 0xF) + 2;
/* don't use memcpy/memmove here as the decoding routine (ab)uses */
/* buffer overlappings to repeat bytes in the destination */

@@ -173,6 +175,8 @@ static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned cha
}
}
}

return 0;
}

static void cin_decode_rle(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)

@@ -202,13 +206,7 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
CinVideoContext *cin = avctx->priv_data;
int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size;

cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if (avctx->reget_buffer(avctx, &cin->frame)) {
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
return -1;
}
int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size, res = 0;

palette_type = buf[0];
palette_colors_count = AV_RL16(buf+1);

@@ -234,8 +232,6 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
bitmap_frame_size -= 4;
}
}
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
cin->frame.palette_has_changed = 1;

/* note: the decoding routines below assumes that surface.width = surface.pitch */
switch (bitmap_frame_type) {

@@ -268,17 +264,31 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
break;
case 38:
cin_decode_lzss(buf, bitmap_frame_size,
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
res = cin_decode_lzss(buf, bitmap_frame_size,
cin->bitmap_table[CIN_CUR_BMP],
cin->bitmap_size);
if (res < 0)
return res;
break;
case 39:
cin_decode_lzss(buf, bitmap_frame_size,
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
res = cin_decode_lzss(buf, bitmap_frame_size,
cin->bitmap_table[CIN_CUR_BMP],
cin->bitmap_size);
if (res < 0)
return res;
cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
break;
}

cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if (avctx->reget_buffer(avctx, &cin->frame)) {
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
return -1;
}

memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
cin->frame.palette_has_changed = 1;
for (y = 0; y < cin->avctx->height; ++y)
memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0],
cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
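cin_decode_lzss() above now verifies that a back-reference does not point before the start of the output buffer before copying. The check in isolation, as a standalone sketch (buffer layout and sizes here are invented for the example):

```c
#include <stdio.h>
#include <stdint.h>

/* Copy `sz` bytes from `offset+1` positions back in the output, byte by
 * byte (overlap is intentional in LZSS), but only if that source really
 * lies inside what has been written so far. */
static int lzss_copy_back(uint8_t *dst_start, uint8_t *dst, uint8_t *dst_end,
                          int offset, int sz)
{
    if ((int)(dst - dst_start) < offset + 1)
        return -1;                        /* would read before the buffer */
    while (sz-- && dst < dst_end) {
        *dst = *(dst - offset - 1);
        dst++;
    }
    return 0;
}

int main(void)
{
    uint8_t buf[8] = { 'a', 'b' };
    /* repeat the last two bytes twice: offset 1, length 4 -> "ababab" */
    if (!lzss_copy_back(buf, buf + 2, buf + sizeof(buf), 1, 4))
        printf("%.6s\n", buf);
    return 0;
}
```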
@@ -367,18 +367,17 @@ void ff_put_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

/* read the pixels */
for(i=0;i<8;i++) {
pixels[0] = cm[block[0]];
pixels[1] = cm[block[1]];
pixels[2] = cm[block[2]];
pixels[3] = cm[block[3]];
pixels[4] = cm[block[4]];
pixels[5] = cm[block[5]];
pixels[6] = cm[block[6]];
pixels[7] = cm[block[7]];
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);
pixels[4] = av_clip_uint8(block[4]);
pixels[5] = av_clip_uint8(block[5]);
pixels[6] = av_clip_uint8(block[6]);
pixels[7] = av_clip_uint8(block[7]);

pixels += line_size;
block += 8;

@@ -389,14 +388,13 @@ static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = cm[block[0]];
pixels[1] = cm[block[1]];
pixels[2] = cm[block[2]];
pixels[3] = cm[block[3]];
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);

pixels += line_size;
block += 8;

@@ -407,12 +405,11 @@ static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = cm[block[0]];
pixels[1] = cm[block[1]];
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);

pixels += line_size;
block += 8;

@@ -444,18 +441,17 @@ void ff_add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

/* read the pixels */
for(i=0;i<8;i++) {
pixels[0] = cm[pixels[0] + block[0]];
pixels[1] = cm[pixels[1] + block[1]];
pixels[2] = cm[pixels[2] + block[2]];
pixels[3] = cm[pixels[3] + block[3]];
pixels[4] = cm[pixels[4] + block[4]];
pixels[5] = cm[pixels[5] + block[5]];
pixels[6] = cm[pixels[6] + block[6]];
pixels[7] = cm[pixels[7] + block[7]];
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
pixels[4] = av_clip_uint8(pixels[4] + block[4]);
pixels[5] = av_clip_uint8(pixels[5] + block[5]);
pixels[6] = av_clip_uint8(pixels[6] + block[6]);
pixels[7] = av_clip_uint8(pixels[7] + block[7]);
pixels += line_size;
block += 8;
}

@@ -465,14 +461,13 @@ static void add_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = cm[pixels[0] + block[0]];
pixels[1] = cm[pixels[1] + block[1]];
pixels[2] = cm[pixels[2] + block[2]];
pixels[3] = cm[pixels[3] + block[3]];
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
pixels += line_size;
block += 8;
}

@@ -482,12 +477,11 @@ static void add_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = cm[pixels[0] + block[0]];
pixels[1] = cm[pixels[1] + block[1]];
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels += line_size;
block += 8;
}

@@ -2779,15 +2773,11 @@ static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block)
static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block)
{
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

dest[0] = cm[(block[0] + 4)>>3];
dest[0] = av_clip_uint8((block[0] + 4)>>3);
}
static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
{
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
}

static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
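The dsputil.c hunks above replace the MAX_NEG_CROP lookup table with av_clip_uint8(); both approaches reduce to clamping an int into [0, 255]. A standalone equivalent of that clamp (the real av_clip_uint8 in libavutil is written branchlessly, but the result is the same):

```c
#include <stdio.h>
#include <stdint.h>

/* Clamp a DCT output sample to the 0..255 pixel range. */
static uint8_t clip_uint8(int a)
{
    if (a < 0)   return 0;
    if (a > 255) return 255;
    return a;
}

int main(void)
{
    printf("%u %u %u\n", clip_uint8(-7), clip_uint8(128), clip_uint8(300));
    /* prints: 0 128 255 */
    return 0;
}
```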
@@ -43,6 +43,7 @@ typedef struct TgqContext {
|
||||
ScanTable scantable;
|
||||
int qtable[64];
|
||||
DECLARE_ALIGNED(16, DCTELEM, block)[6][64];
|
||||
GetByteContext gb;
|
||||
} TgqContext;
|
||||
|
||||
static av_cold int tgq_decode_init(AVCodecContext *avctx){
|
||||
@@ -141,39 +142,36 @@ static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8
|
||||
}
|
||||
}
|
||||
|
||||
static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x, const uint8_t **bs, const uint8_t *buf_end){
|
||||
static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x){
|
||||
int mode;
|
||||
int i;
|
||||
int8_t dc[6];
|
||||
|
||||
mode = bytestream_get_byte(bs);
|
||||
if (mode>buf_end-*bs) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "truncated macroblock\n");
|
||||
return;
|
||||
}
|
||||
|
||||
mode = bytestream2_get_byte(&s->gb);
|
||||
if (mode>12) {
|
||||
GetBitContext gb;
|
||||
init_get_bits(&gb, *bs, mode*8);
|
||||
init_get_bits(&gb, s->gb.buffer, FFMIN(s->gb.buffer_end - s->gb.buffer, mode) * 8);
|
||||
for(i=0; i<6; i++)
|
||||
tgq_decode_block(s, s->block[i], &gb);
|
||||
tgq_idct_put_mb(s, s->block, mb_x, mb_y);
|
||||
bytestream2_skip(&s->gb, mode);
|
||||
}else{
|
||||
if (mode==3) {
|
||||
memset(dc, (*bs)[0], 4);
|
||||
dc[4] = (*bs)[1];
|
||||
dc[5] = (*bs)[2];
|
||||
memset(dc, bytestream2_get_byte(&s->gb), 4);
|
||||
dc[4] = bytestream2_get_byte(&s->gb);
|
||||
dc[5] = bytestream2_get_byte(&s->gb);
|
||||
}else if (mode==6) {
|
||||
memcpy(dc, *bs, 6);
|
||||
bytestream2_get_buffer(&s->gb, dc, 6);
|
||||
}else if (mode==12) {
|
||||
for(i=0; i<6; i++)
|
||||
dc[i] = (*bs)[i*2];
|
||||
for (i = 0; i < 6; i++) {
|
||||
dc[i] = bytestream2_get_byte(&s->gb);
|
||||
bytestream2_skip(&s->gb, 1);
|
||||
}
|
||||
}else{
|
||||
av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
|
||||
}
|
||||
tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc);
|
||||
}
|
||||
*bs += mode;
|
||||
}
|
||||
|
||||
static void tgq_calculate_qtable(TgqContext *s, int quant){
|
||||
@@ -193,28 +191,30 @@ static int tgq_decode_frame(AVCodecContext *avctx,
|
||||
AVPacket *avpkt){
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
const uint8_t *buf_start = buf;
|
||||
const uint8_t *buf_end = buf + buf_size;
|
||||
TgqContext *s = avctx->priv_data;
|
||||
int x,y;
|
||||
|
||||
int big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
|
||||
buf += 8;
|
||||
|
||||
if(8>buf_end-buf) {
|
||||
if (buf_size < 16) {
|
||||
av_log(avctx, AV_LOG_WARNING, "truncated header\n");
|
||||
return -1;
|
||||
}
|
||||
s->width = big_endian ? AV_RB16(&buf[0]) : AV_RL16(&buf[0]);
|
||||
s->height = big_endian ? AV_RB16(&buf[2]) : AV_RL16(&buf[2]);
|
||||
bytestream2_init(&s->gb, buf + 8, buf_size - 8);
|
||||
if (big_endian) {
|
||||
s->width = bytestream2_get_be16u(&s->gb);
|
||||
s->height = bytestream2_get_be16u(&s->gb);
|
||||
} else {
|
||||
s->width = bytestream2_get_le16u(&s->gb);
|
||||
s->height = bytestream2_get_le16u(&s->gb);
|
||||
}
|
||||
|
||||
if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
|
||||
avcodec_set_dimensions(s->avctx, s->width, s->height);
|
||||
if (s->frame.data[0])
|
||||
avctx->release_buffer(avctx, &s->frame);
|
||||
}
|
||||
tgq_calculate_qtable(s, buf[4]);
|
||||
buf += 8;
|
||||
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
|
||||
bytestream2_skip(&s->gb, 3);
|
||||
|
||||
if (!s->frame.data[0]) {
|
||||
s->frame.key_frame = 1;
|
||||
@@ -226,14 +226,14 @@ static int tgq_decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
}
|
||||
|
||||
for (y=0; y<(avctx->height+15)/16; y++)
|
||||
for (x=0; x<(avctx->width+15)/16; x++)
|
||||
tgq_decode_mb(s, y, x, &buf, buf_end);
|
||||
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
|
||||
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
|
||||
tgq_decode_mb(s, y, x);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->frame;
|
||||
|
||||
return buf-buf_start;
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
static av_cold int tgq_decode_end(AVCodecContext *avctx){
|
||||
|
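
The eatgq.c changes above move the decoder from raw pointer walking (*bs, buf_end) onto the bytestream2 API, which carries the buffer bounds inside GetByteContext. A minimal sketch of that reading pattern, assuming the libavcodec bytestream.h helpers and with an illustrative function name (this is not the decoder's actual code):

    /* Requires "bytestream.h"; reads a 6-byte DC vector byte by byte, the way
     * the rewritten branches do, letting GetByteContext bound every access. */
    static void read_dc_sketch(GetByteContext *gb, int8_t dc[6])
    {
        int i;
        if (bytestream2_get_bytes_left(gb) < 6)
            return;                            /* truncated packet: stop early */
        for (i = 0; i < 6; i++)
            dc[i] = bytestream2_get_byte(gb);  /* checked read, never runs past the end */
    }

The checked bytestream2_get_byte() variants return 0 once the buffer is exhausted, which is presumably what allows the rewritten tgq_decode_mb() to drop its explicit "truncated macroblock" check.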
@@ -440,9 +440,14 @@ static void guess_mv(MpegEncContext *s)
    if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            s->mb_x = 0;
            s->mb_y = mb_y;
            ff_init_block_index(s);
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;

                ff_update_block_index(s);

                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))

@@ -477,6 +482,9 @@ static void guess_mv(MpegEncContext *s)

        changed = 0;
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            s->mb_x = 0;
            s->mb_y = mb_y;
            ff_init_block_index(s);
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                int mv_predictor[8][2] = { { 0 } };

@@ -488,6 +496,8 @@ static void guess_mv(MpegEncContext *s)
                const int mot_index = (mb_x + mb_y * mot_stride) * mot_step;
                int prev_x, prev_y, prev_ref;

                ff_update_block_index(s);

                if ((mb_x ^ mb_y ^ pass) & 1)
                    continue;

@@ -1098,11 +1108,16 @@ void ff_er_frame_end(MpegEncContext *s)

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        s->mb_x = 0;
        s->mb_y = mb_y;
        ff_init_block_index(s);
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];
            int dir = !s->last_picture.f.data[0];

            ff_update_block_index(s);

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type))

@@ -1140,11 +1155,16 @@ void ff_er_frame_end(MpegEncContext *s)
    /* guess MVs */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            s->mb_x = 0;
            s->mb_y = mb_y;
            ff_init_block_index(s);
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->current_picture.f.mb_type[mb_xy];

                ff_update_block_index(s);

                error = s->error_status_table[mb_xy];

                if (IS_INTRA(mb_type))

@@ -49,7 +49,7 @@ typedef struct Escape124Context {
} Escape124Context;

static int can_safely_read(GetBitContext* gb, int bits) {
    return get_bits_count(gb) + bits <= gb->size_in_bits;
    return get_bits_left(gb) >= bits;
}

/**

@@ -422,7 +422,16 @@ static inline int decode_subframe(FLACContext *s, int channel)
    type = get_bits(&s->gb, 6);

    if (get_bits1(&s->gb)) {
        int left = get_bits_left(&s->gb);
        wasted = 1;
        if ( left < 0 ||
            (left < s->curr_bps && !show_bits_long(&s->gb, left)) ||
            !show_bits_long(&s->gb, s->curr_bps)) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Invalid number of wasted bits > available bits (%d) - left=%d\n",
                   s->curr_bps, left);
            return AVERROR_INVALIDDATA;
        }
        while (!get_bits1(&s->gb))
            wasted++;
        s->curr_bps -= wasted;

@@ -140,7 +140,7 @@ static int decode_frame(AVCodecContext *avctx,
    uint32_t offs[4];
    int i, j, is_chroma;
    const int planes = 3;

    enum PixelFormat pix_fmt;

    header = AV_RL32(buf);
    version = header & 0xff;
@@ -155,8 +155,6 @@ static int decode_frame(AVCodecContext *avctx,

    buf += header_size;

    avctx->pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P;

    if (version < 2) {
        unsigned needed_size = avctx->width*avctx->height*3;
        if (version == 0) needed_size /= 2;
@@ -176,6 +174,12 @@ static int decode_frame(AVCodecContext *avctx,
                      FF_BUFFER_HINTS_PRESERVE |
                      FF_BUFFER_HINTS_REUSABLE;

    pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P;
    if (avctx->pix_fmt != pix_fmt && f->data[0]) {
        avctx->release_buffer(avctx, f);
    }
    avctx->pix_fmt = pix_fmt;

    switch(version) {
    case 0:
    default:

@@ -126,8 +126,8 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
        c->prev_samples[c->prev_samples_pos++] = rlow - rhigh;
        ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24,
                          &xout1, &xout2);
        *out_buf++ = av_clip_int16(xout1 >> 12);
        *out_buf++ = av_clip_int16(xout2 >> 12);
        *out_buf++ = av_clip_int16(xout1 >> 11);
        *out_buf++ = av_clip_int16(xout2 >> 11);
        if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
            memmove(c->prev_samples, c->prev_samples + c->prev_samples_pos - 22,
                    22 * sizeof(c->prev_samples[0]));

@@ -128,8 +128,8 @@ static inline void filter_samples(G722Context *c, const int16_t *samples,
    c->prev_samples[c->prev_samples_pos++] = samples[0];
    c->prev_samples[c->prev_samples_pos++] = samples[1];
    ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2);
    *xlow = xout1 + xout2 >> 13;
    *xhigh = xout1 - xout2 >> 13;
    *xlow = xout1 + xout2 >> 14;
    *xhigh = xout1 - xout2 >> 14;
    if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
        memmove(c->prev_samples,
                c->prev_samples + c->prev_samples_pos - 22,

@@ -118,10 +118,23 @@ for examples see get_bits, show_bits, skip_bits, get_vlc
# define MIN_CACHE_BITS 25
#endif

#if UNCHECKED_BITSTREAM_READER
#define OPEN_READER(name, gb) \
    unsigned int name##_index = (gb)->index; \
    av_unused unsigned int name##_cache

#define HAVE_BITS_REMAINING(name, gb) 1
#else
#define OPEN_READER(name, gb) \
    unsigned int name##_index = (gb)->index; \
    unsigned int av_unused name##_cache = 0; \
    unsigned int av_unused name##_size_plus8 = \
        (gb)->size_in_bits_plus8

#define HAVE_BITS_REMAINING(name, gb) \
    name##_index < name##_size_plus8
#endif

#define CLOSE_READER(name, gb) (gb)->index = name##_index

#ifdef BITSTREAM_READER_LE
@@ -154,7 +167,7 @@ for examples see get_bits, show_bits, skip_bits, get_vlc
# define SKIP_COUNTER(name, gb, num) name##_index += (num)
#else
# define SKIP_COUNTER(name, gb, num) \
    name##_index = FFMIN((gb)->size_in_bits_plus8, name##_index + (num))
    name##_index = FFMIN(name##_size_plus8, name##_index + (num))
#endif

#define SKIP_BITS(name, gb, num) do { \
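
The new OPEN_READER/HAVE_BITS_REMAINING pair above gives the macro-based reader a bounds test that compiles to a constant 1 when UNCHECKED_BITSTREAM_READER is set. A hedged sketch of how a loop might use it, modeled on the svq3_get_ue_golomb() change in the next hunk; the helper name and the 32-iteration cap are illustrative, not part of the header:

    /* Assumes get_bits.h; counts leading zero bits with the cached reader. */
    static inline int count_leading_zeros_sketch(GetBitContext *gb)
    {
        int n = 0;
        OPEN_READER(re, gb);
        UPDATE_CACHE(re, gb);
        /* The explicit cap keeps the loop finite even in the unchecked
         * configuration, where HAVE_BITS_REMAINING() is always true. */
        while (n < 32 && HAVE_BITS_REMAINING(re, gb) && SHOW_UBITS(re, gb, 1) == 0) {
            LAST_SKIP_BITS(re, gb, 1);
            UPDATE_CACHE(re, gb);
            n++;
        }
        CLOSE_READER(re, gb);
        return n;
    }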
@@ -135,7 +135,7 @@ static inline int svq3_get_ue_golomb(GetBitContext *gb){
            ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
            UPDATE_CACHE(re, gb);
            buf = GET_CACHE(re, gb);
        } while(ret<0x8000000U);
        } while (ret<0x8000000U && HAVE_BITS_REMAINING(re, gb));

        CLOSE_READER(re, gb);
        return ret - 1;
@@ -301,7 +301,7 @@ static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, int
        return buf;
    }else{
        int i;
        for(i=0; SHOW_UBITS(re, gb, 1) == 0; i++){
        for (i = 0; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) {
            if (gb->size_in_bits <= re_index)
                return -1;
            LAST_SKIP_BITS(re, gb, 1);

@@ -265,7 +265,7 @@ static int h261_decode_mb(H261Context *h){
    while( h->mba_diff == MBA_STUFFING ); // stuffing

    if ( h->mba_diff < 0 ){
        if ( get_bits_count(&s->gb) + 7 >= s->gb.size_in_bits )
        if (get_bits_left(&s->gb) <= 7)
            return SLICE_END;

        av_log(s->avctx, AV_LOG_ERROR, "illegal mba at %d %d\n", s->mb_x, s->mb_y);
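
The h261 hunk above, like several others in this set, rewrites manual end-of-stream arithmetic in terms of get_bits_left(). The underlying identity is simply that get_bits_left() reports the number of unread bits, so both forms express the same overread condition; a sketch (helper name illustrative, assumes get_bits.h):

    static inline int would_overread_sketch(GetBitContext *gb, int n)
    {
        /* old style: get_bits_count(gb) + n > gb->size_in_bits */
        return get_bits_left(gb) < n;
    }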
@@ -664,7 +664,7 @@ retry:
    ret = decode_slice(s);
    while(s->mb_y<s->mb_height){
        if(s->msmpeg4_version){
            if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits)
            if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_left(&s->gb)<0)
                break;
        }else{
            int prev_x=s->mb_x, prev_y=s->mb_y;
@@ -104,7 +104,7 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){
|
||||
return 0;
|
||||
} //FIXME cleanup like check_intra_pred_mode
|
||||
|
||||
static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
|
||||
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
|
||||
MpegEncContext * const s = &h->s;
|
||||
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
|
||||
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
|
||||
@@ -136,22 +136,6 @@ static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
|
||||
return mode;
|
||||
}
|
||||
|
||||
/**
|
||||
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode)
|
||||
{
|
||||
return check_intra_pred_mode(h, mode, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode)
|
||||
{
|
||||
return check_intra_pred_mode(h, mode, 1);
|
||||
}
|
||||
|
||||
|
||||
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
|
||||
int i, si, di;
|
||||
@@ -2707,11 +2691,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
s->avctx->level = h->sps.level_idc;
|
||||
s->avctx->refs = h->sps.ref_frame_count;
|
||||
|
||||
if(h == h0 && h->dequant_coeff_pps != pps_id){
|
||||
h->dequant_coeff_pps = pps_id;
|
||||
init_dequant_tables(h);
|
||||
}
|
||||
|
||||
s->mb_width= h->sps.mb_width;
|
||||
s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
|
||||
|
||||
@@ -2806,7 +2785,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
else
|
||||
s->avctx->pix_fmt = PIX_FMT_YUV420P10;
|
||||
break;
|
||||
default:
|
||||
case 8:
|
||||
if (CHROMA444){
|
||||
s->avctx->pix_fmt = s->avctx->color_range == AVCOL_RANGE_JPEG ? PIX_FMT_YUVJ444P : PIX_FMT_YUV444P;
|
||||
if (s->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
@@ -2825,6 +2804,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
hwaccel_pixfmt_list_h264_jpeg_420 :
|
||||
ff_hwaccel_pixfmt_list_420);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
|
||||
@@ -2870,6 +2854,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
}
|
||||
|
||||
if(h == h0 && h->dequant_coeff_pps != pps_id){
|
||||
h->dequant_coeff_pps = pps_id;
|
||||
init_dequant_tables(h);
|
||||
}
|
||||
|
||||
h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
|
||||
|
||||
h->mb_mbaff = 0;
|
||||
@@ -3041,7 +3030,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->ref_count[1]= h->pps.ref_count[1];
|
||||
|
||||
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
|
||||
unsigned max= (16<<(s->picture_structure != PICT_FRAME))-1;
|
||||
unsigned max= s->picture_structure == PICT_FRAME ? 15 : 31;
|
||||
|
||||
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
|
||||
h->direct_spatial_mv_pred= get_bits1(&s->gb);
|
||||
}
|
||||
@@ -3051,13 +3041,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
|
||||
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
|
||||
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
|
||||
}
|
||||
|
||||
}
|
||||
if(h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
|
||||
if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
|
||||
h->ref_count[0]= h->ref_count[1]= 1;
|
||||
return -1;
|
||||
h->ref_count[0] = h->ref_count[1] = 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if(h->slice_type_nos == AV_PICTURE_TYPE_B)
|
||||
h->list_count= 2;
|
||||
else
|
||||
@@ -3694,8 +3685,8 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
|
||||
if(s->mb_y >= s->mb_height){
|
||||
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
|
||||
|
||||
if( get_bits_count(&s->gb) == s->gb.size_in_bits
|
||||
|| get_bits_count(&s->gb) < s->gb.size_in_bits && s->avctx->error_recognition < FF_ER_AGGRESSIVE) {
|
||||
if ( get_bits_left(&s->gb) == 0
|
||||
|| get_bits_left(&s->gb) > 0 && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
|
||||
|
||||
return 0;
|
||||
@@ -3707,9 +3698,9 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
|
||||
}
|
||||
}
|
||||
|
||||
if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){
|
||||
if (get_bits_left(&s->gb) <= 0 && s->mb_skip_run <= 0){
|
||||
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
|
||||
if(get_bits_count(&s->gb) == s->gb.size_in_bits ){
|
||||
if (get_bits_left(&s->gb) == 0) {
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
|
||||
if (s->mb_x > lf_x_start) loop_filter(h, lf_x_start, s->mb_x);
|
||||
|
||||
@@ -3798,7 +3789,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
int consumed;
|
||||
int dst_length;
|
||||
int bit_length;
|
||||
uint8_t *ptr;
|
||||
const uint8_t *ptr;
|
||||
int i, nalsize = 0;
|
||||
int err;
|
||||
|
||||
@@ -3974,10 +3965,10 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
break;
|
||||
case NAL_SPS:
|
||||
init_get_bits(&s->gb, ptr, bit_length);
|
||||
if(ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)){
|
||||
if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)){
|
||||
av_log(h->s.avctx, AV_LOG_DEBUG, "SPS decoding failure, trying alternative mode\n");
|
||||
if(h->is_avc) av_assert0(next_avc - buf_index + consumed == nalsize);
|
||||
init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed));
|
||||
init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed - 1));
|
||||
ff_h264_decode_seq_parameter_set(h);
|
||||
}
|
||||
|
||||
|
@@ -671,15 +671,7 @@ void ff_generate_sliding_window_mmcos(H264Context *h);
|
||||
*/
|
||||
int ff_h264_check_intra4x4_pred_mode(H264Context *h);
|
||||
|
||||
/**
|
||||
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode);
|
||||
|
||||
/**
|
||||
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode);
|
||||
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma);
|
||||
|
||||
void ff_h264_hl_decode_mb(H264Context *h);
|
||||
int ff_h264_frame_start(H264Context *h);
|
||||
|
@@ -1998,6 +1998,8 @@ decode_intra_mb:
|
||||
}
|
||||
|
||||
// The pixels are stored in the same order as levels in h->mb array.
|
||||
if ((int) (h->cabac.bytestream_end - ptr) < mb_size)
|
||||
return -1;
|
||||
memcpy(h->mb, ptr, mb_size); ptr+=mb_size;
|
||||
|
||||
ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);
|
||||
@@ -2042,14 +2044,14 @@ decode_intra_mb:
|
||||
write_back_intra_pred_mode(h);
|
||||
if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1;
|
||||
} else {
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode( h, h->intra16x16_pred_mode );
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode, 0 );
|
||||
if( h->intra16x16_pred_mode < 0 ) return -1;
|
||||
}
|
||||
if(decode_chroma){
|
||||
h->chroma_pred_mode_table[mb_xy] =
|
||||
pred_mode = decode_cabac_mb_chroma_pre_mode( h );
|
||||
|
||||
pred_mode= ff_h264_check_intra_chroma_pred_mode( h, pred_mode );
|
||||
pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode, 1 );
|
||||
if( pred_mode < 0 ) return -1;
|
||||
h->chroma_pred_mode= pred_mode;
|
||||
} else {
|
||||
|
@@ -823,12 +823,12 @@ decode_intra_mb:
|
||||
if( ff_h264_check_intra4x4_pred_mode(h) < 0)
|
||||
return -1;
|
||||
}else{
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode(h, h->intra16x16_pred_mode);
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode, 0);
|
||||
if(h->intra16x16_pred_mode < 0)
|
||||
return -1;
|
||||
}
|
||||
if(decode_chroma){
|
||||
pred_mode= ff_h264_check_intra_chroma_pred_mode(h, get_ue_golomb_31(&s->gb));
|
||||
pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb), 1);
|
||||
if(pred_mode < 0)
|
||||
return -1;
|
||||
h->chroma_pred_mode= pred_mode;
|
||||
|
@@ -253,7 +253,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
|
||||
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
|
||||
b8_stride = 2+4*s->mb_stride;
|
||||
b4_stride *= 6;
|
||||
if(IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])){
|
||||
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
|
||||
mb_type_col[0] &= ~MB_TYPE_INTERLACED;
|
||||
mb_type_col[1] &= ~MB_TYPE_INTERLACED;
|
||||
}
|
||||
@@ -443,6 +443,10 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
|
||||
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
|
||||
b8_stride = 2+4*s->mb_stride;
|
||||
b4_stride *= 6;
|
||||
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
|
||||
mb_type_col[0] &= ~MB_TYPE_INTERLACED;
|
||||
mb_type_col[1] &= ~MB_TYPE_INTERLACED;
|
||||
}
|
||||
|
||||
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
|
||||
|
||||
|
@@ -241,7 +241,7 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
|
||||
sps->num_reorder_frames= get_ue_golomb(&s->gb);
|
||||
get_ue_golomb(&s->gb); /*max_dec_frame_buffering*/
|
||||
|
||||
if(get_bits_left(&s->gb) < 0){
|
||||
if (get_bits_left(&s->gb) < 0) {
|
||||
sps->num_reorder_frames=0;
|
||||
sps->bitstream_restriction_flag= 0;
|
||||
}
|
||||
@@ -251,9 +251,9 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if(get_bits_left(&s->gb) < 0){
|
||||
if (get_bits_left(&s->gb) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Overread VUI by %d bits\n", -get_bits_left(&s->gb));
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -164,7 +164,7 @@ static int decode_buffering_period(H264Context *h){
|
||||
int ff_h264_decode_sei(H264Context *h){
|
||||
MpegEncContext * const s = &h->s;
|
||||
|
||||
while(get_bits_count(&s->gb) + 16 < s->gb.size_in_bits){
|
||||
while (get_bits_left(&s->gb) > 16) {
|
||||
int size, type;
|
||||
|
||||
type=0;
|
||||
|
@@ -49,7 +49,6 @@ static const uint8_t scan8[16*3]={
|
||||
void FUNCC(ff_h264_idct_add)(uint8_t *_dst, DCTELEM *_block, int stride)
|
||||
{
|
||||
int i;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)_dst;
|
||||
dctcoef *block = (dctcoef*)_block;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
@@ -74,16 +73,15 @@ void FUNCC(ff_h264_idct_add)(uint8_t *_dst, DCTELEM *_block, int stride)
|
||||
const int z2= (block[1 + 4*i]>>1) - block[3 + 4*i];
|
||||
const int z3= block[1 + 4*i] + (block[3 + 4*i]>>1);
|
||||
|
||||
dst[i + 0*stride]= CLIP(dst[i + 0*stride] + ((z0 + z3) >> 6));
|
||||
dst[i + 1*stride]= CLIP(dst[i + 1*stride] + ((z1 + z2) >> 6));
|
||||
dst[i + 2*stride]= CLIP(dst[i + 2*stride] + ((z1 - z2) >> 6));
|
||||
dst[i + 3*stride]= CLIP(dst[i + 3*stride] + ((z0 - z3) >> 6));
|
||||
dst[i + 0*stride]= av_clip_pixel(dst[i + 0*stride] + ((z0 + z3) >> 6));
|
||||
dst[i + 1*stride]= av_clip_pixel(dst[i + 1*stride] + ((z1 + z2) >> 6));
|
||||
dst[i + 2*stride]= av_clip_pixel(dst[i + 2*stride] + ((z1 - z2) >> 6));
|
||||
dst[i + 3*stride]= av_clip_pixel(dst[i + 3*stride] + ((z0 - z3) >> 6));
|
||||
}
|
||||
}
|
||||
|
||||
void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
|
||||
int i;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)_dst;
|
||||
dctcoef *block = (dctcoef*)_block;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
@@ -143,14 +141,14 @@ void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
|
||||
const int b5 = (a3>>2) - a5;
|
||||
const int b7 = a7 - (a1>>2);
|
||||
|
||||
dst[i + 0*stride] = CLIP( dst[i + 0*stride] + ((b0 + b7) >> 6) );
|
||||
dst[i + 1*stride] = CLIP( dst[i + 1*stride] + ((b2 + b5) >> 6) );
|
||||
dst[i + 2*stride] = CLIP( dst[i + 2*stride] + ((b4 + b3) >> 6) );
|
||||
dst[i + 3*stride] = CLIP( dst[i + 3*stride] + ((b6 + b1) >> 6) );
|
||||
dst[i + 4*stride] = CLIP( dst[i + 4*stride] + ((b6 - b1) >> 6) );
|
||||
dst[i + 5*stride] = CLIP( dst[i + 5*stride] + ((b4 - b3) >> 6) );
|
||||
dst[i + 6*stride] = CLIP( dst[i + 6*stride] + ((b2 - b5) >> 6) );
|
||||
dst[i + 7*stride] = CLIP( dst[i + 7*stride] + ((b0 - b7) >> 6) );
|
||||
dst[i + 0*stride] = av_clip_pixel( dst[i + 0*stride] + ((b0 + b7) >> 6) );
|
||||
dst[i + 1*stride] = av_clip_pixel( dst[i + 1*stride] + ((b2 + b5) >> 6) );
|
||||
dst[i + 2*stride] = av_clip_pixel( dst[i + 2*stride] + ((b4 + b3) >> 6) );
|
||||
dst[i + 3*stride] = av_clip_pixel( dst[i + 3*stride] + ((b6 + b1) >> 6) );
|
||||
dst[i + 4*stride] = av_clip_pixel( dst[i + 4*stride] + ((b6 - b1) >> 6) );
|
||||
dst[i + 5*stride] = av_clip_pixel( dst[i + 5*stride] + ((b4 - b3) >> 6) );
|
||||
dst[i + 6*stride] = av_clip_pixel( dst[i + 6*stride] + ((b2 - b5) >> 6) );
|
||||
dst[i + 7*stride] = av_clip_pixel( dst[i + 7*stride] + ((b0 - b7) >> 6) );
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,13 +156,12 @@ void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
|
||||
void FUNCC(ff_h264_idct_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
|
||||
int i, j;
|
||||
int dc = (((dctcoef*)block)[0] + 32) >> 6;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)p_dst;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
for( j = 0; j < 4; j++ )
|
||||
{
|
||||
for( i = 0; i < 4; i++ )
|
||||
dst[i] = CLIP( dst[i] + dc );
|
||||
dst[i] = av_clip_pixel( dst[i] + dc );
|
||||
dst += stride;
|
||||
}
|
||||
}
|
||||
@@ -172,13 +169,12 @@ void FUNCC(ff_h264_idct_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
|
||||
void FUNCC(ff_h264_idct8_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
|
||||
int i, j;
|
||||
int dc = (((dctcoef*)block)[0] + 32) >> 6;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)p_dst;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
for( j = 0; j < 8; j++ )
|
||||
{
|
||||
for( i = 0; i < 8; i++ )
|
||||
dst[i] = CLIP( dst[i] + dc );
|
||||
dst[i] = av_clip_pixel( dst[i] + dc );
|
||||
dst += stride;
|
||||
}
|
||||
}
|
||||
|
@@ -82,13 +82,15 @@ typedef struct HYuvContext{
|
||||
DSPContext dsp;
|
||||
}HYuvContext;
|
||||
|
||||
static const unsigned char classic_shift_luma[] = {
|
||||
#define classic_shift_luma_table_size 42
|
||||
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
|
||||
34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
|
||||
16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
|
||||
69,68, 0
|
||||
};
|
||||
|
||||
static const unsigned char classic_shift_chroma[] = {
|
||||
#define classic_shift_chroma_table_size 59
|
||||
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
|
||||
66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
|
||||
56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
|
||||
214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
|
||||
@@ -212,7 +214,7 @@ static int read_len_table(uint8_t *dst, GetBitContext *gb){
|
||||
if(repeat==0)
|
||||
repeat= get_bits(gb, 8);
|
||||
//printf("%d %d\n", val, repeat);
|
||||
if(i+repeat > 256) {
|
||||
if(i+repeat > 256 || get_bits_left(gb) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -394,10 +396,10 @@ static int read_old_huffman_tables(HYuvContext *s){
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
|
||||
init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
|
||||
init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
|
||||
if(read_len_table(s->len[0], &gb)<0)
|
||||
return -1;
|
||||
init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
|
||||
init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
|
||||
if(read_len_table(s->len[1], &gb)<0)
|
||||
return -1;
|
||||
|
||||
@@ -543,7 +545,7 @@ s->bgr32=1;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
assert(0);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
alloc_temp(s);
|
||||
@@ -750,7 +752,7 @@ static void decode_422_bitstream(HYuvContext *s, int count){
|
||||
count/=2;
|
||||
|
||||
if(count >= (get_bits_left(&s->gb))/(31*4)){
|
||||
for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
|
||||
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
|
||||
READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
|
||||
READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
|
||||
}
|
||||
@@ -768,7 +770,7 @@ static void decode_gray_bitstream(HYuvContext *s, int count){
|
||||
count/=2;
|
||||
|
||||
if(count >= (get_bits_left(&s->gb))/(31*2)){
|
||||
for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
|
||||
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
|
||||
READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
|
||||
}
|
||||
}else{
|
||||
|
@@ -727,6 +727,8 @@ static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
SPLIT_CELL(ref_cell->height, curr_cell.height);
|
||||
ref_cell->ypos += curr_cell.height;
|
||||
ref_cell->height -= curr_cell.height;
|
||||
if (ref_cell->height <= 0 || curr_cell.height <= 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else if (code == V_SPLIT) {
|
||||
if (curr_cell.width > strip_width) {
|
||||
/* split strip */
|
||||
@@ -735,6 +737,8 @@ static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
SPLIT_CELL(ref_cell->width, curr_cell.width);
|
||||
ref_cell->xpos += curr_cell.width;
|
||||
ref_cell->width -= curr_cell.width;
|
||||
if (ref_cell->width <= 0 || curr_cell.width <= 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
while (get_bits_left(&ctx->gb) >= 2) { /* loop until return */
|
||||
@@ -890,14 +894,16 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (width != ctx->width || height != ctx->height) {
|
||||
int res;
|
||||
|
||||
av_dlog(avctx, "Frame dimensions changed!\n");
|
||||
|
||||
ctx->width = width;
|
||||
ctx->height = height;
|
||||
|
||||
free_frame_buffers(ctx);
|
||||
if(allocate_frame_buffers(ctx, avctx) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if ((res = allocate_frame_buffers(ctx, avctx)) < 0)
|
||||
return res;
|
||||
avcodec_set_dimensions(avctx, width, height);
|
||||
}
|
||||
|
||||
|
@@ -854,8 +854,8 @@ end:
|
||||
{
|
||||
int v= show_bits(&s->gb, 16);
|
||||
|
||||
if(get_bits_count(&s->gb) + 16 > s->gb.size_in_bits){
|
||||
v>>= get_bits_count(&s->gb) + 16 - s->gb.size_in_bits;
|
||||
if (get_bits_left(&s->gb) < 16) {
|
||||
v >>= 16 - get_bits_left(&s->gb);
|
||||
}
|
||||
|
||||
if(v==0)
|
||||
|
@@ -198,6 +198,9 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *
|
||||
r = ff_log2_run[state->run_index[comp]];
|
||||
if(r)
|
||||
r = get_bits_long(&s->gb, r);
|
||||
if(x + r * stride > w) {
|
||||
r = (w - x) / stride;
|
||||
}
|
||||
for(i = 0; i < r; i++) {
|
||||
W(dst, x, Ra);
|
||||
x += stride;
|
||||
|
@@ -150,7 +150,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if (video_type == 0 || video_type == 1) {
|
||||
GetBitContext gb;
|
||||
init_get_bits(&gb, buf, FFMIN(video_size, (buf_end - buf) * 8));
|
||||
init_get_bits(&gb, buf, 8 * FFMIN(video_size, buf_end - buf));
|
||||
|
||||
for (j = 0; j < avctx->height; j += 8)
|
||||
for (i = 0; i < avctx->width; i += 8)
|
||||
|
@@ -30,10 +30,17 @@
|
||||
|
||||
typedef struct {
|
||||
AVCodecContext *avctx;
|
||||
AVFrame pic;
|
||||
uint16_t *prev, *cur;
|
||||
AVFrame prev, cur;
|
||||
} KgvContext;
|
||||
|
||||
static void decode_flush(AVCodecContext *avctx)
|
||||
{
|
||||
KgvContext * const c = avctx->priv_data;
|
||||
|
||||
if (c->prev.data[0])
|
||||
avctx->release_buffer(avctx, &c->prev);
|
||||
}
|
||||
|
||||
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
@@ -42,7 +49,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
int offsets[8];
|
||||
uint16_t *out, *prev;
|
||||
int outcnt = 0, maxcnt;
|
||||
int w, h, i;
|
||||
int w, h, i, res;
|
||||
|
||||
if (avpkt->size < 2)
|
||||
return -1;
|
||||
@@ -54,20 +61,23 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
if (av_image_check_size(w, h, 0, avctx))
|
||||
return -1;
|
||||
|
||||
if (w != avctx->width || h != avctx->height)
|
||||
if (w != avctx->width || h != avctx->height) {
|
||||
if (c->prev.data[0])
|
||||
avctx->release_buffer(avctx, &c->prev);
|
||||
avcodec_set_dimensions(avctx, w, h);
|
||||
}
|
||||
|
||||
maxcnt = w * h;
|
||||
|
||||
out = av_realloc(c->cur, w * h * 2);
|
||||
if (!out)
|
||||
return -1;
|
||||
c->cur = out;
|
||||
|
||||
prev = av_realloc(c->prev, w * h * 2);
|
||||
if (!prev)
|
||||
return -1;
|
||||
c->prev = prev;
|
||||
c->cur.reference = 3;
|
||||
if ((res = avctx->get_buffer(avctx, &c->cur)) < 0)
|
||||
return res;
|
||||
out = (uint16_t *) c->cur.data[0];
|
||||
if (c->prev.data[0]) {
|
||||
prev = (uint16_t *) c->prev.data[0];
|
||||
} else {
|
||||
prev = NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
offsets[i] = -1;
|
||||
@@ -80,6 +90,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
out[outcnt++] = code; // rgb555 pixel coded directly
|
||||
} else {
|
||||
int count;
|
||||
int inp_off;
|
||||
uint16_t *inp;
|
||||
|
||||
if ((code & 0x6000) == 0x6000) {
|
||||
@@ -101,7 +112,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
if (maxcnt - start < count)
|
||||
break;
|
||||
|
||||
inp = prev + start;
|
||||
if (!prev) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Frame reference does not exist\n");
|
||||
break;
|
||||
}
|
||||
|
||||
inp = prev;
|
||||
inp_off = start;
|
||||
} else {
|
||||
// copy from earlier in this frame
|
||||
int offset = (code & 0x1FFF) + 1;
|
||||
@@ -119,27 +137,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
if (outcnt < offset)
|
||||
break;
|
||||
|
||||
inp = out + outcnt - offset;
|
||||
inp = out;
|
||||
inp_off = outcnt - offset;
|
||||
}
|
||||
|
||||
if (maxcnt - outcnt < count)
|
||||
break;
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
for (i = inp_off; i < count + inp_off; i++) {
|
||||
out[outcnt++] = inp[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (outcnt - maxcnt)
|
||||
av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt);
|
||||
|
||||
c->pic.data[0] = (uint8_t *)c->cur;
|
||||
c->pic.linesize[0] = w * 2;
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = c->pic;
|
||||
*(AVFrame*)data = c->cur;
|
||||
|
||||
FFSWAP(uint16_t *, c->cur, c->prev);
|
||||
if (c->prev.data[0])
|
||||
avctx->release_buffer(avctx, &c->prev);
|
||||
FFSWAP(AVFrame, c->cur, c->prev);
|
||||
|
||||
return avpkt->size;
|
||||
}
|
||||
@@ -150,18 +169,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
|
||||
c->avctx = avctx;
|
||||
avctx->pix_fmt = PIX_FMT_RGB555;
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
avctx->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
KgvContext * const c = avctx->priv_data;
|
||||
|
||||
av_freep(&c->cur);
|
||||
av_freep(&c->prev);
|
||||
|
||||
decode_flush(avctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -173,5 +188,6 @@ AVCodec ff_kgv1_decoder = {
|
||||
.init = decode_init,
|
||||
.close = decode_end,
|
||||
.decode = decode_frame,
|
||||
.flush = decode_flush,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"),
|
||||
};
|
||||
|
@@ -33,6 +33,7 @@
|
||||
#define KMVC_KEYFRAME 0x80
|
||||
#define KMVC_PALETTE 0x40
|
||||
#define KMVC_METHOD 0x0F
|
||||
#define MAX_PALSIZE 256
|
||||
|
||||
/*
|
||||
* Decoder context
|
||||
@@ -43,7 +44,7 @@ typedef struct KmvcContext {
|
||||
|
||||
int setpal;
|
||||
int palsize;
|
||||
uint32_t pal[256];
|
||||
uint32_t pal[MAX_PALSIZE];
|
||||
uint8_t *cur, *prev;
|
||||
uint8_t *frm0, *frm1;
|
||||
GetByteContext g;
|
||||
@@ -380,10 +381,10 @@ static av_cold int decode_init(AVCodecContext * avctx)
|
||||
c->palsize = 127;
|
||||
} else {
|
||||
c->palsize = AV_RL16(avctx->extradata + 10);
|
||||
if (c->palsize > 255U) {
|
||||
if (c->palsize >= (unsigned)MAX_PALSIZE) {
|
||||
c->palsize = 127;
|
||||
av_log(NULL, AV_LOG_ERROR, "palsize too big\n");
|
||||
return -1;
|
||||
av_log(avctx, AV_LOG_ERROR, "KMVC palette too large\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -229,8 +229,29 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
|
||||
len = mszh_dlen;
|
||||
}
|
||||
break;
|
||||
case COMP_MSZH_NOCOMP:
|
||||
case COMP_MSZH_NOCOMP: {
|
||||
int bppx2;
|
||||
switch (c->imgtype) {
|
||||
case IMGTYPE_YUV111:
|
||||
case IMGTYPE_RGB24:
|
||||
bppx2 = 6;
|
||||
break;
|
||||
case IMGTYPE_YUV422:
|
||||
case IMGTYPE_YUV211:
|
||||
bppx2 = 4;
|
||||
break;
|
||||
case IMGTYPE_YUV411:
|
||||
case IMGTYPE_YUV420:
|
||||
bppx2 = 3;
|
||||
break;
|
||||
default:
|
||||
bppx2 = 0; // will error out below
|
||||
break;
|
||||
}
|
||||
if (len < ((width * height * bppx2) >> 1))
|
||||
return AVERROR_INVALIDDATA;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown MSZH compression in frame decoder.\n");
|
||||
return -1;
|
||||
@@ -462,7 +483,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
if (avctx->extradata_size < 8) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Extradata size too small.\n");
|
||||
return 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* Check codec type */
|
||||
@@ -511,7 +532,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "Unsupported image format %d.\n", c->imgtype);
|
||||
return 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* Detect compression method */
|
||||
@@ -528,7 +549,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "Unsupported compression format for MSZH (%d).\n", c->compression);
|
||||
return 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
break;
|
||||
#if CONFIG_ZLIB_DECODER
|
||||
@@ -546,7 +567,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
default:
|
||||
if (c->compression < Z_NO_COMPRESSION || c->compression > Z_BEST_COMPRESSION) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unsupported compression level for ZLIB: (%d).\n", c->compression);
|
||||
return 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
av_log(avctx, AV_LOG_DEBUG, "Compression level for ZLIB: (%d).\n", c->compression);
|
||||
}
|
||||
@@ -554,14 +575,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
#endif
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown codec in compression switch.\n");
|
||||
return 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* Allocate decompression buffer */
|
||||
if (c->decomp_size) {
|
||||
if ((c->decomp_buf = av_malloc(max_decomp_size)) == NULL) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
|
||||
return 1;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -587,7 +608,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
if (zret != Z_OK) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
|
||||
av_freep(&c->decomp_buf);
|
||||
return 1;
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@@ -462,6 +462,7 @@ static av_cold int X264_init(AVCodecContext *avctx)
|
||||
PARSE_X264_OPT("psy-rd", psy_rd);
|
||||
PARSE_X264_OPT("deblock", deblock);
|
||||
PARSE_X264_OPT("partitions", partitions);
|
||||
PARSE_X264_OPT("stats", stats);
|
||||
if (x4->psy >= 0)
|
||||
x4->params.analyse.b_psy = x4->psy;
|
||||
if (x4->rc_lookahead >= 0)
|
||||
@@ -647,7 +648,8 @@ static const AVOption options[] = {
|
||||
{ "spatial", NULL, 0, AV_OPT_TYPE_CONST, { X264_DIRECT_PRED_SPATIAL }, 0, 0, VE, "direct-pred" },
|
||||
{ "temporal", NULL, 0, AV_OPT_TYPE_CONST, { X264_DIRECT_PRED_TEMPORAL }, 0, 0, VE, "direct-pred" },
|
||||
{ "auto", NULL, 0, AV_OPT_TYPE_CONST, { X264_DIRECT_PRED_AUTO }, 0, 0, VE, "direct-pred" },
|
||||
{ "slice-max-size","Constant quantization parameter rate control method",OFFSET(slice_max_size), AV_OPT_TYPE_INT, {-1 }, -1, INT_MAX, VE },
|
||||
{ "slice-max-size","Limit the size of each slice in bytes", OFFSET(slice_max_size),AV_OPT_TYPE_INT, {-1 }, -1, INT_MAX, VE },
|
||||
{ "stats", "Filename for 2 pass stats", OFFSET(stats), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
|
@@ -59,6 +59,9 @@ read_header:
|
||||
s->restart_count = 0;
|
||||
s->mjpb_skiptosod = 0;
|
||||
|
||||
if (buf_end - buf_ptr >= 1 << 28)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
init_get_bits(&hgb, buf_ptr, /*buf_size*/(buf_end - buf_ptr)*8);
|
||||
|
||||
skip_bits(&hgb, 32); /* reserved zeros */
|
||||
@@ -66,7 +69,7 @@ read_header:
|
||||
if (get_bits_long(&hgb, 32) != MKBETAG('m','j','p','g'))
|
||||
{
|
||||
av_log(avctx, AV_LOG_WARNING, "not mjpeg-b (bad fourcc)\n");
|
||||
return 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
field_size = get_bits_long(&hgb, 32); /* field size */
|
||||
@@ -111,7 +114,8 @@ read_header:
|
||||
av_log(avctx, AV_LOG_DEBUG, "sod offs: 0x%x\n", sod_offs);
|
||||
if (sos_offs)
|
||||
{
|
||||
init_get_bits(&s->gb, buf_ptr+sos_offs, FFMIN(field_size, buf_end - (buf_ptr+sos_offs))*8);
|
||||
init_get_bits(&s->gb, buf_ptr + sos_offs,
|
||||
8 * FFMIN(field_size, buf_end - buf_ptr - sos_offs));
|
||||
s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
|
||||
s->start_code = SOS;
|
||||
if (ff_mjpeg_decode_sos(s, NULL, NULL) < 0 &&
|
||||
@@ -145,7 +149,7 @@ read_header:
|
||||
picture->quality*= FF_QP2LAMBDA;
|
||||
}
|
||||
|
||||
return buf_ptr - buf;
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
AVCodec ff_mjpegb_decoder = {
|
||||
|
@@ -984,9 +984,9 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
|
||||
if (s->restart_interval && !s->restart_count)
|
||||
s->restart_count = s->restart_interval;
|
||||
|
||||
if (get_bits_count(&s->gb)>s->gb.size_in_bits) {
|
||||
if (get_bits_left(&s->gb) < 0) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
|
||||
get_bits_count(&s->gb) - s->gb.size_in_bits);
|
||||
-get_bits_left(&s->gb));
|
||||
return -1;
|
||||
}
|
||||
for (i = 0; i < nb_components; i++) {
|
||||
@@ -1269,7 +1269,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
|
||||
len = get_bits(&s->gb, 16);
|
||||
if (len < 5)
|
||||
return -1;
|
||||
if (8 * len + get_bits_count(&s->gb) > s->gb.size_in_bits)
|
||||
if (8 * len > get_bits_left(&s->gb))
|
||||
return -1;
|
||||
|
||||
id = get_bits_long(&s->gb, 32);
|
||||
@@ -1407,8 +1407,7 @@ out:
|
||||
static int mjpeg_decode_com(MJpegDecodeContext *s)
|
||||
{
|
||||
int len = get_bits(&s->gb, 16);
|
||||
if (len >= 2 &&
|
||||
8 * len - 16 + get_bits_count(&s->gb) <= s->gb.size_in_bits) {
|
||||
if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
|
||||
char *cbuf = av_malloc(len - 1);
|
||||
if (cbuf) {
|
||||
int i;
|
||||
@@ -1574,6 +1573,10 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
/* EOF */
|
||||
if (start_code < 0) {
|
||||
goto the_end;
|
||||
} else if (unescaped_buf_size > (1U<<29)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "MJPEG packet 0x%x too big (0x%x/0x%x), corrupt data?\n",
|
||||
start_code, unescaped_buf_ptr, buf_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else {
|
||||
av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
|
||||
start_code, buf_end - buf_ptr);
|
||||
|
@@ -33,6 +33,7 @@
|
||||
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
|
||||
#define MM_PREAMBLE_SIZE 6
|
||||
|
||||
@@ -48,6 +49,7 @@ typedef struct MmContext {
|
||||
AVCodecContext *avctx;
|
||||
AVFrame frame;
|
||||
int palette[AVPALETTE_COUNT];
|
||||
GetByteContext gb;
|
||||
} MmContext;
|
||||
|
||||
static av_cold int mm_decode_init(AVCodecContext *avctx)
|
||||
@@ -64,40 +66,40 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mm_decode_pal(MmContext *s, const uint8_t *buf, const uint8_t *buf_end)
|
||||
static int mm_decode_pal(MmContext *s)
|
||||
{
|
||||
int i;
|
||||
buf += 4;
|
||||
for (i=0; i<128 && buf+2<buf_end; i++) {
|
||||
s->palette[i] = 0xFF << 24 | AV_RB24(buf);
|
||||
|
||||
bytestream2_skip(&s->gb, 4);
|
||||
for (i = 0; i < 128; i++) {
|
||||
s->palette[i] = 0xFF << 24 | bytestream2_get_be24(&s->gb);
|
||||
s->palette[i+128] = s->palette[i]<<2;
|
||||
buf += 3;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param half_horiz Half horizontal resolution (0 or 1)
|
||||
* @param half_vert Half vertical resolution (0 or 1)
|
||||
*/
|
||||
static void mm_decode_intra(MmContext * s, int half_horiz, int half_vert, const uint8_t *buf, int buf_size)
|
||||
static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
|
||||
{
|
||||
int i, x, y;
|
||||
i=0; x=0; y=0;
|
||||
|
||||
while(i<buf_size) {
|
||||
while (bytestream2_get_bytes_left(&s->gb) > 0) {
|
||||
int run_length, color;
|
||||
|
||||
if (y >= s->avctx->height)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
if (buf[i] & 0x80) {
|
||||
color = bytestream2_get_byte(&s->gb);
|
||||
if (color & 0x80) {
|
||||
run_length = 1;
|
||||
color = buf[i];
|
||||
i++;
|
||||
}else{
|
||||
run_length = (buf[i] & 0x7f) + 2;
|
||||
color = buf[i+1];
|
||||
i+=2;
|
||||
run_length = (color & 0x7f) + 2;
|
||||
color = bytestream2_get_byte(&s->gb);
|
||||
}
|
||||
|
||||
if (half_horiz)
|
||||
@@ -115,23 +117,28 @@ static void mm_decode_intra(MmContext * s, int half_horiz, int half_vert, const
|
||||
y += 1 + half_vert;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* @param half_horiz Half horizontal resolution (0 or 1)
|
||||
* @param half_vert Half vertical resolution (0 or 1)
|
||||
*/
|
||||
static void mm_decode_inter(MmContext * s, int half_horiz, int half_vert, const uint8_t *buf, int buf_size)
|
||||
static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert)
|
||||
{
|
||||
const int data_ptr = 2 + AV_RL16(&buf[0]);
|
||||
int d, r, y;
|
||||
d = data_ptr; r = 2; y = 0;
|
||||
int data_off = bytestream2_get_le16(&s->gb), y = 0;
|
||||
GetByteContext data_ptr;
|
||||
|
||||
while(r < data_ptr) {
|
||||
if (bytestream2_get_bytes_left(&s->gb) < data_off)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
bytestream2_init(&data_ptr, s->gb.buffer + data_off, bytestream2_get_bytes_left(&s->gb) - data_off);
|
||||
while (s->gb.buffer < data_ptr.buffer_start) {
|
||||
int i, j;
|
||||
int length = buf[r] & 0x7f;
|
||||
int x = buf[r+1] + ((buf[r] & 0x80) << 1);
|
||||
r += 2;
|
||||
int length = bytestream2_get_byte(&s->gb);
|
||||
int x = bytestream2_get_byte(&s->gb) + ((length & 0x80) << 1);
|
||||
length &= 0x7F;
|
||||
|
||||
if (length==0) {
|
||||
y += x;
|
||||
@@ -139,13 +146,14 @@ static void mm_decode_inter(MmContext * s, int half_horiz, int half_vert, const
|
||||
}
|
||||
|
||||
if (y + half_vert >= s->avctx->height)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
for(i=0; i<length; i++) {
|
||||
int replace_array = bytestream2_get_byte(&s->gb);
|
||||
for(j=0; j<8; j++) {
|
||||
int replace = (buf[r+i] >> (7-j)) & 1;
|
||||
int replace = (replace_array >> (7-j)) & 1;
|
||||
if (replace) {
|
||||
int color = buf[d];
|
||||
int color = bytestream2_get_byte(&data_ptr);
|
||||
s->frame.data[0][y*s->frame.linesize[0] + x] = color;
|
||||
if (half_horiz)
|
||||
s->frame.data[0][y*s->frame.linesize[0] + x + 1] = color;
|
||||
@@ -154,15 +162,15 @@ static void mm_decode_inter(MmContext * s, int half_horiz, int half_vert, const
|
||||
if (half_horiz)
|
||||
s->frame.data[0][(y+1)*s->frame.linesize[0] + x + 1] = color;
|
||||
}
|
||||
d++;
|
||||
}
|
||||
x += 1 + half_horiz;
|
||||
}
|
||||
}
|
||||
|
||||
r += length;
|
||||
y += 1 + half_vert;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mm_decode_frame(AVCodecContext *avctx,
|
||||
@@ -172,12 +180,14 @@ static int mm_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
MmContext *s = avctx->priv_data;
|
||||
const uint8_t *buf_end = buf+buf_size;
|
||||
int type;
|
||||
int type, res;
|
||||
|
||||
if (buf_size < MM_PREAMBLE_SIZE)
|
||||
return AVERROR_INVALIDDATA;
|
||||
type = AV_RL16(&buf[0]);
|
||||
buf += MM_PREAMBLE_SIZE;
|
||||
buf_size -= MM_PREAMBLE_SIZE;
|
||||
bytestream2_init(&s->gb, buf, buf_size);
|
||||
|
||||
if (avctx->reget_buffer(avctx, &s->frame) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
|
||||
@@ -185,16 +195,19 @@ static int mm_decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
switch(type) {
|
||||
case MM_TYPE_PALETTE : mm_decode_pal(s, buf, buf_end); return buf_size;
|
||||
case MM_TYPE_INTRA : mm_decode_intra(s, 0, 0, buf, buf_size); break;
|
||||
case MM_TYPE_INTRA_HH : mm_decode_intra(s, 1, 0, buf, buf_size); break;
|
||||
case MM_TYPE_INTRA_HHV : mm_decode_intra(s, 1, 1, buf, buf_size); break;
|
||||
case MM_TYPE_INTER : mm_decode_inter(s, 0, 0, buf, buf_size); break;
|
||||
case MM_TYPE_INTER_HH : mm_decode_inter(s, 1, 0, buf, buf_size); break;
|
||||
case MM_TYPE_INTER_HHV : mm_decode_inter(s, 1, 1, buf, buf_size); break;
|
||||
default :
|
||||
return -1;
|
||||
case MM_TYPE_PALETTE : res = mm_decode_pal(s); return buf_size;
|
||||
case MM_TYPE_INTRA : res = mm_decode_intra(s, 0, 0); break;
|
||||
case MM_TYPE_INTRA_HH : res = mm_decode_intra(s, 1, 0); break;
|
||||
case MM_TYPE_INTRA_HHV : res = mm_decode_intra(s, 1, 1); break;
|
||||
case MM_TYPE_INTER : res = mm_decode_inter(s, 0, 0); break;
|
||||
case MM_TYPE_INTER_HH : res = mm_decode_inter(s, 1, 0); break;
|
||||
case MM_TYPE_INTER_HHV : res = mm_decode_inter(s, 1, 1); break;
|
||||
default:
|
||||
res = AVERROR_INVALIDDATA;
|
||||
break;
|
||||
}
|
||||
if (res < 0)
|
||||
return res;
|
||||
|
||||
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
|
||||
|
||||
|
@@ -78,13 +78,13 @@ void ff_mpc_dequantize_and_synth(MPCContext * c, int maxband, void *data, int ch
|
||||
for(ch = 0; ch < 2; ch++){
|
||||
if(bands[i].res[ch]){
|
||||
j = 0;
|
||||
mul = mpc_CC[bands[i].res[ch]] * mpc_SCF[bands[i].scf_idx[ch][0]];
|
||||
mul = mpc_CC[bands[i].res[ch] + 1] * mpc_SCF[bands[i].scf_idx[ch][0]+6];
|
||||
for(; j < 12; j++)
|
||||
c->sb_samples[ch][j][i] = mul * c->Q[ch][j + off];
|
||||
mul = mpc_CC[bands[i].res[ch]] * mpc_SCF[bands[i].scf_idx[ch][1]];
|
||||
mul = mpc_CC[bands[i].res[ch] + 1] * mpc_SCF[bands[i].scf_idx[ch][1]+6];
|
||||
for(; j < 24; j++)
|
||||
c->sb_samples[ch][j][i] = mul * c->Q[ch][j + off];
|
||||
mul = mpc_CC[bands[i].res[ch]] * mpc_SCF[bands[i].scf_idx[ch][2]];
|
||||
mul = mpc_CC[bands[i].res[ch] + 1] * mpc_SCF[bands[i].scf_idx[ch][2]+6];
|
||||
for(; j < 36; j++)
|
||||
c->sb_samples[ch][j][i] = mul * c->Q[ch][j + off];
|
||||
}
|
||||
|
@@ -193,7 +193,7 @@ static int get_scale_idx(GetBitContext *gb, int ref)
|
||||
int t = get_vlc2(gb, dscf_vlc.table, MPC7_DSCF_BITS, 1) - 7;
|
||||
if (t == 8)
|
||||
return get_bits(gb, 6);
|
||||
return ref + t;
|
||||
return av_clip_uintp2(ref + t, 7);
|
||||
}
|
||||
|
||||
static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
|
||||
@@ -235,7 +235,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
|
||||
int t = 4;
|
||||
if(i) t = get_vlc2(&gb, hdr_vlc.table, MPC7_HDR_BITS, 1) - 5;
|
||||
if(t == 4) bands[i].res[ch] = get_bits(&gb, 4);
|
||||
else bands[i].res[ch] = bands[i-1].res[ch] + t;
|
||||
else bands[i].res[ch] = av_clip(bands[i-1].res[ch] + t, 0, 17);
|
||||
}
|
||||
|
||||
if(bands[i].res[0] || bands[i].res[1]){
|
||||
|
@@ -22,13 +22,17 @@
|
||||
#ifndef AVCODEC_MPCDATA_H
|
||||
#define AVCODEC_MPCDATA_H
|
||||
|
||||
static const float mpc_CC[18] = {
|
||||
65536.0000, 21845.3333, 13107.2000, 9362.2857, 7281.7778, 4369.0667, 2114.0645,
|
||||
static const float mpc_CC[18+1] = {
|
||||
111.285962475327f, // 32768/2/255*sqrt(3)
|
||||
65536.0000 /* this value is never used */,
|
||||
21845.3333, 13107.2000, 9362.2857, 7281.7778, 4369.0667, 2114.0645,
|
||||
1040.2539, 516.0315, 257.0039, 128.2505, 64.0626, 32.0156, 16.0039, 8.0010,
|
||||
4.0002, 2.0001, 1.0000
|
||||
};
|
||||
|
||||
static const float mpc_SCF[128] = {
|
||||
static const float mpc_SCF[128+6] = {
|
||||
920.016296386718750000, 766.355773925781250000, 638.359558105468750000,
|
||||
531.741149902343750000, 442.930114746093750000, 368.952209472656250000,
|
||||
307.330047607421875000, 255.999984741210937500, 213.243041992187500000, 177.627334594726562500,
|
||||
147.960128784179687500, 123.247924804687500000, 102.663139343261718750, 85.516410827636718750,
|
||||
71.233520507812500000, 59.336143493652343750, 49.425861358642578125, 41.170787811279296875,
|
||||
|
@@ -305,7 +305,7 @@ const uint8_t ff_mpeg12_mbMotionVectorTable[17][2] = {
    { 0xc, 10 },
};

const AVRational avpriv_frame_rate_tab[] = {
const AVRational avpriv_frame_rate_tab[16] = {
    { 0, 0},
    {24000, 1001},
    { 24, 1},
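
Sizing avpriv_frame_rate_tab to exactly 16 entries matches the 4-bit frame_rate_code field of MPEG-1/2 sequence headers, so any value the bitstream can encode indexes a valid slot. A hedged sketch of the lookup (function name and defensive mask are illustrative; assumes libavutil/rational.h for AVRational):

    /* frame_rate_code is parsed from 4 bits, i.e. 0..15; entry 0 is the
     * forbidden/reserved code and stays {0, 0} in the table. */
    static AVRational frame_rate_from_code_sketch(int frame_rate_code)
    {
        extern const AVRational avpriv_frame_rate_tab[16];
        return avpriv_frame_rate_tab[frame_rate_code & 0xf];
    }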
@@ -42,6 +42,7 @@
|
||||
|
||||
#define BACKSTEP_SIZE 512
|
||||
#define EXTRABYTES 24
|
||||
#define LAST_BUF_SIZE 2 * BACKSTEP_SIZE + EXTRABYTES
|
||||
|
||||
/* layer 3 "granule" */
|
||||
typedef struct GranuleDef {
|
||||
@@ -65,7 +66,7 @@ typedef struct GranuleDef {
|
||||
|
||||
typedef struct MPADecodeContext {
|
||||
MPA_DECODE_HEADER
|
||||
uint8_t last_buf[2 * BACKSTEP_SIZE + EXTRABYTES];
|
||||
uint8_t last_buf[LAST_BUF_SIZE];
|
||||
int last_buf_size;
|
||||
/* next header (used in free format parsing) */
|
||||
uint32_t free_format_next_header;
|
||||
@@ -1380,18 +1381,18 @@ static int mp_decode_layer3(MPADecodeContext *s)
|
||||
if (!s->adu_mode) {
|
||||
int skip;
|
||||
const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
|
||||
int extrasize = av_clip(get_bits_left(&s->gb) >> 3, 0,
|
||||
FFMAX(0, LAST_BUF_SIZE - s->last_buf_size));
|
||||
assert((get_bits_count(&s->gb) & 7) == 0);
|
||||
/* now we get bits from the main_data_begin offset */
|
||||
av_dlog(s->avctx, "seekback: %d\n", main_data_begin);
|
||||
//av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
|
||||
|
||||
if (s->gb.size_in_bits > get_bits_count(&s->gb))
|
||||
memcpy(s->last_buf + s->last_buf_size, ptr,
|
||||
FFMIN(EXTRABYTES, (s->gb.size_in_bits - get_bits_count(&s->gb))>>3));
|
||||
memcpy(s->last_buf + s->last_buf_size, ptr, extrasize);
|
||||
s->in_gb = s->gb;
|
||||
init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
|
||||
#if !UNCHECKED_BITSTREAM_READER
|
||||
s->gb.size_in_bits_plus8 += EXTRABYTES * 8;
|
||||
s->gb.size_in_bits_plus8 += extrasize * 8;
|
||||
#endif
|
||||
skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
|
||||
}
|
||||
@@ -1921,6 +1922,10 @@ static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
|
||||
m = s->mp3decctx[fr];
|
||||
assert(m != NULL);
|
||||
|
||||
if (fsize < HEADER_SIZE) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
|
||||
|
||||
if (ff_mpa_check_header(header) < 0) // Bad header, discard block
|
||||
|
@@ -647,7 +647,7 @@ AVCodecContext *avcodec_alloc_context(void){

int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
{
    if (dest->codec) { // check that the dest context is uninitialized
    if (avcodec_is_open(dest)) { // check that the dest context is uninitialized
        av_log(dest, AV_LOG_ERROR,
               "Tried to copy AVCodecContext %p into already-initialized %p\n",
               src, dest);

@@ -158,7 +158,7 @@ static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,

    /* There's always an even number of channels in the source */
    num_source_channels = FFALIGN(avctx->channels, 2);
    sample_size = (num_source_channels * avctx->bits_per_coded_sample) >> 3;
    sample_size = (num_source_channels * (avctx->sample_fmt == AV_SAMPLE_FMT_S16 ? 16 : 24)) >> 3;
    samples = buf_size / sample_size;

    /* get output buffer */
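
The pcm_bluray change derives the stored sample width from the negotiated sample format instead of bits_per_coded_sample; Blu-ray 20-bit LPCM is packed in 24-bit containers, so using 20 in the byte arithmetic would miscount the packet. A worked example of that arithmetic, assuming a stereo stream stored as 24-bit words (the FFALIGN definition below mirrors libavutil's):

    #include <stdio.h>
    #define FFALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int num_source_channels = FFALIGN(2, 2);            /* = 2 */
        int sample_size = (num_source_channels * 24) >> 3;  /* = 6 bytes per sample pair */
        int samples = 6144 / sample_size;                    /* 6144-byte payload -> 1024 samples */
        printf("%d bytes per sample pair, %d samples\n", sample_size, samples);
        return 0;
    }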
@@ -26,6 +26,7 @@
#include <zlib.h>

#include "avcodec.h"
#include "bytestream.h"

#define PNG_COLOR_MASK_PALETTE 1
#define PNG_COLOR_MASK_COLOR 2
@@ -73,9 +74,7 @@ int ff_png_pass_row_size(int pass, int bits_per_pixel, int width);
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp);

typedef struct PNGDecContext {
const uint8_t *bytestream;
const uint8_t *bytestream_start;
const uint8_t *bytestream_end;
GetByteContext gb;
AVFrame picture1, picture2;
AVFrame *current_picture, *last_picture;
@@ -361,12 +361,9 @@ static void png_handle_row(PNGDecContext *s)
static int png_decode_idat(PNGDecContext *s, int length)
{
int ret;
s->zstream.avail_in = length;
s->zstream.next_in = s->bytestream;
s->bytestream += length;

if(s->bytestream > s->bytestream_end)
return -1;
s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
s->zstream.next_in = s->gb.buffer;
bytestream2_skip(&s->gb, length);

/* decode one line if possible */
while (s->zstream.avail_in > 0) {
@@ -402,15 +399,13 @@ static int decode_frame(AVCodecContext *avctx,
avctx->coded_frame= s->current_picture;
p = s->current_picture;

s->bytestream_start=
s->bytestream= buf;
s->bytestream_end= buf + buf_size;

/* check signature */
if (memcmp(s->bytestream, ff_pngsig, 8) != 0 &&
memcmp(s->bytestream, ff_mngsig, 8) != 0)
if (buf_size < 8 ||
memcmp(buf, ff_pngsig, 8) != 0 &&
memcmp(buf, ff_mngsig, 8) != 0)
return -1;
s->bytestream+= 8;

bytestream2_init(&s->gb, buf + 8, buf_size - 8);
s->y=
s->state=0;
// memset(s, 0, sizeof(PNGDecContext));
@@ -422,14 +417,12 @@ static int decode_frame(AVCodecContext *avctx,
if (ret != Z_OK)
return -1;
for(;;) {
int tag32;
if (s->bytestream >= s->bytestream_end)
if (bytestream2_get_bytes_left(&s->gb) <= 0)
goto fail;
length = bytestream_get_be32(&s->bytestream);
length = bytestream2_get_be32(&s->gb);
if (length > 0x7fffffff)
goto fail;
tag32 = bytestream_get_be32(&s->bytestream);
tag = av_bswap32(tag32);
tag = bytestream2_get_le32(&s->gb);
if (avctx->debug & FF_DEBUG_STARTCODE)
av_log(avctx, AV_LOG_DEBUG, "png: tag=%c%c%c%c length=%u\n",
(tag & 0xff),
@@ -440,18 +433,18 @@ static int decode_frame(AVCodecContext *avctx,
case MKTAG('I', 'H', 'D', 'R'):
if (length != 13)
goto fail;
s->width = bytestream_get_be32(&s->bytestream);
s->height = bytestream_get_be32(&s->bytestream);
s->width = bytestream2_get_be32(&s->gb);
s->height = bytestream2_get_be32(&s->gb);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
goto fail;
}
s->bit_depth = *s->bytestream++;
s->color_type = *s->bytestream++;
s->compression_type = *s->bytestream++;
s->filter_type = *s->bytestream++;
s->interlace_type = *s->bytestream++;
s->bytestream += 4; /* crc */
s->bit_depth = bytestream2_get_byte(&s->gb);
s->color_type = bytestream2_get_byte(&s->gb);
s->compression_type = bytestream2_get_byte(&s->gb);
s->filter_type = bytestream2_get_byte(&s->gb);
s->interlace_type = bytestream2_get_byte(&s->gb);
bytestream2_skip(&s->gb, 4); /* crc */
s->state |= PNG_IHDR;
if (avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
@@ -551,7 +544,7 @@ static int decode_frame(AVCodecContext *avctx,
s->state |= PNG_IDAT;
if (png_decode_idat(s, length) < 0)
goto fail;
s->bytestream += 4; /* crc */
bytestream2_skip(&s->gb, 4); /* crc */
break;
case MKTAG('P', 'L', 'T', 'E'):
{
@@ -562,16 +555,16 @@ static int decode_frame(AVCodecContext *avctx,
/* read the palette */
n = length / 3;
for(i=0;i<n;i++) {
r = *s->bytestream++;
g = *s->bytestream++;
b = *s->bytestream++;
r = bytestream2_get_byte(&s->gb);
g = bytestream2_get_byte(&s->gb);
b = bytestream2_get_byte(&s->gb);
s->palette[i] = (0xff << 24) | (r << 16) | (g << 8) | b;
}
for(;i<256;i++) {
s->palette[i] = (0xff << 24);
}
s->state |= PNG_PLTE;
s->bytestream += 4; /* crc */
bytestream2_skip(&s->gb, 4); /* crc */
}
break;
case MKTAG('t', 'R', 'N', 'S'):
@@ -584,21 +577,21 @@ static int decode_frame(AVCodecContext *avctx,
!(s->state & PNG_PLTE))
goto skip_tag;
for(i=0;i<length;i++) {
v = *s->bytestream++;
v = bytestream2_get_byte(&s->gb);
s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
}
s->bytestream += 4; /* crc */
bytestream2_skip(&s->gb, 4); /* crc */
}
break;
case MKTAG('I', 'E', 'N', 'D'):
if (!(s->state & PNG_ALLIMAGE))
goto fail;
s->bytestream += 4; /* crc */
bytestream2_skip(&s->gb, 4); /* crc */
goto exit_loop;
default:
/* skip tag */
skip_tag:
s->bytestream += length + 4;
bytestream2_skip(&s->gb, length + 4);
break;
}
}
@@ -666,7 +659,7 @@ static int decode_frame(AVCodecContext *avctx,
*picture= *s->current_picture;
*data_size = sizeof(AVFrame);

ret = s->bytestream - s->bytestream_start;
ret = bytestream2_tell(&s->gb);
the_end:
inflateEnd(&s->zstream);
av_free(crow_buf_base);
@@ -302,7 +302,7 @@ static av_always_inline void decode_dc_coeffs(GetBitContext *gb, DCTELEM *out,
code = 5;
sign = 0;
for (i = 1; i < blocks_per_slice; i++, out += 64) {
DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6)]);
DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)]);
if(code) sign ^= -(code & 1);
else sign = 0;
prev_dc += (((code + 1) >> 1) ^ sign) - sign;
@@ -203,6 +203,8 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size,
filled = 0;
dst -= stride;
height--;
if(height < 0)
break;
}
}
} else if(code >= 0xC0) { /* copy code: 0xC0..0xDF */
@@ -214,6 +216,8 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size,
filled = 0;
dst -= stride;
height--;
if(height < 0)
break;
}
}
size -= code + 1;
@@ -424,7 +424,7 @@ static av_cold int qtrle_decode_init(AVCodecContext *avctx)
default:
av_log (avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n",
avctx->bits_per_coded_sample);
break;
return AVERROR_INVALIDDATA;
}

avcodec_get_frame_defaults(&s->frame);
@@ -155,6 +155,9 @@ static int raw_decode(AVCodecContext *avctx,
frame->top_field_first = context->tff;
}

if(buf_size < context->length - (avctx->pix_fmt==PIX_FMT_PAL8 ? 256*4 : 0))
return -1;

//2bpp and 4bpp raw in avi and mov (yes this is ugly ...)
if (context->buffer) {
int i;
@@ -182,9 +185,6 @@ static int raw_decode(AVCodecContext *avctx,
avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
buf += buf_size - context->length;

if(buf_size < context->length - (avctx->pix_fmt==PIX_FMT_PAL8 ? 256*4 : 0))
return -1;

avpicture_fill(picture, buf, avctx->pix_fmt, avctx->width, avctx->height);
if((avctx->pix_fmt==PIX_FMT_PAL8 && buf_size < context->length) ||
(avctx->pix_fmt!=PIX_FMT_PAL8 &&
@@ -24,6 +24,7 @@

#include "libavutil/lfg.h"
#include "avcodec.h"
#include "bytestream.h"
#include "dsputil.h"

typedef struct {
@@ -53,8 +54,7 @@ typedef struct RoqContext {
roq_cell cb2x2[256];
roq_qcell cb4x4[256];

const unsigned char *buf;
int size;
GetByteContext gb;
int width, height;

/* Encoder only data */
@@ -38,16 +38,15 @@ static void roqvideo_decode_frame(RoqContext *ri)
unsigned int chunk_id = 0, chunk_arg = 0;
unsigned long chunk_size = 0;
int i, j, k, nv1, nv2, vqflg = 0, vqflg_pos = -1;
int vqid, bpos, xpos, ypos, xp, yp, x, y, mx, my;
int vqid, xpos, ypos, xp, yp, x, y, mx, my;
int frame_stats[2][4] = {{0},{0}};
roq_qcell *qcell;
const unsigned char *buf = ri->buf;
const unsigned char *buf_end = ri->buf + ri->size;
int64_t chunk_start;

while (buf < buf_end) {
chunk_id = bytestream_get_le16(&buf);
chunk_size = bytestream_get_le32(&buf);
chunk_arg = bytestream_get_le16(&buf);
while (bytestream2_get_bytes_left(&ri->gb) > 0) {
chunk_id = bytestream2_get_le16(&ri->gb);
chunk_size = bytestream2_get_le32(&ri->gb);
chunk_arg = bytestream2_get_le16(&ri->gb);

if(chunk_id == RoQ_QUAD_VQ)
break;
@@ -57,33 +56,36 @@ static void roqvideo_decode_frame(RoqContext *ri)
if((nv2 = chunk_arg & 0xff) == 0 && nv1 * 6 < chunk_size)
nv2 = 256;
for(i = 0; i < nv1; i++) {
ri->cb2x2[i].y[0] = *buf++;
ri->cb2x2[i].y[1] = *buf++;
ri->cb2x2[i].y[2] = *buf++;
ri->cb2x2[i].y[3] = *buf++;
ri->cb2x2[i].u = *buf++;
ri->cb2x2[i].v = *buf++;
ri->cb2x2[i].y[0] = bytestream2_get_byte(&ri->gb);
ri->cb2x2[i].y[1] = bytestream2_get_byte(&ri->gb);
ri->cb2x2[i].y[2] = bytestream2_get_byte(&ri->gb);
ri->cb2x2[i].y[3] = bytestream2_get_byte(&ri->gb);
ri->cb2x2[i].u = bytestream2_get_byte(&ri->gb);
ri->cb2x2[i].v = bytestream2_get_byte(&ri->gb);
}
for(i = 0; i < nv2; i++)
for(j = 0; j < 4; j++)
ri->cb4x4[i].idx[j] = *buf++;
ri->cb4x4[i].idx[j] = bytestream2_get_byte(&ri->gb);
}
}

bpos = xpos = ypos = 0;
if (chunk_size > buf_end - buf) {
chunk_start = bytestream2_tell(&ri->gb);
xpos = ypos = 0;

if (chunk_size > bytestream2_get_bytes_left(&ri->gb)) {
av_log(ri->avctx, AV_LOG_ERROR, "Chunk does not fit in input buffer\n");
chunk_size = buf_end - buf;
chunk_size = bytestream2_get_bytes_left(&ri->gb);
}
while(bpos < chunk_size) {

while (bytestream2_tell(&ri->gb) < chunk_start + chunk_size) {
for (yp = ypos; yp < ypos + 16; yp += 8)
for (xp = xpos; xp < xpos + 16; xp += 8) {
if (bpos >= chunk_size) {
if (bytestream2_tell(&ri->gb) >= chunk_start + chunk_size) {
av_log(ri->avctx, AV_LOG_ERROR, "Input buffer too small\n");
return;
}
if (vqflg_pos < 0) {
vqflg = buf[bpos++]; vqflg |= (buf[bpos++] << 8);
vqflg = bytestream2_get_le16(&ri->gb);
vqflg_pos = 7;
}
vqid = (vqflg >> (vqflg_pos * 2)) & 0x3;
@@ -93,13 +95,15 @@ static void roqvideo_decode_frame(RoqContext *ri)
switch(vqid) {
case RoQ_ID_MOT:
break;
case RoQ_ID_FCC:
mx = 8 - (buf[bpos] >> 4) - ((signed char) (chunk_arg >> 8));
my = 8 - (buf[bpos++] & 0xf) - ((signed char) chunk_arg);
case RoQ_ID_FCC: {
int byte = bytestream2_get_byte(&ri->gb);
mx = 8 - (byte >> 4) - ((signed char) (chunk_arg >> 8));
my = 8 - (byte & 0xf) - ((signed char) chunk_arg);
ff_apply_motion_8x8(ri, xp, yp, mx, my);
break;
}
case RoQ_ID_SLD:
qcell = ri->cb4x4 + buf[bpos++];
qcell = ri->cb4x4 + bytestream2_get_byte(&ri->gb);
ff_apply_vector_4x4(ri, xp, yp, ri->cb2x2 + qcell->idx[0]);
ff_apply_vector_4x4(ri, xp+4, yp, ri->cb2x2 + qcell->idx[1]);
ff_apply_vector_4x4(ri, xp, yp+4, ri->cb2x2 + qcell->idx[2]);
@@ -111,13 +115,12 @@ static void roqvideo_decode_frame(RoqContext *ri)
if(k & 0x01) x += 4;
if(k & 0x02) y += 4;

if (bpos >= chunk_size) {
if (bytestream2_tell(&ri->gb) >= chunk_start + chunk_size) {
av_log(ri->avctx, AV_LOG_ERROR, "Input buffer too small\n");
return;
}
if (vqflg_pos < 0) {
vqflg = buf[bpos++];
vqflg |= (buf[bpos++] << 8);
vqflg = bytestream2_get_le16(&ri->gb);
vqflg_pos = 7;
}
vqid = (vqflg >> (vqflg_pos * 2)) & 0x3;
@@ -126,24 +129,25 @@ static void roqvideo_decode_frame(RoqContext *ri)
switch(vqid) {
case RoQ_ID_MOT:
break;
case RoQ_ID_FCC:
mx = 8 - (buf[bpos] >> 4) - ((signed char) (chunk_arg >> 8));
my = 8 - (buf[bpos++] & 0xf) - ((signed char) chunk_arg);
case RoQ_ID_FCC: {
int byte = bytestream2_get_byte(&ri->gb);
mx = 8 - (byte >> 4) - ((signed char) (chunk_arg >> 8));
my = 8 - (byte & 0xf) - ((signed char) chunk_arg);
ff_apply_motion_4x4(ri, x, y, mx, my);
break;
}
case RoQ_ID_SLD:
qcell = ri->cb4x4 + buf[bpos++];
qcell = ri->cb4x4 + bytestream2_get_byte(&ri->gb);
ff_apply_vector_2x2(ri, x, y, ri->cb2x2 + qcell->idx[0]);
ff_apply_vector_2x2(ri, x+2, y, ri->cb2x2 + qcell->idx[1]);
ff_apply_vector_2x2(ri, x, y+2, ri->cb2x2 + qcell->idx[2]);
ff_apply_vector_2x2(ri, x+2, y+2, ri->cb2x2 + qcell->idx[3]);
break;
case RoQ_ID_CCC:
ff_apply_vector_2x2(ri, x, y, ri->cb2x2 + buf[bpos]);
ff_apply_vector_2x2(ri, x+2, y, ri->cb2x2 + buf[bpos+1]);
ff_apply_vector_2x2(ri, x, y+2, ri->cb2x2 + buf[bpos+2]);
ff_apply_vector_2x2(ri, x+2, y+2, ri->cb2x2 + buf[bpos+3]);
bpos += 4;
ff_apply_vector_2x2(ri, x, y, ri->cb2x2 + bytestream2_get_byte(&ri->gb));
ff_apply_vector_2x2(ri, x+2, y, ri->cb2x2 + bytestream2_get_byte(&ri->gb));
ff_apply_vector_2x2(ri, x, y+2, ri->cb2x2 + bytestream2_get_byte(&ri->gb));
ff_apply_vector_2x2(ri, x+2, y+2, ri->cb2x2 + bytestream2_get_byte(&ri->gb));
break;
}
}
@@ -199,8 +203,7 @@ static int roq_decode_frame(AVCodecContext *avctx,
av_picture_copy((AVPicture*)s->current_frame, (AVPicture*)s->last_frame,
avctx->pix_fmt, avctx->width, avctx->height);

s->buf = buf;
s->size = buf_size;
bytestream2_init(&s->gb, buf, buf_size);
roqvideo_decode_frame(s);

*data_size = sizeof(AVFrame);
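The RoQ, PNG and SMC changes in this series follow the same pattern: raw buf/size pointer arithmetic is replaced by the bounds-checked GetByteContext reader. The snippet below is a minimal sketch of that reader pattern, assuming a build against the libavcodec tree headers and only the public bytestream2_* helpers from bytestream.h; the chunk layout (16-bit id, 32-bit size, 16-bit argument) mirrors the RoQ loop above and is illustrative rather than part of any patch here.

#include "libavcodec/bytestream.h"

/* Sketch: walk RoQ-style chunks with the reader tracking the position and
 * clamping reads at the end of the buffer. */
static void walk_chunks(const uint8_t *buf, int buf_size)
{
    GetByteContext gb;
    bytestream2_init(&gb, buf, buf_size);

    while (bytestream2_get_bytes_left(&gb) > 0) {
        unsigned chunk_id   = bytestream2_get_le16(&gb);
        unsigned chunk_size = bytestream2_get_le32(&gb);
        unsigned chunk_arg  = bytestream2_get_le16(&gb);

        if (chunk_size > bytestream2_get_bytes_left(&gb))
            chunk_size = bytestream2_get_bytes_left(&gb); /* truncate, as the decoder above does */
        bytestream2_skip(&gb, chunk_size);                /* skip the payload; never overruns    */
        (void)chunk_id; (void)chunk_arg;
    }
}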
@@ -183,6 +183,8 @@ static void rpza_decode_stream(RpzaContext *s)
color4[1] |= ((11 * ta + 21 * tb) >> 5);
color4[2] |= ((21 * ta + 11 * tb) >> 5);

if (s->size - stream_ptr < n_blocks * 4)
return;
while (n_blocks--) {
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
@@ -200,6 +202,8 @@ static void rpza_decode_stream(RpzaContext *s)

/* Fill block with 16 colors */
case 0x00:
if (s->size - stream_ptr < 16)
return;
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){
@@ -515,9 +515,10 @@ static int rv10_decode_packet(AVCodecContext *avctx,
const uint8_t *buf, int buf_size, int buf_size2)
{
MpegEncContext *s = avctx->priv_data;
int mb_count, mb_pos, left, start_mb_x;
int mb_count, mb_pos, left, start_mb_x, active_bits_size;

init_get_bits(&s->gb, buf, buf_size*8);
active_bits_size = buf_size * 8;
init_get_bits(&s->gb, buf, FFMAX(buf_size, buf_size2) * 8);
if(s->codec_id ==CODEC_ID_RV10)
mb_count = rv10_decode_picture_header(s);
else
@@ -601,13 +602,26 @@ static int rv10_decode_packet(AVCodecContext *avctx,
s->mv_type = MV_TYPE_16X16;
ret=ff_h263_decode_mb(s, s->block);

if (ret != SLICE_ERROR && s->gb.size_in_bits < get_bits_count(&s->gb) && 8*buf_size2 >= get_bits_count(&s->gb)){
av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n", s->gb.size_in_bits, 8*buf_size2);
s->gb.size_in_bits= 8*buf_size2;
// Repeat the slice end check from ff_h263_decode_mb with our active
// bitstream size
if (ret != SLICE_ERROR) {
int v = show_bits(&s->gb, 16);

if (get_bits_count(&s->gb) + 16 > active_bits_size)
v >>= get_bits_count(&s->gb) + 16 - active_bits_size;

if (!v)
ret = SLICE_END;
}
if (ret != SLICE_ERROR && active_bits_size < get_bits_count(&s->gb) &&
8 * buf_size2 >= get_bits_count(&s->gb)) {
active_bits_size = buf_size2 * 8;
av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n",
8 * buf_size, active_bits_size);
ret= SLICE_OK;
}

if (ret == SLICE_ERROR || s->gb.size_in_bits < get_bits_count(&s->gb)) {
if (ret == SLICE_ERROR || active_bits_size < get_bits_count(&s->gb)) {
av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
return -1;
}
@@ -629,7 +643,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,

ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);

return s->gb.size_in_bits;
return active_bits_size;
}

static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
@@ -661,8 +675,12 @@ static int rv10_decode_frame(AVCodecContext *avctx,

if(!avctx->slice_count){
slice_count = (*buf++) + 1;
buf_size--;
slices_hdr = buf + 4;
buf += 8 * slice_count;
buf_size -= 8 * slice_count;
if (buf_size <= 0)
return AVERROR_INVALIDDATA;
}else
slice_count = avctx->slice_count;

@@ -708,7 +726,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
s->current_picture_ptr= NULL; //so we can detect if frame_end wasnt called (find some nicer solution...)
}

return buf_size;
return avpkt->size;
}

AVCodec ff_rv10_decoder = {
@@ -711,8 +711,7 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,

if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
/* wait for the referenced mb row to be finished */
int mb_row = FFMIN(s->mb_height - 1,
s->mb_y + ((yoff + my + 5 + 8 * height) >> 4));
int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
AVFrame *f = dir ? &s->next_picture_ptr->f : &s->last_picture_ptr->f;
ff_thread_await_progress(f, mb_row, 0);
}
@@ -1361,6 +1360,53 @@ static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
return 0;
}

static void rv34_decoder_free(RV34DecContext *r)
{
av_freep(&r->intra_types_hist);
r->intra_types = NULL;
av_freep(&r->tmp_b_block_base);
av_freep(&r->mb_type);
av_freep(&r->cbp_luma);
av_freep(&r->cbp_chroma);
av_freep(&r->deblock_coefs);
}

static int rv34_decoder_alloc(RV34DecContext *r)
{
r->intra_types_stride = r->s.mb_width * 4 + 4;

r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height *
sizeof(*r->cbp_chroma));
r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height *
sizeof(*r->cbp_luma));
r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height *
sizeof(*r->deblock_coefs));
r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
sizeof(*r->intra_types_hist));
r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
sizeof(*r->mb_type));

if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
r->intra_types_hist && r->mb_type)) {
rv34_decoder_free(r);
return AVERROR(ENOMEM);
}

r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;

return 0;
}

static int rv34_decoder_realloc(RV34DecContext *r)
{
rv34_decoder_free(r);
return rv34_decoder_alloc(r);
}

static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
{
MpegEncContext *s = &r->s;
@@ -1376,22 +1422,19 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
}

if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
if(s->width != r->si.width || s->height != r->si.height){
av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
if (s->width != r->si.width || s->height != r->si.height) {
int err;

av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
r->si.width, r->si.height);
MPV_common_end(s);
s->width = r->si.width;
s->height = r->si.height;
avcodec_set_dimensions(s->avctx, s->width, s->height);
if(MPV_common_init(s) < 0)
return -1;
r->intra_types_stride = s->mb_width*4 + 4;
r->intra_types_hist = av_realloc(r->intra_types_hist, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
r->mb_type = av_realloc(r->mb_type, r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));
r->cbp_luma = av_realloc(r->cbp_luma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
r->deblock_coefs = av_realloc(r->deblock_coefs, r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
av_freep(&r->tmp_b_block_base);
if ((err = MPV_common_init(s)) < 0)
return err;
if ((err = rv34_decoder_realloc(r)) < 0)
return err;
}
s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
if(MPV_frame_start(s, s->avctx) < 0)
@@ -1500,6 +1543,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
{
RV34DecContext *r = avctx->priv_data;
MpegEncContext *s = &r->s;
int ret;

MPV_decode_defaults(s);
s->avctx = avctx;
@@ -1516,8 +1560,8 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
avctx->has_b_frames = 1;
s->low_delay = 0;

if (MPV_common_init(s) < 0)
return -1;
if ((ret = MPV_common_init(s)) < 0)
return ret;

ff_h264_pred_init(&r->h, CODEC_ID_RV40, 8, 1);

@@ -1530,15 +1574,8 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
ff_rv40dsp_init(&r->rdsp, &r->s.dsp);
#endif

r->intra_types_stride = 4*s->mb_stride + 4;
r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;

r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));

r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
if ((ret = rv34_decoder_alloc(r)) < 0)
return ret;

if(!intra_vlcs[0].cbppattern[0].bits)
rv34_init_tables();
@@ -1548,40 +1585,17 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)

int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
{
int err;
RV34DecContext *r = avctx->priv_data;

r->s.avctx = avctx;

if (avctx->internal->is_copy) {
r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height *
sizeof(*r->cbp_chroma));
r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height *
sizeof(*r->cbp_luma));
r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height *
sizeof(*r->deblock_coefs));
r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
sizeof(*r->intra_types_hist));
r->mb_type = av_malloc(r->s.mb_stride * r->s.mb_height *
sizeof(*r->mb_type));

if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
r->intra_types_hist && r->mb_type)) {
av_freep(&r->cbp_chroma);
av_freep(&r->cbp_luma);
av_freep(&r->deblock_coefs);
av_freep(&r->intra_types_hist);
av_freep(&r->mb_type);
r->intra_types = NULL;
return AVERROR(ENOMEM);
}

r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
r->tmp_b_block_base = NULL;

memset(r->mb_type, 0, r->s.mb_stride * r->s.mb_height *
sizeof(*r->mb_type));

MPV_common_init(&r->s);
if ((err = MPV_common_init(&r->s)) < 0)
return err;
if ((err = rv34_decoder_alloc(r)) < 0)
return err;
}
return 0;
}
@@ -1595,6 +1609,16 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
if (dst == src || !s1->context_initialized)
return 0;

if (s->height != s1->height || s->width != s1->width) {
MPV_common_end(s);
s->height = s1->height;
s->width = s1->width;
if ((err = MPV_common_init(s)) < 0)
return err;
if ((err = rv34_decoder_realloc(r)) < 0)
return err;
}

if ((err = ff_mpeg_update_thread_context(dst, src)))
return err;

@@ -1712,11 +1736,12 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
if(last && s->current_picture_ptr){
if(r->loop_filter)
r->loop_filter(r, s->mb_height - 1);
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_report_progress(&s->current_picture_ptr->f,
s->mb_height - 1, 0);
ff_er_frame_end(s);
MPV_frame_end(s);

if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);

if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict = *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
@@ -1737,14 +1762,7 @@ av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
RV34DecContext *r = avctx->priv_data;

MPV_common_end(&r->s);

av_freep(&r->intra_types_hist);
r->intra_types = NULL;
av_freep(&r->tmp_b_block_base);
av_freep(&r->mb_type);
av_freep(&r->cbp_luma);
av_freep(&r->cbp_chroma);
av_freep(&r->deblock_coefs);
rv34_decoder_free(r);

return 0;
}
@@ -55,7 +55,6 @@ static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block)
*/
static void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block){
int temp[16];
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;

rv34_row_transform(temp, block);
@@ -67,10 +66,10 @@ static void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block){
const int z2 = 7* temp[4*1+i] - 17*temp[4*3+i];
const int z3 = 17* temp[4*1+i] + 7*temp[4*3+i];

dst[0] = cm[ dst[0] + ( (z0 + z3) >> 10 ) ];
dst[1] = cm[ dst[1] + ( (z1 + z2) >> 10 ) ];
dst[2] = cm[ dst[2] + ( (z1 - z2) >> 10 ) ];
dst[3] = cm[ dst[3] + ( (z0 - z3) >> 10 ) ];
dst[0] = av_clip_uint8( dst[0] + ( (z0 + z3) >> 10 ) );
dst[1] = av_clip_uint8( dst[1] + ( (z1 + z2) >> 10 ) );
dst[2] = av_clip_uint8( dst[2] + ( (z1 - z2) >> 10 ) );
dst[3] = av_clip_uint8( dst[3] + ( (z0 - z3) >> 10 ) );

dst += stride;
}
@@ -103,15 +102,13 @@ static void rv34_inv_transform_noround_c(DCTELEM *block){

static void rv34_idct_dc_add_c(uint8_t *dst, int stride, int dc)
{
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i, j;

cm += (13*13*dc + 0x200) >> 10;

dc = (13*13*dc + 0x200) >> 10;
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
dst[j] = cm[ dst[j] ];
dst[j] = av_clip_uint8( dst[j] + dc );

dst += stride;
}
@@ -141,7 +141,8 @@ static int allocate_buffers(ShortenContext *s)
return AVERROR(ENOMEM);
s->offset[chan] = tmp_ptr;

tmp_ptr = av_realloc(s->decoded_base[chan], sizeof(int32_t)*(s->blocksize + s->nwrap));
tmp_ptr = av_realloc(s->decoded_base[chan], (s->blocksize + s->nwrap) *
sizeof(s->decoded_base[0][0]));
if (!tmp_ptr)
return AVERROR(ENOMEM);
s->decoded_base[chan] = tmp_ptr;
@@ -53,7 +53,6 @@
static inline void idct4col_put(uint8_t *dest, int line_size, const DCTELEM *col)
{
int c0, c1, c2, c3, a0, a1, a2, a3;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

a0 = col[8*0];
a1 = col[8*2];
@@ -63,13 +62,13 @@ static inline void idct4col_put(uint8_t *dest, int line_size, const DCTELEM *col
c2 = ((a0 - a2) << (CN_SHIFT - 1)) + (1 << (C_SHIFT - 1));
c1 = a1 * C1 + a3 * C2;
c3 = a1 * C2 - a3 * C1;
dest[0] = cm[(c0 + c1) >> C_SHIFT];
dest[0] = av_clip_uint8((c0 + c1) >> C_SHIFT);
dest += line_size;
dest[0] = cm[(c2 + c3) >> C_SHIFT];
dest[0] = av_clip_uint8((c2 + c3) >> C_SHIFT);
dest += line_size;
dest[0] = cm[(c2 - c3) >> C_SHIFT];
dest[0] = av_clip_uint8((c2 - c3) >> C_SHIFT);
dest += line_size;
dest[0] = cm[(c0 - c1) >> C_SHIFT];
dest[0] = av_clip_uint8((c0 - c1) >> C_SHIFT);
}

#define BF(k) \
@@ -133,7 +132,6 @@ void ff_simple_idct248_put(uint8_t *dest, int line_size, DCTELEM *block)
static inline void idct4col_add(uint8_t *dest, int line_size, const DCTELEM *col)
{
int c0, c1, c2, c3, a0, a1, a2, a3;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

a0 = col[8*0];
a1 = col[8*1];
@@ -143,13 +141,13 @@ static inline void idct4col_add(uint8_t *dest, int line_size, const DCTELEM *col
c2 = (a0 - a2)*C3 + (1 << (C_SHIFT - 1));
c1 = a1 * C1 + a3 * C2;
c3 = a1 * C2 - a3 * C1;
dest[0] = cm[dest[0] + ((c0 + c1) >> C_SHIFT)];
dest[0] = av_clip_uint8(dest[0] + ((c0 + c1) >> C_SHIFT));
dest += line_size;
dest[0] = cm[dest[0] + ((c2 + c3) >> C_SHIFT)];
dest[0] = av_clip_uint8(dest[0] + ((c2 + c3) >> C_SHIFT));
dest += line_size;
dest[0] = cm[dest[0] + ((c2 - c3) >> C_SHIFT)];
dest[0] = av_clip_uint8(dest[0] + ((c2 - c3) >> C_SHIFT));
dest += line_size;
dest[0] = cm[dest[0] + ((c0 - c1) >> C_SHIFT)];
dest[0] = av_clip_uint8(dest[0] + ((c0 - c1) >> C_SHIFT));
}

#define RN_SHIFT 15
@@ -161,7 +159,6 @@ static inline void idct4col_add(uint8_t *dest, int line_size, const DCTELEM *col
static inline void idct4row(DCTELEM *row)
{
int c0, c1, c2, c3, a0, a1, a2, a3;
//const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

a0 = row[0];
a1 = row[1];
@@ -224,50 +224,48 @@ static inline void FUNC(idctSparseColPut)(pixel *dest, int line_size,
DCTELEM *col)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
INIT_CLIP;

IDCT_COLS;

dest[0] = CLIP((a0 + b0) >> COL_SHIFT);
dest[0] = av_clip_pixel((a0 + b0) >> COL_SHIFT);
dest += line_size;
dest[0] = CLIP((a1 + b1) >> COL_SHIFT);
dest[0] = av_clip_pixel((a1 + b1) >> COL_SHIFT);
dest += line_size;
dest[0] = CLIP((a2 + b2) >> COL_SHIFT);
dest[0] = av_clip_pixel((a2 + b2) >> COL_SHIFT);
dest += line_size;
dest[0] = CLIP((a3 + b3) >> COL_SHIFT);
dest[0] = av_clip_pixel((a3 + b3) >> COL_SHIFT);
dest += line_size;
dest[0] = CLIP((a3 - b3) >> COL_SHIFT);
dest[0] = av_clip_pixel((a3 - b3) >> COL_SHIFT);
dest += line_size;
dest[0] = CLIP((a2 - b2) >> COL_SHIFT);
dest[0] = av_clip_pixel((a2 - b2) >> COL_SHIFT);
dest += line_size;
dest[0] = CLIP((a1 - b1) >> COL_SHIFT);
dest[0] = av_clip_pixel((a1 - b1) >> COL_SHIFT);
dest += line_size;
dest[0] = CLIP((a0 - b0) >> COL_SHIFT);
dest[0] = av_clip_pixel((a0 - b0) >> COL_SHIFT);
}

static inline void FUNC(idctSparseColAdd)(pixel *dest, int line_size,
DCTELEM *col)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
INIT_CLIP;

IDCT_COLS;

dest[0] = CLIP(dest[0] + ((a0 + b0) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a0 + b0) >> COL_SHIFT));
dest += line_size;
dest[0] = CLIP(dest[0] + ((a1 + b1) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a1 + b1) >> COL_SHIFT));
dest += line_size;
dest[0] = CLIP(dest[0] + ((a2 + b2) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a2 + b2) >> COL_SHIFT));
dest += line_size;
dest[0] = CLIP(dest[0] + ((a3 + b3) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a3 + b3) >> COL_SHIFT));
dest += line_size;
dest[0] = CLIP(dest[0] + ((a3 - b3) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a3 - b3) >> COL_SHIFT));
dest += line_size;
dest[0] = CLIP(dest[0] + ((a2 - b2) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a2 - b2) >> COL_SHIFT));
dest += line_size;
dest[0] = CLIP(dest[0] + ((a1 - b1) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a1 - b1) >> COL_SHIFT));
dest += line_size;
dest[0] = CLIP(dest[0] + ((a0 - b0) >> COL_SHIFT));
dest[0] = av_clip_pixel(dest[0] + ((a0 - b0) >> COL_SHIFT));
}

static inline void FUNC(idctSparseCol)(DCTELEM *col)
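The rv34dsp, simple_idct and svq3 hunks swap the ff_cropTbl lookup (cm[x], with cm = ff_cropTbl + MAX_NEG_CROP) for av_clip_uint8()/av_clip_pixel(). A minimal sketch of the equivalence, assuming only av_clip_uint8 from libavutil/common.h; the helper name is illustrative, not code from this series.

#include <stdint.h>
#include "libavutil/common.h"

/* cm[pix + delta] clamps the sum into 0..255 through a lookup table;
 * av_clip_uint8 performs the same saturation without the table indirection. */
static inline uint8_t add_and_clamp(uint8_t pix, int delta)
{
    return av_clip_uint8(pix + delta);
}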
@@ -128,12 +128,12 @@ static int smacker_decode_tree(GetBitContext *gb, HuffContext *hc, uint32_t pref
*/
static int smacker_decode_bigtree(GetBitContext *gb, HuffContext *hc, DBCtx *ctx)
{
if(!get_bits1(gb)){ //Leaf
int val, i1, i2, b1, b2;
if(hc->current >= hc->length){
if (hc->current + 1 >= hc->length) {
av_log(NULL, AV_LOG_ERROR, "Tree size exceeded!\n");
return -1;
}
if(!get_bits1(gb)){ //Leaf
int val, i1, i2, b1, b2;
b1 = get_bits_count(gb);
i1 = ctx->v1->table ? get_vlc2(gb, ctx->v1->table, SMKTREE_BITS, 3) : 0;
b1 = get_bits_count(gb) - b1;
@@ -157,7 +157,7 @@ static int smacker_decode_bigtree(GetBitContext *gb, HuffContext *hc, DBCtx *ctx
hc->values[hc->current++] = val;
return 1;
} else { //Node
int r = 0, t;
int r = 0, r_new, t;

t = hc->current++;
r = smacker_decode_bigtree(gb, hc, ctx);
@@ -165,8 +165,10 @@ static int smacker_decode_bigtree(GetBitContext *gb, HuffContext *hc, DBCtx *ctx
return r;
hc->values[t] = SMK_NODE | r;
r++;
r += smacker_decode_bigtree(gb, hc, ctx);
return r;
r_new = smacker_decode_bigtree(gb, hc, ctx);
if (r_new < 0)
return r_new;
return r + r_new;
}
}

@@ -181,6 +183,7 @@ static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int
VLC vlc[2];
int escapes[3];
DBCtx ctx;
int err = 0;

if(size >= UINT_MAX>>4){ // (((size + 3) >> 2) + 3) << 2 must not overflow
av_log(smk->avctx, AV_LOG_ERROR, "size too large\n");
@@ -254,7 +257,8 @@ static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int
huff.current = 0;
huff.values = av_mallocz(huff.length * sizeof(int));

smacker_decode_bigtree(gb, &huff, &ctx);
if (smacker_decode_bigtree(gb, &huff, &ctx) < 0)
err = -1;
skip_bits1(gb);
if(ctx.last[0] == -1) ctx.last[0] = huff.current++;
if(ctx.last[1] == -1) ctx.last[1] = huff.current++;
@@ -278,7 +282,7 @@ static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int
av_free(tmp2.lengths);
av_free(tmp2.values);

return 0;
return err;
}

static int decode_header_trees(SmackVContext *smk) {
@@ -34,6 +34,7 @@

#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"

#define CPAIR 2
#define CQUAD 4
@@ -46,8 +47,7 @@ typedef struct SmcContext {
AVCodecContext *avctx;
AVFrame frame;

const unsigned char *buf;
int size;
GetByteContext gb;

/* SMC color tables */
unsigned char color_pairs[COLORS_PER_TABLE * CPAIR];
@@ -58,7 +58,7 @@ typedef struct SmcContext {
} SmcContext;

#define GET_BLOCK_COUNT() \
(opcode & 0x10) ? (1 + s->buf[stream_ptr++]) : 1 + (opcode & 0x0F);
(opcode & 0x10) ? (1 + bytestream2_get_byte(&s->gb)) : 1 + (opcode & 0x0F);

#define ADVANCE_BLOCK() \
{ \
@@ -82,8 +82,8 @@ static void smc_decode_stream(SmcContext *s)
int height = s->avctx->height;
int stride = s->frame.linesize[0];
int i;
int stream_ptr = 0;
int chunk_size;
int buf_size = (int) (s->gb.buffer_end - s->gb.buffer_start);
unsigned char opcode;
int n_blocks;
unsigned int color_flags;
@@ -113,24 +113,18 @@ static void smc_decode_stream(SmcContext *s)
/* make the palette available */
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);

chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
stream_ptr += 4;
if (chunk_size != s->size)
bytestream2_skip(&s->gb, 1);
chunk_size = bytestream2_get_be24(&s->gb);
if (chunk_size != buf_size)
av_log(s->avctx, AV_LOG_INFO, "warning: MOV chunk size != encoded chunk size (%d != %d); using MOV chunk size\n",
chunk_size, s->size);
chunk_size, buf_size);

chunk_size = s->size;
chunk_size = buf_size;
total_blocks = ((s->avctx->width + 3) / 4) * ((s->avctx->height + 3) / 4);

/* traverse through the blocks */
while (total_blocks) {
/* sanity checks */
/* make sure stream ptr hasn't gone out of bounds */
if (stream_ptr > chunk_size) {
av_log(s->avctx, AV_LOG_INFO, "SMC decoder just went out of bounds (stream ptr = %d, chunk size = %d)\n",
stream_ptr, chunk_size);
return;
}
/* make sure the row pointer hasn't gone wild */
if (row_ptr >= image_size) {
av_log(s->avctx, AV_LOG_INFO, "SMC decoder just went out of bounds (row ptr = %d, height = %d)\n",
@@ -138,7 +132,7 @@ static void smc_decode_stream(SmcContext *s)
return;
}

opcode = s->buf[stream_ptr++];
opcode = bytestream2_get_byte(&s->gb);
switch (opcode & 0xF0) {
/* skip n blocks */
case 0x00:
@@ -158,7 +152,7 @@ static void smc_decode_stream(SmcContext *s)
if ((row_ptr == 0) && (pixel_ptr == 0)) {
av_log(s->avctx, AV_LOG_INFO, "encountered repeat block opcode (%02X) but no blocks rendered yet\n",
opcode & 0xF0);
break;
return;
}

/* figure out where the previous block started */
@@ -192,7 +186,7 @@ static void smc_decode_stream(SmcContext *s)
if ((row_ptr == 0) && (pixel_ptr < 2 * 4)) {
av_log(s->avctx, AV_LOG_INFO, "encountered repeat block opcode (%02X) but not enough blocks rendered yet\n",
opcode & 0xF0);
break;
return;
}

/* figure out where the previous 2 blocks started */
@@ -233,7 +227,7 @@ static void smc_decode_stream(SmcContext *s)
case 0x60:
case 0x70:
n_blocks = GET_BLOCK_COUNT();
pixel = s->buf[stream_ptr++];
pixel = bytestream2_get_byte(&s->gb);

while (n_blocks--) {
block_ptr = row_ptr + pixel_ptr;
@@ -257,7 +251,7 @@ static void smc_decode_stream(SmcContext *s)
/* fetch the next 2 colors from bytestream and store in next
* available entry in the color pair table */
for (i = 0; i < CPAIR; i++) {
pixel = s->buf[stream_ptr++];
pixel = bytestream2_get_byte(&s->gb);
color_table_index = CPAIR * color_pair_index + i;
s->color_pairs[color_table_index] = pixel;
}
@@ -268,11 +262,10 @@ static void smc_decode_stream(SmcContext *s)
if (color_pair_index == COLORS_PER_TABLE)
color_pair_index = 0;
} else
color_table_index = CPAIR * s->buf[stream_ptr++];
color_table_index = CPAIR * bytestream2_get_byte(&s->gb);

while (n_blocks--) {
color_flags = AV_RB16(&s->buf[stream_ptr]);
stream_ptr += 2;
color_flags = bytestream2_get_be16(&s->gb);
flag_mask = 0x8000;
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
@@ -300,7 +293,7 @@ static void smc_decode_stream(SmcContext *s)
/* fetch the next 4 colors from bytestream and store in next
* available entry in the color quad table */
for (i = 0; i < CQUAD; i++) {
pixel = s->buf[stream_ptr++];
pixel = bytestream2_get_byte(&s->gb);
color_table_index = CQUAD * color_quad_index + i;
s->color_quads[color_table_index] = pixel;
}
@@ -311,11 +304,10 @@ static void smc_decode_stream(SmcContext *s)
if (color_quad_index == COLORS_PER_TABLE)
color_quad_index = 0;
} else
color_table_index = CQUAD * s->buf[stream_ptr++];
color_table_index = CQUAD * bytestream2_get_byte(&s->gb);

while (n_blocks--) {
color_flags = AV_RB32(&s->buf[stream_ptr]);
stream_ptr += 4;
color_flags = bytestream2_get_be32(&s->gb);
/* flag mask actually acts as a bit shift count here */
flag_mask = 30;
block_ptr = row_ptr + pixel_ptr;
@@ -342,7 +334,7 @@ static void smc_decode_stream(SmcContext *s)
/* fetch the next 8 colors from bytestream and store in next
* available entry in the color octet table */
for (i = 0; i < COCTET; i++) {
pixel = s->buf[stream_ptr++];
pixel = bytestream2_get_byte(&s->gb);
color_table_index = COCTET * color_octet_index + i;
s->color_octets[color_table_index] = pixel;
}
@@ -353,7 +345,7 @@ static void smc_decode_stream(SmcContext *s)
if (color_octet_index == COLORS_PER_TABLE)
color_octet_index = 0;
} else
color_table_index = COCTET * s->buf[stream_ptr++];
color_table_index = COCTET * bytestream2_get_byte(&s->gb);

while (n_blocks--) {
/*
@@ -363,15 +355,12 @@ static void smc_decode_stream(SmcContext *s)
flags_a = xx012456, flags_b = xx89A37B
*/
/* build the color flags */
color_flags_a =
((AV_RB16(s->buf + stream_ptr ) & 0xFFF0) << 8) |
(AV_RB16(s->buf + stream_ptr + 2) >> 4);
color_flags_b =
((AV_RB16(s->buf + stream_ptr + 4) & 0xFFF0) << 8) |
((s->buf[stream_ptr + 1] & 0x0F) << 8) |
((s->buf[stream_ptr + 3] & 0x0F) << 4) |
(s->buf[stream_ptr + 5] & 0x0F);
stream_ptr += 6;
int val1 = bytestream2_get_be16(&s->gb);
int val2 = bytestream2_get_be16(&s->gb);
int val3 = bytestream2_get_be16(&s->gb);
color_flags_a = ((val1 & 0xFFF0) << 8) | (val2 >> 4);
color_flags_b = ((val3 & 0xFFF0) << 8) |
((val1 & 0x0F) << 8) | ((val2 & 0x0F) << 4) | (val3 & 0x0F);

color_flags = color_flags_a;
/* flag mask actually acts as a bit shift count here */
@@ -403,7 +392,7 @@ static void smc_decode_stream(SmcContext *s)
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++) {
pixels[block_ptr++] = s->buf[stream_ptr++];
pixels[block_ptr++] = bytestream2_get_byte(&s->gb);
}
block_ptr += row_inc;
}
@@ -412,10 +401,12 @@ static void smc_decode_stream(SmcContext *s)
break;

case 0xF0:
av_log(s->avctx, AV_LOG_INFO, "0xF0 opcode seen in SMC chunk (contact the developers)\n");
av_log_missing_feature(s->avctx, "0xF0 opcode", 1);
break;
}
}

return;
}

static av_cold int smc_decode_init(AVCodecContext *avctx)
@@ -440,8 +431,7 @@ static int smc_decode_frame(AVCodecContext *avctx,
SmcContext *s = avctx->priv_data;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);

s->buf = buf;
s->size = buf_size;
bytestream2_init(&s->gb, buf, buf_size);

s->frame.reference = 3;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
@@ -132,7 +132,7 @@ static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, sli
return;
}

static void decode_q_branch(SnowContext *s, int level, int x, int y){
static int decode_q_branch(SnowContext *s, int level, int x, int y){
const int w= s->b_width << s->block_max_depth;
const int rem_depth= s->block_max_depth - level;
const int index= (x + y*w) << rem_depth;
@@ -142,10 +142,11 @@ static void decode_q_branch(SnowContext *s, int level, int x, int y){
const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
int res;

if(s->keyframe){
set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
return;
return 0;
}

if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
@@ -168,17 +169,23 @@ static void decode_q_branch(SnowContext *s, int level, int x, int y){
}else{
if(s->ref_frames > 1)
ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
if (ref >= s->ref_frames) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
return AVERROR_INVALIDDATA;
}
pred_mv(s, &mx, &my, ref, left, top, tr);
mx+= get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
my+= get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
}
set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
}else{
decode_q_branch(s, level+1, 2*x+0, 2*y+0);
decode_q_branch(s, level+1, 2*x+1, 2*y+0);
decode_q_branch(s, level+1, 2*x+0, 2*y+1);
decode_q_branch(s, level+1, 2*x+1, 2*y+1);
if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
(res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
(res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
(res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
return res;
}
return 0;
}

static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
@@ -327,6 +334,11 @@ static int decode_header(SnowContext *s){
return -1;
}

if (s->chroma_h_shift != 1 || s->chroma_v_shift != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid chroma shift\n");
return AVERROR_PATCHWELCOME;
}

s->qlog += get_symbol(&s->c, s->header_state, 1);
s->mv_scale += get_symbol(&s->c, s->header_state, 1);
s->qbias += get_symbol(&s->c, s->header_state, 1);
@@ -349,16 +361,19 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}

static void decode_blocks(SnowContext *s){
static int decode_blocks(SnowContext *s){
int x, y;
int w= s->b_width;
int h= s->b_height;
int res;

for(y=0; y<h; y++){
for(x=0; x<w; x++){
decode_q_branch(s, 0, x, y);
if ((res = decode_q_branch(s, 0, x, y)) < 0)
return res;
}
}
return 0;
}

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
@@ -369,6 +384,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
int bytes_read;
AVFrame *picture = data;
int level, orientation, plane_index;
int res;

ff_init_range_decoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
@@ -397,7 +413,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
if(avctx->debug&FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_ERROR, "keyframe:%d qlog:%d\n", s->keyframe, s->qlog);

decode_blocks(s);
if ((res = decode_blocks(s)) < 0)
return res;

for(plane_index=0; plane_index<3; plane_index++){
Plane *p= &s->plane[plane_index];
@@ -44,6 +44,7 @@
#define RIGHT_SIDE 2

typedef struct SonicContext {
AVFrame frame;
int lossless, decorrelation;

int num_taps, downsampling;
@@ -757,6 +758,9 @@ static av_cold int sonic_decode_init(AVCodecContext *avctx)
s->channels = avctx->channels;
s->samplerate = avctx->sample_rate;

avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;

if (!avctx->extradata)
{
av_log(avctx, AV_LOG_ERROR, "No mandatory headers present\n");
@@ -848,18 +852,25 @@ static av_cold int sonic_decode_close(AVCodecContext *avctx)
}

static int sonic_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
void *data, int *got_frame_ptr,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
SonicContext *s = avctx->priv_data;
GetBitContext gb;
int i, quant, ch, j;
short *samples = data;
int i, quant, ch, j, ret;
short *samples;

if (buf_size == 0) return 0;

s->frame.nb_samples = s->frame_size;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples = s->frame.data[0];

// av_log(NULL, AV_LOG_INFO, "buf_size: %d\n", buf_size);

init_get_bits(&gb, buf, buf_size*8);
@@ -930,7 +941,8 @@ static int sonic_decode_frame(AVCodecContext *avctx,

align_get_bits(&gb);

*data_size = s->frame_size * 2;
*got_frame_ptr = 1;
*(AVFrame*)data = s->frame;

return (get_bits_count(&gb)+7)/8;
}
@@ -943,6 +955,7 @@ AVCodec ff_sonic_decoder = {
.init = sonic_decode_init,
.close = sonic_decode_close,
.decode = sonic_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Sonic"),
};
#endif /* CONFIG_SONIC_DECODER */
@@ -110,7 +110,7 @@ static const char *srt_to_ass(AVCodecContext *avctx, char *out, char *out_end,
for (j=sptr-2; j>=0; j--)
if (stack[j].param[i][0]) {
out += snprintf(out, out_end-out,
stack[j].param[i]);
"%s", stack[j].param[i]);
break;
}
} else {
@@ -146,7 +146,7 @@ static const char *srt_to_ass(AVCodecContext *avctx, char *out, char *out_end,
for (i=0; i<PARAM_NUMBER; i++)
if (stack[sptr].param[i][0])
out += snprintf(out, out_end-out,
stack[sptr].param[i]);
"%s", stack[sptr].param[i]);
}
} else if (!buffer[1] && strspn(buffer, "bisu") == 1) {
out += snprintf(out, out_end-out,
@@ -175,7 +175,6 @@ void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
{
const int qmul = svq3_dequant_coeff[qp];
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

if (dc) {
dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
@@ -201,10 +200,10 @@ void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
const int z3 = 17* block[i + 4*1] + 7*block[i + 4*3];
const int rr = (dc + 0x80000);

dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
dst[i + stride*0] = av_clip_uint8( dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) );
dst[i + stride*1] = av_clip_uint8( dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) );
dst[i + stride*2] = av_clip_uint8( dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) );
dst[i + stride*3] = av_clip_uint8( dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) );
}
}

@@ -614,7 +613,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
dir = i_mb_type_info[mb_type - 8].pred_mode;
dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

if ((h->intra16x16_pred_mode = ff_h264_check_intra16x16_pred_mode(h, dir)) == -1){
if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1){
av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
return -1;
}
@@ -653,7 +652,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
if (IS_INTRA16x16(mb_type) || (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
s->qscale += svq3_get_se_golomb(&s->gb);

if (s->qscale > 31U){
if (s->qscale > 31u){
av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
return -1;
}
@@ -713,7 +712,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
s->current_picture.f.mb_type[mb_xy] = mb_type;

if (IS_INTRA(mb_type)) {
h->chroma_pred_mode = ff_h264_check_intra_chroma_pred_mode(h, DC_PRED8x8);
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
}

return 0;
@@ -813,7 +812,9 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
MpegEncContext *s = &h->s;
int m;
unsigned char *extradata;
unsigned char *extradata_end;
unsigned int size;
int marker_found = 0;

if (ff_h264_decode_init(avctx) < 0)
return -1;
@@ -834,19 +835,26 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)

/* prowl for the "SEQH" marker in the extradata */
extradata = (unsigned char *)avctx->extradata;
for (m = 0; m < avctx->extradata_size; m++) {
if (!memcmp(extradata, "SEQH", 4))
extradata_end = avctx->extradata + avctx->extradata_size;
if (extradata) {
for (m = 0; m + 8 < avctx->extradata_size; m++) {
if (!memcmp(extradata, "SEQH", 4)) {
marker_found = 1;
break;
}
extradata++;
}
}

/* if a match was found, parse the extra data */
if (extradata && !memcmp(extradata, "SEQH", 4)) {
if (marker_found) {

GetBitContext gb;
int frame_size_code;

size = AV_RB32(&extradata[4]);
if (size > extradata_end - extradata - 8)
return AVERROR_INVALIDDATA;
init_get_bits(&gb, extradata + 8, size*8);

/* 'frame size code' and optional 'width, height' */
@@ -58,24 +58,24 @@ typedef struct TiffContext {
LZWState *lzw;
} TiffContext;

static int tget_short(const uint8_t **p, int le){
int v = le ? AV_RL16(*p) : AV_RB16(*p);
static unsigned tget_short(const uint8_t **p, int le) {
unsigned v = le ? AV_RL16(*p) : AV_RB16(*p);
*p += 2;
return v;
}

static int tget_long(const uint8_t **p, int le){
int v = le ? AV_RL32(*p) : AV_RB32(*p);
static unsigned tget_long(const uint8_t **p, int le) {
unsigned v = le ? AV_RL32(*p) : AV_RB32(*p);
*p += 4;
return v;
}

static int tget(const uint8_t **p, int type, int le){
static unsigned tget(const uint8_t **p, int type, int le) {
switch(type){
case TIFF_BYTE : return *(*p)++;
case TIFF_SHORT: return tget_short(p, le);
case TIFF_LONG : return tget_long (p, le);
default : return -1;
default : return UINT_MAX;
}
}

@@ -340,7 +340,7 @@ static int init_image(TiffContext *s)

static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
{
int tag, type, count, off, value = 0;
unsigned tag, type, count, off, value = 0;
int i, j;
uint32_t *pal;
const uint8_t *rp, *gp, *bp;
@@ -352,6 +352,11 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
count = tget_long(&buf, s->le);
off = tget_long(&buf, s->le);

if (type == 0 || type >= FF_ARRAY_ELEMS(type_sizes)) {
av_log(s->avctx, AV_LOG_DEBUG, "Unknown tiff type (%u) encountered\n", type);
return 0;
}

if(count == 1){
switch(type){
case TIFF_BYTE:
@@ -370,14 +375,16 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
break;
}
default:
value = -1;
value = UINT_MAX;
buf = start + off;
}
}else if(type_sizes[type] * count <= 4){
} else {
if (count <= 4 && type_sizes[type] * count <= 4) {
buf -= 4;
}else{
} else {
buf = start + off;
}
}

if(buf && (buf < start || buf > end_buf)){
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
@@ -454,7 +461,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
}
break;
case TIFF_ROWSPERSTRIP:
if(type == TIFF_LONG && value == -1)
if (type == TIFF_LONG && value == UINT_MAX)
value = s->avctx->height;
if(value < 1){
av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
@@ -599,6 +606,8 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "The answer to life, universe and everything is not correct!\n");
return -1;
}
// Reset these pointers so we can tell if they were set this frame
s->stripsizes = s->stripdata = NULL;
/* parse image file directory */
off = tget_long(&buf, le);
if (off >= UINT_MAX - 14 || end_buf - orig_buf < off + 14) {

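The tiff.c hunks above make the tget_*() readers and the tag/type/count/off locals unsigned, with UINT_MAX replacing -1 as the sentinel, so values read from the file can no longer turn negative and slip past size checks; the count <= 4 guard is still needed because unsigned products can wrap. A rough, self-contained illustration of the signedness pitfall (the values are made up, not taken from the diff):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t raw = 0xFFFFFFF0u;        /* a 32-bit count from untrusted data */
        int      as_int  = (int)raw;       /* typically becomes -16 */
        unsigned as_uint = raw;

        /* With a signed count the "fits in 4 bytes" test passes on a negative
         * product; with unsigned arithmetic the oversized value is rejected. */
        printf("signed:   %d  -> fits=%d\n", as_int,  as_int * 2 <= 4);
        printf("unsigned: %u -> fits=%d\n", as_uint, as_uint * 2u <= 4);
        return 0;
    }
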
@@ -135,7 +135,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
huff.val_bits, huff.max_bits);
return -1;
}
if((huff.nodes < 0) || (huff.nodes > 0x10000)) {
if((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree nodes: %i\n", huff.nodes);
return -1;
}

@@ -236,6 +236,9 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
if (s->channels == 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of channels\n");
return AVERROR_INVALIDDATA;
} else if (avctx->sample_rate == 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid samplerate\n");
return AVERROR_INVALIDDATA;
}

switch(s->bps) {

@@ -703,6 +703,21 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
int ret = 0;
AVDictionary *tmp = NULL;

if (avcodec_is_open(avctx))
return 0;

if ((!codec && !avctx->codec)) {
av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2().\n");
return AVERROR(EINVAL);
}
if ((codec && avctx->codec && codec != avctx->codec)) {
av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
"but %s passed to avcodec_open2().\n", avctx->codec->name, codec->name);
return AVERROR(EINVAL);
}
if (!codec)
codec = avctx->codec;

if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
return AVERROR(EINVAL);

@@ -722,11 +737,6 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
goto end;
}

if(avctx->codec || !codec) {
ret = AVERROR(EINVAL);
goto end;
}

avctx->internal = av_mallocz(sizeof(AVCodecInternal));
if (!avctx->internal) {
ret = AVERROR(ENOMEM);
@@ -816,6 +826,12 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
avctx->error_recognition, avctx->err_recognition);
#endif

if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
(!avctx->time_base.num || !avctx->time_base.den)) {
avctx->time_base.num = 1;
avctx->time_base.den = avctx->sample_rate;
}

if (!HAVE_THREADS)
av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n");

@@ -1421,6 +1437,7 @@ av_cold int avcodec_close(AVCodecContext *avctx)
return -1;
}

if (avcodec_is_open(avctx)) {
if (HAVE_THREADS && avctx->thread_opaque)
ff_thread_free(avctx);
if (avctx->codec && avctx->codec->close)
@@ -1428,7 +1445,9 @@ av_cold int avcodec_close(AVCodecContext *avctx)
avcodec_default_free_buffers(avctx);
avctx->coded_frame = NULL;
av_freep(&avctx->internal);
if (avctx->codec && avctx->codec->priv_class)
}

if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
av_opt_free(avctx->priv_data);
av_opt_free(avctx);
av_freep(&avctx->priv_data);
@@ -2009,3 +2028,8 @@ enum AVMediaType avcodec_get_type(enum CodecID codec_id)

return AVMEDIA_TYPE_UNKNOWN;
}

int avcodec_is_open(AVCodecContext *s)
{
return !!s->internal;
}

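The utils.c hunks above introduce avcodec_is_open(), which simply reports whether avctx->internal has been allocated, and use it so that avcodec_open2() becomes a no-op on an already open context while avcodec_close() skips teardown on one that was never opened. A hedged sketch of the calling pattern this enables (the wrapper name is illustrative only):

    #include <libavcodec/avcodec.h>

    /* Open ctx with codec unless it is already open; mirrors the guard that
     * avcodec_open2() itself now performs via avcodec_is_open(). */
    static int ensure_open(AVCodecContext *ctx, AVCodec *codec)
    {
        if (avcodec_is_open(ctx))
            return 0;                      /* a second open is now harmless */
        return avcodec_open2(ctx, codec, NULL);
    }
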
@@ -447,5 +447,6 @@ int vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *

int vc1_parse_frame_header (VC1Context *v, GetBitContext *gb);
int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb);
int ff_vc1_init_common(VC1Context *v);

#endif /* AVCODEC_VC1_H */

@@ -188,7 +188,7 @@ static int vc1_parse_init(AVCodecParserContext *s)
{
VC1ParseContext *vpc = s->priv_data;
vpc->v.s.slice_context_count = 1;
return 0;
return ff_vc1_init_common(&vpc->v);
}

AVCodecParser ff_vc1_parser = {

@@ -67,7 +67,7 @@ static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
* @param v The VC1Context to initialize
* @return Status
*/
static int vc1_init_common(VC1Context *v)
int ff_vc1_init_common(VC1Context *v)
{
static int done = 0;
int i = 0;
@@ -478,7 +478,10 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
int off, off_uv;
int v_edge_pos = s->v_edge_pos >> v->field_mode;
if (!v->field_mode && !v->s.last_picture.f.data[0])

if ((!v->field_mode ||
(v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
!v->s.last_picture.f.data[0])
return;

mx = s->mv[dir][0][0];
@@ -690,7 +693,9 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
int v_edge_pos = s->v_edge_pos >> v->field_mode;

if (!v->field_mode && !v->s.last_picture.f.data[0])
if ((!v->field_mode ||
(v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
!v->s.last_picture.f.data[0])
return;

mx = s->mv[dir][n][0];
@@ -946,6 +951,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
if (dominant)
chroma_ref_type = !v->cur_field_type;
}
if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
return;
s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
uvmx = (tx + ((tx & 3) == 3)) >> 1;
@@ -5266,7 +5273,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
avctx->idct_algo = FF_IDCT_WMV2;
}

if (vc1_init_common(v) < 0)
if (ff_vc1_init_common(v) < 0)
return -1;
ff_vc1dsp_init(&v->vc1dsp);

@@ -5711,7 +5718,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
if (!v->field_mode || v->second_field)
s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
else
s->end_mb_y = (i == n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
vc1_decode_blocks(v);
if (i != n_slices)
s->gb = slices[i].gb;

@@ -139,8 +139,6 @@ static void vc1_h_s_overlap_c(DCTELEM *left, DCTELEM *right)
* @see 8.6
*/
static av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq){
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

int a0 = (2*(src[-2*stride] - src[ 1*stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3;
int a0_sign = a0 >> 31; /* Store sign */
a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
@@ -163,8 +161,8 @@ static av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq){
else{
d = FFMIN(d, clip);
d = (d ^ d_sign) - d_sign; /* Restore sign */
src[-1*stride] = cm[src[-1*stride] - d];
src[ 0*stride] = cm[src[ 0*stride] + d];
src[-1*stride] = av_clip_uint8(src[-1*stride] - d);
src[ 0*stride] = av_clip_uint8(src[ 0*stride] + d);
}
return 1;
}
@@ -234,19 +232,17 @@ static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
int dc = block[0];
const uint8_t *cm;
dc = (3 * dc + 1) >> 1;
dc = (3 * dc + 16) >> 5;
cm = ff_cropTbl + MAX_NEG_CROP + dc;
for(i = 0; i < 8; i++){
dest[0] = cm[dest[0]];
dest[1] = cm[dest[1]];
dest[2] = cm[dest[2]];
dest[3] = cm[dest[3]];
dest[4] = cm[dest[4]];
dest[5] = cm[dest[5]];
dest[6] = cm[dest[6]];
dest[7] = cm[dest[7]];
dest[0] = av_clip_uint8(dest[0] + dc);
dest[1] = av_clip_uint8(dest[1] + dc);
dest[2] = av_clip_uint8(dest[2] + dc);
dest[3] = av_clip_uint8(dest[3] + dc);
dest[4] = av_clip_uint8(dest[4] + dc);
dest[5] = av_clip_uint8(dest[5] + dc);
dest[6] = av_clip_uint8(dest[6] + dc);
dest[7] = av_clip_uint8(dest[7] + dc);
dest += linesize;
}
}
@@ -326,19 +322,17 @@ static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
int dc = block[0];
const uint8_t *cm;
dc = ( 3 * dc + 1) >> 1;
dc = (17 * dc + 64) >> 7;
cm = ff_cropTbl + MAX_NEG_CROP + dc;
for(i = 0; i < 4; i++){
dest[0] = cm[dest[0]];
dest[1] = cm[dest[1]];
dest[2] = cm[dest[2]];
dest[3] = cm[dest[3]];
dest[4] = cm[dest[4]];
dest[5] = cm[dest[5]];
dest[6] = cm[dest[6]];
dest[7] = cm[dest[7]];
dest[0] = av_clip_uint8(dest[0] + dc);
dest[1] = av_clip_uint8(dest[1] + dc);
dest[2] = av_clip_uint8(dest[2] + dc);
dest[3] = av_clip_uint8(dest[3] + dc);
dest[4] = av_clip_uint8(dest[4] + dc);
dest[5] = av_clip_uint8(dest[5] + dc);
dest[6] = av_clip_uint8(dest[6] + dc);
dest[7] = av_clip_uint8(dest[7] + dc);
dest += linesize;
}
}
@@ -348,7 +342,6 @@ static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, DCTELEM *block)
int i;
register int t1,t2,t3,t4,t5,t6,t7,t8;
DCTELEM *src, *dst;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

src = block;
dst = block;
@@ -388,10 +381,10 @@ static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, DCTELEM *block)
t3 = 22 * src[ 8] + 10 * src[24];
t4 = 22 * src[24] - 10 * src[ 8];

dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)];
dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)];
dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)];
dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)];
dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t1 + t3) >> 7));
dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t2 - t4) >> 7));
dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t2 + t4) >> 7));
dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t1 - t3) >> 7));

src ++;
dest++;
@@ -404,15 +397,13 @@ static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
int dc = block[0];
const uint8_t *cm;
dc = (17 * dc + 4) >> 3;
dc = (12 * dc + 64) >> 7;
cm = ff_cropTbl + MAX_NEG_CROP + dc;
for(i = 0; i < 8; i++){
dest[0] = cm[dest[0]];
dest[1] = cm[dest[1]];
dest[2] = cm[dest[2]];
dest[3] = cm[dest[3]];
dest[0] = av_clip_uint8(dest[0] + dc);
dest[1] = av_clip_uint8(dest[1] + dc);
dest[2] = av_clip_uint8(dest[2] + dc);
dest[3] = av_clip_uint8(dest[3] + dc);
dest += linesize;
}
}
@@ -422,7 +413,6 @@ static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, DCTELEM *block)
int i;
register int t1,t2,t3,t4,t5,t6,t7,t8;
DCTELEM *src, *dst;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

src = block;
dst = block;
@@ -458,14 +448,14 @@ static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, DCTELEM *block)
t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];

dest[0*linesize] = cm[dest[0*linesize] + ((t5 + t1) >> 7)];
dest[1*linesize] = cm[dest[1*linesize] + ((t6 + t2) >> 7)];
dest[2*linesize] = cm[dest[2*linesize] + ((t7 + t3) >> 7)];
dest[3*linesize] = cm[dest[3*linesize] + ((t8 + t4) >> 7)];
dest[4*linesize] = cm[dest[4*linesize] + ((t8 - t4 + 1) >> 7)];
dest[5*linesize] = cm[dest[5*linesize] + ((t7 - t3 + 1) >> 7)];
dest[6*linesize] = cm[dest[6*linesize] + ((t6 - t2 + 1) >> 7)];
dest[7*linesize] = cm[dest[7*linesize] + ((t5 - t1 + 1) >> 7)];
dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t5 + t1) >> 7));
dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t6 + t2) >> 7));
dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t7 + t3) >> 7));
dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t8 + t4) >> 7));
dest[4*linesize] = av_clip_uint8(dest[4*linesize] + ((t8 - t4 + 1) >> 7));
dest[5*linesize] = av_clip_uint8(dest[5*linesize] + ((t7 - t3 + 1) >> 7));
dest[6*linesize] = av_clip_uint8(dest[6*linesize] + ((t6 - t2 + 1) >> 7));
dest[7*linesize] = av_clip_uint8(dest[7*linesize] + ((t5 - t1 + 1) >> 7));

src ++;
dest++;
@@ -478,15 +468,13 @@ static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
int dc = block[0];
const uint8_t *cm;
dc = (17 * dc + 4) >> 3;
dc = (17 * dc + 64) >> 7;
cm = ff_cropTbl + MAX_NEG_CROP + dc;
for(i = 0; i < 4; i++){
dest[0] = cm[dest[0]];
dest[1] = cm[dest[1]];
dest[2] = cm[dest[2]];
dest[3] = cm[dest[3]];
dest[0] = av_clip_uint8(dest[0] + dc);
dest[1] = av_clip_uint8(dest[1] + dc);
dest[2] = av_clip_uint8(dest[2] + dc);
dest[3] = av_clip_uint8(dest[3] + dc);
dest += linesize;
}
}
@@ -496,7 +484,6 @@ static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, DCTELEM *block)
int i;
register int t1,t2,t3,t4;
DCTELEM *src, *dst;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

src = block;
dst = block;
@@ -522,10 +509,10 @@ static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, DCTELEM *block)
t3 = 22 * src[ 8] + 10 * src[24];
t4 = 22 * src[24] - 10 * src[ 8];

dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)];
dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)];
dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)];
dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)];
dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t1 + t3) >> 7));
dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t2 - t4) >> 7));
dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t2 + t4) >> 7));
dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t1 - t3) >> 7));

src ++;
dest++;

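The vc1dsp.c hunks above drop the ff_cropTbl + MAX_NEG_CROP lookups in favour of av_clip_uint8(): the crop table only extends MAX_NEG_CROP entries on either side of the 0..255 range, so biasing the table pointer by an unclamped dc taken from block[0] could index outside it, whereas an arithmetic clip accepts any int. A tiny self-contained sketch of the clipping contract being relied on (local helper, not the libavutil implementation):

    #include <stdio.h>

    /* Same contract as av_clip_uint8(): clamp an int into 0..255. */
    static unsigned char clip_uint8(int a)
    {
        if (a < 0)   return 0;
        if (a > 255) return 255;
        return (unsigned char)a;
    }

    int main(void)
    {
        int samples[] = { -500, -1, 0, 128, 255, 256, 10000 };
        for (int i = 0; i < 7; i++)
            printf("%6d -> %3d\n", samples[i], clip_uint8(samples[i]));
        return 0;
    }
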
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H

#define LIBAVCODEC_VERSION_MAJOR 53
#define LIBAVCODEC_VERSION_MINOR 60
#define LIBAVCODEC_VERSION_MINOR 61
#define LIBAVCODEC_VERSION_MICRO 100

#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

@@ -484,6 +484,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported bitdepth %i\n", c->bpp);
return AVERROR_INVALIDDATA;
}

return 0;

Some files were not shown because too many files have changed in this diff.