Merge remote-tracking branch 'qatar/release/9' into release/1.1

* qatar/release/9: (21 commits)
  ogg: Fix potential infinite discard loop
  dxa: Make sure the reference frame exists
  h261: check the mtype index
  segafilm: Error out on impossible packet size
  ogg: Always alloc the private context in vorbis_header
  rtjpeg: Use init_get_bits8
  nuv: Reset the frame on resize
  nuv: Use av_fast_realloc
  nuv: return meaningful error codes.
  nuv: Pad the lzo outbuf
  nuv: Do not ignore lzo decompression failures
  rtmp: Do not misuse memcmp
  rtmp: rename data_size to size
  vc1: check mb_height validity.
  vc1: check the source buffer in vc1_mc functions
  bink: Bound check the quantization matrix.
  aac: Check init_get_bits return value
  aac: return meaningful errors
  aac: K&R formatting cosmetics
  oma: correctly mark and decrypt partial packets
  ...

Conflicts:
	libavcodec/aacdec.c
	libavcodec/h261dec.c
	libavcodec/nuv.c
	libavcodec/vc1dec.c
	libavformat/oggparsevorbis.c
	libavformat/omadec.c
	libavformat/rtmpproto.c
	tests/ref/fate/nuv-rtjpeg

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer
2013-08-27 19:08:02 +02:00
14 changed files with 425 additions and 245 deletions

View File

@@ -219,21 +219,32 @@ static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
 {
     if (layout_map[offset][0] == TYPE_CPE) {
         e2c_vec[offset] = (struct elem_to_channel) {
-            .av_position = left | right, .syn_ele = TYPE_CPE,
-            .elem_id = layout_map[offset ][1], .aac_position = pos };
+            .av_position = left | right,
+            .syn_ele = TYPE_CPE,
+            .elem_id = layout_map[offset][1],
+            .aac_position = pos
+        };
         return 1;
     } else {
         e2c_vec[offset] = (struct elem_to_channel) {
-            .av_position = left, .syn_ele = TYPE_SCE,
-            .elem_id = layout_map[offset ][1], .aac_position = pos };
+            .av_position = left,
+            .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[offset][1],
+            .aac_position = pos
+        };
         e2c_vec[offset + 1] = (struct elem_to_channel) {
-            .av_position = right, .syn_ele = TYPE_SCE,
-            .elem_id = layout_map[offset + 1][1], .aac_position = pos };
+            .av_position = right,
+            .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[offset + 1][1],
+            .aac_position = pos
+        };
         return 2;
     }
 }
-static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) {
+static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos,
+                                 int *current)
+{
     int num_pos_channels = 0;
     int first_cpe = 0;
     int sce_parity = 0;
@@ -290,8 +301,11 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
     i = 0;
     if (num_front_channels & 1) {
         e2c_vec[i] = (struct elem_to_channel) {
-            .av_position = AV_CH_FRONT_CENTER, .syn_ele = TYPE_SCE,
-            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_FRONT };
+            .av_position = AV_CH_FRONT_CENTER,
+            .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[i][1],
+            .aac_position = AAC_CHANNEL_FRONT
+        };
         i++;
         num_front_channels--;
     }
@@ -348,22 +362,31 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
     }
     if (num_back_channels) {
         e2c_vec[i] = (struct elem_to_channel) {
-            .av_position = AV_CH_BACK_CENTER, .syn_ele = TYPE_SCE,
-            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_BACK };
+            .av_position = AV_CH_BACK_CENTER,
+            .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[i][1],
+            .aac_position = AAC_CHANNEL_BACK
+        };
         i++;
         num_back_channels--;
     }
     if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
         e2c_vec[i] = (struct elem_to_channel) {
-            .av_position = AV_CH_LOW_FREQUENCY, .syn_ele = TYPE_LFE,
-            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
+            .av_position = AV_CH_LOW_FREQUENCY,
+            .syn_ele = TYPE_LFE,
+            .elem_id = layout_map[i][1],
+            .aac_position = AAC_CHANNEL_LFE
+        };
         i++;
     }
     while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
         e2c_vec[i] = (struct elem_to_channel) {
-            .av_position = UINT64_MAX, .syn_ele = TYPE_LFE,
-            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
+            .av_position = UINT64_MAX,
+            .syn_ele = TYPE_LFE,
+            .elem_id = layout_map[i][1],
+            .aac_position = AAC_CHANNEL_LFE
+        };
         i++;
     }
@@ -371,12 +394,11 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
     total_non_cc_elements = n = i;
     do {
         int next_n = 0;
-        for (i = 1; i < n; i++) {
+        for (i = 1; i < n; i++)
             if (e2c_vec[i - 1].av_position > e2c_vec[i].av_position) {
                 FFSWAP(struct elem_to_channel, e2c_vec[i - 1], e2c_vec[i]);
                 next_n = i;
             }
-        }
         n = next_n;
     } while (n > 0);
@@ -418,7 +440,8 @@ static void pop_output_configuration(AACContext *ac) {
 }
 /**
- * Configure output channel order based on the current program configuration element.
+ * Configure output channel order based on the current program
+ * configuration element.
  *
  * @return Returns error status. 0 - OK, !0 - error
  */
@@ -500,31 +523,35 @@ static int set_default_channel_config(AVCodecContext *avctx,
                                       int channel_config)
 {
     if (channel_config < 1 || channel_config > 7) {
-        av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
+        av_log(avctx, AV_LOG_ERROR,
+               "invalid default channel configuration (%d)\n",
                channel_config);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     *tags = tags_per_config[channel_config];
-    memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
+    memcpy(layout_map, aac_channel_layout_map[channel_config - 1],
+           *tags * sizeof(*layout_map));
     return 0;
 }
 static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
 {
-    // For PCE based channel configurations map the channels solely based on tags.
+    /* For PCE based channel configurations map the channels solely based
+     * on tags. */
     if (!ac->oc[1].m4ac.chan_config) {
         return ac->tag_che_map[type][elem_id];
     }
     // Allow single CPE stereo files to be signalled with mono configuration.
-    if (!ac->tags_mapped && type == TYPE_CPE && ac->oc[1].m4ac.chan_config == 1) {
+    if (!ac->tags_mapped && type == TYPE_CPE &&
+        ac->oc[1].m4ac.chan_config == 1) {
         uint8_t layout_map[MAX_ELEM_ID*4][3];
         int layout_map_tags;
         push_output_configuration(ac);
         av_log(ac->avctx, AV_LOG_DEBUG, "mono with CPE\n");
-        if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
-                                       2) < 0)
+        if (set_default_channel_config(ac->avctx, layout_map,
+                                       &layout_map_tags, 2) < 0)
            return NULL;
        if (output_configure(ac, layout_map, layout_map_tags,
                             OC_TRIAL_FRAME, 1) < 0)
@@ -534,15 +561,16 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
         ac->oc[1].m4ac.ps = 0;
     }
     // And vice-versa
-    if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
+    if (!ac->tags_mapped && type == TYPE_SCE &&
+        ac->oc[1].m4ac.chan_config == 2) {
         uint8_t layout_map[MAX_ELEM_ID * 4][3];
         int layout_map_tags;
         push_output_configuration(ac);
         av_log(ac->avctx, AV_LOG_DEBUG, "stereo with SCE\n");
-        if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
-                                       1) < 0)
+        if (set_default_channel_config(ac->avctx, layout_map,
+                                       &layout_map_tags, 1) < 0)
            return NULL;
        if (output_configure(ac, layout_map, layout_map_tags,
                             OC_TRIAL_FRAME, 1) < 0)
@@ -552,7 +580,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
         if (ac->oc[1].m4ac.sbr)
             ac->oc[1].m4ac.ps = -1;
     }
-    // For indexed channel configurations map the channels solely based on position.
+    /* For indexed channel configurations map the channels solely based
+     * on position. */
     switch (ac->oc[1].m4ac.chan_config) {
     case 7:
         if (ac->tags_mapped == 3 && type == TYPE_CPE) {
@@ -560,9 +589,12 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
             return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
         }
     case 6:
-        /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
-           instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
-           encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
+        /* Some streams incorrectly code 5.1 audio as
+         * SCE[0] CPE[0] CPE[1] SCE[1]
+         * instead of
+         * SCE[0] CPE[0] CPE[1] LFE[0].
+         * If we seem to have encountered such a stream, transfer
+         * the LFE[0] element to the SCE[1]'s mapping */
         if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
             ac->tags_mapped++;
             return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
@@ -573,13 +605,16 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
             return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
         }
     case 4:
-        if (ac->tags_mapped == 2 && ac->oc[1].m4ac.chan_config == 4 && type == TYPE_SCE) {
+        if (ac->tags_mapped == 2 &&
+            ac->oc[1].m4ac.chan_config == 4 &&
+            type == TYPE_SCE) {
             ac->tags_mapped++;
             return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
         }
     case 3:
     case 2:
-        if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) && type == TYPE_CPE) {
+        if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) &&
+            type == TYPE_CPE) {
             ac->tags_mapped++;
             return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
         } else if (ac->oc[1].m4ac.chan_config == 2) {
@@ -596,7 +631,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
 }
 /**
- * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
+ * Decode an array of 4 bit element IDs, optionally interleaved with a
+ * stereo/mono switching bit.
  *
  * @param type speaker type/position for these channels
  */
@@ -638,7 +674,8 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
                       uint8_t (*layout_map)[3],
                       GetBitContext *gb)
 {
-    int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
+    int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc;
+    int sampling_index;
     int comment_len;
     int tags;
@@ -646,7 +683,9 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
     sampling_index = get_bits(gb, 4);
     if (m4ac->sampling_index != sampling_index)
-        av_log(avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");
+        av_log(avctx, AV_LOG_WARNING,
+               "Sample rate index in program config element does not "
+               "match the sample rate index configured by the container.\n");
     num_front = get_bits(gb, 4);
     num_side = get_bits(gb, 4);
@@ -687,7 +726,7 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
     comment_len = get_bits(gb, 8) * 8;
     if (get_bits_left(gb) < comment_len) {
         av_log(avctx, AV_LOG_ERROR, "decode_pce: " overread_err);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     skip_bits_long(gb, comment_len);
     return tags;
@@ -729,7 +768,8 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
         if (tags < 0)
             return tags;
     } else {
-        if ((ret = set_default_channel_config(avctx, layout_map, &tags, channel_config)))
+        if ((ret = set_default_channel_config(avctx, layout_map,
+                                              &tags, channel_config)))
             return ret;
     }
@@ -781,20 +821,24 @@ static int decode_audio_specific_config(AACContext *ac,
                                         int sync_extension)
 {
     GetBitContext gb;
-    int i;
+    int i, ret;
     av_dlog(avctx, "audio specific config size %d\n", bit_size >> 3);
     for (i = 0; i < bit_size >> 3; i++)
         av_dlog(avctx, "%02x ", data[i]);
     av_dlog(avctx, "\n");
-    init_get_bits(&gb, data, bit_size);
-    if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0)
-        return -1;
+    if ((ret = init_get_bits(&gb, data, bit_size)) < 0)
+        return ret;
+    if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size,
+                                          sync_extension)) < 0)
+        return AVERROR_INVALIDDATA;
     if (m4ac->sampling_index > 12) {
-        av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
-        return -1;
+        av_log(avctx, AV_LOG_ERROR,
+               "invalid sampling rate index %d\n",
+               m4ac->sampling_index);
+        return AVERROR_INVALIDDATA;
     }
     skip_bits_long(&gb, i);
@@ -803,18 +847,23 @@ static int decode_audio_specific_config(AACContext *ac,
     case AOT_AAC_MAIN:
     case AOT_AAC_LC:
     case AOT_AAC_LTP:
-        if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
-            return -1;
+        if ((ret = decode_ga_specific_config(ac, avctx, &gb,
+                                             m4ac, m4ac->chan_config)) < 0)
+            return ret;
         break;
     default:
-        av_log(avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
-               m4ac->sbr == 1? "SBR+" : "", m4ac->object_type);
-        return -1;
+        av_log(avctx, AV_LOG_ERROR,
+               "Audio object type %s%d is not supported.\n",
+               m4ac->sbr == 1 ? "SBR+" : "",
+               m4ac->object_type);
+        return AVERROR(ENOSYS);
     }
-    av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
+    av_dlog(avctx,
+            "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
             m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
-            m4ac->sample_rate, m4ac->sbr, m4ac->ps);
+            m4ac->sample_rate, m4ac->sbr,
+            m4ac->ps);
     return get_bits_count(&gb);
 }
@@ -874,13 +923,16 @@ static void reset_predictor_group(PredictorState *ps, int group_num)
 #define AAC_INIT_VLC_STATIC(num, size) \
     INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
-        ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
-        ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
+        ff_aac_spectral_bits[num], sizeof(ff_aac_spectral_bits[num][0]), \
+        sizeof(ff_aac_spectral_bits[num][0]), \
+        ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), \
+        sizeof(ff_aac_spectral_codes[num][0]), \
         size);
 static av_cold int aac_decode_init(AVCodecContext *avctx)
 {
     AACContext *ac = avctx->priv_data;
+    int ret;
     ac->avctx = avctx;
     ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
@@ -888,10 +940,11 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
     if (avctx->extradata_size > 0) {
-        if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
+        if ((ret = decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
                                          avctx->extradata,
-                                         avctx->extradata_size*8, 1) < 0)
-            return -1;
+                                         avctx->extradata_size * 8,
+                                         1)) < 0)
+            return ret;
     } else {
         int sr, i;
         uint8_t layout_map[MAX_ELEM_ID*4][3];
@@ -949,9 +1002,14 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     ff_aac_tableinit();
-    INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
-                    ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
-                    ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
+    INIT_VLC_STATIC(&vlc_scalefactors, 7,
+                    FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
+                    ff_aac_scalefactor_bits,
+                    sizeof(ff_aac_scalefactor_bits[0]),
+                    sizeof(ff_aac_scalefactor_bits[0]),
+                    ff_aac_scalefactor_code,
+                    sizeof(ff_aac_scalefactor_code[0]),
+                    sizeof(ff_aac_scalefactor_code[0]),
                     352);
     ff_mdct_init(&ac->mdct, 11, 1, 1.0 / (32768.0 * 1024.0));
@@ -985,7 +1043,7 @@ static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
     if (get_bits_left(gb) < 8 * count) {
         av_log(ac->avctx, AV_LOG_ERROR, "skip_data_stream_element: "overread_err);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     skip_bits_long(gb, 8 * count);
     return 0;
@@ -997,9 +1055,11 @@ static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
     int sfb;
     if (get_bits1(gb)) {
         ics->predictor_reset_group = get_bits(gb, 5);
-        if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
-            av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
-            return -1;
+        if (ics->predictor_reset_group == 0 ||
+            ics->predictor_reset_group > 30) {
+            av_log(ac->avctx, AV_LOG_ERROR,
+                   "Invalid Predictor Reset Group.\n");
+            return AVERROR_INVALIDDATA;
         }
     }
     for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
@@ -1068,7 +1128,8 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
                 goto fail;
             }
         } else if (ac->oc[1].m4ac.object_type == AOT_AAC_LC) {
-            av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
+            av_log(ac->avctx, AV_LOG_ERROR,
+                   "Prediction is not allowed in AAC-LC.\n");
             goto fail;
         } else {
             if ((ics->ltp.present = get_bits(gb, 1)))
@@ -1079,7 +1140,8 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
     if (ics->max_sfb > ics->num_swb) {
         av_log(ac->avctx, AV_LOG_ERROR,
-               "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
+               "Number of scalefactor bands in group (%d) "
+               "exceeds limit (%d).\n",
                ics->max_sfb, ics->num_swb);
         goto fail;
     }
@@ -1112,20 +1174,20 @@ static int decode_band_types(AACContext *ac, enum BandType band_type[120],
             int sect_band_type = get_bits(gb, 4);
             if (sect_band_type == 12) {
                 av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
-                return -1;
+                return AVERROR_INVALIDDATA;
             }
             do {
                 sect_len_incr = get_bits(gb, bits);
                 sect_end += sect_len_incr;
                 if (get_bits_left(gb) < 0) {
                     av_log(ac->avctx, AV_LOG_ERROR, "decode_band_types: "overread_err);
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 }
                 if (sect_end > ics->max_sfb) {
                     av_log(ac->avctx, AV_LOG_ERROR,
                            "Number of bands (%d) exceeds limit (%d).\n",
                            sect_end, ics->max_sfb);
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 }
             } while (sect_len_incr == (1 << bits) - 1);
             for (; k < sect_end; k++) {
@@ -1163,7 +1225,8 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
             if (band_type[idx] == ZERO_BT) {
                 for (; i < run_end; i++, idx++)
                     sf[idx] = 0.;
-            } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
+            } else if ((band_type[idx] == INTENSITY_BT) ||
+                       (band_type[idx] == INTENSITY_BT2)) {
                 for (; i < run_end; i++, idx++) {
                     offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                     clipped_offset = av_clip(offset[2], -155, 100);
@@ -1196,7 +1259,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                     if (offset[0] > 255U) {
                         av_log(ac->avctx, AV_LOG_ERROR,
                                "Scalefactor (%d) out of range.\n", offset[0]);
-                        return -1;
+                        return AVERROR_INVALIDDATA;
                     }
                     sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
                 }
@@ -1251,10 +1314,11 @@ static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
                 tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
                 if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
-                    av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
+                    av_log(ac->avctx, AV_LOG_ERROR,
+                           "TNS filter order %d is greater than maximum %d.\n",
                            tns->order[w][filt], tns_max_order);
                     tns->order[w][filt] = 0;
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 }
                 if (tns->order[w][filt]) {
                     tns->direction[w][filt] = get_bits1(gb);
@@ -1283,7 +1347,9 @@ static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
 {
     int idx;
     if (ms_present == 1) {
-        for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
+        for (idx = 0;
+             idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
+             idx++)
             cpe->ms_mask[idx] = get_bits1(gb);
     } else if (ms_present == 2) {
         memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask[0]) * cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb);
@@ -1382,7 +1448,8 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
     float *coef_base = coef;
     for (g = 0; g < ics->num_windows; g++)
-        memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));
+        memset(coef + g * 128 + offsets[ics->max_sfb], 0,
+               sizeof(float) * (c - offsets[ics->max_sfb]));
     for (g = 0; g < ics->num_window_groups; g++) {
         unsigned g_len = ics->group_len[g];
@@ -1537,7 +1604,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
                         if (b > 8) {
                             av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
-                            return -1;
+                            return AVERROR_INVALIDDATA;
                         }
                         SKIP_BITS(re, gb, b + 1);
@@ -1652,14 +1719,20 @@ static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
     }
     if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
-        for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]; sfb++) {
-            for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
+        for (sfb = 0;
+             sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index];
+             sfb++) {
+            for (k = sce->ics.swb_offset[sfb];
+                 k < sce->ics.swb_offset[sfb + 1];
+                 k++) {
                 predict(&sce->predictor_state[k], &sce->coeffs[k],
-                        sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
+                        sce->ics.predictor_present &&
+                        sce->ics.prediction_used[sfb]);
             }
         }
         if (sce->ics.predictor_reset_group)
-            reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
+            reset_predictor_group(sce->predictor_state,
+                                  sce->ics.predictor_reset_group);
     } else
         reset_all_predictors(sce->predictor_state);
 }
@@ -1680,6 +1753,7 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
     IndividualChannelStream *ics = &sce->ics;
     float *out = sce->coeffs;
     int global_gain, pulse_present = 0;
+    int ret;
     /* This assignment is to silence a GCC warning about the variable being used
      * uninitialized when in fact it always is.
@@ -1693,33 +1767,38 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
         return AVERROR_INVALIDDATA;
     }
-    if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
-        return -1;
-    if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
-        return -1;
+    if ((ret = decode_band_types(ac, sce->band_type,
+                                 sce->band_type_run_end, gb, ics)) < 0)
+        return ret;
+    if ((ret = decode_scalefactors(ac, sce->sf, gb, global_gain, ics,
+                                   sce->band_type, sce->band_type_run_end)) < 0)
+        return ret;
     pulse_present = 0;
     if (!scale_flag) {
         if ((pulse_present = get_bits1(gb))) {
             if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
-                av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
-                return -1;
+                av_log(ac->avctx, AV_LOG_ERROR,
+                       "Pulse tool not allowed in eight short sequence.\n");
+                return AVERROR_INVALIDDATA;
             }
             if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
-                av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
-                return -1;
+                av_log(ac->avctx, AV_LOG_ERROR,
+                       "Pulse data corrupt or invalid.\n");
+                return AVERROR_INVALIDDATA;
             }
         }
         if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
-            return -1;
+            return AVERROR_INVALIDDATA;
         if (get_bits1(gb)) {
            av_log_missing_feature(ac->avctx, "SSR", 1);
            return AVERROR_PATCHWELCOME;
        }
    }
-    if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
-        return -1;
+    if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present,
+                                    &pulse, ics, sce->band_type) < 0)
+        return AVERROR_INVALIDDATA;
     if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
         apply_prediction(ac, sce);
@@ -1740,7 +1819,8 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
     for (g = 0; g < ics->num_window_groups; g++) {
         for (i = 0; i < ics->max_sfb; i++, idx++) {
             if (cpe->ms_mask[idx] &&
-                cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
+                cpe->ch[0].band_type[idx] < NOISE_BT &&
+                cpe->ch[1].band_type[idx] < NOISE_BT) {
                 for (group = 0; group < ics->group_len[g]; group++) {
                     ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
                                               ch1 + group * 128 + offsets[i],
@@ -1760,7 +1840,8 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
  * [1] mask is decoded from bitstream; [2] mask is all 1s;
  * [3] reserved for scalable AAC
  */
-static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_present)
+static void apply_intensity_stereo(AACContext *ac,
+                                   ChannelElement *cpe, int ms_present)
 {
     const IndividualChannelStream *ics = &cpe->ch[1].ics;
     SingleChannelElement *sce1 = &cpe->ch[1];
@@ -1771,7 +1852,8 @@ static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_p
     float scale;
     for (g = 0; g < ics->num_window_groups; g++) {
         for (i = 0; i < ics->max_sfb;) {
-            if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
+            if (sce1->band_type[idx] == INTENSITY_BT ||
+                sce1->band_type[idx] == INTENSITY_BT2) {
                 const int bt_run_end = sce1->band_type_run_end[idx];
                 for (; i < bt_run_end; i++, idx++) {
                     c = -1 + 2 * (sce1->band_type[idx] - 14);
@@ -1811,13 +1893,14 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
         i = cpe->ch[1].ics.use_kb_window[0];
         cpe->ch[1].ics = cpe->ch[0].ics;
         cpe->ch[1].ics.use_kb_window[1] = i;
-        if (cpe->ch[1].ics.predictor_present && (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
+        if (cpe->ch[1].ics.predictor_present &&
+            (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
             if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
                 decode_ltp(&cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
         ms_present = get_bits(gb, 2);
         if (ms_present == 3) {
             av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         } else if (ms_present)
             decode_mid_side_stereo(cpe, gb, ms_present);
     }
@@ -2678,7 +2761,8 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
     if (ac->force_dmono_mode >= 0)
         ac->dmono_mode = ac->force_dmono_mode;
-    init_get_bits(&gb, buf, buf_size * 8);
+    if ((err = init_get_bits(&gb, buf, buf_size * 8)) < 0)
+        return err;
     if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb, avpkt)) < 0)
         return err;
@@ -2715,7 +2799,7 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
 struct LATMContext {
     AACContext aac_ctx;     ///< containing AACContext
-    int initialized;        ///< initialized after a valid extradata was seen
+    int initialized;        ///< initilized after a valid extradata was seen
     // parser data
     int audio_mux_version_A; ///< LATM syntax version
@@ -2928,7 +3012,8 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
     int muxlength, err;
     GetBitContext gb;
-    init_get_bits(&gb, avpkt->data, avpkt->size * 8);
+    if ((err = init_get_bits(&gb, avpkt->data, avpkt->size * 8)) < 0)
+        return err;
     // check for LOAS sync word
     if (get_bits(&gb, 11) != LOAS_SYNC_WORD)

View File

@@ -685,6 +685,9 @@ static int read_dct_coeffs(GetBitContext *gb, int32_t block[64], const uint8_t *
         }
     }
+    if (quant_idx >= 16)
+        return AVERROR_INVALIDDATA;
     quant = quant_matrices[quant_idx];
     block[0] = (block[0] * quant[0]) >> 11;

View File

@@ -255,6 +255,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     case 5:
         c->pic.key_frame = !(compr & 1);
         c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+        if (!tmpptr && !c->pic.key_frame) {
+            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
+            return AVERROR_INVALIDDATA;
+        }
         for(j = 0; j < avctx->height; j++){
             if((compr & 1) && tmpptr){
                 for(i = 0; i < avctx->width; i++)

View File

@@ -291,9 +291,11 @@ static int h261_decode_mb(H261Context *h){
     // Read mtype
     h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
     if (h->mtype < 0) {
-        av_log(s->avctx, AV_LOG_ERROR, "illegal mtype %d\n", h->mtype);
+        av_log(s->avctx, AV_LOG_ERROR, "Invalid mtype index %d\n",
+               h->mtype);
         return SLICE_ERROR;
     }
+    av_assert0(h->mtype < FF_ARRAY_ELEMS(ff_h261_mtype_map));
     h->mtype = ff_h261_mtype_map[h->mtype];
     // Read mquant

View File

@@ -88,7 +88,7 @@ static int get_quant(AVCodecContext *avctx, NuvContext *c, const uint8_t *buf,
     int i;
     if (size < 2 * 64 * 4) {
         av_log(avctx, AV_LOG_ERROR, "insufficient rtjpeg quant data\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     for (i = 0; i < 64; i++, buf += 4)
         c->lq[i] = AV_RL32(buf);
@@ -114,6 +114,8 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
                         int quality)
 {
     NuvContext *c = avctx->priv_data;
+    int ret;
     width = FFALIGN(width, 2);
     height = FFALIGN(height, 2);
     if (quality >= 0)
@@ -121,9 +123,10 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
     if (width != c->width || height != c->height) {
         // also reserve space for a possible additional header
         int buf_size = 24 + height * width * 3 / 2 + AV_LZO_OUTPUT_PADDING;
-        if (av_image_check_size(height, width, 0, avctx) < 0 ||
-            buf_size > INT_MAX/8)
+        if (buf_size > INT_MAX/8)
             return -1;
+        if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
+            return ret;
         avctx->width = c->width = width;
         avctx->height = c->height = height;
         av_fast_malloc(&c->decomp_buf, &c->decomp_size,
@@ -165,7 +168,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if (buf_size < 12) {
         av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     // codec data (rtjpeg quant tables)
@@ -184,7 +187,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if (buf_size < 12 || buf[0] != 'V') {
         av_log(avctx, AV_LOG_ERROR, "not a nuv video frame\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     comptype = buf[1];
     switch (comptype) {
@@ -204,11 +207,14 @@ retry:
     buf = &buf[12];
     buf_size -= 12;
     if (comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO) {
-        int outlen = c->decomp_size - AV_LZO_OUTPUT_PADDING, inlen = buf_size;
-        if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen))
+        int outlen = c->decomp_size - FFMAX(FF_INPUT_BUFFER_PADDING_SIZE, AV_LZO_OUTPUT_PADDING);
+        int inlen = buf_size;
+        if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen)) {
             av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
+            return AVERROR_INVALIDDATA;
+        }
         buf = c->decomp_buf;
-        buf_size = c->decomp_size - AV_LZO_OUTPUT_PADDING - outlen;
+        buf_size = c->decomp_size - FFMAX(FF_INPUT_BUFFER_PADDING_SIZE, AV_LZO_OUTPUT_PADDING) - outlen;
     }
     if (c->codec_frameheader) {
         int w, h, q;
@@ -227,10 +233,9 @@ retry:
         w = AV_RL16(&buf[6]);
         h = AV_RL16(&buf[8]);
         q = buf[10];
-        res = codec_reinit(avctx, w, h, q);
-        if (res < 0)
-            return res;
-        if (res) {
+        if ((result = codec_reinit(avctx, w, h, q)) < 0)
+            return result;
+        if (result) {
             buf = avpkt->data;
             buf_size = avpkt->size;
             size_change = 1;
@@ -248,7 +253,7 @@ retry:
     result = avctx->reget_buffer(avctx, &c->pic);
     if (result < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return result;
     }
     c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
@@ -280,7 +285,7 @@ retry:
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     *picture = c->pic;
@@ -291,6 +296,8 @@ retry:
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     NuvContext *c = avctx->priv_data;
+    int ret;
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
     c->pic.data[0] = NULL;
     c->decomp_buf = NULL;
@@ -305,8 +312,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
     ff_dsputil_init(&c->dsp, avctx);
-    if (codec_reinit(avctx, avctx->width, avctx->height, -1) < 0)
-        return 1;
+    if ((ret = codec_reinit(avctx, avctx->width, avctx->height, -1)) < 0)
+        return ret;
     return 0;
 }

View File

@@ -108,10 +108,13 @@ int ff_rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
                                   const uint8_t *buf, int buf_size) {
     GetBitContext gb;
     int w = c->w / 16, h = c->h / 16;
-    int x, y;
+    int x, y, ret;
     uint8_t *y1 = f->data[0], *y2 = f->data[0] + 8 * f->linesize[0];
     uint8_t *u = f->data[1], *v = f->data[2];
-    init_get_bits(&gb, buf, buf_size * 8);
+    if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
+        return ret;
     for (y = 0; y < h; y++) {
         for (x = 0; x < w; x++) {
 #define BLOCK(quant, dst, stride) do { \

View File

@@ -395,8 +395,10 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
         }
     }
-    if(!srcY)
+    if (!srcY || !srcU) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
         return;
+    }
     src_x = s->mb_x * 16 + (mx >> 2);
     src_y = s->mb_y * 16 + (my >> 2);
@@ -573,8 +575,10 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
     } else
         srcY = s->next_picture.f.data[0];
-    if(!srcY)
+    if (!srcY) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
         return;
+    }
     if (v->field_mode) {
         if (v->cur_field_type != v->ref_field_type[dir])
@@ -865,8 +869,10 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
         srcV = s->next_picture.f.data[2];
     }
-    if(!srcU)
+    if (!srcU) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
         return;
+    }
     srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
     srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
@@ -5666,6 +5672,12 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         v->mv_f[1] = tmp[1];
     }
     mb_height = s->mb_height >> v->field_mode;
+    if (!mb_height) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
+        goto err;
+    }
     for (i = 0; i <= n_slices; i++) {
         if (i > 0 && slices[i - 1].mby_start >= mb_height) {
             if (v->field_mode <= 0) {

View File

@@ -212,15 +212,15 @@ vorbis_header (AVFormatContext * s, int idx)
     struct oggvorbis_private *priv;
     int pkt_type = os->buf[os->pstart];
+    if (!(pkt_type & 1))
+        return os->private ? 0 : -1;
     if (!os->private) {
         os->private = av_mallocz(sizeof(struct oggvorbis_private));
         if (!os->private)
             return -1;
     }
-    if (!(pkt_type & 1))
-        return 0;
     if (os->psize < 1 || pkt_type > 5)
         return -1;

View File

@@ -112,13 +112,18 @@ static int kset(AVFormatContext *s, const uint8_t *r_val, const uint8_t *n_val,
     return 0;
 }
-static int rprobe(AVFormatContext *s, uint8_t *enc_header, const uint8_t *r_val)
+#define OMA_RPROBE_M_VAL 48 + 1
+static int rprobe(AVFormatContext *s, uint8_t *enc_header, unsigned size,
+                  const uint8_t *r_val)
 {
     OMAContext *oc = s->priv_data;
     unsigned int pos;
     struct AVDES av_des;
-    if (!enc_header || !r_val)
+    if (!enc_header || !r_val ||
+        size < OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size + oc->i_size ||
+        size < OMA_RPROBE_M_VAL)
         return -1;
     /* m_val */
@@ -139,35 +144,41 @@ static int rprobe(AVFormatContext *s, uint8_t *enc_header, const uint8_t *r_val)
     return memcmp(&enc_header[pos], oc->sm_val, 8) ? -1 : 0;
 }
-static int nprobe(AVFormatContext *s, uint8_t *enc_header, int size, const uint8_t *n_val)
+static int nprobe(AVFormatContext *s, uint8_t *enc_header, unsigned size,
+                  const uint8_t *n_val)
 {
     OMAContext *oc = s->priv_data;
-    uint32_t pos, taglen, datalen;
+    uint64_t pos;
+    uint32_t taglen, datalen;
     struct AVDES av_des;
-    if (!enc_header || !n_val)
+    if (!enc_header || !n_val ||
+        size < OMA_ENC_HEADER_SIZE + oc->k_size + 4)
         return -1;
     pos = OMA_ENC_HEADER_SIZE + oc->k_size;
     if (!memcmp(&enc_header[pos], "EKB ", 4))
         pos += 32;
+    if (size < pos + 44)
+        return -1;
     if (AV_RB32(&enc_header[pos]) != oc->rid)
         av_log(s, AV_LOG_DEBUG, "Mismatching RID\n");
     taglen = AV_RB32(&enc_header[pos+32]);
     datalen = AV_RB32(&enc_header[pos+36]) >> 4;
-    if(pos + (uint64_t)taglen + (((uint64_t)datalen)<<4) + 44 > size)
-        return -1;
     pos += 44 + taglen;
+    if (pos + (((uint64_t)datalen) << 4) > size)
+        return -1;
     av_des_init(&av_des, n_val, 192, 1);
     while (datalen-- > 0) {
         av_des_crypt(&av_des, oc->r_val, &enc_header[pos], 2, NULL, 1);
         kset(s, oc->r_val, NULL, 16);
-        if (!rprobe(s, enc_header, oc->r_val))
+        if (!rprobe(s, enc_header, size, oc->r_val))
             return 0;
         pos += 16;
     }
@@ -236,7 +247,7 @@ static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
         kset(s, s->key, s->key, s->keylen);
     }
     if (!memcmp(oc->r_val, (const uint8_t[8]){0}, 8) ||
-        rprobe(s, gdata, oc->r_val) < 0 &&
+        rprobe(s, gdata, geob->datasize, oc->r_val) < 0 &&
         nprobe(s, gdata, geob->datasize, oc->n_val) < 0) {
         int i;
         for (i = 0; i < FF_ARRAY_ELEMS(leaf_table); i += 2) {
@@ -244,7 +255,8 @@ static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
             AV_WL64(buf, leaf_table[i]);
             AV_WL64(&buf[8], leaf_table[i+1]);
             kset(s, buf, buf, 16);
-            if (!rprobe(s, gdata, oc->r_val) || !nprobe(s, gdata, geob->datasize, oc->n_val))
+            if (!rprobe(s, gdata, geob->datasize, oc->r_val) ||
+                !nprobe(s, gdata, geob->datasize, oc->n_val))
                 break;
         }
         if (i >= FF_ARRAY_ELEMS(leaf_table)) {
@@ -386,6 +398,9 @@ static int oma_read_packet(AVFormatContext *s, AVPacket *pkt)
     int packet_size = s->streams[0]->codec->block_align;
     int ret = av_get_packet(s->pb, pkt, packet_size);
+    if (ret < packet_size)
+        pkt->flags |= AV_PKT_FLAG_CORRUPT;
     if (ret < 0)
         return ret;
     if (!ret)
@@ -394,8 +409,13 @@ static int oma_read_packet(AVFormatContext *s, AVPacket *pkt)
     pkt->stream_index = 0;
     if (oc->encrypted) {
-        /* previous unencrypted block saved in IV for the next packet (CBC mode) */
-        av_des_crypt(&oc->av_des, pkt->data, pkt->data, (ret >> 3), oc->iv, 1);
+        /* previous unencrypted block saved in IV for
+         * the next packet (CBC mode) */
+        if (ret == packet_size)
+            av_des_crypt(&oc->av_des, pkt->data, pkt->data,
+                         (packet_size >> 3), oc->iv, 1);
+        else
+            memset(oc->iv, 0, 8);
     }
     return ret;

View File

@@ -145,23 +145,23 @@ int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
 {
     uint8_t t, buf[16];
-    int channel_id, timestamp, data_size, offset = 0;
+    int channel_id, timestamp, size, offset = 0;
     uint32_t extra = 0;
     enum RTMPPacketType type;
-    int size = 0;
+    int written = 0;
     int ret;
-    size++;
+    written++;
     channel_id = hdr & 0x3F;
     if (channel_id < 2) { //special case for channel number >= 64
         buf[1] = 0;
         if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
             return AVERROR(EIO);
-        size += channel_id + 1;
+        written += channel_id + 1;
         channel_id = AV_RL16(buf) + 64;
     }
-    data_size = prev_pkt[channel_id].data_size;
+    size = prev_pkt[channel_id].size;
     type = prev_pkt[channel_id].type;
     extra = prev_pkt[channel_id].extra;
@@ -171,21 +171,21 @@ int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
     } else {
         if (ffurl_read_complete(h, buf, 3) != 3)
             return AVERROR(EIO);
-        size += 3;
+        written += 3;
         timestamp = AV_RB24(buf);
         if (hdr != RTMP_PS_FOURBYTES) {
             if (ffurl_read_complete(h, buf, 3) != 3)
                 return AVERROR(EIO);
-            size += 3;
-            data_size = AV_RB24(buf);
+            written += 3;
+            size = AV_RB24(buf);
             if (ffurl_read_complete(h, buf, 1) != 1)
                 return AVERROR(EIO);
-            size++;
+            written++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (ffurl_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
-                size += 4;
+                written += 4;
                extra = AV_RL32(buf);
            }
        }
@@ -199,36 +199,36 @@ int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
         timestamp += prev_pkt[channel_id].timestamp;
     if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp,
-                                     data_size)) < 0)
+                                     size)) < 0)
         return ret;
     p->extra = extra;
     // save history
     prev_pkt[channel_id].channel_id = channel_id;
     prev_pkt[channel_id].type = type;
-    prev_pkt[channel_id].data_size = data_size;
+    prev_pkt[channel_id].size = size;
     prev_pkt[channel_id].ts_delta = timestamp - prev_pkt[channel_id].timestamp;
     prev_pkt[channel_id].timestamp = timestamp;
     prev_pkt[channel_id].extra = extra;
-    while (data_size > 0) {
-        int toread = FFMIN(data_size, chunk_size);
+    while (size > 0) {
+        int toread = FFMIN(size, chunk_size);
         if (ffurl_read_complete(h, p->data + offset, toread) != toread) {
             ff_rtmp_packet_destroy(p);
             return AVERROR(EIO);
         }
-        data_size -= chunk_size;
+        size -= chunk_size;
         offset += chunk_size;
-        size += chunk_size;
-        if (data_size > 0) {
+        written += chunk_size;
+        if (size > 0) {
            if ((ret = ffurl_read_complete(h, &t, 1)) < 0) { // marker
                ff_rtmp_packet_destroy(p);
                return ret;
            }
-            size++;
+            written++;
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
-    return size;
+    return written;
 }
 int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
@@ -237,7 +237,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
     uint8_t pkt_hdr[16], *p = pkt_hdr;
     int mode = RTMP_PS_TWELVEBYTES;
     int off = 0;
-    int size = 0;
+    int written = 0;
     int ret;
     pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;
@@ -246,7 +246,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
     if (prev_pkt[pkt->channel_id].channel_id &&
         pkt->extra == prev_pkt[pkt->channel_id].extra) {
         if (pkt->type == prev_pkt[pkt->channel_id].type &&
-            pkt->data_size == prev_pkt[pkt->channel_id].data_size) {
+            pkt->size == prev_pkt[pkt->channel_id].size) {
             mode = RTMP_PS_FOURBYTES;
             if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
                 mode = RTMP_PS_ONEBYTE;
@@ -270,7 +270,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
         timestamp = pkt->ts_delta;
     bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
     if (mode != RTMP_PS_FOURBYTES) {
-        bytestream_put_be24(&p, pkt->data_size);
+        bytestream_put_be24(&p, pkt->size);
         bytestream_put_byte(&p, pkt->type);
         if (mode == RTMP_PS_TWELVEBYTES)
             bytestream_put_le32(&p, pkt->extra);
@@ -281,7 +281,7 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
     // save history
     prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
     prev_pkt[pkt->channel_id].type = pkt->type;
-    prev_pkt[pkt->channel_id].data_size = pkt->data_size;
+    prev_pkt[pkt->channel_id].size = pkt->size;
     prev_pkt[pkt->channel_id].timestamp = pkt->timestamp;
     if (mode != RTMP_PS_TWELVEBYTES) {
         prev_pkt[pkt->channel_id].ts_delta = pkt->ts_delta;
@@ -292,20 +292,20 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
     if ((ret = ffurl_write(h, pkt_hdr, p - pkt_hdr)) < 0)
         return ret;
-    size = p - pkt_hdr + pkt->data_size;
-    while (off < pkt->data_size) {
-        int towrite = FFMIN(chunk_size, pkt->data_size - off);
+    written = p - pkt_hdr + pkt->size;
+    while (off < pkt->size) {
+        int towrite = FFMIN(chunk_size, pkt->size - off);
         if ((ret = ffurl_write(h, pkt->data + off, towrite)) < 0)
             return ret;
         off += towrite;
-        if (off < pkt->data_size) {
+        if (off < pkt->size) {
             uint8_t marker = 0xC0 | pkt->channel_id;
             if ((ret = ffurl_write(h, &marker, 1)) < 0)
                 return ret;
-            size++;
+            written++;
         }
     }
-    return size;
+    return written;
 }
 int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
@@ -316,7 +316,7 @@ int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
         if (!pkt->data)
             return AVERROR(ENOMEM);
     }
-    pkt->data_size = size;
+    pkt->size = size;
     pkt->channel_id = channel_id;
     pkt->type = type;
     pkt->timestamp = timestamp;
@@ -331,7 +331,7 @@ void ff_rtmp_packet_destroy(RTMPPacket *pkt)
     if (!pkt)
         return;
     av_freep(&pkt->data);
-    pkt->data_size = 0;
+    pkt->size = 0;
 }
 int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
@@ -502,9 +502,9 @@ static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *d
void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p) void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p)
{ {
av_log(ctx, AV_LOG_DEBUG, "RTMP packet type '%s'(%d) for channel %d, timestamp %d, extra field %d size %d\n", av_log(ctx, AV_LOG_DEBUG, "RTMP packet type '%s'(%d) for channel %d, timestamp %d, extra field %d size %d\n",
rtmp_packet_type(p->type), p->type, p->channel_id, p->timestamp, p->extra, p->data_size); rtmp_packet_type(p->type), p->type, p->channel_id, p->timestamp, p->extra, p->size);
if (p->type == RTMP_PT_INVOKE || p->type == RTMP_PT_NOTIFY) { if (p->type == RTMP_PT_INVOKE || p->type == RTMP_PT_NOTIFY) {
uint8_t *src = p->data, *src_end = p->data + p->data_size; uint8_t *src = p->data, *src_end = p->data + p->size;
while (src < src_end) { while (src < src_end) {
int sz; int sz;
ff_amf_tag_contents(ctx, src, src_end); ff_amf_tag_contents(ctx, src, src_end);
@@ -519,8 +519,41 @@ void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p)
av_log(ctx, AV_LOG_DEBUG, "Client BW = %d\n", AV_RB32(p->data)); av_log(ctx, AV_LOG_DEBUG, "Client BW = %d\n", AV_RB32(p->data));
} else if (p->type != RTMP_PT_AUDIO && p->type != RTMP_PT_VIDEO && p->type != RTMP_PT_METADATA) { } else if (p->type != RTMP_PT_AUDIO && p->type != RTMP_PT_VIDEO && p->type != RTMP_PT_METADATA) {
int i; int i;
for (i = 0; i < p->data_size; i++) for (i = 0; i < p->size; i++)
av_log(ctx, AV_LOG_DEBUG, " %02X", p->data[i]); av_log(ctx, AV_LOG_DEBUG, " %02X", p->data[i]);
av_log(ctx, AV_LOG_DEBUG, "\n"); av_log(ctx, AV_LOG_DEBUG, "\n");
} }
} }
int ff_amf_match_string(const uint8_t *data, int size, const char *str)
{
int len = strlen(str);
int amf_len, type;
if (size < 1)
return 0;
type = *data++;
if (type != AMF_DATA_TYPE_LONG_STRING &&
type != AMF_DATA_TYPE_STRING)
return 0;
if (type == AMF_DATA_TYPE_LONG_STRING) {
if ((size -= 4 + 1) < 0)
return 0;
amf_len = bytestream_get_be32(&data);
} else {
if ((size -= 2 + 1) < 0)
return 0;
amf_len = bytestream_get_be16(&data);
}
if (amf_len > size)
return 0;
if (amf_len != len)
return 0;
return !memcmp(data, str, len);
}
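For readers skimming the hunk above: an AMF string on the wire is a type marker (AMF_DATA_TYPE_STRING or AMF_DATA_TYPE_LONG_STRING) followed by a big-endian 16- or 32-bit length and the raw characters, which is exactly what hard-coded patterns such as "\002\000\007_result" in the old calling code spelled out byte by byte. A minimal usage sketch of the new helper follows; the buffer and function names are illustrative only and not part of this commit.

    #include <stdint.h>
    #include "rtmppkt.h"   /* declares ff_amf_match_string() */

    /* "_result" as an AMF short string: 0x02 (string marker), 0x00 0x07
     * (big-endian length), then the seven characters -- the same ten bytes
     * the old code compared with memcmp(data, "\002\000\007_result", 10). */
    static const uint8_t example_invoke[] = {
        0x02, 0x00, 0x07, '_', 'r', 'e', 's', 'u', 'l', 't'
    };

    static int example_is_result(void)
    {
        return ff_amf_match_string(example_invoke, sizeof(example_invoke), "_result");
    }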

View File

@@ -81,7 +81,7 @@ typedef struct RTMPPacket {
     uint32_t ts_delta;  ///< timestamp increment to the previous one in milliseconds (latter only for media packets)
     uint32_t extra;     ///< probably an additional channel ID used during streaming data
     uint8_t  *data;     ///< packet payload
-    int      data_size; ///< packet payload size
+    int      size;      ///< packet payload size
 } RTMPPacket;
 /**
@@ -282,6 +282,13 @@ int ff_amf_read_string(GetByteContext *gbc, uint8_t *str,
  */
 int ff_amf_read_null(GetByteContext *gbc);
+/**
+ * Match AMF string with a NULL-terminated string.
+ *
+ * @return 0 if the strings do not match.
+ */
+int ff_amf_match_string(const uint8_t *data, int size, const char *str);
 /** @} */ // AMF funcs

View File

@@ -186,7 +186,7 @@ static int find_tracked_method(URLContext *s, RTMPPacket *pkt, int offset,
     int ret;
     int i;
-    bytestream2_init(&gbc, pkt->data + offset, pkt->data_size - offset);
+    bytestream2_init(&gbc, pkt->data + offset, pkt->size - offset);
     if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
         return ret;
@@ -224,7 +224,7 @@ static int rtmp_send_packet(RTMPContext *rt, RTMPPacket *pkt, int track)
         double pkt_id;
         int len;
-        bytestream2_init(&gbc, pkt->data, pkt->data_size);
+        bytestream2_init(&gbc, pkt->data, pkt->size);
         if ((ret = ff_amf_read_string(&gbc, name, sizeof(name), &len)) < 0)
             goto fail;
@@ -385,7 +385,7 @@ static int gen_connect(URLContext *s, RTMPContext *rt)
         }
     }
-    pkt.data_size = p - pkt.data;
+    pkt.size = p - pkt.data;
     return rtmp_send_packet(rt, &pkt, 1);
 }
@@ -406,7 +406,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
                                    rt->prev_pkt[1])) < 0)
         return ret;
     cp = pkt.data;
-    bytestream2_init(&gbc, cp, pkt.data_size);
+    bytestream2_init(&gbc, cp, pkt.size);
     if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
         av_log(s, AV_LOG_ERROR, "Unable to read command string\n");
         ff_rtmp_packet_destroy(&pkt);
@@ -437,7 +437,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
         return ret;
     p = pkt.data;
     bytestream_put_be32(&p, rt->server_bw);
-    pkt.data_size = p - pkt.data;
+    pkt.size = p - pkt.data;
     ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                                rt->prev_pkt[1]);
     ff_rtmp_packet_destroy(&pkt);
@@ -450,7 +450,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
     p = pkt.data;
     bytestream_put_be32(&p, rt->server_bw);
     bytestream_put_byte(&p, 2); // dynamic
-    pkt.data_size = p - pkt.data;
+    pkt.size = p - pkt.data;
     ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                                rt->prev_pkt[1]);
     ff_rtmp_packet_destroy(&pkt);
@@ -512,7 +512,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
     ff_amf_write_number(&p, 0);
     ff_amf_write_object_end(&p);
-    pkt.data_size = p - pkt.data;
+    pkt.size = p - pkt.data;
     ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                                rt->prev_pkt[1]);
     ff_rtmp_packet_destroy(&pkt);
@@ -527,7 +527,7 @@ static int read_connect(URLContext *s, RTMPContext *rt)
     ff_amf_write_number(&p, 0);
     ff_amf_write_null(&p);
     ff_amf_write_number(&p, 8192);
-    pkt.data_size = p - pkt.data;
+    pkt.size = p - pkt.data;
     ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                                rt->prev_pkt[1]);
     ff_rtmp_packet_destroy(&pkt);
@@ -742,9 +742,9 @@ static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
     uint8_t *p;
     int ret;
-    if (ppkt->data_size < 6) {
+    if (ppkt->size < 6) {
         av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
-               ppkt->data_size);
+               ppkt->size);
         return AVERROR_INVALIDDATA;
     }
@@ -1418,10 +1418,10 @@ static int handle_chunk_size(URLContext *s, RTMPPacket *pkt)
     RTMPContext *rt = s->priv_data;
     int ret;
-    if (pkt->data_size < 4) {
+    if (pkt->size < 4) {
         av_log(s, AV_LOG_ERROR,
                "Too short chunk size change packet (%d)\n",
-               pkt->data_size);
+               pkt->size);
         return AVERROR_INVALIDDATA;
     }
@@ -1451,9 +1451,9 @@ static int handle_ping(URLContext *s, RTMPPacket *pkt)
     RTMPContext *rt = s->priv_data;
     int t, ret;
-    if (pkt->data_size < 2) {
+    if (pkt->size < 2) {
         av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
-               pkt->data_size);
+               pkt->size);
         return AVERROR_INVALIDDATA;
     }
@@ -1477,10 +1477,10 @@ static int handle_client_bw(URLContext *s, RTMPPacket *pkt)
 {
     RTMPContext *rt = s->priv_data;
-    if (pkt->data_size < 4) {
+    if (pkt->size < 4) {
         av_log(s, AV_LOG_ERROR,
                "Client bandwidth report packet is less than 4 bytes long (%d)\n",
-               pkt->data_size);
+               pkt->size);
         return AVERROR_INVALIDDATA;
     }
@@ -1501,10 +1501,10 @@ static int handle_server_bw(URLContext *s, RTMPPacket *pkt)
 {
     RTMPContext *rt = s->priv_data;
-    if (pkt->data_size < 4) {
+    if (pkt->size < 4) {
         av_log(s, AV_LOG_ERROR,
                "Too short server bandwidth report packet (%d)\n",
-               pkt->data_size);
+               pkt->size);
         return AVERROR_INVALIDDATA;
     }
@@ -1704,7 +1704,7 @@ static int handle_connect_error(URLContext *s, const char *desc)
 static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
 {
     RTMPContext *rt = s->priv_data;
-    const uint8_t *data_end = pkt->data + pkt->data_size;
+    const uint8_t *data_end = pkt->data + pkt->size;
     char *tracked_method = NULL;
     int level = AV_LOG_ERROR;
     uint8_t tmpstr[256];
@@ -1752,7 +1752,7 @@ static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
         GetByteContext gbc;
         int ret;
-        bytestream2_init(&gbc, p, pkt->data_size);
+        bytestream2_init(&gbc, p, pkt->size);
         if (ff_amf_read_string(&gbc, command, sizeof(command),
                                &stringlen)) {
             av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n");
@@ -1804,7 +1804,7 @@ static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
                 return ret;
             }
             pp = spkt.data;
-            bytestream2_init_writer(&pbc, pp, spkt.data_size);
+            bytestream2_init_writer(&pbc, pp, spkt.size);
             bytestream2_put_be16(&pbc, 0); // 0 -> Stream Begin
             bytestream2_put_be32(&pbc, rt->nb_streamid);
             ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
@@ -1863,7 +1863,7 @@ static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
              * if a client creates more than 2^32 - 2 streams. */
         }
     }
-    spkt.data_size = pp - spkt.data;
+    spkt.size = pp - spkt.data;
     ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
                                rt->prev_pkt[1]);
     ff_rtmp_packet_destroy(&spkt);
@@ -1884,7 +1884,7 @@ static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
         return ret;
     }
-    if (!memcmp(tracked_method, "connect", 7)) {
+    if (!strcmp(tracked_method, "connect")) {
         if (!rt->is_input) {
             if ((ret = gen_release_stream(s, rt)) < 0)
                 goto fail;
@@ -1910,7 +1910,7 @@ static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
                 goto fail;
             }
         }
-    } else if (!memcmp(tracked_method, "createStream", 12)) {
+    } else if (!strcmp(tracked_method, "createStream")) {
         //extract a number from the result
         if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
             av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
@@ -1937,7 +1937,7 @@ fail:
 static int handle_invoke_status(URLContext *s, RTMPPacket *pkt)
 {
     RTMPContext *rt = s->priv_data;
-    const uint8_t *data_end = pkt->data + pkt->data_size;
+    const uint8_t *data_end = pkt->data + pkt->size;
     const uint8_t *ptr = pkt->data + 11;
     uint8_t tmpstr[256];
     int i, t;
@@ -1972,23 +1972,23 @@ static int handle_invoke(URLContext *s, RTMPPacket *pkt)
     int ret = 0;
     //TODO: check for the messages sent for wrong state?
-    if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
+    if (ff_amf_match_string(pkt->data, pkt->size, "_error")) {
         if ((ret = handle_invoke_error(s, pkt)) < 0)
             return ret;
-    } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
+    } else if (ff_amf_match_string(pkt->data, pkt->size, "_result")) {
         if ((ret = handle_invoke_result(s, pkt)) < 0)
             return ret;
-    } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
+    } else if (ff_amf_match_string(pkt->data, pkt->size, "onStatus")) {
         if ((ret = handle_invoke_status(s, pkt)) < 0)
             return ret;
-    } else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
+    } else if (ff_amf_match_string(pkt->data, pkt->size, "onBWDone")) {
        if ((ret = gen_check_bw(s, rt)) < 0)
            return ret;
-    } else if (!memcmp(pkt->data, "\002\000\015releaseStream", 16) ||
-               !memcmp(pkt->data, "\002\000\011FCPublish", 12) ||
-               !memcmp(pkt->data, "\002\000\007publish", 10) ||
-               !memcmp(pkt->data, "\002\000\010_checkbw", 11) ||
-               !memcmp(pkt->data, "\002\000\014createStream", 15)) {
+    } else if (ff_amf_match_string(pkt->data, pkt->size, "releaseStream") ||
+               ff_amf_match_string(pkt->data, pkt->size, "FCPublish") ||
+               ff_amf_match_string(pkt->data, pkt->size, "publish") ||
+               ff_amf_match_string(pkt->data, pkt->size, "_checkbw") ||
+               ff_amf_match_string(pkt->data, pkt->size, "createStream")) {
         if ((ret = send_invoke_response(s, pkt)) < 0)
             return ret;
     }
@@ -2011,7 +2011,7 @@ static int handle_notify(URLContext *s, RTMPPacket *pkt) {
     unsigned datatowritelength;
     p = pkt->data;
-    bytestream2_init(&gbc, p, pkt->data_size);
+    bytestream2_init(&gbc, p, pkt->size);
     if (ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
                            &stringlen))
         return AVERROR_INVALIDDATA;
@@ -2125,7 +2125,7 @@ static int get_packet(URLContext *s, int for_header)
     int ret;
     uint8_t *p;
     const uint8_t *next;
-    uint32_t data_size;
+    uint32_t size;
     uint32_t ts, cts, pts=0;
     if (rt->state == STATE_STOPPED)
@@ -2168,24 +2168,25 @@ static int get_packet(URLContext *s, int for_header)
             ff_rtmp_packet_destroy(&rpkt);
             return 0;
         }
-        if (!rpkt.data_size || !rt->is_input) {
+        if (!rpkt.size || !rt->is_input) {
            ff_rtmp_packet_destroy(&rpkt);
            continue;
        }
        if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
-           (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
+           (rpkt.type == RTMP_PT_NOTIFY &&
+            ff_amf_match_string(rpkt.data, rpkt.size, "onMetaData"))) {
            ts = rpkt.timestamp;
            // generate packet header and put data into buffer for FLV demuxer
            rt->flv_off = 0;
-           rt->flv_size = rpkt.data_size + 15;
+           rt->flv_size = rpkt.size + 15;
            rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);
            bytestream_put_byte(&p, rpkt.type);
-           bytestream_put_be24(&p, rpkt.data_size);
+           bytestream_put_be24(&p, rpkt.size);
            bytestream_put_be24(&p, ts);
            bytestream_put_byte(&p, ts >> 24);
            bytestream_put_be24(&p, 0);
-           bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
+           bytestream_put_buffer(&p, rpkt.data, rpkt.size);
            bytestream_put_be32(&p, 0);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
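The 15 bytes added to rpkt.size in the hunk above are the fixed FLV framing around the payload: an 11-byte tag header (TagType, 24-bit DataSize, 24-bit Timestamp, 8-bit TimestampExtended, 24-bit StreamID) written by the bytestream_put_* calls, plus the 4-byte PreviousTagSize field written by the final bytestream_put_be32(). A small sketch of that header layout, using a hypothetical helper name and raw byte stores instead of the bytestream API:

    #include <stdint.h>

    /* Hypothetical illustration of the 11-byte FLV tag header built above;
     * the caller must provide at least 11 writable bytes at p. */
    static uint8_t *put_flv_tag_header(uint8_t *p, uint8_t type,
                                       uint32_t size, uint32_t ts)
    {
        *p++ = type;                                      /* TagType            */
        *p++ = size >> 16; *p++ = size >> 8; *p++ = size; /* DataSize, BE24     */
        *p++ = ts >> 16;   *p++ = ts >> 8;   *p++ = ts;   /* Timestamp, BE24    */
        *p++ = ts >> 24;                                  /* TimestampExtended  */
        *p++ = 0; *p++ = 0; *p++ = 0;                     /* StreamID, always 0 */
        return p; /* payload follows, then a BE32 PreviousTagSize trailer */
    }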
@@ -2200,14 +2201,14 @@ static int get_packet(URLContext *s, int for_header)
         } else if (rpkt.type == RTMP_PT_METADATA) {
             // we got raw FLV data, make it available for FLV demuxer
             rt->flv_off = 0;
-            rt->flv_size = rpkt.data_size;
+            rt->flv_size = rpkt.size;
             rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
             /* rewrite timestamps */
             next = rpkt.data;
             ts = rpkt.timestamp;
-            while (next - rpkt.data < rpkt.data_size - 11) {
+            while (next - rpkt.data < rpkt.size - 11) {
                 next++;
-                data_size = bytestream_get_be24(&next);
+                size = bytestream_get_be24(&next);
                 p=next;
                 cts = bytestream_get_be24(&next);
                 cts |= bytestream_get_byte(&next) << 24;
@@ -2217,9 +2218,9 @@ static int get_packet(URLContext *s, int for_header)
                 pts = cts;
                 bytestream_put_be24(&p, ts);
                 bytestream_put_byte(&p, ts >> 24);
-                next += data_size + 3 + 4;
+                next += size + 3 + 4;
             }
-            memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
+            memcpy(rt->flv_data, rpkt.data, rpkt.size);
             ff_rtmp_packet_destroy(&rpkt);
             return 0;
         }
@@ -2234,7 +2235,7 @@ static int rtmp_close(URLContext *h)
     if (!rt->is_input) {
         rt->flv_data = NULL;
-        if (rt->out_pkt.data_size)
+        if (rt->out_pkt.size)
             ff_rtmp_packet_destroy(&rt->out_pkt);
         if (rt->state > STATE_FCPUBLISH)
             ret = gen_fcunpublish_stream(h, rt);
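On the tracked-method hunks above: the old code compared with memcmp() and a hard-coded length, which is a prefix match, so a longer name that merely starts with "connect" or "createStream" would also be accepted; strcmp() requires the full string to match. A standalone illustration (the method name used here is hypothetical):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *tracked_method = "createStream2"; /* hypothetical method name */

        /* Length-limited compare: reports a match because only the first 12 bytes are checked. */
        printf("memcmp prefix match: %d\n", !memcmp(tracked_method, "createStream", 12));
        /* Exact compare: correctly rejects the longer name. */
        printf("strcmp exact match:  %d\n", !strcmp(tracked_method, "createStream"));
        return 0;
    }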

View File

@@ -215,6 +215,8 @@ static int film_read_header(AVFormatContext *s)
         film->sample_table[i].sample_offset =
             data_offset + AV_RB32(&scratch[0]);
         film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
+        if (film->sample_table[i].sample_size > INT_MAX / 4)
+            return AVERROR_INVALIDDATA;
         if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
             film->sample_table[i].stream = film->audio_stream_index;
             film->sample_table[i].pts = audio_frame_counter;

View File

@@ -7,4 +7,3 @@
 0, 9, 9, 1, 460800, 0x4e091ee2
 0, 10, 10, 1, 460800, 0x2ea88828
 0, 11, 11, 1, 460800, 0x4b7f4df0
-0, 12, 12, 1, 460800, 0xa57f20d0