Merge remote-tracking branch 'qatar/master'

* qatar/master: (38 commits)
  v210enc: remove redundant check for pix_fmt
  wavpack: allow user to disable CRC checking
  v210enc: Use Bytestream2 functions
  cafdec: Check return value of avio_seek and avoid modifying state if it fails
  yop: Check return value of avio_seek and avoid modifying state if it fails
  tta: Check return value of avio_seek and avoid modifying state if it fails
  tmv: Check return value of avio_seek and avoid modifying state if it fails
  r3d: Check return value of avio_seek and avoid modifying state if it fails
  nsvdec: Check return value of avio_seek and avoid modifying state if it fails
  mpc8: Check return value of avio_seek and avoid modifying state if it fails
  jvdec: Check return value of avio_seek and avoid modifying state if it fails
  filmstripdec: Check return value of avio_seek and avoid modifying state if it fails
  ffmdec: Check return value of avio_seek and avoid modifying state if it fails
  dv: Check return value of avio_seek and avoid modifying state if it fails
  bink: Check return value of avio_seek and avoid modifying state if it fails
  Check AVCodec.pix_fmts in avcodec_open2()
  svq3: Prevent illegal reads while parsing extradata.
  remove ParseContext1
  vc1: use ff_parse_close
  mpegvideo parser: move specific fields into private context
  ...

Conflicts:
	libavcodec/4xm.c
	libavcodec/aacdec.c
	libavcodec/h264.c
	libavcodec/h264.h
	libavcodec/h264_cabac.c
	libavcodec/h264_cavlc.c
	libavcodec/mpeg4video_parser.c
	libavcodec/svq3.c
	libavcodec/v210enc.c
	libavformat/cafdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer, 2012-02-11 01:22:22 +01:00
30 changed files with 1177 additions and 925 deletions


@@ -1093,6 +1093,8 @@ static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
if (got_packet) {
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
if (pkt.duration > 0)
pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
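
The hunk above converts packet pts, dts and duration from the encoder time base into the output stream time base with av_rescale_q(). A minimal sketch of that conversion, with the time-base values chosen purely for illustration:

/* Illustrative only: rescale one second of audio from a 1/44100 encoder
 * time base into a 1/90000 stream time base. */
AVRational enc_tb = { 1, 44100 };
AVRational st_tb  = { 1, 90000 };
int64_t pts_enc   = 44100;                                /* 1 s in encoder ticks */
int64_t pts_st    = av_rescale_q(pts_enc, enc_tb, st_tb); /* == 90000 stream ticks */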


@@ -26,9 +26,9 @@
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
#include "bytestream.h"
//#undef NDEBUG
//#include <assert.h>
@@ -104,7 +104,8 @@ static const int8_t mv[256][2]={
{ -29, 18 }, { -16, -31 }, { -28, -22 }, { 21, -30 }, { -25, 28 }, { 26, -29 }, { 25, -32 }, { -32, -32 }
};
// this is simply the scaled down elementwise product of the standard jpeg quantizer table and the AAN premul table
/* This is simply the scaled down elementwise product of the standard JPEG
* quantizer table and the AAN premul table. */
static const uint8_t dequant_table[64] = {
16, 15, 13, 19, 24, 31, 28, 17,
17, 23, 25, 31, 36, 63, 45, 21,
@@ -152,7 +153,8 @@ typedef struct FourXContext{
#define MULTIPLY(var, const) (((var) * (const)) >> 16)
static void idct(DCTELEM block[64]){
static void idct(DCTELEM block[64])
{
int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int tmp10, tmp11, tmp12, tmp13;
int z5, z10, z11, z12, z13;
@@ -236,7 +238,8 @@ static void idct(DCTELEM block[64]){
}
}
static av_cold void init_vlcs(FourXContext *f){
static av_cold void init_vlcs(FourXContext *f)
{
static VLC_TYPE table[8][32][2];
int i;
@@ -249,7 +252,8 @@ static av_cold void init_vlcs(FourXContext *f){
}
}
static void init_mv(FourXContext *f){
static void init_mv(FourXContext *f)
{
int i;
for (i = 0; i < 256; i++) {
@@ -277,7 +281,9 @@ static void init_mv(FourXContext *f){
}
#endif
static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stride, int scale, unsigned dc){
static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w,
int h, int stride, int scale, unsigned dc)
{
int i;
dc *= 0x10001;
@@ -285,14 +291,16 @@ static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stri
case 0:
for (i = 0; i < h; i++) {
dst[0] = scale * src[0] + dc;
if(scale) src += stride;
if (scale)
src += stride;
dst += stride;
}
break;
case 1:
for (i = 0; i < h; i++) {
LE_CENTRIC_MUL(dst, src, scale, dc);
if(scale) src += stride;
if (scale)
src += stride;
dst += stride;
}
break;
@@ -300,7 +308,8 @@ static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stri
for (i = 0; i < h; i++) {
LE_CENTRIC_MUL(dst, src, scale, dc);
LE_CENTRIC_MUL(dst + 2, src + 2, scale, dc);
if(scale) src += stride;
if (scale)
src += stride;
dst += stride;
}
break;
@@ -310,18 +319,24 @@ static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stri
LE_CENTRIC_MUL(dst + 2, src + 2, scale, dc);
LE_CENTRIC_MUL(dst + 4, src + 4, scale, dc);
LE_CENTRIC_MUL(dst + 6, src + 6, scale, dc);
if(scale) src += stride;
if (scale)
src += stride;
dst += stride;
}
break;
default: assert(0);
default:
assert(0);
}
}
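
The dc *= 0x10001 above replicates a 16-bit DC offset into both halves of a 32-bit word, so the packed two-pixel operations (LE_CENTRIC_MUL) can add the same offset to both pixels at once. A small sketch of the trick, with an arbitrary value:

/* Not part of the commit, just the 0x10001 replication spelled out: */
uint16_t dc16 = 0x1234;
uint32_t dc32 = (uint32_t)dc16 * 0x10001u; /* 0x12341234: dc16 in both 16-bit halves */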
static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride){
static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
int log2w, int log2h, int stride)
{
const int index = size2index[log2h][log2w];
const int h = 1 << log2h;
int code= get_vlc2(&f->gb, block_type_vlc[1-(f->version>1)][index].table, BLOCK_TYPE_VLC_BITS, 1);
int code = get_vlc2(&f->gb,
block_type_vlc[1 - (f->version > 1)][index].table,
BLOCK_TYPE_VLC_BITS, 1);
uint16_t *start = (uint16_t *)f->last_picture.data[0];
uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
@@ -332,7 +347,7 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return;
}
src += f->mv[ *f->g.buffer++ ];
src += f->mv[bytestream2_get_byte(&f->g)];
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return;
@@ -341,11 +356,13 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
} else if (code == 1) {
log2h--;
decode_p_block(f, dst, src, log2w, log2h, stride);
decode_p_block(f, dst + (stride<<log2h), src + (stride<<log2h), log2w, log2h, stride);
decode_p_block(f, dst + (stride << log2h),
src + (stride << log2h), log2w, log2h, stride);
} else if (code == 2) {
log2w--;
decode_p_block(f, dst , src, log2w, log2h, stride);
decode_p_block(f, dst + (1<<log2w), src + (1<<log2w), log2w, log2h, stride);
decode_p_block(f, dst + (1 << log2w),
src + (1 << log2w), log2w, log2h, stride);
} else if (code == 3 && f->version < 2) {
mcdc(dst, src, log2w, h, stride, 1, 0);
} else if (code == 4) {
@@ -353,7 +370,7 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return;
}
src += f->mv[ *f->g.buffer++ ];
src += f->mv[bytestream2_get_byte(&f->g)];
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return;
@@ -384,14 +401,16 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
}
}
static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
{
int x, y;
const int width = f->avctx->width;
const int height = f->avctx->height;
uint16_t *src = (uint16_t *)f->last_picture.data[0];
uint16_t *dst = (uint16_t *)f->current_picture.data[0];
const int stride = f->current_picture.linesize[0] >> 1;
unsigned int bitstream_size, bytestream_size, wordstream_size, extra, bytestream_offset, wordstream_offset;
unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
bytestream_offset, wordstream_offset;
if (f->version > 1) {
extra = 20;
@@ -416,24 +435,28 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
return -1;
}
av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, bitstream_size + FF_INPUT_BUFFER_PADDING_SIZE);
av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size,
bitstream_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!f->bitstream_buffer)
return AVERROR(ENOMEM);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra), bitstream_size/4);
memset((uint8_t*)f->bitstream_buffer + bitstream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra),
bitstream_size / 4);
memset((uint8_t*)f->bitstream_buffer + bitstream_size,
0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&f->gb, f->bitstream_buffer, 8 * bitstream_size);
wordstream_offset = extra + bitstream_size;
bytestream_offset = extra + bitstream_size + wordstream_size;
bytestream2_init(&f->g2, buf + wordstream_offset, length - wordstream_offset);
bytestream2_init(&f->g, buf + bytestream_offset, length - bytestream_offset);
bytestream2_init(&f->g2, buf + wordstream_offset,
length - wordstream_offset);
bytestream2_init(&f->g, buf + bytestream_offset,
length - bytestream_offset);
init_mv(f);
for (y = 0; y < height; y += 8) {
for(x=0; x<width; x+=8){
for (x = 0; x < width; x += 8)
decode_p_block(f, dst + x, src + x, 3, 3, stride);
}
src += 8 * stride;
dst += 8 * stride;
}
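
As the offsets above imply, a P-frame chunk is laid out as an extra-byte header followed by the bit, word and byte streams back to back. A hedged sketch of an additional sanity check one could apply to those sizes before deriving the offsets (not part of the commit; names mirror decode_p_frame()):

/* Illustrative only: verify the three sub-streams actually fit in the packet. */
if ((uint64_t)extra + bitstream_size + wordstream_size + bytestream_size > length)
    return AVERROR_INVALIDDATA;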
@@ -445,7 +468,8 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
* decode block and dequantize.
* Note this is almost identical to MJPEG.
*/
static int decode_i_block(FourXContext *f, DCTELEM *block){
static int decode_i_block(FourXContext *f, DCTELEM *block)
{
int code, i, j, level, val;
if (get_bits_left(&f->gb) < 2){
@@ -455,16 +479,14 @@ static int decode_i_block(FourXContext *f, DCTELEM *block){
/* DC coef */
val = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3);
if (val>>4){
if (val >> 4)
av_log(f->avctx, AV_LOG_ERROR, "error dc run != 0\n");
}
if (val)
val = get_xbits(&f->gb, val);
val = val * dequant_table[0] + f->last_dc;
f->last_dc =
block[0] = val;
f->last_dc = block[0] = val;
/* AC coefs */
i = 1;
for (;;) {
@@ -494,7 +516,8 @@ static int decode_i_block(FourXContext *f, DCTELEM *block){
return 0;
}
static inline void idct_put(FourXContext *f, int x, int y){
static inline void idct_put(FourXContext *f, int x, int y)
{
DCTELEM (*block)[64] = f->block;
int stride = f->current_picture.linesize[0] >> 1;
int i;
@@ -506,17 +529,18 @@ static inline void idct_put(FourXContext *f, int x, int y){
}
if (!(f->avctx->flags & CODEC_FLAG_GRAY)) {
for(i=4; i<6; i++) idct(block[i]);
for (i = 4; i < 6; i++)
idct(block[i]);
}
/* Note transform is:
y= ( 1b + 4g + 2r)/14
cb=( 3b - 2g - 1r)/14
cr=(-1b - 4g + 5r)/14
*/
* y = ( 1b + 4g + 2r) / 14
* cb = ( 3b - 2g - 1r) / 14
* cr = (-1b - 4g + 5r) / 14 */
for (y = 0; y < 8; y++) {
for (x = 0; x < 8; x++) {
DCTELEM *temp= block[(x>>2) + 2*(y>>2)] + 2*(x&3) + 2*8*(y&3); //FIXME optimize
DCTELEM *temp = block[(x >> 2) + 2 * (y >> 2)] +
2 * (x & 3) + 2 * 8 * (y & 3); // FIXME optimize
int cb = block[4][x + 8 * y];
int cr = block[5][x + 8 * y];
int cg = (cb + cr) >> 1;
@@ -538,20 +562,22 @@ cr=(-1b - 4g + 5r)/14
}
}
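
The comment block above documents the colour transform this decoder inverts. A forward version of it, written out as a hypothetical helper purely to make the constants concrete:

/* Hypothetical helper, not part of the codec: forward transform from the
 * comment above (b, g, r are the 5-bit channel values). */
static void rgb_to_ycc_4xm(int b, int g, int r, int *y, int *cb, int *cr)
{
    *y  = ( 1 * b + 4 * g + 2 * r) / 14;
    *cb = ( 3 * b - 2 * g - 1 * r) / 14;
    *cr = (-1 * b - 4 * g + 5 * r) / 14;
}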
static int decode_i_mb(FourXContext *f){
static int decode_i_mb(FourXContext *f)
{
int i;
f->dsp.clear_blocks(f->block[0]);
for(i=0; i<6; i++){
for (i = 0; i < 6; i++)
if (decode_i_block(f, f->block[i]) < 0)
return -1;
}
return 0;
}
static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const buf, int buf_size){
static const uint8_t *read_huffman_tables(FourXContext *f,
const uint8_t * const buf, int buf_size)
{
int frequency[512];
uint8_t flag[512];
int up[512];
@@ -572,34 +598,40 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
if (start <= end && ptr_end - ptr < end - start + 1 + 1)
return NULL;
for(i=start; i<=end; i++){
for (i = start; i <= end; i++)
frequency[i] = *ptr++;
}
start = *ptr++;
if(start==0) break;
if (start == 0)
break;
end = *ptr++;
}
frequency[256] = 1;
while((ptr - buf)&3) ptr++; // 4byte align
while ((ptr - buf) & 3)
ptr++; // 4byte align
for (j = 257; j < 512; j++) {
int min_freq[2] = { 256 * 256, 256 * 256 };
int smallest[2] = { 0, 0 };
int i;
for (i = 0; i < j; i++) {
if(frequency[i] == 0) continue;
if (frequency[i] == 0)
continue;
if (frequency[i] < min_freq[1]) {
if (frequency[i] < min_freq[0]) {
min_freq[1]= min_freq[0]; smallest[1]= smallest[0];
min_freq[0]= frequency[i];smallest[0]= i;
min_freq[1] = min_freq[0];
smallest[1] = smallest[0];
min_freq[0] = frequency[i];
smallest[0] = i;
} else {
min_freq[1]= frequency[i];smallest[1]= i;
min_freq[1] = frequency[i];
smallest[1] = i;
}
}
}
if(min_freq[1] == 256*256) break;
if (min_freq[1] == 256 * 256)
break;
frequency[j] = min_freq[0] + min_freq[1];
flag[smallest[0]] = 0;
@@ -610,36 +642,38 @@ static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const
}
for (j = 0; j < 257; j++) {
int node;
int len=0;
int bits=0;
int node, len = 0, bits = 0;
for (node = j; up[node] != -1; node = up[node]) {
bits += flag[node] << len;
len++;
if(len > 31) av_log(f->avctx, AV_LOG_ERROR, "vlc length overflow\n"); //can this happen at all ?
if (len > 31)
// can this happen at all ?
av_log(f->avctx, AV_LOG_ERROR,
"vlc length overflow\n");
}
bits_tab[j] = bits;
len_tab[j] = len;
}
if (init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257,
len_tab , 1, 1,
if (init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257, len_tab, 1, 1,
bits_tab, 4, 4, 0))
return NULL;
return ptr;
}
static int mix(int c0, int c1){
static int mix(int c0, int c1)
{
int blue = 2 * (c0 & 0x001F) + (c1 & 0x001F);
int green = (2 * (c0 & 0x03E0) + (c1 & 0x03E0)) >> 5;
int red = 2 * (c0 >> 10) + (c1 >> 10);
return red / 3 * 1024 + green / 3 * 32 + blue / 3;
}
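
mix() builds the two intermediate entries of the four-colour block palette used below (color[2] and color[3]) as 2:1 and 1:2 blends of the two endpoint colours, per 5-bit channel. A worked example, with input values chosen only for illustration:

/* Illustration: blending all-ones in the high 5-bit channel (0x7c00) with
 * all-ones in the low channel (0x001f) gives red = 62/3 = 20, green = 0,
 * blue = 31/3 = 10, i.e. 20 * 1024 + 10 = 0x500a. */
int c2 = mix(0x7c00, 0x001f); /* == 0x500a */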
static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
{
int x, y, x2, y2;
const int width = f->avctx->width;
const int height = f->avctx->height;
@@ -665,8 +699,10 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
color[0] = bytestream2_get_le16u(&g3);
color[1] = bytestream2_get_le16u(&g3);
if(color[0]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 1\n");
if(color[1]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 2\n");
if (color[0] & 0x8000)
av_log(NULL, AV_LOG_ERROR, "unk bit 1\n");
if (color[1] & 0x8000)
av_log(NULL, AV_LOG_ERROR, "unk bit 2\n");
color[2] = mix(color[0], color[1]);
color[3] = mix(color[1], color[0]);
@@ -686,7 +722,8 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
return 0;
}
static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
{
int x, y;
const int width = f->avctx->width;
const int height = f->avctx->height;
@@ -702,9 +739,11 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
prestream_size = 4 * AV_RL32(buf + bitstream_size + 4);
prestream = buf + bitstream_size + 12;
if (prestream_size > (1<<26) ||
prestream_size != length - (bitstream_size + 12)){
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length);
if (prestream_size + bitstream_size + 12 != length
|| bitstream_size > (1 << 26)
|| prestream_size > (1 << 26)) {
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n",
prestream_size, bitstream_size, length);
return -1;
}
@@ -716,11 +755,14 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
prestream_size = length + buf - prestream;
av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, prestream_size + FF_INPUT_BUFFER_PADDING_SIZE);
av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size,
prestream_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!f->bitstream_buffer)
return AVERROR(ENOMEM);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream, prestream_size/4);
memset((uint8_t*)f->bitstream_buffer + prestream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream,
prestream_size / 4);
memset((uint8_t*)f->bitstream_buffer + prestream_size,
0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&f->pre_gb, f->bitstream_buffer, 8 * prestream_size);
f->last_dc = 0 * 128 * 8 * 8;
@@ -740,9 +782,8 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
return 0;
}
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
static int decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -754,9 +795,9 @@ static int decode_frame(AVCodecContext *avctx,
if (buf_size < 12)
return AVERROR_INVALIDDATA;
frame_4cc = AV_RL32(buf);
if(buf_size != AV_RL32(buf+4)+8 || buf_size < 20){
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n", buf_size, AV_RL32(buf+4));
}
if (buf_size != AV_RL32(buf + 4) + 8 || buf_size < 20)
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n",
buf_size, AV_RL32(buf + 4));
if (frame_4cc == AV_RL32("cfrm")) {
int free_index = -1;
@@ -770,14 +811,16 @@ static int decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA;
}
for(i=0; i<CFRAME_BUFFER_COUNT; i++){
for (i = 0; i < CFRAME_BUFFER_COUNT; i++)
if (f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number)
av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n", f->cfrm[i].id);
}
av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n",
f->cfrm[i].id);
for (i = 0; i < CFRAME_BUFFER_COUNT; i++) {
if(f->cfrm[i].id == id) break;
if(f->cfrm[i].size == 0 ) free_index= i;
if (f->cfrm[i].id == id)
break;
if (f->cfrm[i].size == 0)
free_index = i;
}
if (i >= CFRAME_BUFFER_COUNT) {
@@ -788,8 +831,11 @@ static int decode_frame(AVCodecContext *avctx,
if (data_size > UINT_MAX - cfrm->size - FF_INPUT_BUFFER_PADDING_SIZE)
return AVERROR_INVALIDDATA;
cfrm->data= av_fast_realloc(cfrm->data, &cfrm->allocated_size, cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
if(!cfrm->data){ //explicit check needed as memcpy below might not catch a NULL
cfrm->data = av_fast_realloc(cfrm->data, &cfrm->allocated_size,
cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
// explicit check needed as memcpy below might not catch a NULL
if (!cfrm->data) {
av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
return -1;
}
@@ -801,9 +847,9 @@ static int decode_frame(AVCodecContext *avctx,
buf = cfrm->data;
frame_size = cfrm->size;
if(id != avctx->frame_number){
av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %d\n", id, avctx->frame_number);
}
if (id != avctx->frame_number)
av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %d\n",
id, avctx->frame_number);
cfrm->size = cfrm->id = 0;
frame_4cc = AV_RL32("pfrm");
@@ -821,7 +867,8 @@ static int decode_frame(AVCodecContext *avctx,
p = &f->current_picture;
avctx->coded_frame = p;
avctx->flags |= CODEC_FLAG_EMU_EDGE; // alternatively we would have to use our own buffer management
// alternatively we would have to use our own buffer management
avctx->flags |= CODEC_FLAG_EMU_EDGE;
p->reference= 3;
if (avctx->reget_buffer(avctx, p) < 0) {
@@ -856,9 +903,11 @@ static int decode_frame(AVCodecContext *avctx,
return -1;
}
} else if (frame_4cc == AV_RL32("snd_")) {
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", buf_size);
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n",
buf_size);
} else {
av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size);
av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n",
buf_size);
}
p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
@@ -872,7 +921,8 @@ static int decode_frame(AVCodecContext *avctx,
}
static av_cold void common_init(AVCodecContext *avctx){
static av_cold void common_init(AVCodecContext *avctx)
{
FourXContext * const f = avctx->priv_data;
dsputil_init(&f->dsp, avctx);
@@ -880,7 +930,8 @@ static av_cold void common_init(AVCodecContext *avctx){
f->avctx = avctx;
}
static av_cold int decode_init(AVCodecContext *avctx){
static av_cold int decode_init(AVCodecContext *avctx)
{
FourXContext * const f = avctx->priv_data;
if (avctx->extradata_size != 4 || !avctx->extradata) {
@@ -898,14 +949,17 @@ static av_cold int decode_init(AVCodecContext *avctx){
common_init(avctx);
init_vlcs(f);
if(f->version>2) avctx->pix_fmt= PIX_FMT_RGB565;
else avctx->pix_fmt= PIX_FMT_BGR555;
if (f->version > 2)
avctx->pix_fmt = PIX_FMT_RGB565;
else
avctx->pix_fmt = PIX_FMT_BGR555;
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx){
static av_cold int decode_end(AVCodecContext *avctx)
{
FourXContext * const f = avctx->priv_data;
int i;


@@ -263,9 +263,8 @@ typedef struct {
* @name Channel element related data
* @{
*/
enum ChannelPosition che_pos[4][MAX_ELEM_ID]; /**< channel element channel mapping with the
* first index as the first 4 raw data block types
*/
uint8_t layout_map[MAX_ELEM_ID*4][3];
int layout_map_tags;
ChannelElement *che[4][MAX_ELEM_ID];
ChannelElement *tag_che_map[4][MAX_ELEM_ID];
int tags_mapped;


@@ -163,15 +163,14 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
}
}
static int count_channels(enum ChannelPosition che_pos[4][MAX_ELEM_ID])
static int count_channels(uint8_t (*layout)[3], int tags)
{
int i, type, sum = 0;
for (i = 0; i < MAX_ELEM_ID; i++) {
for (type = 0; type < 4; type++) {
sum += (1 + (type == TYPE_CPE)) *
(che_pos[type][i] != AAC_CHANNEL_OFF &&
che_pos[type][i] != AAC_CHANNEL_CC);
}
int i, sum = 0;
for (i = 0; i < tags; i++) {
int syn_ele = layout[i][0];
int pos = layout[i][2];
sum += (1 + (syn_ele == TYPE_CPE)) *
(pos != AAC_CHANNEL_OFF && pos != AAC_CHANNEL_CC);
}
return sum;
}
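
With the new layout_map representation, each entry contributes one channel (two for a CPE) unless its position is OFF or CC. For the default 6-channel configuration (SCE + CPE + CPE + LFE, see the table further down) that is 1 + 2 + 2 + 1 = 6. A schematic check, assuming an initialised avctx:

/* Schematic only, not part of the commit. */
uint8_t map[MAX_ELEM_ID * 4][3];
int tags = 0;
if (!set_default_channel_config(avctx, map, &tags, 6))
    av_log(avctx, AV_LOG_DEBUG, "channels: %d\n", count_channels(map, tags)); /* prints 6 */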
@@ -213,56 +212,228 @@ static av_cold int che_configure(AACContext *ac,
return 0;
}
struct elem_to_channel {
uint64_t av_position;
uint8_t syn_ele;
uint8_t elem_id;
uint8_t aac_position;
};
static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
uint8_t (*layout_map)[3], int offset, int tags, uint64_t left,
uint64_t right, int pos)
{
if (layout_map[offset][0] == TYPE_CPE) {
e2c_vec[offset] = (struct elem_to_channel) {
.av_position = left | right, .syn_ele = TYPE_CPE,
.elem_id = layout_map[offset ][1], .aac_position = pos };
return 1;
} else {
e2c_vec[offset] = (struct elem_to_channel) {
.av_position = left, .syn_ele = TYPE_SCE,
.elem_id = layout_map[offset ][1], .aac_position = pos };
e2c_vec[offset + 1] = (struct elem_to_channel) {
.av_position = right, .syn_ele = TYPE_SCE,
.elem_id = layout_map[offset + 1][1], .aac_position = pos };
return 2;
}
}
static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) {
int num_pos_channels = 0;
int first_cpe = 0;
int sce_parity = 0;
int i;
for (i = *current; i < tags; i++) {
if (layout_map[i][2] != pos)
break;
if (layout_map[i][0] == TYPE_CPE) {
if (sce_parity) {
if (pos == AAC_CHANNEL_FRONT || !first_cpe) {
sce_parity = 0;
} else {
return -1;
}
}
num_pos_channels += 2;
first_cpe = 1;
} else {
num_pos_channels++;
sce_parity ^= 1;
}
}
if (sce_parity &&
((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE))
return -1;
*current = i;
return num_pos_channels;
}
static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
{
int i, n, total_non_cc_elements;
struct elem_to_channel e2c_vec[MAX_ELEM_ID] = {{ 0 }};
int num_front_channels, num_side_channels, num_back_channels;
uint64_t layout;
i = 0;
num_front_channels =
count_paired_channels(layout_map, tags, AAC_CHANNEL_FRONT, &i);
if (num_front_channels < 0)
return 0;
num_side_channels =
count_paired_channels(layout_map, tags, AAC_CHANNEL_SIDE, &i);
if (num_side_channels < 0)
return 0;
num_back_channels =
count_paired_channels(layout_map, tags, AAC_CHANNEL_BACK, &i);
if (num_back_channels < 0)
return 0;
i = 0;
if (num_front_channels & 1) {
e2c_vec[i] = (struct elem_to_channel) {
.av_position = AV_CH_FRONT_CENTER, .syn_ele = TYPE_SCE,
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_FRONT };
i++;
num_front_channels--;
}
if (num_front_channels >= 4) {
i += assign_pair(e2c_vec, layout_map, i, tags,
AV_CH_FRONT_LEFT_OF_CENTER,
AV_CH_FRONT_RIGHT_OF_CENTER,
AAC_CHANNEL_FRONT);
num_front_channels -= 2;
}
if (num_front_channels >= 2) {
i += assign_pair(e2c_vec, layout_map, i, tags,
AV_CH_FRONT_LEFT,
AV_CH_FRONT_RIGHT,
AAC_CHANNEL_FRONT);
num_front_channels -= 2;
}
while (num_front_channels >= 2) {
i += assign_pair(e2c_vec, layout_map, i, tags,
UINT64_MAX,
UINT64_MAX,
AAC_CHANNEL_FRONT);
num_front_channels -= 2;
}
if (num_side_channels >= 2) {
i += assign_pair(e2c_vec, layout_map, i, tags,
AV_CH_SIDE_LEFT,
AV_CH_SIDE_RIGHT,
AAC_CHANNEL_FRONT);
num_side_channels -= 2;
}
while (num_side_channels >= 2) {
i += assign_pair(e2c_vec, layout_map, i, tags,
UINT64_MAX,
UINT64_MAX,
AAC_CHANNEL_SIDE);
num_side_channels -= 2;
}
while (num_back_channels >= 4) {
i += assign_pair(e2c_vec, layout_map, i, tags,
UINT64_MAX,
UINT64_MAX,
AAC_CHANNEL_BACK);
num_back_channels -= 2;
}
if (num_back_channels >= 2) {
i += assign_pair(e2c_vec, layout_map, i, tags,
AV_CH_BACK_LEFT,
AV_CH_BACK_RIGHT,
AAC_CHANNEL_BACK);
num_back_channels -= 2;
}
if (num_back_channels) {
e2c_vec[i] = (struct elem_to_channel) {
.av_position = AV_CH_BACK_CENTER, .syn_ele = TYPE_SCE,
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_BACK };
i++;
num_back_channels--;
}
if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
e2c_vec[i] = (struct elem_to_channel) {
.av_position = AV_CH_LOW_FREQUENCY, .syn_ele = TYPE_LFE,
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
i++;
}
while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
e2c_vec[i] = (struct elem_to_channel) {
.av_position = UINT64_MAX, .syn_ele = TYPE_LFE,
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
i++;
}
// Must choose a stable sort
total_non_cc_elements = n = i;
do {
int next_n = 0;
for (i = 1; i < n; i++) {
if (e2c_vec[i-1].av_position > e2c_vec[i].av_position) {
FFSWAP(struct elem_to_channel, e2c_vec[i-1], e2c_vec[i]);
next_n = i;
}
}
n = next_n;
} while (n > 0);
layout = 0;
for (i = 0; i < total_non_cc_elements; i++) {
layout_map[i][0] = e2c_vec[i].syn_ele;
layout_map[i][1] = e2c_vec[i].elem_id;
layout_map[i][2] = e2c_vec[i].aac_position;
if (e2c_vec[i].av_position != UINT64_MAX) {
layout |= e2c_vec[i].av_position;
}
}
return layout;
}
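
For the same default 6-channel configuration (SCE front, CPE front, CPE back, LFE), the accumulated mask works out to the familiar back-speaker 5.1 layout. A hedged illustration using the libavutil channel flags:

/* Illustration only: expected result for the config-6 layout map. */
uint64_t expected = AV_CH_FRONT_CENTER | AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT |
                    AV_CH_BACK_LEFT    | AV_CH_BACK_RIGHT | AV_CH_LOW_FREQUENCY;
/* expected == AV_CH_LAYOUT_5POINT1_BACK */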
/**
* Configure output channel order based on the current program configuration element.
*
* @param che_pos current channel position configuration
* @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int output_configure(AACContext *ac,
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
int channel_config, enum OCStatus oc_type)
{
AVCodecContext *avctx = ac->avctx;
int i, type, channels = 0, ret;
int i, channels = 0, ret;
uint64_t layout = 0;
if (new_che_pos)
memcpy(ac->che_pos, new_che_pos, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
if (channel_config) {
for (i = 0; i < tags_per_config[channel_config]; i++) {
int id = aac_channel_layout_map[channel_config - 1][i][1];
type = aac_channel_layout_map[channel_config - 1][i][0];
if ((ret = che_configure(ac, ac->che_pos[type][id],
type, id, &channels)))
return ret;
if (ac->layout_map != layout_map) {
memcpy(ac->layout_map, layout_map, tags * sizeof(layout_map[0]));
ac->layout_map_tags = tags;
}
memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
avctx->channel_layout = aac_channel_layout[channel_config - 1];
} else {
/* Allocate or free elements depending on if they are in the
* current program configuration.
*
* Set up default 1:1 output mapping.
*/
for (i = 0; i < MAX_ELEM_ID; i++) {
for (type = 0; type < 4; type++) {
if ((ret = che_configure(ac, ac->che_pos[type][i],
type, i, &channels)))
// Try to sniff a reasonable channel order, otherwise output the
// channels in the order the PCE declared them.
if (avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE)
layout = sniff_channel_order(layout_map, tags);
for (i = 0; i < tags; i++) {
int type = layout_map[i][0];
int id = layout_map[i][1];
int position = layout_map[i][2];
// Allocate or free elements depending on if they are in the
// current program configuration.
ret = che_configure(ac, position, type, id, &channels);
if (ret < 0)
return ret;
}
}
memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
}
if (layout) avctx->channel_layout = layout;
avctx->channels = channels;
ac->output_configured = oc_type;
return 0;
@@ -292,30 +463,45 @@ static void flush(AVCodecContext *avctx)
* @param sce_map mono (Single Channel Element) map
* @param type speaker type/position for these channels
*/
static void decode_channel_map(enum ChannelPosition *cpe_map,
enum ChannelPosition *sce_map,
static void decode_channel_map(uint8_t layout_map[][3],
enum ChannelPosition type,
GetBitContext *gb, int n)
{
while (n--) {
enum ChannelPosition *map = cpe_map && get_bits1(gb) ? cpe_map : sce_map; // stereo or mono map
map[get_bits(gb, 4)] = type;
enum RawDataBlockType syn_ele;
switch (type) {
case AAC_CHANNEL_FRONT:
case AAC_CHANNEL_BACK:
case AAC_CHANNEL_SIDE:
syn_ele = get_bits1(gb);
break;
case AAC_CHANNEL_CC:
skip_bits1(gb);
syn_ele = TYPE_CCE;
break;
case AAC_CHANNEL_LFE:
syn_ele = TYPE_LFE;
break;
}
layout_map[0][0] = syn_ele;
layout_map[0][1] = get_bits(gb, 4);
layout_map[0][2] = type;
layout_map++;
}
}
/**
* Decode program configuration element; reference: table 4.2.
*
* @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
uint8_t (*layout_map)[3],
GetBitContext *gb)
{
int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
int comment_len;
int tags;
skip_bits(gb, 2); // object_type
@@ -342,14 +528,19 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
av_log(avctx, AV_LOG_ERROR, overread_err);
return -1;
}
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front);
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE, gb, num_side );
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK, gb, num_back );
decode_channel_map(NULL, new_che_pos[TYPE_LFE], AAC_CHANNEL_LFE, gb, num_lfe );
decode_channel_map(layout_map , AAC_CHANNEL_FRONT, gb, num_front);
tags = num_front;
decode_channel_map(layout_map + tags, AAC_CHANNEL_SIDE, gb, num_side);
tags += num_side;
decode_channel_map(layout_map + tags, AAC_CHANNEL_BACK, gb, num_back);
tags += num_back;
decode_channel_map(layout_map + tags, AAC_CHANNEL_LFE, gb, num_lfe);
tags += num_lfe;
skip_bits_long(gb, 4 * num_assoc_data);
decode_channel_map(new_che_pos[TYPE_CCE], new_che_pos[TYPE_CCE], AAC_CHANNEL_CC, gb, num_cc );
decode_channel_map(layout_map + tags, AAC_CHANNEL_CC, gb, num_cc);
tags += num_cc;
align_get_bits(gb);
@@ -360,19 +551,18 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
return -1;
}
skip_bits_long(gb, comment_len);
return 0;
return tags;
}
/**
* Set up channel positions based on a default channel configuration
* as specified in table 1.17.
*
* @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int set_default_channel_config(AVCodecContext *avctx,
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
uint8_t (*layout_map)[3],
int *tags,
int channel_config)
{
if (channel_config < 1 || channel_config > 7) {
@@ -380,32 +570,8 @@ static av_cold int set_default_channel_config(AVCodecContext *avctx,
channel_config);
return -1;
}
/* default channel configurations:
*
* 1ch : front center (mono)
* 2ch : L + R (stereo)
* 3ch : front center + L + R
* 4ch : front center + L + R + back center
* 5ch : front center + L + R + back stereo
* 6ch : front center + L + R + back stereo + LFE
* 7ch : front center + L + R + outer front left + outer front right + back stereo + LFE
*/
if (channel_config != 2)
new_che_pos[TYPE_SCE][0] = AAC_CHANNEL_FRONT; // front center (or mono)
if (channel_config > 1)
new_che_pos[TYPE_CPE][0] = AAC_CHANNEL_FRONT; // L + R (or stereo)
if (channel_config == 4)
new_che_pos[TYPE_SCE][1] = AAC_CHANNEL_BACK; // back center
if (channel_config > 4)
new_che_pos[TYPE_CPE][(channel_config == 7) + 1]
= AAC_CHANNEL_BACK; // back stereo
if (channel_config > 5)
new_che_pos[TYPE_LFE][0] = AAC_CHANNEL_LFE; // LFE
if (channel_config == 7)
new_che_pos[TYPE_CPE][1] = AAC_CHANNEL_FRONT; // outer front left + outer front right
*tags = tags_per_config[channel_config];
memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
return 0;
}
@@ -422,8 +588,9 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
MPEG4AudioConfig *m4ac,
int channel_config)
{
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
int extension_flag, ret;
uint8_t layout_map[MAX_ELEM_ID*4][3];
int tags = 0;
if (get_bits1(gb)) { // frameLengthFlag
av_log_missing_feature(avctx, "960/120 MDCT window is", 1);
@@ -438,22 +605,23 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
m4ac->object_type == AOT_ER_AAC_SCALABLE)
skip_bits(gb, 3); // layerNr
memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
if (channel_config == 0) {
skip_bits(gb, 4); // element_instance_tag
if ((ret = decode_pce(avctx, m4ac, new_che_pos, gb)))
return ret;
tags = decode_pce(avctx, m4ac, layout_map, gb);
if (tags < 0)
return tags;
} else {
if ((ret = set_default_channel_config(avctx, new_che_pos, channel_config)))
if ((ret = set_default_channel_config(avctx, layout_map, &tags, channel_config)))
return ret;
}
if (count_channels(new_che_pos) > 1) {
if (count_channels(layout_map, tags) > 1) {
m4ac->ps = 0;
} else if (m4ac->sbr == 1 && m4ac->ps == -1)
m4ac->ps = 1;
if (ac && (ret = output_configure(ac, new_che_pos, channel_config, OC_GLOBAL_HDR)))
if (ac && (ret = output_configure(ac, layout_map, tags,
channel_config, OC_GLOBAL_HDR)))
return ret;
if (extension_flag) {
@@ -607,7 +775,8 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
return -1;
} else {
int sr, i;
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
uint8_t layout_map[MAX_ELEM_ID*4][3];
int layout_map_tags;
sr = sample_rate_idx(avctx->sample_rate);
ac->m4ac.sampling_index = sr;
@@ -624,9 +793,11 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ac->m4ac.chan_config = i;
if (ac->m4ac.chan_config) {
int ret = set_default_channel_config(avctx, new_che_pos, ac->m4ac.chan_config);
int ret = set_default_channel_config(avctx, layout_map,
&layout_map_tags, ac->m4ac.chan_config);
if (!ret)
output_configure(ac, new_che_pos, ac->m4ac.chan_config, OC_GLOBAL_HDR);
output_configure(ac, layout_map, layout_map_tags,
ac->m4ac.chan_config, OC_GLOBAL_HDR);
else if (avctx->err_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
}
@@ -1730,7 +1901,8 @@ static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
} else if (ac->m4ac.ps == -1 && ac->output_configured < OC_LOCKED && ac->avctx->channels == 1) {
ac->m4ac.sbr = 1;
ac->m4ac.ps = 1;
output_configure(ac, NULL, ac->m4ac.chan_config, ac->output_configured);
output_configure(ac, ac->layout_map, ac->layout_map_tags,
ac->m4ac.chan_config, ac->output_configured);
} else {
ac->m4ac.sbr = 1;
}
@@ -2104,16 +2276,18 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
{
int size;
AACADTSHeaderInfo hdr_info;
uint8_t layout_map[MAX_ELEM_ID*4][3];
int layout_map_tags;
size = avpriv_aac_parse_header(gb, &hdr_info);
if (size > 0) {
if (hdr_info.chan_config) {
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
ac->m4ac.chan_config = hdr_info.chan_config;
if (set_default_channel_config(ac->avctx, new_che_pos, hdr_info.chan_config))
if (set_default_channel_config(ac->avctx, layout_map,
&layout_map_tags, hdr_info.chan_config))
return -7;
if (output_configure(ac, new_che_pos, hdr_info.chan_config,
if (output_configure(ac, layout_map, layout_map_tags,
hdr_info.chan_config,
FFMAX(ac->output_configured, OC_TRIAL_FRAME)))
return -7;
} else if (ac->output_configured != OC_LOCKED) {
@@ -2167,15 +2341,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
elem_id = get_bits(gb, 4);
if (elem_type < TYPE_DSE) {
if (!ac->tags_mapped && elem_type == TYPE_CPE && ac->m4ac.chan_config==1) {
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID]= {0};
ac->m4ac.chan_config=2;
if (set_default_channel_config(ac->avctx, new_che_pos, 2)<0)
return -1;
if (output_configure(ac, new_che_pos, 2, OC_TRIAL_FRAME)<0)
return -1;
}
if (!(che=get_che(ac, elem_type, elem_id))) {
av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
elem_type, elem_id);
@@ -2210,14 +2375,17 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
break;
case TYPE_PCE: {
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
if ((err = decode_pce(avctx, &ac->m4ac, new_che_pos, gb)))
uint8_t layout_map[MAX_ELEM_ID*4][3];
int tags;
tags = decode_pce(avctx, &ac->m4ac, layout_map, gb);
if (tags < 0) {
err = tags;
break;
}
if (ac->output_configured > OC_TRIAL_PCE)
av_log(avctx, AV_LOG_INFO,
"Evaluating a further program_config_element.\n");
err = output_configure(ac, new_che_pos, 0, OC_TRIAL_PCE);
err = output_configure(ac, layout_map, tags, 0, OC_TRIAL_PCE);
if (!err)
ac->m4ac.chan_config = 0;
break;


@@ -80,14 +80,14 @@ static const float * const tns_tmp2_map[4] = {
static const int8_t tags_per_config[16] = { 0, 1, 1, 2, 3, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0 };
static const uint8_t aac_channel_layout_map[7][5][2] = {
{ { TYPE_SCE, 0 }, },
{ { TYPE_CPE, 0 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_SCE, 1 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_CPE, 1 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 1 }, },
{ { TYPE_CPE, 1 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 2 }, { TYPE_CPE, 0 }, },
static const uint8_t aac_channel_layout_map[7][5][3] = {
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, },
{ { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_SCE, 1, AAC_CHANNEL_BACK }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_BACK }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_BACK }, { TYPE_LFE, 0, AAC_CHANNEL_LFE }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_FRONT }, { TYPE_CPE, 2, AAC_CHANNEL_BACK }, { TYPE_LFE, 0, AAC_CHANNEL_LFE }, },
};
static const uint64_t aac_channel_layout[8] = {


@@ -39,7 +39,7 @@ AVBitStreamFilterContext *av_bitstream_filter_init(const char *name){
if(!strcmp(name, bsf->name)){
AVBitStreamFilterContext *bsfc= av_mallocz(sizeof(AVBitStreamFilterContext));
bsfc->filter= bsf;
bsfc->priv_data= av_mallocz(bsf->priv_data_size);
bsfc->priv_data = bsf->priv_data_size ? av_mallocz(bsf->priv_data_size) : NULL;
return bsfc;
}
bsf= bsf->next;


@@ -99,8 +99,8 @@ static int cavsvideo_parse(AVCodecParserContext *s,
AVCodecParser ff_cavsvideo_parser = {
.codec_ids = { CODEC_ID_CAVS },
.priv_data_size = sizeof(ParseContext1),
.priv_data_size = sizeof(ParseContext),
.parser_parse = cavsvideo_parse,
.parser_close = ff_parse1_close,
.parser_close = ff_parse_close,
.split = ff_mpeg4video_split,
};


@@ -45,6 +45,7 @@
#include "put_bits.h"
#include "simple_idct.h"
#include "dvdata.h"
#include "dvquant.h"
#include "dv_tablegen.h"
//#undef NDEBUG
@@ -1190,6 +1191,41 @@ static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c,
}
#if CONFIG_DVVIDEO_ENCODER
static inline int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num,
uint8_t seq_num, uint8_t dif_num,
uint8_t* buf)
{
buf[0] = (uint8_t)t; /* Section type */
buf[1] = (seq_num << 4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */
(chan_num << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */
7; /* reserved -- always 1 */
buf[2] = dif_num; /* DIF block number Video: 0-134, Audio: 0-8 */
return 3;
}
static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t* buf)
{
if (syb_num == 0 || syb_num == 6) {
buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
(0 << 4) | /* AP3 (Subcode application ID) */
0x0f; /* reserved -- always 1 */
}
else if (syb_num == 11) {
buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
0x7f; /* reserved -- always 1 */
}
else {
buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
(0 << 4) | /* APT (Track application ID) */
0x0f; /* reserved -- always 1 */
}
buf[1] = 0xf0 | /* reserved -- always 1 */
(syb_num & 0x0f); /* SSYB number 0 - 11 */
buf[2] = 0xff; /* reserved -- always 1 */
return 3;
}
static void dv_format_frame(DVVideoContext* c, uint8_t* buf)
{
int chan, i, j, k;
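
dv_write_dif_id() and dv_write_ssyb_id(), moved here into the encoder, just pack the three ID bytes described in their comments. A worked example using the dv_sect_header value (0x1f) declared in dvdata.h, with the remaining arguments chosen for illustration:

/* Illustration: header-section ID for channel 0, DIF sequence 3, block 0. */
uint8_t id[3];
dv_write_dif_id(dv_sect_header, 0, 3, 0, id);
/* id[0] = 0x1f, id[1] = (3 << 4) | (0 << 3) | 7 = 0x37, id[2] = 0x00 */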


@@ -43,6 +43,44 @@ static uint32_t dv_idct_factor_sd [2*2*22*64];
static uint32_t dv_idct_factor_hd1080[2*4*16*64];
static uint32_t dv_idct_factor_hd720 [2*4*16*64];
static const uint8_t dv_audio_shuffle525[10][9] = {
{ 0, 30, 60, 20, 50, 80, 10, 40, 70 }, /* 1st channel */
{ 6, 36, 66, 26, 56, 86, 16, 46, 76 },
{ 12, 42, 72, 2, 32, 62, 22, 52, 82 },
{ 18, 48, 78, 8, 38, 68, 28, 58, 88 },
{ 24, 54, 84, 14, 44, 74, 4, 34, 64 },
{ 1, 31, 61, 21, 51, 81, 11, 41, 71 }, /* 2nd channel */
{ 7, 37, 67, 27, 57, 87, 17, 47, 77 },
{ 13, 43, 73, 3, 33, 63, 23, 53, 83 },
{ 19, 49, 79, 9, 39, 69, 29, 59, 89 },
{ 25, 55, 85, 15, 45, 75, 5, 35, 65 },
};
static const uint8_t dv_audio_shuffle625[12][9] = {
{ 0, 36, 72, 26, 62, 98, 16, 52, 88}, /* 1st channel */
{ 6, 42, 78, 32, 68, 104, 22, 58, 94},
{ 12, 48, 84, 2, 38, 74, 28, 64, 100},
{ 18, 54, 90, 8, 44, 80, 34, 70, 106},
{ 24, 60, 96, 14, 50, 86, 4, 40, 76},
{ 30, 66, 102, 20, 56, 92, 10, 46, 82},
{ 1, 37, 73, 27, 63, 99, 17, 53, 89}, /* 2nd channel */
{ 7, 43, 79, 33, 69, 105, 23, 59, 95},
{ 13, 49, 85, 3, 39, 75, 29, 65, 101},
{ 19, 55, 91, 9, 45, 81, 35, 71, 107},
{ 25, 61, 97, 15, 51, 87, 5, 41, 77},
{ 31, 67, 103, 21, 57, 93, 11, 47, 83},
};
/* macroblock bit budgets */
static const uint8_t block_sizes_dv2550[8] = {
112, 112, 112, 112, 80, 80, 0, 0,
};
static const uint8_t block_sizes_dv100[8] = {
80, 80, 80, 80, 80, 80, 64, 64,
};
static const DVprofile dv_profiles[] = {
{ .dsf = 0,
.video_stype = 0x0,


@@ -65,174 +65,6 @@ typedef struct DVprofile {
const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */
} DVprofile;
/* unquant tables (not used directly) */
static const uint8_t dv_quant_shifts[22][4] = {
{ 3,3,4,4 },
{ 3,3,4,4 },
{ 2,3,3,4 },
{ 2,3,3,4 },
{ 2,2,3,3 },
{ 2,2,3,3 },
{ 1,2,2,3 },
{ 1,2,2,3 },
{ 1,1,2,2 },
{ 1,1,2,2 },
{ 0,1,1,2 },
{ 0,1,1,2 },
{ 0,0,1,1 },
{ 0,0,1,1 },
{ 0,0,0,1 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
};
static const uint8_t dv_quant_offset[4] = { 6, 3, 0, 1 };
static const uint8_t dv_quant_areas[4] = { 6, 21, 43, 64 };
/* quantization quanta by QNO for DV100 */
static const uint8_t dv100_qstep[16] = {
1, /* QNO = 0 and 1 both have no quantization */
1,
2, 3, 4, 5, 6, 7, 8, 16, 18, 20, 22, 24, 28, 52
};
/* DV25/50 DCT coefficient weights and inverse weights */
/* created by dvtables.py */
static const int dv_weight_bits = 18;
static const int dv_weight_88[64] = {
131072, 257107, 257107, 242189, 252167, 242189, 235923, 237536,
237536, 235923, 229376, 231390, 223754, 231390, 229376, 222935,
224969, 217965, 217965, 224969, 222935, 200636, 218652, 211916,
212325, 211916, 218652, 200636, 188995, 196781, 205965, 206433,
206433, 205965, 196781, 188995, 185364, 185364, 200636, 200704,
200636, 185364, 185364, 174609, 180568, 195068, 195068, 180568,
174609, 170091, 175557, 189591, 175557, 170091, 165371, 170627,
170627, 165371, 160727, 153560, 160727, 144651, 144651, 136258,
};
static const int dv_weight_248[64] = {
131072, 242189, 257107, 237536, 229376, 200636, 242189, 223754,
224969, 196781, 262144, 242189, 229376, 200636, 257107, 237536,
211916, 185364, 235923, 217965, 229376, 211916, 206433, 180568,
242189, 223754, 224969, 196781, 211916, 185364, 235923, 217965,
200704, 175557, 222935, 205965, 200636, 185364, 195068, 170627,
229376, 211916, 206433, 180568, 200704, 175557, 222935, 205965,
175557, 153560, 188995, 174609, 165371, 144651, 200636, 185364,
195068, 170627, 175557, 153560, 188995, 174609, 165371, 144651,
};
static const int dv_iweight_bits = 14;
static const int dv_iweight_88[64] = {
32768, 16710, 16710, 17735, 17015, 17735, 18197, 18079,
18079, 18197, 18725, 18559, 19196, 18559, 18725, 19284,
19108, 19692, 19692, 19108, 19284, 21400, 19645, 20262,
20214, 20262, 19645, 21400, 22733, 21845, 20867, 20815,
20815, 20867, 21845, 22733, 23173, 23173, 21400, 21400,
21400, 23173, 23173, 24600, 23764, 22017, 22017, 23764,
24600, 25267, 24457, 22672, 24457, 25267, 25971, 25191,
25191, 25971, 26715, 27962, 26715, 29642, 29642, 31536,
};
static const int dv_iweight_248[64] = {
32768, 17735, 16710, 18079, 18725, 21400, 17735, 19196,
19108, 21845, 16384, 17735, 18725, 21400, 16710, 18079,
20262, 23173, 18197, 19692, 18725, 20262, 20815, 23764,
17735, 19196, 19108, 21845, 20262, 23173, 18197, 19692,
21400, 24457, 19284, 20867, 21400, 23173, 22017, 25191,
18725, 20262, 20815, 23764, 21400, 24457, 19284, 20867,
24457, 27962, 22733, 24600, 25971, 29642, 21400, 23173,
22017, 25191, 24457, 27962, 22733, 24600, 25971, 29642,
};
/**
* The "inverse" DV100 weights are actually just the spec weights (zig-zagged).
*/
static const int dv_iweight_1080_y[64] = {
128, 16, 16, 17, 17, 17, 18, 18,
18, 18, 18, 18, 19, 18, 18, 19,
19, 19, 19, 19, 19, 42, 38, 40,
40, 40, 38, 42, 44, 43, 41, 41,
41, 41, 43, 44, 45, 45, 42, 42,
42, 45, 45, 48, 46, 43, 43, 46,
48, 49, 48, 44, 48, 49, 101, 98,
98, 101, 104, 109, 104, 116, 116, 123,
};
static const int dv_iweight_1080_c[64] = {
128, 16, 16, 17, 17, 17, 25, 25,
25, 25, 26, 25, 26, 25, 26, 26,
26, 27, 27, 26, 26, 42, 38, 40,
40, 40, 38, 42, 44, 43, 41, 41,
41, 41, 43, 44, 91, 91, 84, 84,
84, 91, 91, 96, 93, 86, 86, 93,
96, 197, 191, 177, 191, 197, 203, 197,
197, 203, 209, 219, 209, 232, 232, 246,
};
static const int dv_iweight_720_y[64] = {
128, 16, 16, 17, 17, 17, 18, 18,
18, 18, 18, 18, 19, 18, 18, 19,
19, 19, 19, 19, 19, 42, 38, 40,
40, 40, 38, 42, 44, 43, 41, 41,
41, 41, 43, 44, 68, 68, 63, 63,
63, 68, 68, 96, 92, 86, 86, 92,
96, 98, 96, 88, 96, 98, 202, 196,
196, 202, 208, 218, 208, 232, 232, 246,
};
static const int dv_iweight_720_c[64] = {
128, 24, 24, 26, 26, 26, 36, 36,
36, 36, 36, 36, 38, 36, 36, 38,
38, 38, 38, 38, 38, 84, 76, 80,
80, 80, 76, 84, 88, 86, 82, 82,
82, 82, 86, 88, 182, 182, 168, 168,
168, 182, 182, 192, 186, 192, 172, 186,
192, 394, 382, 354, 382, 394, 406, 394,
394, 406, 418, 438, 418, 464, 464, 492,
};
static const uint8_t dv_audio_shuffle525[10][9] = {
{ 0, 30, 60, 20, 50, 80, 10, 40, 70 }, /* 1st channel */
{ 6, 36, 66, 26, 56, 86, 16, 46, 76 },
{ 12, 42, 72, 2, 32, 62, 22, 52, 82 },
{ 18, 48, 78, 8, 38, 68, 28, 58, 88 },
{ 24, 54, 84, 14, 44, 74, 4, 34, 64 },
{ 1, 31, 61, 21, 51, 81, 11, 41, 71 }, /* 2nd channel */
{ 7, 37, 67, 27, 57, 87, 17, 47, 77 },
{ 13, 43, 73, 3, 33, 63, 23, 53, 83 },
{ 19, 49, 79, 9, 39, 69, 29, 59, 89 },
{ 25, 55, 85, 15, 45, 75, 5, 35, 65 },
};
static const uint8_t dv_audio_shuffle625[12][9] = {
{ 0, 36, 72, 26, 62, 98, 16, 52, 88}, /* 1st channel */
{ 6, 42, 78, 32, 68, 104, 22, 58, 94},
{ 12, 48, 84, 2, 38, 74, 28, 64, 100},
{ 18, 54, 90, 8, 44, 80, 34, 70, 106},
{ 24, 60, 96, 14, 50, 86, 4, 40, 76},
{ 30, 66, 102, 20, 56, 92, 10, 46, 82},
{ 1, 37, 73, 27, 63, 99, 17, 53, 89}, /* 2nd channel */
{ 7, 43, 79, 33, 69, 105, 23, 59, 95},
{ 13, 49, 85, 3, 39, 75, 29, 65, 101},
{ 19, 55, 91, 9, 45, 81, 35, 71, 107},
{ 25, 61, 97, 15, 51, 87, 5, 41, 77},
{ 31, 67, 103, 21, 57, 93, 11, 47, 83},
};
static const av_unused int dv_audio_frequency[3] = {
48000, 44100, 32000,
};
/* macroblock bit budgets */
static const uint8_t block_sizes_dv2550[8] = {
112, 112, 112, 112, 80, 80, 0, 0,
};
static const uint8_t block_sizes_dv100[8] = {
80, 80, 80, 80, 80, 80, 64, 64,
};
enum dv_section_type {
dv_sect_header = 0x1f,
dv_sect_subcode = 0x3f,
@@ -280,39 +112,4 @@ const DVprofile* avpriv_dv_frame_profile2(AVCodecContext* codec, const DVprofile
const uint8_t* frame, unsigned buf_size);
const DVprofile* avpriv_dv_codec_profile(AVCodecContext* codec);
static inline int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num,
uint8_t seq_num, uint8_t dif_num,
uint8_t* buf)
{
buf[0] = (uint8_t)t; /* Section type */
buf[1] = (seq_num << 4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */
(chan_num << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */
7; /* reserved -- always 1 */
buf[2] = dif_num; /* DIF block number Video: 0-134, Audio: 0-8 */
return 3;
}
static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t* buf)
{
if (syb_num == 0 || syb_num == 6) {
buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
(0 << 4) | /* AP3 (Subcode application ID) */
0x0f; /* reserved -- always 1 */
}
else if (syb_num == 11) {
buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
0x7f; /* reserved -- always 1 */
}
else {
buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
(0 << 4) | /* APT (Track application ID) */
0x0f; /* reserved -- always 1 */
}
buf[1] = 0xf0 | /* reserved -- always 1 */
(syb_num & 0x0f); /* SSYB number 0 - 11 */
buf[2] = 0xff; /* reserved -- always 1 */
return 3;
}
#endif /* AVCODEC_DVDATA_H */

libavcodec/dvquant.h (new file, 157 lines)

@@ -0,0 +1,157 @@
/*
* Quant and Weight for DV codec
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Constants for DV codec.
*/
#ifndef AVCODEC_DVQUANT_H
#define AVCODEC_DVQUANT_H
#include <stdint.h>
/* unquant tables (not used directly) */
static const uint8_t dv_quant_shifts[22][4] = {
{ 3,3,4,4 },
{ 3,3,4,4 },
{ 2,3,3,4 },
{ 2,3,3,4 },
{ 2,2,3,3 },
{ 2,2,3,3 },
{ 1,2,2,3 },
{ 1,2,2,3 },
{ 1,1,2,2 },
{ 1,1,2,2 },
{ 0,1,1,2 },
{ 0,1,1,2 },
{ 0,0,1,1 },
{ 0,0,1,1 },
{ 0,0,0,1 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
{ 0,0,0,0 },
};
static const uint8_t dv_quant_offset[4] = { 6, 3, 0, 1 };
static const uint8_t dv_quant_areas[4] = { 6, 21, 43, 64 };
/* quantization quanta by QNO for DV100 */
static const uint8_t dv100_qstep[16] = {
1, /* QNO = 0 and 1 both have no quantization */
1,
2, 3, 4, 5, 6, 7, 8, 16, 18, 20, 22, 24, 28, 52
};
/* DV25/50 DCT coefficient weights and inverse weights */
/* created by dvtables.py */
static const int dv_weight_bits = 18;
static const int dv_weight_88[64] = {
131072, 257107, 257107, 242189, 252167, 242189, 235923, 237536,
237536, 235923, 229376, 231390, 223754, 231390, 229376, 222935,
224969, 217965, 217965, 224969, 222935, 200636, 218652, 211916,
212325, 211916, 218652, 200636, 188995, 196781, 205965, 206433,
206433, 205965, 196781, 188995, 185364, 185364, 200636, 200704,
200636, 185364, 185364, 174609, 180568, 195068, 195068, 180568,
174609, 170091, 175557, 189591, 175557, 170091, 165371, 170627,
170627, 165371, 160727, 153560, 160727, 144651, 144651, 136258,
};
static const int dv_weight_248[64] = {
131072, 242189, 257107, 237536, 229376, 200636, 242189, 223754,
224969, 196781, 262144, 242189, 229376, 200636, 257107, 237536,
211916, 185364, 235923, 217965, 229376, 211916, 206433, 180568,
242189, 223754, 224969, 196781, 211916, 185364, 235923, 217965,
200704, 175557, 222935, 205965, 200636, 185364, 195068, 170627,
229376, 211916, 206433, 180568, 200704, 175557, 222935, 205965,
175557, 153560, 188995, 174609, 165371, 144651, 200636, 185364,
195068, 170627, 175557, 153560, 188995, 174609, 165371, 144651,
};
static const int dv_iweight_bits = 14;
static const int dv_iweight_88[64] = {
32768, 16710, 16710, 17735, 17015, 17735, 18197, 18079,
18079, 18197, 18725, 18559, 19196, 18559, 18725, 19284,
19108, 19692, 19692, 19108, 19284, 21400, 19645, 20262,
20214, 20262, 19645, 21400, 22733, 21845, 20867, 20815,
20815, 20867, 21845, 22733, 23173, 23173, 21400, 21400,
21400, 23173, 23173, 24600, 23764, 22017, 22017, 23764,
24600, 25267, 24457, 22672, 24457, 25267, 25971, 25191,
25191, 25971, 26715, 27962, 26715, 29642, 29642, 31536,
};
static const int dv_iweight_248[64] = {
32768, 17735, 16710, 18079, 18725, 21400, 17735, 19196,
19108, 21845, 16384, 17735, 18725, 21400, 16710, 18079,
20262, 23173, 18197, 19692, 18725, 20262, 20815, 23764,
17735, 19196, 19108, 21845, 20262, 23173, 18197, 19692,
21400, 24457, 19284, 20867, 21400, 23173, 22017, 25191,
18725, 20262, 20815, 23764, 21400, 24457, 19284, 20867,
24457, 27962, 22733, 24600, 25971, 29642, 21400, 23173,
22017, 25191, 24457, 27962, 22733, 24600, 25971, 29642,
};
/**
* The "inverse" DV100 weights are actually just the spec weights (zig-zagged).
*/
static const int dv_iweight_1080_y[64] = {
128, 16, 16, 17, 17, 17, 18, 18,
18, 18, 18, 18, 19, 18, 18, 19,
19, 19, 19, 19, 19, 42, 38, 40,
40, 40, 38, 42, 44, 43, 41, 41,
41, 41, 43, 44, 45, 45, 42, 42,
42, 45, 45, 48, 46, 43, 43, 46,
48, 49, 48, 44, 48, 49, 101, 98,
98, 101, 104, 109, 104, 116, 116, 123,
};
static const int dv_iweight_1080_c[64] = {
128, 16, 16, 17, 17, 17, 25, 25,
25, 25, 26, 25, 26, 25, 26, 26,
26, 27, 27, 26, 26, 42, 38, 40,
40, 40, 38, 42, 44, 43, 41, 41,
41, 41, 43, 44, 91, 91, 84, 84,
84, 91, 91, 96, 93, 86, 86, 93,
96, 197, 191, 177, 191, 197, 203, 197,
197, 203, 209, 219, 209, 232, 232, 246,
};
static const int dv_iweight_720_y[64] = {
128, 16, 16, 17, 17, 17, 18, 18,
18, 18, 18, 18, 19, 18, 18, 19,
19, 19, 19, 19, 19, 42, 38, 40,
40, 40, 38, 42, 44, 43, 41, 41,
41, 41, 43, 44, 68, 68, 63, 63,
63, 68, 68, 96, 92, 86, 86, 92,
96, 98, 96, 88, 96, 98, 202, 196,
196, 202, 208, 218, 208, 232, 232, 246,
};
static const int dv_iweight_720_c[64] = {
128, 24, 24, 26, 26, 26, 36, 36,
36, 36, 36, 36, 38, 36, 36, 38,
38, 38, 38, 38, 38, 84, 76, 80,
80, 80, 76, 84, 88, 86, 82, 82,
82, 82, 86, 88, 182, 182, 168, 168,
168, 182, 182, 192, 186, 192, 172, 186,
192, 394, 382, 354, 382, 394, 406, 394,
394, 406, 418, 438, 418, 464, 464, 492,
};
#endif /* AVCODEC_DVQUANT_H */


@@ -104,7 +104,7 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){
return 0;
} //FIXME cleanup like check_intra_pred_mode
static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
MpegEncContext * const s = &h->s;
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
@@ -136,22 +136,6 @@ static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
return mode;
}
/**
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode)
{
return check_intra_pred_mode(h, mode, 0);
}
/**
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode)
{
return check_intra_pred_mode(h, mode, 1);
}
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
int i, si, di;


@@ -671,15 +671,7 @@ void ff_generate_sliding_window_mmcos(H264Context *h);
*/
int ff_h264_check_intra4x4_pred_mode(H264Context *h);
/**
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode);
/**
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode);
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma);
void ff_h264_hl_decode_mb(H264Context *h);
int ff_h264_frame_start(H264Context *h);

View File

@@ -2042,14 +2042,14 @@ decode_intra_mb:
write_back_intra_pred_mode(h);
if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1;
} else {
h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode( h, h->intra16x16_pred_mode );
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode, 0 );
if( h->intra16x16_pred_mode < 0 ) return -1;
}
if(decode_chroma){
h->chroma_pred_mode_table[mb_xy] =
pred_mode = decode_cabac_mb_chroma_pre_mode( h );
pred_mode= ff_h264_check_intra_chroma_pred_mode( h, pred_mode );
pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode, 1 );
if( pred_mode < 0 ) return -1;
h->chroma_pred_mode= pred_mode;
} else {

View File

@@ -823,12 +823,12 @@ decode_intra_mb:
if( ff_h264_check_intra4x4_pred_mode(h) < 0)
return -1;
}else{
h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode(h, h->intra16x16_pred_mode);
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode, 0);
if(h->intra16x16_pred_mode < 0)
return -1;
}
if(decode_chroma){
pred_mode= ff_h264_check_intra_chroma_pred_mode(h, get_ue_golomb_31(&s->gb));
pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb), 1);
if(pred_mode < 0)
return -1;
h->chroma_pred_mode= pred_mode;

View File

@@ -27,6 +27,11 @@
#include "mpeg4video.h"
#include "mpeg4video_parser.h"
struct Mp4vParseContext {
ParseContext pc;
struct MpegEncContext enc;
int first_picture;
};
int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size){
int vop_found, i;
@@ -70,8 +75,8 @@ static int av_mpeg4_decode_header(AVCodecParserContext *s1,
AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
ParseContext1 *pc = s1->priv_data;
MpegEncContext *s = pc->enc;
struct Mp4vParseContext *pc = s1->priv_data;
MpegEncContext *s = &pc->enc;
GetBitContext gb1, *gb = &gb1;
int ret;
@@ -95,14 +100,11 @@ static int av_mpeg4_decode_header(AVCodecParserContext *s1,
static av_cold int mpeg4video_parse_init(AVCodecParserContext *s)
{
ParseContext1 *pc = s->priv_data;
struct Mp4vParseContext *pc = s->priv_data;
pc->enc = av_mallocz(sizeof(MpegEncContext));
if (!pc->enc)
return -1;
pc->first_picture = 1;
pc->enc->quant_precision=5;
pc->enc->slice_context_count = 1;
pc->enc.quant_precision=5;
pc->enc.slice_context_count = 1;
return 0;
}
@@ -135,9 +137,9 @@ static int mpeg4video_parse(AVCodecParserContext *s,
AVCodecParser ff_mpeg4video_parser = {
.codec_ids = { CODEC_ID_MPEG4 },
.priv_data_size = sizeof(ParseContext1),
.priv_data_size = sizeof(struct Mp4vParseContext),
.parser_init = mpeg4video_parse_init,
.parser_parse = mpeg4video_parse,
.parser_close = ff_parse1_close,
.parser_close = ff_parse_close,
.split = ff_mpeg4video_split,
};

View File

@@ -23,11 +23,19 @@
#include "parser.h"
#include "mpegvideo.h"
struct MpvParseContext {
ParseContext pc;
AVRational frame_rate;
int progressive_sequence;
int width, height;
};
static void mpegvideo_extract_headers(AVCodecParserContext *s,
AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
ParseContext1 *pc = s->priv_data;
struct MpvParseContext *pc = s->priv_data;
const uint8_t *buf_end = buf + buf_size;
uint32_t start_code;
int frame_rate_index, ext_type, bytes_left;
@@ -131,7 +139,7 @@ static int mpegvideo_parse(AVCodecParserContext *s,
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
ParseContext1 *pc1 = s->priv_data;
struct MpvParseContext *pc1 = s->priv_data;
ParseContext *pc= &pc1->pc;
int next;
@@ -178,8 +186,8 @@ static int mpegvideo_split(AVCodecContext *avctx,
AVCodecParser ff_mpegvideo_parser = {
.codec_ids = { CODEC_ID_MPEG1VIDEO, CODEC_ID_MPEG2VIDEO },
.priv_data_size = sizeof(ParseContext1),
.priv_data_size = sizeof(struct MpvParseContext),
.parser_parse = mpegvideo_parse,
.parser_close = ff_parse1_close,
.parser_close = ff_parse_close,
.split = mpegvideo_split,
};
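The two parser conversions above follow the same pattern: the shared ParseContext1 is replaced by a per-parser private struct whose first member is a plain ParseContext, so the generic ff_parse_close() (substituted below for ff_parse1_close()) can still free pc.buffer. A minimal sketch of that layout, with hypothetical names:

/* Illustrative only; MyParseContext and my_codec_field are hypothetical. */
typedef struct MyParseContext {
    ParseContext pc;     /* must stay first: ff_parse_close() treats priv_data
                          * as a ParseContext and frees pc.buffer */
    int my_codec_field;  /* codec-specific state formerly kept in ParseContext1 */
} MyParseContext;

static int my_parse_init(AVCodecParserContext *s)
{
    MyParseContext *ctx = s->priv_data; /* allocated and zeroed by av_parser_init() */
    ctx->my_codec_field = 1;
    return 0;
}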

View File

@@ -287,14 +287,6 @@ void ff_parse_close(AVCodecParserContext *s)
av_freep(&pc->buffer);
}
void ff_parse1_close(AVCodecParserContext *s)
{
ParseContext1 *pc1 = s->priv_data;
av_free(pc1->pc.buffer);
av_free(pc1->enc);
}
/*************************/
int ff_mpeg4video_split(AVCodecContext *avctx,

View File

@@ -39,26 +39,12 @@ typedef struct ParseContext{
struct MpegEncContext;
typedef struct ParseContext1{
ParseContext pc;
/* XXX/FIXME PC1 vs. PC */
/* MPEG-2-specific */
AVRational frame_rate;
int progressive_sequence;
int width, height;
/* XXX: suppress that, needed by MPEG-4 */
struct MpegEncContext *enc;
int first_picture;
} ParseContext1;
#define END_NOT_FOUND (-100)
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size);
int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf,
int buf_size);
void ff_parse_close(AVCodecParserContext *s);
void ff_parse1_close(AVCodecParserContext *s);
/**
* Fetch timestamps for a specific byte within the current access unit.

View File

@@ -614,7 +614,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
dir = i_mb_type_info[mb_type - 8].pred_mode;
dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;
if ((h->intra16x16_pred_mode = ff_h264_check_intra16x16_pred_mode(h, dir)) == -1){
if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1){
av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
return -1;
}
@@ -713,7 +713,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
s->current_picture.f.mb_type[mb_xy] = mb_type;
if (IS_INTRA(mb_type)) {
h->chroma_pred_mode = ff_h264_check_intra_chroma_pred_mode(h, DC_PRED8x8);
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
}
return 0;
@@ -813,7 +813,9 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
MpegEncContext *s = &h->s;
int m;
unsigned char *extradata;
unsigned char *extradata_end;
unsigned int size;
int marker_found = 0;
if (ff_h264_decode_init(avctx) < 0)
return -1;
@@ -834,19 +836,26 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
/* prowl for the "SEQH" marker in the extradata */
extradata = (unsigned char *)avctx->extradata;
for (m = 0; m < avctx->extradata_size; m++) {
if (!memcmp(extradata, "SEQH", 4))
extradata_end = avctx->extradata + avctx->extradata_size;
if (extradata) {
for (m = 0; m + 8 < avctx->extradata_size; m++) {
if (!memcmp(extradata, "SEQH", 4)) {
marker_found = 1;
break;
}
extradata++;
}
}
/* if a match was found, parse the extra data */
if (extradata && !memcmp(extradata, "SEQH", 4)) {
if (marker_found) {
GetBitContext gb;
int frame_size_code;
size = AV_RB32(&extradata[4]);
if (size > extradata_end - extradata - 8)
return AVERROR_INVALIDDATA;
init_get_bits(&gb, extradata + 8, size*8);
/* 'frame size code' and optional 'width, height' */

View File

@@ -831,6 +831,16 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
goto free_and_end;
}
}
if (avctx->codec->pix_fmts) {
for (i = 0; avctx->codec->pix_fmts[i] != PIX_FMT_NONE; i++)
if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
break;
if (avctx->codec->pix_fmts[i] == PIX_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR, "Specified pix_fmt is not supported\n");
ret = AVERROR(EINVAL);
goto free_and_end;
}
}
if (avctx->codec->supported_samplerates) {
for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
@@ -953,13 +963,17 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
if (avctx->codec->encode2) {
*got_packet_ptr = 0;
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret && *got_packet_ptr &&
!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
if (!ret && *got_packet_ptr) {
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
avpkt->pts = frame->pts;
avpkt->duration = av_rescale_q(frame->nb_samples,
(AVRational){ 1, avctx->sample_rate },
avctx->time_base);
}
avpkt->dts = avpkt->pts;
} else {
avpkt->size = 0;
}
} else {
/* for compatibility with encoders not supporting encode2(), we need to
allocate a packet buffer if the user has not provided one or check
@@ -1007,7 +1021,7 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
av_freep(&avpkt->data);
} else {
if (avctx->coded_frame)
avpkt->pts = avctx->coded_frame->pts;
avpkt->pts = avpkt->dts = avctx->coded_frame->pts;
/* Set duration for final small packet. This can be removed
once all encoders supporting CODEC_CAP_SMALL_LAST_FRAME use
encode2() */
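With the new check in avcodec_open2(), opening an encoder now fails with EINVAL if the requested pixel format is not listed in AVCodec.pix_fmts. A caller-side sketch (not part of the patch) of picking a supported format before opening; the 'wanted' parameter and the fallback policy are assumptions:

enum PixelFormat choose_pix_fmt(AVCodec *codec, enum PixelFormat wanted)
{
    const enum PixelFormat *p = codec->pix_fmts;
    if (!p)
        return wanted;             /* codec does not declare a restriction */
    for (; *p != PIX_FMT_NONE; p++)
        if (*p == wanted)
            return wanted;         /* requested format is supported */
    return codec->pix_fmts[0];     /* fall back to the first advertised format */
}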

View File

@@ -31,11 +31,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
return AVERROR(EINVAL);
}
if (avctx->pix_fmt != PIX_FMT_YUV422P10) {
av_log(avctx, AV_LOG_ERROR, "v210 needs YUV422P10\n");
return -1;
}
if (avctx->bits_per_raw_sample != 10)
av_log(avctx, AV_LOG_WARNING, "bits per raw sample: %d != 10-bit\n",
avctx->bits_per_raw_sample);
@@ -55,18 +50,20 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
const AVFrame *pic = data;
int aligned_width = ((avctx->width + 47) / 48) * 48;
int stride = aligned_width * 8 / 3;
int line_padding = stride - ((avctx->width * 8 + 11) / 12) * 4;
int h, w;
const uint16_t *y = (const uint16_t*)pic->data[0];
const uint16_t *u = (const uint16_t*)pic->data[1];
const uint16_t *v = (const uint16_t*)pic->data[2];
uint8_t *p = buf;
uint8_t *pdst = buf;
PutByteContext p;
if (buf_size < avctx->height * stride) {
av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
return AVERROR(ENOMEM);
}
bytestream2_init_writer(&p, buf, buf_size);
#define CLIP(v) av_clip(v, 4, 1019)
#define WRITE_PIXELS(a, b, c) \
@@ -74,7 +71,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
val = CLIP(*a++); \
val |= (CLIP(*b++) << 10) | \
(CLIP(*c++) << 20); \
bytestream_put_le32(&p, val); \
bytestream2_put_le32u(&p, val); \
} while (0)
for (h = 0; h < avctx->height; h++) {
@@ -90,25 +87,24 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
val = CLIP(*y++);
if (w == avctx->width - 2)
bytestream_put_le32(&p, val);
bytestream2_put_le32u(&p, val);
if (w < avctx->width - 3) {
val |= (CLIP(*u++) << 10) | (CLIP(*y++) << 20);
bytestream_put_le32(&p, val);
bytestream2_put_le32u(&p, val);
val = CLIP(*v++) | (CLIP(*y++) << 10);
bytestream_put_le32(&p, val);
bytestream2_put_le32u(&p, val);
}
}
pdst += stride;
memset(p, 0, pdst - p);
p = pdst;
bytestream2_set_buffer(&p, 0, line_padding);
y += pic->linesize[0] / 2 - avctx->width;
u += pic->linesize[1] / 2 - avctx->width / 2;
v += pic->linesize[2] / 2 - avctx->width / 2;
}
return p - buf;
return bytestream2_tell_p(&p);
}
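The rewrite above replaces manual pointer arithmetic with the Bytestream2 writer API. A stand-alone sketch of the calls involved; the unchecked _u variants assume the caller has already verified the buffer size, as the encoder does before writing:

#include "bytestream.h"

static int write_example(uint8_t *buf, int buf_size)      /* illustrative only */
{
    PutByteContext p;

    bytestream2_init_writer(&p, buf, buf_size); /* bind the writer to buf */
    bytestream2_put_le32u(&p, 0x3ff);           /* unchecked LE 32-bit store */
    bytestream2_set_buffer(&p, 0, 8);           /* zero-fill 8 padding bytes */
    return bytestream2_tell_p(&p);              /* bytes written so far: 12 */
}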
static av_cold int encode_close(AVCodecContext *avctx)

View File

@@ -196,6 +196,6 @@ AVCodecParser ff_vc1_parser = {
.priv_data_size = sizeof(VC1ParseContext),
.parser_init = vc1_parse_init,
.parser_parse = vc1_parse,
.parser_close = ff_parse1_close,
.parser_close = ff_parse_close,
.split = vc1_split,
};

View File

@@ -499,6 +499,21 @@ static void wv_reset_saved_context(WavpackFrameContext *s)
s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
}
static inline int wv_check_crc(WavpackFrameContext *s, uint32_t crc,
uint32_t crc_extra_bits)
{
if (crc != s->CRC) {
av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
return AVERROR_INVALIDDATA;
}
if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
return AVERROR_INVALIDDATA;
}
return 0;
}
static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb,
void *dst, const int type)
{
@@ -609,14 +624,9 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb,
} while (!last && count < s->samples);
wv_reset_saved_context(s);
if (crc != s->CRC) {
av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
return -1;
}
if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
return -1;
}
if ((s->avctx->err_recognition & AV_EF_CRCCHECK) &&
wv_check_crc(s, crc, crc_extra_bits))
return AVERROR_INVALIDDATA;
return count * 2;
}
@@ -679,14 +689,9 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb,
} while (!last && count < s->samples);
wv_reset_saved_context(s);
if (crc != s->CRC) {
av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
return -1;
}
if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
return -1;
}
if ((s->avctx->err_recognition & AV_EF_CRCCHECK) &&
wv_check_crc(s, crc, crc_extra_bits))
return AVERROR_INVALIDDATA;
return count;
}
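The CRC comparison is now factored into wv_check_crc() and gated on AV_EF_CRCCHECK, per the commit "wavpack: allow user to disable CRC checking". A caller-side sketch of requesting CRC verification explicitly; 'codec' is assumed to be the WavPack decoder located by the application:

static AVCodecContext *open_with_crc_check(AVCodec *codec)  /* illustrative only */
{
    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;
    ctx->err_recognition |= AV_EF_CRCCHECK;   /* ask the decoder to verify CRCs */
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_free(ctx);
        return NULL;
    }
    return ctx;
}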

View File

@@ -35,7 +35,6 @@ pw_bap_mul2: dw 5, 7, 0, 7, 5, 7, 0, 7
; used in ff_ac3_extract_exponents()
pd_1: times 4 dd 1
pd_151: times 4 dd 151
pb_shuf_4dwb: db 0, 4, 8, 12
SECTION .text
@@ -404,15 +403,12 @@ cglobal ac3_extract_exponents_3dnow, 3,3,0, exp, coef, len
%endif
%macro AC3_EXTRACT_EXPONENTS 1
cglobal ac3_extract_exponents_%1, 3,3,5, exp, coef, len
cglobal ac3_extract_exponents_%1, 3,3,4, exp, coef, len
add expq, lenq
lea coefq, [coefq+4*lenq]
neg lenq
mova m2, [pd_1]
mova m3, [pd_151]
%ifidn %1, ssse3 ;
movd m4, [pb_shuf_4dwb]
%endif
.loop:
; move 4 32-bit coefs to xmm0
mova m0, [coefq+4*lenq]
@@ -426,12 +422,11 @@ cglobal ac3_extract_exponents_%1, 3,3,5, exp, coef, len
mova m0, m3
psubd m0, m1
; move the lowest byte in each of 4 dwords to the low dword
%ifidn %1, ssse3
pshufb m0, m4
%else
; NOTE: We cannot just extract the low bytes with pshufb because the dword
; result for 16777215 is -1 due to float inaccuracy. Using packuswb
; clips this to 0, which is the correct exponent.
packssdw m0, m0
packuswb m0, m0
%endif
movd [expq+lenq], m0
add lenq, 4
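A scalar illustration of the NOTE above: the intermediate dword for the problematic coefficient is -1, so extracting its low byte would yield 255, while unsigned saturation (what packuswb performs) clips it to the correct exponent 0.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t dword     = -1;                          /* inaccurate intermediate */
    uint8_t low_byte  = (uint8_t)(dword & 0xff);     /* 255: wrong exponent */
    uint8_t saturated = dword < 0   ? 0   :
                        dword > 255 ? 255 : (uint8_t)dword; /* 0: correct */
    printf("%u %u\n", low_byte, saturated);
    return 0;
}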

View File

@@ -22,10 +22,9 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mem.h"
#include "config.h"
#include "timefilter.h"
#include "libavutil/mem.h"
struct TimeFilter {
/// Delay Locked Loop data. These variables refer to mathematical
@@ -37,7 +36,9 @@ struct TimeFilter {
int count;
};
TimeFilter * ff_timefilter_new(double clock_period, double feedback2_factor, double feedback3_factor)
TimeFilter *ff_timefilter_new(double clock_period,
double feedback2_factor,
double feedback3_factor)
{
TimeFilter *self = av_mallocz(sizeof(TimeFilter));
self->clock_period = clock_period;
@@ -92,7 +93,8 @@ int main(void)
for (n0 = 0; n0 < 40; n0 = 2 * n0 + 1) {
for (n1 = 0; n1 < 10; n1 = 2 * n1 + 1) {
#else
{{
{
{
n0 = 7;
n1 = 1;
#endif
@@ -104,8 +106,7 @@ int main(void)
av_lfg_init(&prng, 123);
for (i = 0; i < SAMPLES; i++) {
ideal[i] = 10 + i + n1 * i / (1000);
samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2)
/ (LFG_MAX * 10LL);
samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2) / (LFG_MAX * 10LL);
}
do {
@@ -136,7 +137,8 @@ int main(void)
for (i = 0; i < SAMPLES; i++) {
double filtered;
filtered = ff_timefilter_update(tf, samples[i], 1);
printf("%f %f %f %f\n", i - samples[i] + 10, filtered - samples[i], samples[FFMAX(i, 1)] - samples[FFMAX(i-1, 0)], filtered - lastfil);
printf("%f %f %f %f\n", i - samples[i] + 10, filtered - samples[i],
samples[FFMAX(i, 1)] - samples[FFMAX(i - 1, 0)], filtered - lastfil);
lastfil = filtered;
}
ff_timefilter_destroy(tf);

View File

@@ -374,8 +374,7 @@ static int read_seek(AVFormatContext *s, int stream_index,
{
AVStream *st = s->streams[0];
CaffContext *caf = s->priv_data;
CaffContext caf2 = *caf;
int64_t pos;
int64_t pos, packet_cnt, frame_cnt;
timestamp = FFMAX(timestamp, 0);
@@ -384,20 +383,22 @@ static int read_seek(AVFormatContext *s, int stream_index,
pos = caf->bytes_per_packet * timestamp / caf->frames_per_packet;
if (caf->data_size > 0)
pos = FFMIN(pos, caf->data_size);
caf->packet_cnt = pos / caf->bytes_per_packet;
caf->frame_cnt = caf->frames_per_packet * caf->packet_cnt;
packet_cnt = pos / caf->bytes_per_packet;
frame_cnt = caf->frames_per_packet * packet_cnt;
} else if (st->nb_index_entries) {
caf->packet_cnt = av_index_search_timestamp(st, timestamp, flags);
caf->frame_cnt = st->index_entries[caf->packet_cnt].timestamp;
pos = st->index_entries[caf->packet_cnt].pos;
packet_cnt = av_index_search_timestamp(st, timestamp, flags);
frame_cnt = st->index_entries[packet_cnt].timestamp;
pos = st->index_entries[packet_cnt].pos;
} else {
return -1;
}
if (avio_seek(s->pb, pos + caf->data_start, SEEK_SET) < 0) {
*caf = caf2;
if (avio_seek(s->pb, pos + caf->data_start, SEEK_SET) < 0)
return -1;
}
caf->packet_cnt = packet_cnt;
caf->frame_cnt = frame_cnt;
return 0;
}
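The seek fix above, like the other avio_seek commits in this merge, computes the new position into locals and only commits demuxer state after the seek succeeds. A generic sketch of the pattern; MyDemuxContext and its fields are hypothetical:

static int my_read_seek(AVFormatContext *s, int64_t pos,
                        int64_t new_packet_cnt, int64_t new_frame_cnt)
{
    MyDemuxContext *c = s->priv_data;       /* hypothetical demuxer context */

    if (avio_seek(s->pb, pos, SEEK_SET) < 0)
        return -1;                          /* state untouched on failure */
    c->packet_cnt = new_packet_cnt;         /* commit only after success */
    c->frame_cnt  = new_frame_cnt;
    return 0;
}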

View File

@@ -99,6 +99,10 @@ static const uint8_t* dv_extract_pack(uint8_t* frame, enum dv_pack_type t)
return frame[offs] == t ? &frame[offs] : NULL;
}
static const int dv_audio_frequency[3] = {
48000, 44100, 32000,
};
/*
* There's a couple of assumptions being made here:
* 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) audio samples.
@@ -126,6 +130,9 @@ static int dv_extract_audio(uint8_t* frame, uint8_t* ppcm[4],
if (quant > 1)
return -1; /* unsupported quantization */
if (freq >= FF_ARRAY_ELEMS(dv_audio_frequency))
return AVERROR_INVALIDDATA;
size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
half_ch = sys->difseg_size / 2;
@@ -209,6 +216,12 @@ static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame)
stype = (as_pack[3] & 0x1f); /* 0 - 2CH, 2 - 4CH, 3 - 8CH */
quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
if (freq >= FF_ARRAY_ELEMS(dv_audio_frequency)) {
av_log(c->fctx, AV_LOG_ERROR,
"Unrecognized audio sample rate index (%d)\n", freq);
return 0;
}
if (stype > 3) {
av_log(c->fctx, AV_LOG_ERROR, "stype %d is invalid\n", stype);
c->ach = 0;

View File

@@ -108,8 +108,10 @@ static int rtp_write_header(AVFormatContext *s1)
NTP_OFFSET_US;
max_packet_size = s1->pb->max_packet_size;
if (max_packet_size <= 12)
if (max_packet_size <= 12) {
av_log(s1, AV_LOG_ERROR, "Max packet size %d too low\n", max_packet_size);
return AVERROR(EIO);
}
s->buf = av_malloc(max_packet_size);
if (s->buf == NULL) {
return AVERROR(ENOMEM);

View File

@@ -8,15 +8,15 @@ fate-aac-al05_44: REF = $(SAMPLES)/aac/al05_44.s16
FATE_AAC += fate-aac-al06_44
fate-aac-al06_44: CMD = pcm -i $(SAMPLES)/aac/al06_44.mp4
fate-aac-al06_44: REF = $(SAMPLES)/aac/al06_44.s16
fate-aac-al06_44: REF = $(SAMPLES)/aac/al06_44_reorder.s16
FATE_AAC += fate-aac-al07_96
fate-aac-al07_96: CMD = pcm -i $(SAMPLES)/aac/al07_96.mp4
fate-aac-al07_96: REF = $(SAMPLES)/aac/al07_96.s16
fate-aac-al07_96: REF = $(SAMPLES)/aac/al07_96_reorder.s16
FATE_AAC += fate-aac-al15_44
fate-aac-al15_44: CMD = pcm -i $(SAMPLES)/aac/al15_44.mp4
fate-aac-al15_44: REF = $(SAMPLES)/aac/al15_44.s16
fate-aac-al15_44: REF = $(SAMPLES)/aac/al15_44_reorder.s16
FATE_AAC += fate-aac-al17_44
fate-aac-al17_44: CMD = pcm -i $(SAMPLES)/aac/al17_44.mp4
@@ -32,7 +32,7 @@ fate-aac-am00_88: REF = $(SAMPLES)/aac/am00_88.s16
FATE_AAC += fate-aac-am05_44
fate-aac-am05_44: CMD = pcm -i $(SAMPLES)/aac/am05_44.mp4
fate-aac-am05_44: REF = $(SAMPLES)/aac/am05_44.s16
fate-aac-am05_44: REF = $(SAMPLES)/aac/am05_44_reorder.s16
FATE_AAC += fate-aac-al_sbr_hq_cm_48_2
fate-aac-al_sbr_hq_cm_48_2: CMD = pcm -i $(SAMPLES)/aac/al_sbr_cm_48_2.mp4
@@ -40,7 +40,7 @@ fate-aac-al_sbr_hq_cm_48_2: REF = $(SAMPLES)/aac/al_sbr_hq_cm_48_2.s16
FATE_AAC += fate-aac-al_sbr_hq_cm_48_5.1
fate-aac-al_sbr_hq_cm_48_5.1: CMD = pcm -i $(SAMPLES)/aac/al_sbr_cm_48_5.1.mp4
fate-aac-al_sbr_hq_cm_48_5.1: REF = $(SAMPLES)/aac/al_sbr_hq_cm_48_5.1.s16
fate-aac-al_sbr_hq_cm_48_5.1: REF = $(SAMPLES)/aac/al_sbr_hq_cm_48_5.1_reorder.s16
FATE_AAC += fate-aac-al_sbr_ps_06_ur
fate-aac-al_sbr_ps_06_ur: CMD = pcm -i $(SAMPLES)/aac/al_sbr_ps_06_new.mp4