diff --git a/vp8/encoder/lookahead.c b/vp8/encoder/lookahead.c index 3b86d4094..d7f85cba1 100644 --- a/vp8/encoder/lookahead.c +++ b/vp8/encoder/lookahead.c @@ -86,7 +86,8 @@ vp8_lookahead_init(unsigned int width, if(!ctx->buf) goto bail; for(i=0; i<depth; i++) - if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, width, height, 16)) + if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, + width, height, VP8BORDERINPIXELS)) goto bail; } return ctx; diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c index ba8793dc8..35baa4c2b 100644 --- a/vp8/encoder/onyx_if.c +++ b/vp8/encoder/onyx_if.c @@ -1240,7 +1240,7 @@ static void alloc_raw_frame_buffers(VP8_COMP *cpi) #if VP8_TEMPORAL_ALT_REF if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, - width, height, 16)) + width, height, VP8BORDERINPIXELS)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate altref buffer"); @@ -1290,7 +1290,8 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate last frame buffer"); - if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height, 16)) + if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, + width, height, VP8BORDERINPIXELS)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate scaled source buffer"); diff --git a/vp8/vp8_dx_iface.c b/vp8/vp8_dx_iface.c index 58dc486de..13a072bff 100644 --- a/vp8/vp8_dx_iface.c +++ b/vp8/vp8_dx_iface.c @@ -301,6 +301,36 @@ update_error_state(vpx_codec_alg_priv_t *ctx, return res; } +static void yuvconfig2image(vpx_image_t *img, + const YV12_BUFFER_CONFIG *yv12, + void *user_priv) +{ + /** vpx_img_wrap() doesn't allow specifying independent strides for + * the Y, U, and V planes, nor other alignment adjustments that + * might be representable by a YV12_BUFFER_CONFIG, so we just + * initialize all the fields.*/ + img->fmt = yv12->clrtype == REG_YUV ? 
+ VPX_IMG_FMT_I420 : VPX_IMG_FMT_VPXI420; + img->w = yv12->y_stride; + img->h = (yv12->y_height + 2 * VP8BORDERINPIXELS + 15) & ~15; + img->d_w = yv12->y_width; + img->d_h = yv12->y_height; + img->x_chroma_shift = 1; + img->y_chroma_shift = 1; + img->planes[VPX_PLANE_Y] = yv12->y_buffer; + img->planes[VPX_PLANE_U] = yv12->u_buffer; + img->planes[VPX_PLANE_V] = yv12->v_buffer; + img->planes[VPX_PLANE_ALPHA] = NULL; + img->stride[VPX_PLANE_Y] = yv12->y_stride; + img->stride[VPX_PLANE_U] = yv12->uv_stride; + img->stride[VPX_PLANE_V] = yv12->uv_stride; + img->stride[VPX_PLANE_ALPHA] = yv12->y_stride; + img->bps = 12; + img->user_priv = user_priv; + img->img_data = yv12->buffer_alloc; + img->img_data_owner = 0; + img->self_allocd = 0; +} static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx, const uint8_t *data, @@ -429,21 +459,8 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx, if (!res && 0 == vp8dx_get_raw_frame(ctx->pbi, &sd, &time_stamp, &time_end_stamp, &flags)) { - /* Align width/height */ - unsigned int a_w = (sd.y_width + 15) & ~15; - unsigned int a_h = (sd.y_height + 15) & ~15; - - vpx_img_wrap(&ctx->img, VPX_IMG_FMT_I420, - a_w + 2 * VP8BORDERINPIXELS, - a_h + 2 * VP8BORDERINPIXELS, - 1, - sd.buffer_alloc); - vpx_img_set_rect(&ctx->img, - VP8BORDERINPIXELS, VP8BORDERINPIXELS, - sd.y_width, sd.y_height); - ctx->img.user_priv = user_priv; + yuvconfig2image(&ctx->img, &sd, user_priv); ctx->img_avail = 1; - } } diff --git a/vpx_scale/generic/yv12config.c b/vpx_scale/generic/yv12config.c index d02cde28f..eff594e2d 100644 --- a/vpx_scale/generic/yv12config.c +++ b/vpx_scale/generic/yv12config.c @@ -49,25 +49,33 @@ vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int if (ybf) { + int y_stride = ((width + 2 * border) + 31) & ~31; + int yplane_size = (height + 2 * border) * y_stride; int uv_width = width >> 1; int uv_height = height >> 1; - int yplane_size = (height + 2 * border) * (width + 2 * border); - int uvplane_size 
= (uv_height + border) * (uv_width + border); + /** There is currently a bunch of code which assumes + * uv_stride == y_stride/2, so enforce this here. */ + int uv_stride = y_stride >> 1; + int uvplane_size = (uv_height + border) * uv_stride; vp8_yv12_de_alloc_frame_buffer(ybf); - /* only support allocating buffers that have - a height and width that are multiples of 16 */ - if ((width & 0xf) | (height & 0xf)) + /** Only support allocating buffers that have a height and width that + * are multiples of 16, and a border that's a multiple of 32. + * The border restriction is required to get 16-byte alignment of the + * start of the chroma rows without introducing an arbitrary gap + * between planes, which would break the semantics of things like + * vpx_img_set_rect(). */ + if ((width & 0xf) | (height & 0xf) | (border & 0x1f)) return -3; ybf->y_width = width; ybf->y_height = height; - ybf->y_stride = width + 2 * border; + ybf->y_stride = y_stride; ybf->uv_width = uv_width; ybf->uv_height = uv_height; - ybf->uv_stride = uv_width + border; + ybf->uv_stride = uv_stride; ybf->border = border; ybf->frame_size = yplane_size + 2 * uvplane_size; @@ -77,9 +85,9 @@ vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int if (ybf->buffer_alloc == NULL) return -1; - ybf->y_buffer = ybf->buffer_alloc + (border * ybf->y_stride) + border; - ybf->u_buffer = ybf->buffer_alloc + yplane_size + (border / 2 * ybf->uv_stride) + border / 2; - ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + (border / 2 * ybf->uv_stride) + border / 2; + ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border; + ybf->u_buffer = ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2; + ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + (border / 2 * uv_stride) + border / 2; ybf->corrupted = 0; /* assume not currupted by errors */ }