/*
 * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
|
|
|
|
#include <assert.h>
|
|
|
|
#include <stdlib.h>
|
2013-03-12 01:02:27 +01:00
|
|
|
|
2011-04-13 20:00:18 +02:00
|
|
|
#include "vpx_config.h"
|
2013-03-12 01:02:27 +01:00
|
|
|
#include "vp9/common/vp9_common.h"
|
2012-11-28 19:41:40 +01:00
|
|
|
#include "vp9/encoder/vp9_lookahead.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_extend.h"
|
2011-04-13 20:00:18 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
/* Circular queue of frames buffered ahead of the encoder.
 * Entries are stored in a fixed-size ring; read_idx/write_idx wrap at
 * max_sz (see pop()). sz tracks the current occupancy.
 */
struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};
|
|
|
|
|
|
|
|
|
|
|
|
/* Return the buffer at the given absolute index and increment the index */
|
2013-03-12 01:02:27 +01:00
|
|
|
static struct lookahead_entry * pop(struct lookahead_ctx *ctx,
|
|
|
|
unsigned int *idx) {
|
|
|
|
unsigned int index = *idx;
|
2012-07-14 00:21:29 +02:00
|
|
|
struct lookahead_entry *buf = ctx->buf + index;
|
|
|
|
|
|
|
|
assert(index < ctx->max_sz);
|
|
|
|
if (++index >= ctx->max_sz)
|
|
|
|
index -= ctx->max_sz;
|
|
|
|
*idx = index;
|
|
|
|
return buf;
|
2011-04-13 20:00:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
|
2012-07-14 00:21:29 +02:00
|
|
|
if (ctx) {
|
|
|
|
if (ctx->buf) {
|
2012-11-05 23:22:59 +01:00
|
|
|
unsigned int i;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
for (i = 0; i < ctx->max_sz; i++)
|
2013-05-07 00:52:06 +02:00
|
|
|
vp9_free_frame_buffer(&ctx->buf[i].img);
|
2012-07-14 00:21:29 +02:00
|
|
|
free(ctx->buf);
|
2011-04-13 20:00:18 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
free(ctx);
|
|
|
|
}
|
2011-04-13 20:00:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
struct lookahead_ctx * vp9_lookahead_init(unsigned int width,
|
|
|
|
unsigned int height,
|
2013-05-07 00:52:06 +02:00
|
|
|
unsigned int subsampling_x,
|
|
|
|
unsigned int subsampling_y,
|
2013-03-12 01:02:27 +01:00
|
|
|
unsigned int depth) {
|
2012-07-14 00:21:29 +02:00
|
|
|
struct lookahead_ctx *ctx = NULL;
|
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
// Clamp the lookahead queue depth
|
|
|
|
depth = clamp(depth, 1, MAX_LAG_BUFFERS);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
// Allocate the lookahead structures
|
2012-07-14 00:21:29 +02:00
|
|
|
ctx = calloc(1, sizeof(*ctx));
|
|
|
|
if (ctx) {
|
2012-11-05 23:22:59 +01:00
|
|
|
unsigned int i;
|
2012-07-14 00:21:29 +02:00
|
|
|
ctx->max_sz = depth;
|
|
|
|
ctx->buf = calloc(depth, sizeof(*ctx->buf));
|
|
|
|
if (!ctx->buf)
|
|
|
|
goto bail;
|
|
|
|
for (i = 0; i < depth; i++)
|
2013-05-07 00:52:06 +02:00
|
|
|
if (vp9_alloc_frame_buffer(&ctx->buf[i].img,
|
|
|
|
width, height, subsampling_x, subsampling_y,
|
|
|
|
VP9BORDERINPIXELS))
|
2012-07-14 00:21:29 +02:00
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
return ctx;
|
2011-04-13 20:00:18 +02:00
|
|
|
bail:
|
2012-10-30 20:58:42 +01:00
|
|
|
vp9_lookahead_destroy(ctx);
|
2012-07-14 00:21:29 +02:00
|
|
|
return NULL;
|
2011-04-13 20:00:18 +02:00
|
|
|
}
|
|
|
|
|
2013-05-16 02:55:08 +02:00
|
|
|
#define USE_PARTIAL_COPY 0
|
2011-04-13 20:00:18 +02:00
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
/* Copy `src` into the next write slot of the lookahead queue, recording
 * its timestamps and flags. Returns 0 on success, 1 if the queue is full.
 * `active_map` (one byte per macroblock, row-major) is only consulted by
 * the currently-disabled partial-copy path below.
 */
int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                       int64_t ts_start, int64_t ts_end, unsigned int flags,
                       unsigned char *active_map) {
  struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
  int row, col, active_end;
  int mb_rows = (src->y_height + 15) >> 4;
  int mb_cols = (src->y_width + 15) >> 4;
#endif

  // Reject the frame if there is no free slot.
  if (ctx->sz + 1 > ctx->max_sz)
    return 1;
  ctx->sz++;
  buf = pop(ctx, &ctx->write_idx);

#if USE_PARTIAL_COPY
  // TODO(jkoleszar): This is disabled for now, as
  // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.

  // Only do this partial copy if the following conditions are all met:
  // 1. Lookahead queue has a size of 1.
  // 2. Active map is provided.
  // 3. This is not a key frame, golden nor altref frame.
  if (ctx->max_sz == 1 && active_map && !flags) {
    for (row = 0; row < mb_rows; ++row) {
      col = 0;

      while (1) {
        // Find the first active macroblock in this row.
        for (; col < mb_cols; ++col) {
          if (active_map[col])
            break;
        }

        // No more active macroblock in this row.
        if (col == mb_cols)
          break;

        // Find the end of active region in this row.
        active_end = col;

        for (; active_end < mb_cols; ++active_end) {
          if (!active_map[active_end])
            break;
        }

        // Only copy this active region.
        vp9_copy_and_extend_frame_with_rect(src, &buf->img,
                                            row << 4,
                                            col << 4, 16,
                                            (active_end - col) << 4);

        // Start again from the end of this active region.
        col = active_end;
      }

      // Advance to the next row of the macroblock active map.
      active_map += mb_cols;
    }
  } else {
    vp9_copy_and_extend_frame(src, &buf->img);
  }
#else
  // Partial copy not implemented yet
  vp9_copy_and_extend_frame(src, &buf->img);
#endif

  buf->ts_start = ts_start;
  buf->ts_end = ts_end;
  buf->flags = flags;
  return 0;
}
|
|
|
|
|
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
struct lookahead_entry * vp9_lookahead_pop(struct lookahead_ctx *ctx,
|
|
|
|
int drain) {
|
2012-07-14 00:21:29 +02:00
|
|
|
struct lookahead_entry *buf = NULL;
|
|
|
|
|
|
|
|
if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) {
|
|
|
|
buf = pop(ctx, &ctx->read_idx);
|
|
|
|
ctx->sz--;
|
|
|
|
}
|
|
|
|
return buf;
|
2011-04-13 20:00:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
struct lookahead_entry * vp9_lookahead_peek(struct lookahead_ctx *ctx,
|
|
|
|
int index) {
|
2012-07-14 00:21:29 +02:00
|
|
|
struct lookahead_entry *buf = NULL;
|
|
|
|
|
2013-01-18 18:44:23 +01:00
|
|
|
assert(index < (int)ctx->max_sz);
|
2012-11-05 23:22:59 +01:00
|
|
|
if (index < (int)ctx->sz) {
|
2012-07-14 00:21:29 +02:00
|
|
|
index += ctx->read_idx;
|
2012-11-05 23:22:59 +01:00
|
|
|
if (index >= (int)ctx->max_sz)
|
2012-07-14 00:21:29 +02:00
|
|
|
index -= ctx->max_sz;
|
|
|
|
buf = ctx->buf + index;
|
|
|
|
}
|
|
|
|
return buf;
|
2011-04-13 20:00:18 +02:00
|
|
|
}
|
|
|
|
|
2013-03-12 01:02:27 +01:00
|
|
|
/* Return the number of frames currently buffered in the queue. */
unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {
  return ctx->sz;
}
|