/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"
#include "./vp9_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_once.h"

#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_onyxc_int.h"

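// Transform type used for the residual of each intra prediction mode.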
const TX_TYPE intra_mode_to_tx_type_lookup[INTRA_MODES] = {
  DCT_DCT,    // DC
  ADST_DCT,   // V
  DCT_ADST,   // H
  DCT_DCT,    // D45
  ADST_ADST,  // D135
  ADST_DCT,   // D117
  DCT_ADST,   // D153
  DCT_ADST,   // D207
  ADST_DCT,   // D63
  ADST_ADST,  // TM
};

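// Each intra mode reads only certain neighboring borders; extend_modes
// records which of the left, above and above-right edges a mode needs so
// that build_intra_predictors() only fetches and extends those borders.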
enum {
  NEED_LEFT = 1 << 1,
  NEED_ABOVE = 1 << 2,
  NEED_ABOVERIGHT = 1 << 3,
};

static const uint8_t extend_modes[INTRA_MODES] = {
  NEED_ABOVE | NEED_LEFT,  // DC
  NEED_ABOVE,              // V
  NEED_LEFT,               // H
  NEED_ABOVERIGHT,         // D45
  NEED_LEFT | NEED_ABOVE,  // D135
  NEED_LEFT | NEED_ABOVE,  // D117
  NEED_LEFT | NEED_ABOVE,  // D153
  NEED_LEFT,               // D207
  NEED_ABOVERIGHT,         // D63
  NEED_LEFT | NEED_ABOVE,  // TM
};

// This serves as a wrapper function, so that all the prediction functions
// can be unified and accessed as a pointer array. Note that the boundary
// above and left are not necessarily used all the time.
#define intra_pred_sized(type, size) \
  void vp9_##type##_predictor_##size##x##size##_c(uint8_t *dst, \
                                                  ptrdiff_t stride, \
                                                  const uint8_t *above, \
                                                  const uint8_t *left) { \
    type##_predictor(dst, stride, size, above, left); \
  }
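// For example, intra_pred_sized(dc, 4) expands to vp9_dc_predictor_4x4_c(),
// a thin wrapper that simply forwards to dc_predictor() with bs = 4.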

#if CONFIG_VP9_HIGHBITDEPTH
#define intra_pred_highbd_sized(type, size) \
  void vp9_highbd_##type##_predictor_##size##x##size##_c( \
      uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \
      const uint16_t *left, int bd) { \
    highbd_##type##_predictor(dst, stride, size, above, left, bd); \
  }

#define intra_pred_allsizes(type) \
  intra_pred_sized(type, 4) \
  intra_pred_sized(type, 8) \
  intra_pred_sized(type, 16) \
  intra_pred_sized(type, 32) \
  intra_pred_highbd_sized(type, 4) \
  intra_pred_highbd_sized(type, 8) \
  intra_pred_highbd_sized(type, 16) \
  intra_pred_highbd_sized(type, 32)

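// The 8-bit 4x4 versions of the directional predictors are hand-unrolled
// below, so intra_pred_no_4x4() only instantiates the generic wrappers for
// the remaining sizes (the high bitdepth 4x4 wrapper is still generated).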
#define intra_pred_no_4x4(type) \
  intra_pred_sized(type, 8) \
  intra_pred_sized(type, 16) \
  intra_pred_sized(type, 32) \
  intra_pred_highbd_sized(type, 4) \
  intra_pred_highbd_sized(type, 8) \
  intra_pred_highbd_sized(type, 16) \
  intra_pred_highbd_sized(type, 32)

#else

#define intra_pred_allsizes(type) \
  intra_pred_sized(type, 4) \
  intra_pred_sized(type, 8) \
  intra_pred_sized(type, 16) \
  intra_pred_sized(type, 32)

#define intra_pred_no_4x4(type) \
  intra_pred_sized(type, 8) \
  intra_pred_sized(type, 16) \
  intra_pred_sized(type, 32)
#endif  // CONFIG_VP9_HIGHBITDEPTH

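// Helpers shared by the predictors below: DST() indexes the destination
// block, AVG2() is a rounded 2-tap average and AVG3() a rounded 1-2-1
// 3-tap average.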
#define DST(x, y) dst[(x) + (y) * stride]
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
#define AVG2(a, b) (((a) + (b) + 1) >> 1)

#if CONFIG_VP9_HIGHBITDEPTH
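// D207: the first two columns are built from filtered left-edge samples;
// every remaining pixel is copied from the pixel one row below and two
// columns to the left.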
static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride,
                                         int bs, const uint16_t *above,
                                         const uint16_t *left, int bd) {
  int r, c;
  (void) above;
  (void) bd;

  // First column.
  for (r = 0; r < bs - 1; ++r) {
    dst[r * stride] = AVG2(left[r], left[r + 1]);
  }
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // Second column.
  for (r = 0; r < bs - 2; ++r) {
    dst[r * stride] = AVG3(left[r], left[r + 1], left[r + 2]);
  }
  dst[(bs - 2) * stride] = AVG3(left[bs - 2], left[bs - 1], left[bs - 1]);
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // Rest of last row.
  for (c = 0; c < bs - 2; ++c)
    dst[(bs - 1) * stride + c] = left[bs - 1];

  for (r = bs - 2; r >= 0; --r) {
    for (c = 0; c < bs - 2; ++c)
      dst[r * stride + c] = dst[(r + 1) * stride + c - 2];
  }
}

static INLINE void highbd_d63_predictor(uint16_t *dst, ptrdiff_t stride,
                                        int bs, const uint16_t *above,
                                        const uint16_t *left, int bd) {
  int r, c;
  (void) left;
  (void) bd;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c) {
      dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1],
                            above[(r >> 1) + c + 2])
                     : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
    }
    dst += stride;
  }
}

static INLINE void highbd_d45_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  int r, c;
  (void) left;
  (void) bd;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c) {
      dst[c] = r + c + 2 < bs * 2 ? AVG3(above[r + c], above[r + c + 1],
                                         above[r + c + 2])
                                  : above[bs * 2 - 1];
    }
    dst += stride;
  }
}

static INLINE void highbd_d117_predictor(uint16_t *dst, ptrdiff_t stride,
                                         int bs, const uint16_t *above,
                                         const uint16_t *left, int bd) {
  int r, c;
  (void) bd;

  // first row
  for (c = 0; c < bs; c++)
    dst[c] = AVG2(above[c - 1], above[c]);
  dst += stride;

  // second row
  dst[0] = AVG3(left[0], above[-1], above[0]);
  for (c = 1; c < bs; c++)
    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
  dst += stride;

  // the rest of first col
  dst[0] = AVG3(above[-1], left[0], left[1]);
  for (r = 3; r < bs; ++r)
    dst[(r - 2) * stride] = AVG3(left[r - 3], left[r - 2], left[r - 1]);

  // the rest of the block
  for (r = 2; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-2 * stride + c - 1];
    dst += stride;
  }
}

static INLINE void highbd_d135_predictor(uint16_t *dst, ptrdiff_t stride,
                                         int bs, const uint16_t *above,
                                         const uint16_t *left, int bd) {
  int r, c;
  (void) bd;
  dst[0] = AVG3(left[0], above[-1], above[0]);
  for (c = 1; c < bs; c++)
    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);

  dst[stride] = AVG3(above[-1], left[0], left[1]);
  for (r = 2; r < bs; ++r)
    dst[r * stride] = AVG3(left[r - 2], left[r - 1], left[r]);

  dst += stride;
  for (r = 1; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-stride + c - 1];
    dst += stride;
  }
}

static INLINE void highbd_d153_predictor(uint16_t *dst, ptrdiff_t stride,
                                         int bs, const uint16_t *above,
                                         const uint16_t *left, int bd) {
  int r, c;
  (void) bd;
  dst[0] = AVG2(above[-1], left[0]);
  for (r = 1; r < bs; r++)
    dst[r * stride] = AVG2(left[r - 1], left[r]);
  dst++;

  dst[0] = AVG3(left[0], above[-1], above[0]);
  dst[stride] = AVG3(above[-1], left[0], left[1]);
  for (r = 2; r < bs; r++)
    dst[r * stride] = AVG3(left[r - 2], left[r - 1], left[r]);
  dst++;

  for (c = 0; c < bs - 2; c++)
    dst[c] = AVG3(above[c - 1], above[c], above[c + 1]);
  dst += stride;

  for (r = 1; r < bs; ++r) {
    for (c = 0; c < bs - 2; c++)
      dst[c] = dst[-stride + c - 2];
    dst += stride;
  }
}

static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride,
                                      int bs, const uint16_t *above,
                                      const uint16_t *left, int bd) {
  int r;
  (void) left;
  (void) bd;
  for (r = 0; r < bs; r++) {
    memcpy(dst, above, bs * sizeof(uint16_t));
    dst += stride;
  }
}

static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride,
                                      int bs, const uint16_t *above,
                                      const uint16_t *left, int bd) {
  int r;
  (void) above;
  (void) bd;
  for (r = 0; r < bs; r++) {
    vpx_memset16(dst, left[r], bs);
    dst += stride;
  }
}

static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride,
                                       int bs, const uint16_t *above,
                                       const uint16_t *left, int bd) {
  int r, c;
  int ytop_left = above[-1];
  (void) bd;

  for (r = 0; r < bs; r++) {
    for (c = 0; c < bs; c++)
      dst[c] = clip_pixel_highbd(left[r] + above[c] - ytop_left, bd);
    dst += stride;
  }
}

static INLINE void highbd_dc_128_predictor(uint16_t *dst, ptrdiff_t stride,
                                           int bs, const uint16_t *above,
                                           const uint16_t *left, int bd) {
  int r;
  (void) above;
  (void) left;

  for (r = 0; r < bs; r++) {
    vpx_memset16(dst, 128 << (bd - 8), bs);
    dst += stride;
  }
}

static INLINE void highbd_dc_left_predictor(uint16_t *dst, ptrdiff_t stride,
                                            int bs, const uint16_t *above,
                                            const uint16_t *left, int bd) {
  int i, r, expected_dc, sum = 0;
  (void) above;
  (void) bd;

  for (i = 0; i < bs; i++)
    sum += left[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    vpx_memset16(dst, expected_dc, bs);
    dst += stride;
  }
}

static INLINE void highbd_dc_top_predictor(uint16_t *dst, ptrdiff_t stride,
                                           int bs, const uint16_t *above,
                                           const uint16_t *left, int bd) {
  int i, r, expected_dc, sum = 0;
  (void) left;
  (void) bd;

  for (i = 0; i < bs; i++)
    sum += above[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    vpx_memset16(dst, expected_dc, bs);
    dst += stride;
  }
}

static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride,
                                       int bs, const uint16_t *above,
                                       const uint16_t *left, int bd) {
  int i, r, expected_dc, sum = 0;
  const int count = 2 * bs;
  (void) bd;

  for (i = 0; i < bs; i++) {
    sum += above[i];
    sum += left[i];
  }

  expected_dc = (sum + (count >> 1)) / count;

  for (r = 0; r < bs; r++) {
    vpx_memset16(dst, expected_dc, bs);
    dst += stride;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  const int I = left[0];
  const int J = left[1];
  const int K = left[2];
  const int L = left[3];
  (void)above;
  DST(0, 0) = AVG2(I, J);
  DST(2, 0) = DST(0, 1) = AVG2(J, K);
  DST(2, 1) = DST(0, 2) = AVG2(K, L);
  DST(1, 0) = AVG3(I, J, K);
  DST(3, 0) = DST(1, 1) = AVG3(J, K, L);
  DST(3, 1) = DST(1, 2) = AVG3(K, L, L);
  DST(3, 2) = DST(2, 2) =
      DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
}

static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  (void) above;
  // first column
  for (r = 0; r < bs - 1; ++r)
    dst[r * stride] = AVG2(left[r], left[r + 1]);
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // second column
  for (r = 0; r < bs - 2; ++r)
    dst[r * stride] = AVG3(left[r], left[r + 1], left[r + 2]);
  dst[(bs - 2) * stride] = AVG3(left[bs - 2], left[bs - 1], left[bs - 1]);
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // rest of last row
  for (c = 0; c < bs - 2; ++c)
    dst[(bs - 1) * stride + c] = left[bs - 1];

  for (r = bs - 2; r >= 0; --r)
    for (c = 0; c < bs - 2; ++c)
      dst[r * stride + c] = dst[(r + 1) * stride + c - 2];
}
intra_pred_no_4x4(d207)

void vp9_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                             const uint8_t *above, const uint8_t *left) {
  const int A = above[0];
  const int B = above[1];
  const int C = above[2];
  const int D = above[3];
  const int E = above[4];
  const int F = above[5];
  const int G = above[6];
  const int H = above[7];
  (void)left;
  DST(0, 0) = AVG2(A, B);
  DST(1, 0) = DST(0, 2) = AVG2(B, C);
  DST(2, 0) = DST(1, 2) = AVG2(C, D);
  DST(3, 0) = DST(2, 2) = AVG2(D, E);

  DST(0, 1) = AVG3(A, B, C);
  DST(1, 1) = DST(0, 3) = AVG3(B, C, D);
  DST(2, 1) = DST(1, 3) = AVG3(C, D, E);
  DST(3, 1) = DST(2, 3) = AVG3(D, E, F);
  DST(3, 2) = AVG3(E, F, G);
  DST(3, 3) = AVG3(F, G, H);
}

static INLINE void d63_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                 const uint8_t *above, const uint8_t *left) {
  int r, c;
  (void) left;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c)
      dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1],
                            above[(r >> 1) + c + 2])
                     : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
    dst += stride;
  }
}
intra_pred_no_4x4(d63)

void vp9_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                             const uint8_t *above, const uint8_t *left) {
  const int A = above[0];
  const int B = above[1];
  const int C = above[2];
  const int D = above[3];
  const int E = above[4];
  const int F = above[5];
  const int G = above[6];
  const int H = above[7];
  (void)stride;
  (void)left;
  DST(0, 0) = AVG3(A, B, C);
  DST(1, 0) = DST(0, 1) = AVG3(B, C, D);
  DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E);
  DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F);
  DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G);
  DST(3, 2) = DST(2, 3) = AVG3(F, G, H);
  DST(3, 3) = AVG3(G, H, H);
}

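// The generic D45 predictor filters the above row once into avg[] and then
// emits it shifted one sample to the left per row, padding the tail of each
// row with above[bs - 1].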
static INLINE void d45_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                 const uint8_t *above, const uint8_t *left) {
  const uint8_t above_right = above[bs - 1];
  int x, size;
  uint8_t avg[31];  // TODO(jzern): this could be block size specific
  (void)left;

  for (x = 0; x < bs - 1; ++x) {
    avg[x] = AVG3(above[x], above[x + 1], above[x + 2]);
  }
  for (x = 0, size = bs - 1; x < bs; ++x, --size) {
    memcpy(dst, avg + x, size);
    memset(dst + size, above_right, x + 1);
    dst += stride;
  }
}
intra_pred_no_4x4(d45)

void vp9_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  const int I = left[0];
  const int J = left[1];
  const int K = left[2];
  const int X = above[-1];
  const int A = above[0];
  const int B = above[1];
  const int C = above[2];
  const int D = above[3];
  DST(0, 0) = DST(1, 2) = AVG2(X, A);
  DST(1, 0) = DST(2, 2) = AVG2(A, B);
  DST(2, 0) = DST(3, 2) = AVG2(B, C);
  DST(3, 0) = AVG2(C, D);

  DST(0, 3) = AVG3(K, J, I);
  DST(0, 2) = AVG3(J, I, X);
  DST(0, 1) = DST(1, 3) = AVG3(I, X, A);
  DST(1, 1) = DST(2, 3) = AVG3(X, A, B);
  DST(2, 1) = DST(3, 3) = AVG3(A, B, C);
  DST(3, 1) = AVG3(B, C, D);
}

static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;

  // first row
  for (c = 0; c < bs; c++)
    dst[c] = AVG2(above[c - 1], above[c]);
  dst += stride;

  // second row
  dst[0] = AVG3(left[0], above[-1], above[0]);
  for (c = 1; c < bs; c++)
    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
  dst += stride;

  // the rest of first col
  dst[0] = AVG3(above[-1], left[0], left[1]);
  for (r = 3; r < bs; ++r)
    dst[(r - 2) * stride] = AVG3(left[r - 3], left[r - 2], left[r - 1]);

  // the rest of the block
  for (r = 2; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-2 * stride + c - 1];
    dst += stride;
  }
}
intra_pred_no_4x4(d117)

void vp9_d135_predictor_4x4(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *above, const uint8_t *left) {
  const int I = left[0];
  const int J = left[1];
  const int K = left[2];
  const int L = left[3];
  const int X = above[-1];
  const int A = above[0];
  const int B = above[1];
  const int C = above[2];
  const int D = above[3];
  (void)stride;
  DST(0, 3) = AVG3(J, K, L);
  DST(1, 3) = DST(0, 2) = AVG3(I, J, K);
  DST(2, 3) = DST(1, 2) = DST(0, 1) = AVG3(X, I, J);
  DST(3, 3) = DST(2, 2) = DST(1, 1) = DST(0, 0) = AVG3(A, X, I);
  DST(3, 2) = DST(2, 1) = DST(1, 0) = AVG3(B, A, X);
  DST(3, 1) = DST(2, 0) = AVG3(C, B, A);
  DST(3, 0) = AVG3(D, C, B);
}

static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  dst[0] = AVG3(left[0], above[-1], above[0]);
  for (c = 1; c < bs; c++)
    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);

  dst[stride] = AVG3(above[-1], left[0], left[1]);
  for (r = 2; r < bs; ++r)
    dst[r * stride] = AVG3(left[r - 2], left[r - 1], left[r]);

  dst += stride;
  for (r = 1; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-stride + c - 1];
    dst += stride;
  }
}
intra_pred_no_4x4(d135)

void vp9_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  const int I = left[0];
  const int J = left[1];
  const int K = left[2];
  const int L = left[3];
  const int X = above[-1];
  const int A = above[0];
  const int B = above[1];
  const int C = above[2];

  DST(0, 0) = DST(2, 1) = AVG2(I, X);
  DST(0, 1) = DST(2, 2) = AVG2(J, I);
  DST(0, 2) = DST(2, 3) = AVG2(K, J);
  DST(0, 3) = AVG2(L, K);

  DST(3, 0) = AVG3(A, B, C);
  DST(2, 0) = AVG3(X, A, B);
  DST(1, 0) = DST(3, 1) = AVG3(I, X, A);
  DST(1, 1) = DST(3, 2) = AVG3(J, I, X);
  DST(1, 2) = DST(3, 3) = AVG3(K, J, I);
  DST(1, 3) = AVG3(L, K, J);
}

static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  dst[0] = AVG2(above[-1], left[0]);
  for (r = 1; r < bs; r++)
    dst[r * stride] = AVG2(left[r - 1], left[r]);
  dst++;

  dst[0] = AVG3(left[0], above[-1], above[0]);
  dst[stride] = AVG3(above[-1], left[0], left[1]);
  for (r = 2; r < bs; r++)
    dst[r * stride] = AVG3(left[r - 2], left[r - 1], left[r]);
  dst++;

  for (c = 0; c < bs - 2; c++)
    dst[c] = AVG3(above[c - 1], above[c], above[c + 1]);
  dst += stride;

  for (r = 1; r < bs; ++r) {
    for (c = 0; c < bs - 2; c++)
      dst[c] = dst[-stride + c - 2];
    dst += stride;
  }
}
intra_pred_no_4x4(d153)

static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                               const uint8_t *above, const uint8_t *left) {
  int r;
  (void) left;

  for (r = 0; r < bs; r++) {
    memcpy(dst, above, bs);
    dst += stride;
  }
}
intra_pred_allsizes(v)

static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                               const uint8_t *above, const uint8_t *left) {
  int r;
  (void) above;

  for (r = 0; r < bs; r++) {
    memset(dst, left[r], bs);
    dst += stride;
  }
}
intra_pred_allsizes(h)

static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  int r, c;
  int ytop_left = above[-1];

  for (r = 0; r < bs; r++) {
    for (c = 0; c < bs; c++)
      dst[c] = clip_pixel(left[r] + above[c] - ytop_left);
    dst += stride;
  }
}
intra_pred_allsizes(tm)

static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
  int r;
  (void) above;
  (void) left;

  for (r = 0; r < bs; r++) {
    memset(dst, 128, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_128)

static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  int i, r, expected_dc, sum = 0;
  (void) above;

  for (i = 0; i < bs; i++)
    sum += left[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_left)

static INLINE void dc_top_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
  int i, r, expected_dc, sum = 0;
  (void) left;

  for (i = 0; i < bs; i++)
    sum += above[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_top)

static INLINE void dc_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  int i, r, expected_dc, sum = 0;
  const int count = 2 * bs;

  for (i = 0; i < bs; i++) {
    sum += above[i];
    sum += left[i];
  }

  expected_dc = (sum + (count >> 1)) / count;

  for (r = 0; r < bs; r++) {
    memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc)
#undef intra_pred_allsizes

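// Function pointer tables, filled in once by
// vp9_init_intra_predictors_internal(): one predictor per (mode, transform
// size), plus four DC variants indexed by whether the left and above borders
// are available.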
typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left);

static intra_pred_fn pred[INTRA_MODES][TX_SIZES];
static intra_pred_fn dc_pred[2][2][TX_SIZES];

#if CONFIG_VP9_HIGHBITDEPTH
typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride,
                                   const uint16_t *above, const uint16_t *left,
                                   int bd);
static intra_high_pred_fn pred_high[INTRA_MODES][4];
static intra_high_pred_fn dc_pred_high[2][2][4];
#endif  // CONFIG_VP9_HIGHBITDEPTH

static void vp9_init_intra_predictors_internal(void) {
#define INIT_ALL_SIZES(p, type) \
  p[TX_4X4] = vp9_##type##_predictor_4x4; \
  p[TX_8X8] = vp9_##type##_predictor_8x8; \
  p[TX_16X16] = vp9_##type##_predictor_16x16; \
  p[TX_32X32] = vp9_##type##_predictor_32x32

  INIT_ALL_SIZES(pred[V_PRED], v);
  INIT_ALL_SIZES(pred[H_PRED], h);
  INIT_ALL_SIZES(pred[D207_PRED], d207);
  INIT_ALL_SIZES(pred[D45_PRED], d45);
  INIT_ALL_SIZES(pred[D63_PRED], d63);
  INIT_ALL_SIZES(pred[D117_PRED], d117);
  INIT_ALL_SIZES(pred[D135_PRED], d135);
  INIT_ALL_SIZES(pred[D153_PRED], d153);
  INIT_ALL_SIZES(pred[TM_PRED], tm);

  INIT_ALL_SIZES(dc_pred[0][0], dc_128);
  INIT_ALL_SIZES(dc_pred[0][1], dc_top);
  INIT_ALL_SIZES(dc_pred[1][0], dc_left);
  INIT_ALL_SIZES(dc_pred[1][1], dc);

#if CONFIG_VP9_HIGHBITDEPTH
  INIT_ALL_SIZES(pred_high[V_PRED], highbd_v);
  INIT_ALL_SIZES(pred_high[H_PRED], highbd_h);
  INIT_ALL_SIZES(pred_high[D207_PRED], highbd_d207);
  INIT_ALL_SIZES(pred_high[D45_PRED], highbd_d45);
  INIT_ALL_SIZES(pred_high[D63_PRED], highbd_d63);
  INIT_ALL_SIZES(pred_high[D117_PRED], highbd_d117);
  INIT_ALL_SIZES(pred_high[D135_PRED], highbd_d135);
  INIT_ALL_SIZES(pred_high[D153_PRED], highbd_d153);
  INIT_ALL_SIZES(pred_high[TM_PRED], highbd_tm);

  INIT_ALL_SIZES(dc_pred_high[0][0], highbd_dc_128);
  INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top);
  INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left);
  INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc);
#endif  // CONFIG_VP9_HIGHBITDEPTH

#undef intra_pred_allsizes
}

#if CONFIG_VP9_HIGHBITDEPTH
static void build_intra_predictors_high(const MACROBLOCKD *xd,
                                        const uint8_t *ref8,
                                        int ref_stride,
                                        uint8_t *dst8,
                                        int dst_stride,
                                        PREDICTION_MODE mode,
                                        TX_SIZE tx_size,
                                        int up_available,
                                        int left_available,
                                        int right_available,
                                        int x, int y,
                                        int plane, int bd) {
  int i;
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
  DECLARE_ALIGNED(16, uint16_t, left_col[32]);
  DECLARE_ALIGNED(16, uint16_t, above_data[64 + 16]);
  uint16_t *above_row = above_data + 16;
  const uint16_t *const_above_row = above_row;
  const int bs = 4 << tx_size;
  int frame_width, frame_height;
  int x0, y0;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  // int base=128;
  int base = 128 << (bd - 8);
  // 127 127 127 .. 127 127 127 127 127 127
  // 129 A B .. Y Z
  // 129 C D .. W X
  // 129 E F .. U V
  // 129 G H .. S T T T T T

  // Get current frame pointer, width and height.
  if (plane == 0) {
    frame_width = xd->cur_buf->y_width;
    frame_height = xd->cur_buf->y_height;
  } else {
    frame_width = xd->cur_buf->uv_width;
    frame_height = xd->cur_buf->uv_height;
  }

  // Get block position in current frame.
  x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
  y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

  // left
  if (left_available) {
    if (xd->mb_to_bottom_edge < 0) {
      /* slower path if the block needs border extension */
      if (y0 + bs <= frame_height) {
        for (i = 0; i < bs; ++i)
          left_col[i] = ref[i * ref_stride - 1];
      } else {
        const int extend_bottom = frame_height - y0;
        for (i = 0; i < extend_bottom; ++i)
          left_col[i] = ref[i * ref_stride - 1];
        for (; i < bs; ++i)
          left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
      }
    } else {
      /* faster path if the block does not need extension */
      for (i = 0; i < bs; ++i)
        left_col[i] = ref[i * ref_stride - 1];
    }
  } else {
    // TODO(Peter): this value should probably change for high bitdepth
    vpx_memset16(left_col, base + 1, bs);
  }

  // TODO(hkuang) do not extend 2*bs pixels for all modes.
  // above
  if (up_available) {
    const uint16_t *above_ref = ref - ref_stride;
    if (xd->mb_to_right_edge < 0) {
      /* slower path if the block needs border extension */
      if (x0 + 2 * bs <= frame_width) {
        if (right_available && bs == 4) {
          memcpy(above_row, above_ref, 2 * bs * sizeof(uint16_t));
        } else {
          memcpy(above_row, above_ref, bs * sizeof(uint16_t));
          vpx_memset16(above_row + bs, above_row[bs - 1], bs);
        }
      } else if (x0 + bs <= frame_width) {
        const int r = frame_width - x0;
        if (right_available && bs == 4) {
          memcpy(above_row, above_ref, r * sizeof(uint16_t));
          vpx_memset16(above_row + r, above_row[r - 1],
                       x0 + 2 * bs - frame_width);
        } else {
          memcpy(above_row, above_ref, bs * sizeof(uint16_t));
          vpx_memset16(above_row + bs, above_row[bs - 1], bs);
        }
      } else if (x0 <= frame_width) {
        const int r = frame_width - x0;
        memcpy(above_row, above_ref, r * sizeof(uint16_t));
        vpx_memset16(above_row + r, above_row[r - 1],
                     x0 + 2 * bs - frame_width);
      }
      // TODO(Peter) this value should probably change for high bitdepth
      above_row[-1] = left_available ? above_ref[-1] : (base+1);
    } else {
      /* faster path if the block does not need extension */
      if (bs == 4 && right_available && left_available) {
        const_above_row = above_ref;
      } else {
        memcpy(above_row, above_ref, bs * sizeof(uint16_t));
        if (bs == 4 && right_available)
          memcpy(above_row + bs, above_ref + bs, bs * sizeof(uint16_t));
        else
          vpx_memset16(above_row + bs, above_row[bs - 1], bs);
        // TODO(Peter): this value should probably change for high bitdepth
        above_row[-1] = left_available ? above_ref[-1] : (base+1);
      }
    }
  } else {
    vpx_memset16(above_row, base - 1, bs * 2);
    // TODO(Peter): this value should probably change for high bitdepth
    above_row[-1] = base - 1;
  }

  // predict
  if (mode == DC_PRED) {
    dc_pred_high[left_available][up_available][tx_size](dst, dst_stride,
                                                        const_above_row,
                                                        left_col, xd->bd);
  } else {
    pred_high[mode][tx_size](dst, dst_stride, const_above_row, left_col,
                             xd->bd);
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

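// Build the above row and left column for the block at (x, y), extending
// with the last frame pixel where the block overhangs the frame edge and
// filling with the default values shown in the diagram below when a
// neighbor is unavailable, then invoke the predictor for 'mode'.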
static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
                                   int ref_stride, uint8_t *dst, int dst_stride,
                                   PREDICTION_MODE mode, TX_SIZE tx_size,
                                   int up_available, int left_available,
                                   int right_available, int x, int y,
                                   int plane) {
  int i;
  DECLARE_ALIGNED(16, uint8_t, left_col[32]);
  DECLARE_ALIGNED(16, uint8_t, above_data[64 + 16]);
  uint8_t *above_row = above_data + 16;
  const uint8_t *const_above_row = above_row;
  const int bs = 4 << tx_size;
  int frame_width, frame_height;
  int x0, y0;
  const struct macroblockd_plane *const pd = &xd->plane[plane];

  // 127 127 127 .. 127 127 127 127 127 127
  // 129 A B .. Y Z
  // 129 C D .. W X
  // 129 E F .. U V
  // 129 G H .. S T T T T T
  // ..

  // Get current frame pointer, width and height.
  if (plane == 0) {
    frame_width = xd->cur_buf->y_width;
    frame_height = xd->cur_buf->y_height;
  } else {
    frame_width = xd->cur_buf->uv_width;
    frame_height = xd->cur_buf->uv_height;
  }

  // Get block position in current frame.
  x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
  y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

  // NEED_LEFT
  if (extend_modes[mode] & NEED_LEFT) {
    if (left_available) {
      if (xd->mb_to_bottom_edge < 0) {
        /* slower path if the block needs border extension */
        if (y0 + bs <= frame_height) {
          for (i = 0; i < bs; ++i)
            left_col[i] = ref[i * ref_stride - 1];
        } else {
          const int extend_bottom = frame_height - y0;
          for (i = 0; i < extend_bottom; ++i)
            left_col[i] = ref[i * ref_stride - 1];
          for (; i < bs; ++i)
            left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
        }
      } else {
        /* faster path if the block does not need extension */
        for (i = 0; i < bs; ++i)
          left_col[i] = ref[i * ref_stride - 1];
      }
    } else {
      memset(left_col, 129, bs);
    }
  }

  // NEED_ABOVE
  if (extend_modes[mode] & NEED_ABOVE) {
    if (up_available) {
      const uint8_t *above_ref = ref - ref_stride;
      if (xd->mb_to_right_edge < 0) {
        /* slower path if the block needs border extension */
        if (x0 + bs <= frame_width) {
          memcpy(above_row, above_ref, bs);
        } else if (x0 <= frame_width) {
          const int r = frame_width - x0;
          memcpy(above_row, above_ref, r);
          memset(above_row + r, above_row[r - 1], x0 + bs - frame_width);
        }
      } else {
        /* faster path if the block does not need extension */
        if (bs == 4 && right_available && left_available) {
          const_above_row = above_ref;
        } else {
          memcpy(above_row, above_ref, bs);
        }
      }
      above_row[-1] = left_available ? above_ref[-1] : 129;
    } else {
      memset(above_row, 127, bs);
      above_row[-1] = 127;
    }
  }

  // NEED_ABOVERIGHT
  if (extend_modes[mode] & NEED_ABOVERIGHT) {
    if (up_available) {
      const uint8_t *above_ref = ref - ref_stride;
      if (xd->mb_to_right_edge < 0) {
        /* slower path if the block needs border extension */
        if (x0 + 2 * bs <= frame_width) {
          if (right_available && bs == 4) {
            memcpy(above_row, above_ref, 2 * bs);
          } else {
            memcpy(above_row, above_ref, bs);
            memset(above_row + bs, above_row[bs - 1], bs);
          }
        } else if (x0 + bs <= frame_width) {
          const int r = frame_width - x0;
          if (right_available && bs == 4) {
            memcpy(above_row, above_ref, r);
            memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width);
          } else {
            memcpy(above_row, above_ref, bs);
            memset(above_row + bs, above_row[bs - 1], bs);
          }
        } else if (x0 <= frame_width) {
          const int r = frame_width - x0;
          memcpy(above_row, above_ref, r);
          memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width);
        }
      } else {
        /* faster path if the block does not need extension */
        if (bs == 4 && right_available && left_available) {
          const_above_row = above_ref;
        } else {
          memcpy(above_row, above_ref, bs);
          if (bs == 4 && right_available)
            memcpy(above_row + bs, above_ref + bs, bs);
          else
            memset(above_row + bs, above_row[bs - 1], bs);
        }
      }
      above_row[-1] = left_available ? above_ref[-1] : 129;
    } else {
      memset(above_row, 127, bs * 2);
      above_row[-1] = 127;
    }
  }

  // predict
  if (mode == DC_PRED) {
    dc_pred[left_available][up_available][tx_size](dst, dst_stride,
                                                   const_above_row, left_col);
  } else {
    pred[mode][tx_size](dst, dst_stride, const_above_row, left_col);
  }
}

void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in,
                             TX_SIZE tx_size, PREDICTION_MODE mode,
                             const uint8_t *ref, int ref_stride,
                             uint8_t *dst, int dst_stride,
                             int aoff, int loff, int plane) {
  const int bwl = bwl_in - tx_size;
  const int wmask = (1 << bwl) - 1;
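  // The top/left border is available if this transform block is not in the
  // first row/column of the prediction block, or if the neighboring block in
  // the frame is available; the above-right border is only used for blocks
  // that are not in the last column of the prediction block.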
  const int have_top = (block_idx >> bwl) || xd->up_available;
  const int have_left = (block_idx & wmask) || xd->left_available;
  const int have_right = ((block_idx & wmask) != wmask);
  const int x = aoff * 4;
  const int y = loff * 4;

  assert(bwl >= 0);
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
                                tx_size, have_top, have_left, have_right,
                                x, y, plane, xd->bd);
    return;
  }
#endif
  build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
                         have_top, have_left, have_right, x, y, plane);
}

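// Thread-safe, one-time initialization of the mode/size predictor tables.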
void vp9_init_intra_predictors(void) {
  once(vp9_init_intra_predictors_internal);
}