vpx/vp9/common/vp9_convolve.c

/*
 * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_config.h"
#include "./vp9_rtcd.h"
#include "vp9/common/vp9_common.h"
#include "vpx/vpx_integer.h"

#define VP9_FILTER_WEIGHT 128
#define VP9_FILTER_SHIFT 7

#define ALIGN_FILTERS_256 0
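
/* The filter coefficients are expected to be in Q7 precision, i.e. each
 * filter's taps sum to VP9_FILTER_WEIGHT (128); the accumulator below is
 * rounded by VP9_FILTER_WEIGHT / 2 and shifted down by VP9_FILTER_SHIFT
 * before being clipped to 8 bits.
 */
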
/* Assume a bank of 16 filters to choose from. There are several possible
 * implementations for filter wrapping behavior, since we want to be able to
 * pick which filter to start with. We could either:
 *
 * 1) make filter_ a pointer to the base of the filter array, and then add an
 *    additional offset parameter, to choose the starting filter.
 * 2) use a pointer to 2 periods worth of filters, so that even if the original
 *    phase offset is at 15/16, we'll have valid data to read. The filter
 *    tables become [32][8], and the second half is duplicated.
 * 3) fix the alignment of the filter tables, so that we know the 0/16 is
 *    always 256 byte aligned.
 *
 * Implementations 2 and 3 are likely preferable, as they avoid an extra 2
 * parameters, and switching between them is trivial.
 */

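/* The *_step_q4 arguments are fixed-point increments in 1/16-pel units:
 * a step of 16 advances one full source pixel per output pixel (no scaling).
 * In the loops below, x_q4 >> 4 is the integer source offset and x_q4 & 0xf
 * selects one of the 16 subpel filter phases. For example, a step of 24
 * (2/3 scaling) reads source offsets 0, 1, 3, 4, ... with phases
 * 0, 8, 0, 8, ...
 */
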
static void convolve_horiz_c(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride,
                             const int16_t *filter_x0, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4,
                             int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_x_base = filter_x0;

#if ALIGN_FILTERS_256
  filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source line */
  src -= taps / 2 - 1;

  for (y = 0; y < h; ++y) {
    /* Pointer to filter to use */
    const int16_t *filter_x = filter_x0;

    /* Initial phase offset */
    int x_q4 = (filter_x - filter_x_base) / taps;

    for (x = 0; x < w; ++x) {
      /* Per-pixel src offset */
      int src_x = x_q4 >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[src_x + k] * filter_x[k];
      }

      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[x] = clip_pixel(sum >> VP9_FILTER_SHIFT);

      /* Adjust source and filter to use for the next pixel */
      x_q4 += x_step_q4;
      filter_x = filter_x_base + (x_q4 & 0xf) * taps;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

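/* Identical to convolve_horiz_c, except that the filtered result is
 * averaged with the pixel already in dst, with rounding:
 * dst[x] = (dst[x] + result + 1) >> 1.
 */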
static void convolve_avg_horiz_c(const uint8_t *src, int src_stride,
                                 uint8_t *dst, int dst_stride,
                                 const int16_t *filter_x0, int x_step_q4,
                                 const int16_t *filter_y, int y_step_q4,
                                 int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_x_base = filter_x0;

#if ALIGN_FILTERS_256
  filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source line */
  src -= taps / 2 - 1;

  for (y = 0; y < h; ++y) {
    /* Pointer to filter to use */
    const int16_t *filter_x = filter_x0;

    /* Initial phase offset */
    int x_q4 = (filter_x - filter_x_base) / taps;

    for (x = 0; x < w; ++x) {
      /* Per-pixel src offset */
      int src_x = x_q4 >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[src_x + k] * filter_x[k];
      }

      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[x] = (dst[x] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;

      /* Adjust source and filter to use for the next pixel */
      x_q4 += x_step_q4;
      filter_x = filter_x_base + (x_q4 & 0xf) * taps;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

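/* Vertical counterpart of convolve_horiz_c: the filter walks down a column
 * (stepping by src_stride) and the outer loop advances one column at a time.
 * The avg variant below again averages the result with the existing dst.
 */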
static void convolve_vert_c(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            const int16_t *filter_x, int x_step_q4,
                            const int16_t *filter_y0, int y_step_q4,
                            int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_y_base = filter_y0;

#if ALIGN_FILTERS_256
  filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source column */
  src -= src_stride * (taps / 2 - 1);

  for (x = 0; x < w; ++x) {
    /* Pointer to filter to use */
    const int16_t *filter_y = filter_y0;

    /* Initial phase offset */
    int y_q4 = (filter_y - filter_y_base) / taps;

    for (y = 0; y < h; ++y) {
      /* Per-pixel src offset */
      int src_y = y_q4 >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[(src_y + k) * src_stride] * filter_y[k];
      }

      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[y * dst_stride] = clip_pixel(sum >> VP9_FILTER_SHIFT);

      /* Adjust source and filter to use for the next pixel */
      y_q4 += y_step_q4;
      filter_y = filter_y_base + (y_q4 & 0xf) * taps;
    }
    ++src;
    ++dst;
  }
}

static void convolve_avg_vert_c(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y0, int y_step_q4,
                                int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_y_base = filter_y0;

#if ALIGN_FILTERS_256
  filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source column */
  src -= src_stride * (taps / 2 - 1);

  for (x = 0; x < w; ++x) {
    /* Pointer to filter to use */
    const int16_t *filter_y = filter_y0;

    /* Initial phase offset */
    int y_q4 = (filter_y - filter_y_base) / taps;

    for (y = 0; y < h; ++y) {
      /* Per-pixel src offset */
      int src_y = y_q4 >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[(src_y + k) * src_stride] * filter_y[k];
      }

      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[y * dst_stride] =
          (dst[y * dst_stride] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;

      /* Adjust source and filter to use for the next pixel */
      y_q4 += y_step_q4;
      filter_y = filter_y_base + (y_q4 & 0xf) * taps;
    }
    ++src;
    ++dst;
  }
}

static void convolve_c(const uint8_t *src, int src_stride,
                       uint8_t *dst, int dst_stride,
                       const int16_t *filter_x, int x_step_q4,
                       const int16_t *filter_y, int y_step_q4,
                       int w, int h, int taps) {
  /* Fixed size intermediate buffer places limits on parameters. */
  uint8_t temp[16 * 23];
  assert(w <= 16);
  assert(h <= 16);
  assert(taps <= 8);
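
  /* Two-pass separable filter: the horizontal pass produces h + taps - 1
   * intermediate rows (at most 16 + 8 - 1 = 23, hence the 16 * 23 buffer);
   * the vertical pass then reads them back starting taps / 2 - 1 rows in
   * and writes the final h output rows.
   */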
  convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
                   temp, 16,
                   filter_x, x_step_q4, filter_y, y_step_q4,
                   w, h + taps - 1, taps);
  convolve_vert_c(temp + 16 * (taps / 2 - 1), 16, dst, dst_stride,
                  filter_x, x_step_q4, filter_y, y_step_q4,
                  w, h, taps);
}

static void convolve_avg_c(const uint8_t *src, int src_stride,
                           uint8_t *dst, int dst_stride,
                           const int16_t *filter_x, int x_step_q4,
                           const int16_t *filter_y, int y_step_q4,
                           int w, int h, int taps) {
  /* Fixed size intermediate buffer places limits on parameters. */
  uint8_t temp[16 * 23];
  assert(w <= 16);
  assert(h <= 16);
  assert(taps <= 8);

  convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
                   temp, 16,
                   filter_x, x_step_q4, filter_y, y_step_q4,
                   w, h + taps - 1, taps);
  convolve_avg_vert_c(temp + 16 * (taps / 2 - 1), 16, dst, dst_stride,
                      filter_x, x_step_q4, filter_y, y_step_q4,
                      w, h, taps);
}

void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride,
                           uint8_t *dst, int dst_stride,
                           const int16_t *filter_x, int x_step_q4,
                           const int16_t *filter_y, int y_step_q4,
                           int w, int h) {
  convolve_horiz_c(src, src_stride, dst, dst_stride,
                   filter_x, x_step_q4, filter_y, y_step_q4,
                   w, h, 8);
}

void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4,
                               int w, int h) {
  convolve_avg_horiz_c(src, src_stride, dst, dst_stride,
                       filter_x, x_step_q4, filter_y, y_step_q4,
                       w, h, 8);
}

void vp9_convolve8_vert_c(const uint8_t *src, int src_stride,
                          uint8_t *dst, int dst_stride,
                          const int16_t *filter_x, int x_step_q4,
                          const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
  convolve_vert_c(src, src_stride, dst, dst_stride,
                  filter_x, x_step_q4, filter_y, y_step_q4,
                  w, h, 8);
}

void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride,
                              uint8_t *dst, int dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4,
                              int w, int h) {
  convolve_avg_vert_c(src, src_stride, dst, dst_stride,
                      filter_x, x_step_q4, filter_y, y_step_q4,
                      w, h, 8);
}

void vp9_convolve8_c(const uint8_t *src, int src_stride,
                     uint8_t *dst, int dst_stride,
                     const int16_t *filter_x, int x_step_q4,
                     const int16_t *filter_y, int y_step_q4,
                     int w, int h) {
  convolve_c(src, src_stride, dst, dst_stride,
             filter_x, x_step_q4, filter_y, y_step_q4,
             w, h, 8);
}

void vp9_convolve8_avg_c(const uint8_t *src, int src_stride,
                         uint8_t *dst, int dst_stride,
                         const int16_t *filter_x, int x_step_q4,
                         const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
  convolve_avg_c(src, src_stride, dst, dst_stride,
                 filter_x, x_step_q4, filter_y, y_step_q4,
                 w, h, 8);
}

void vp9_convolve_copy(const uint8_t *src, int src_stride,
                       uint8_t *dst, int dst_stride,
                       const int16_t *filter_x, int filter_x_stride,
                       const int16_t *filter_y, int filter_y_stride,
                       int w, int h) {
  if (h == 16) {
    vp9_copy_mem16x16(src, src_stride, dst, dst_stride);
  } else if (h == 8) {
    vp9_copy_mem8x8(src, src_stride, dst, dst_stride);
  } else if (w == 8) {
    vp9_copy_mem8x4(src, src_stride, dst, dst_stride);
  } else {
    // 4x4
    int r;
    for (r = 0; r < 4; ++r) {
#if !(CONFIG_FAST_UNALIGNED)
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
#else
      *(uint32_t *)dst = *(const uint32_t *)src;
#endif
      src += src_stride;
      dst += dst_stride;
    }
  }
}

void vp9_convolve_avg(const uint8_t *src, int src_stride,
                      uint8_t *dst, int dst_stride,
                      const int16_t *filter_x, int filter_x_stride,
                      const int16_t *filter_y, int filter_y_stride,
                      int w, int h) {
  int x, y;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      dst[x] = (dst[x] + src[x] + 1) >> 1;
    }
    src += src_stride;
    dst += dst_stride;
  }
}