2013-01-25 18:47:09 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2013 The WebM project authors. All Rights Reserved.
|
|
|
|
*
|
|
|
|
* Use of this source code is governed by a BSD-style license
|
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
|
|
|
* in the file PATENTS. All contributing project authors may
|
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
|
|
|
*/
|
2013-02-12 00:34:08 +01:00
|
|
|
#include "vp9/common/vp9_convolve.h"

#include <assert.h>
#include <string.h>

#include "./vpx_config.h"
#include "./vp9_rtcd.h"
#include "vp9/common/vp9_common.h"
#include "vpx/vpx_integer.h"
#include "vpx_ports/mem.h"
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
#define VP9_FILTER_WEIGHT 128
|
|
|
|
#define VP9_FILTER_SHIFT 7
|
|
|
|
|
|
|
|
/* Assume a bank of 16 filters to choose from. There are two implementations
|
|
|
|
* for filter wrapping behavior, since we want to be able to pick which filter
|
|
|
|
* to start with. We could either:
|
|
|
|
*
|
|
|
|
* 1) make filter_ a pointer to the base of the filter array, and then add an
|
|
|
|
* additional offset parameter, to choose the starting filter.
|
|
|
|
* 2) use a pointer to 2 periods worth of filters, so that even if the original
|
|
|
|
* phase offset is at 15/16, we'll have valid data to read. The filter
|
|
|
|
* tables become [32][8], and the second half is duplicated.
|
|
|
|
* 3) fix the alignment of the filter tables, so that we know the 0/16 is
|
|
|
|
* always 256 byte aligned.
|
|
|
|
*
|
|
|
|
* Implementations 2 and 3 are likely preferable, as they avoid an extra 2
|
2013-02-21 00:59:20 +01:00
|
|
|
* parameters, and switching between them is trivial, with the
|
|
|
|
* ALIGN_FILTERS_256 macro, below.
|
2013-01-25 18:47:09 +01:00
|
|
|
*/
|
2013-02-21 00:59:20 +01:00
|
|
|
#define ALIGN_FILTERS_256 1
|
|
|
|
|
2013-01-25 18:47:09 +01:00
|
|
|
static void convolve_horiz_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x0, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h, int taps) {
|
|
|
|
int x, y, k, sum;
|
|
|
|
const int16_t *filter_x_base = filter_x0;
|
|
|
|
|
|
|
|
#if ALIGN_FILTERS_256
|
|
|
|
filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Adjust base pointer address for this source line */
|
|
|
|
src -= taps / 2 - 1;
|
|
|
|
|
|
|
|
for (y = 0; y < h; ++y) {
|
|
|
|
/* Pointer to filter to use */
|
|
|
|
const int16_t *filter_x = filter_x0;
|
|
|
|
|
|
|
|
/* Initial phase offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int x0_q4 = (filter_x - filter_x_base) / taps;
|
|
|
|
int x_q4 = x0_q4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (x = 0; x < w; ++x) {
|
|
|
|
/* Per-pixel src offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int src_x = (x_q4 - x0_q4) >> 4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (sum = 0, k = 0; k < taps; ++k) {
|
|
|
|
sum += src[src_x + k] * filter_x[k];
|
|
|
|
}
|
|
|
|
sum += (VP9_FILTER_WEIGHT >> 1);
|
|
|
|
dst[x] = clip_pixel(sum >> VP9_FILTER_SHIFT);
|
|
|
|
|
|
|
|
/* Adjust source and filter to use for the next pixel */
|
|
|
|
x_q4 += x_step_q4;
|
|
|
|
filter_x = filter_x_base + (x_q4 & 0xf) * taps;
|
|
|
|
}
|
|
|
|
src += src_stride;
|
|
|
|
dst += dst_stride;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void convolve_avg_horiz_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x0, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h, int taps) {
|
|
|
|
int x, y, k, sum;
|
|
|
|
const int16_t *filter_x_base = filter_x0;
|
|
|
|
|
|
|
|
#if ALIGN_FILTERS_256
|
|
|
|
filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Adjust base pointer address for this source line */
|
|
|
|
src -= taps / 2 - 1;
|
|
|
|
|
|
|
|
for (y = 0; y < h; ++y) {
|
|
|
|
/* Pointer to filter to use */
|
|
|
|
const int16_t *filter_x = filter_x0;
|
|
|
|
|
|
|
|
/* Initial phase offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int x0_q4 = (filter_x - filter_x_base) / taps;
|
|
|
|
int x_q4 = x0_q4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (x = 0; x < w; ++x) {
|
|
|
|
/* Per-pixel src offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int src_x = (x_q4 - x0_q4) >> 4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (sum = 0, k = 0; k < taps; ++k) {
|
|
|
|
sum += src[src_x + k] * filter_x[k];
|
|
|
|
}
|
|
|
|
sum += (VP9_FILTER_WEIGHT >> 1);
|
|
|
|
dst[x] = (dst[x] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;
|
|
|
|
|
|
|
|
/* Adjust source and filter to use for the next pixel */
|
|
|
|
x_q4 += x_step_q4;
|
|
|
|
filter_x = filter_x_base + (x_q4 & 0xf) * taps;
|
|
|
|
}
|
|
|
|
src += src_stride;
|
|
|
|
dst += dst_stride;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void convolve_vert_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y0, int y_step_q4,
|
|
|
|
int w, int h, int taps) {
|
|
|
|
int x, y, k, sum;
|
|
|
|
|
|
|
|
const int16_t *filter_y_base = filter_y0;
|
|
|
|
|
|
|
|
#if ALIGN_FILTERS_256
|
|
|
|
filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Adjust base pointer address for this source column */
|
|
|
|
src -= src_stride * (taps / 2 - 1);
|
|
|
|
for (x = 0; x < w; ++x) {
|
|
|
|
/* Pointer to filter to use */
|
|
|
|
const int16_t *filter_y = filter_y0;
|
|
|
|
|
|
|
|
/* Initial phase offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int y0_q4 = (filter_y - filter_y_base) / taps;
|
|
|
|
int y_q4 = y0_q4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (y = 0; y < h; ++y) {
|
|
|
|
/* Per-pixel src offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int src_y = (y_q4 - y0_q4) >> 4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (sum = 0, k = 0; k < taps; ++k) {
|
|
|
|
sum += src[(src_y + k) * src_stride] * filter_y[k];
|
|
|
|
}
|
|
|
|
sum += (VP9_FILTER_WEIGHT >> 1);
|
|
|
|
dst[y * dst_stride] = clip_pixel(sum >> VP9_FILTER_SHIFT);
|
|
|
|
|
|
|
|
/* Adjust source and filter to use for the next pixel */
|
|
|
|
y_q4 += y_step_q4;
|
|
|
|
filter_y = filter_y_base + (y_q4 & 0xf) * taps;
|
|
|
|
}
|
|
|
|
++src;
|
|
|
|
++dst;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void convolve_avg_vert_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y0, int y_step_q4,
|
|
|
|
int w, int h, int taps) {
|
|
|
|
int x, y, k, sum;
|
|
|
|
|
|
|
|
const int16_t *filter_y_base = filter_y0;
|
|
|
|
|
|
|
|
#if ALIGN_FILTERS_256
|
|
|
|
filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Adjust base pointer address for this source column */
|
|
|
|
src -= src_stride * (taps / 2 - 1);
|
|
|
|
for (x = 0; x < w; ++x) {
|
|
|
|
/* Pointer to filter to use */
|
|
|
|
const int16_t *filter_y = filter_y0;
|
|
|
|
|
|
|
|
/* Initial phase offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int y0_q4 = (filter_y - filter_y_base) / taps;
|
|
|
|
int y_q4 = y0_q4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (y = 0; y < h; ++y) {
|
|
|
|
/* Per-pixel src offset */
|
2013-02-21 00:59:20 +01:00
|
|
|
int src_y = (y_q4 - y0_q4) >> 4;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
for (sum = 0, k = 0; k < taps; ++k) {
|
|
|
|
sum += src[(src_y + k) * src_stride] * filter_y[k];
|
|
|
|
}
|
|
|
|
sum += (VP9_FILTER_WEIGHT >> 1);
|
|
|
|
dst[y * dst_stride] =
|
|
|
|
(dst[y * dst_stride] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;
|
|
|
|
|
|
|
|
/* Adjust source and filter to use for the next pixel */
|
|
|
|
y_q4 += y_step_q4;
|
|
|
|
filter_y = filter_y_base + (y_q4 & 0xf) * taps;
|
|
|
|
}
|
|
|
|
++src;
|
|
|
|
++dst;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void convolve_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h, int taps) {
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
/* Fixed size intermediate buffer places limits on parameters.
|
2013-04-18 22:05:38 +02:00
|
|
|
* Maximum intermediate_height is 135, for y_step_q4 == 32,
|
|
|
|
* h == 64, taps == 8.
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
*/
|
2013-04-18 22:05:38 +02:00
|
|
|
uint8_t temp[64 * 135];
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;
|
|
|
|
|
2013-04-18 22:05:38 +02:00
|
|
|
assert(w <= 64);
|
|
|
|
assert(h <= 64);
|
2013-01-25 18:47:09 +01:00
|
|
|
assert(taps <= 8);
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
assert(y_step_q4 <= 32);
|
|
|
|
|
|
|
|
if (intermediate_height < h)
|
|
|
|
intermediate_height = h;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
|
2013-04-18 22:05:38 +02:00
|
|
|
temp, 64,
|
2013-01-25 18:47:09 +01:00
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
w, intermediate_height, taps);
|
2013-04-18 22:05:38 +02:00
|
|
|
convolve_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride,
|
2013-01-25 18:47:09 +01:00
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
|
|
|
w, h, taps);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void convolve_avg_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h, int taps) {
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
/* Fixed size intermediate buffer places limits on parameters.
|
2013-04-18 22:05:38 +02:00
|
|
|
* Maximum intermediate_height is 135, for y_step_q4 == 32,
|
|
|
|
* h == 64, taps == 8.
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
*/
|
2013-04-18 22:05:38 +02:00
|
|
|
uint8_t temp[64 * 135];
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;
|
|
|
|
|
2013-04-18 22:05:38 +02:00
|
|
|
assert(w <= 64);
|
|
|
|
assert(h <= 64);
|
2013-01-25 18:47:09 +01:00
|
|
|
assert(taps <= 8);
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
assert(y_step_q4 <= 32);
|
|
|
|
|
|
|
|
if (intermediate_height < h)
|
|
|
|
intermediate_height = h;
|
2013-01-25 18:47:09 +01:00
|
|
|
|
|
|
|
convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
|
2013-04-18 22:05:38 +02:00
|
|
|
temp, 64,
|
2013-01-25 18:47:09 +01:00
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
Spatial resamping of ZEROMV predictors
This patch allows coding frames using references of different
resolution, in ZEROMV mode. For compound prediction, either
reference may be scaled.
To test, I use the resize_test and enable WRITE_RECON_BUFFER
in vp9_onyxd_if.c. It's also useful to apply this patch to
test/i420_video_source.h:
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -93,6 +93,7 @@ class I420VideoSource : public VideoSource {
virtual void FillFrame() {
// Read a frame from input_file.
+ if (frame_ != 3)
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
}
This forces the frame that the resolution changes on to be coded
with no motion, only scaling, and improves the quality of the
result.
Change-Id: I1ee75d19a437ff801192f767fd02a36bcbd1d496
2013-02-25 05:55:14 +01:00
|
|
|
w, intermediate_height, taps);
|
2013-04-18 22:05:38 +02:00
|
|
|
convolve_avg_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride,
|
2013-01-25 18:47:09 +01:00
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
|
|
|
w, h, taps);
|
|
|
|
}
|
|
|
|
|
|
|
|
void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h) {
|
|
|
|
convolve_horiz_c(src, src_stride, dst, dst_stride,
|
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
|
|
|
w, h, 8);
|
|
|
|
}
|
|
|
|
|
|
|
|
void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h) {
|
|
|
|
convolve_avg_horiz_c(src, src_stride, dst, dst_stride,
|
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
|
|
|
w, h, 8);
|
|
|
|
}
|
|
|
|
|
|
|
|
void vp9_convolve8_vert_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h) {
|
|
|
|
convolve_vert_c(src, src_stride, dst, dst_stride,
|
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
|
|
|
w, h, 8);
|
|
|
|
}
|
|
|
|
|
|
|
|
void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h) {
|
|
|
|
convolve_avg_vert_c(src, src_stride, dst, dst_stride,
|
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
|
|
|
w, h, 8);
|
|
|
|
}
|
|
|
|
|
|
|
|
void vp9_convolve8_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h) {
|
|
|
|
convolve_c(src, src_stride, dst, dst_stride,
|
|
|
|
filter_x, x_step_q4, filter_y, y_step_q4,
|
|
|
|
w, h, 8);
|
|
|
|
}
|
|
|
|
|
|
|
|
void vp9_convolve8_avg_c(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int x_step_q4,
|
|
|
|
const int16_t *filter_y, int y_step_q4,
|
|
|
|
int w, int h) {
|
2013-02-12 00:34:08 +01:00
|
|
|
/* Fixed size intermediate buffer places limits on parameters. */
|
2013-04-18 22:05:38 +02:00
|
|
|
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 64 * 64);
|
|
|
|
assert(w <= 64);
|
|
|
|
assert(h <= 64);
|
2013-02-12 00:34:08 +01:00
|
|
|
|
|
|
|
vp9_convolve8(src, src_stride,
|
2013-04-18 22:05:38 +02:00
|
|
|
temp, 64,
|
2013-02-12 00:34:08 +01:00
|
|
|
filter_x, x_step_q4,
|
|
|
|
filter_y, y_step_q4,
|
|
|
|
w, h);
|
2013-04-18 22:05:38 +02:00
|
|
|
vp9_convolve_avg(temp, 64,
|
2013-02-12 00:34:08 +01:00
|
|
|
dst, dst_stride,
|
|
|
|
NULL, 0, /* These unused parameter should be removed! */
|
|
|
|
NULL, 0, /* These unused parameter should be removed! */
|
|
|
|
w, h);
|
2013-01-25 18:47:09 +01:00
|
|
|
}
|
Convert subpixel filters to use convolve framework
Update the code to call the new convolution functions to do subpixel
prediction rather than the existing functions. Remove the old C and
assembly code, since it is unused. This causes a 50% performance
reduction on the decoder, but that will be resolved when the asm for
the new functions is available.
There is no consensus for whether 6-tap or 2-tap predictors will be
supported in the final codec, so these filters are implemented in
terms of the 8-tap code, so that quality testing of these modes
can continue. Implementing the lower complexity algorithms is a
simple exercise, should it be necessary.
This code produces slightly better results in the EIGHTTAP_SMOOTH
case, since the filter is now applied in only one direction when
the subpel motion is only in one direction. Like the previous code,
the filtering is skipped entirely on full-pel MVs. This combination
seems to give the best quality gains, but this may be indicative of a
bug in the encoder's filter selection, since the encoder could
achieve the result of skipping the filtering on full-pel by selecting
one of the other filters. This should be revisited.
Quality gains on derf positive on almost all clips. The only clip
that seemed to be hurt at all datarates was football
(-0.115% PSNR average, -0.587% min). Overall averages 0.375% PSNR,
0.347% SSIM.
Change-Id: I7d469716091b1d89b4b08adde5863999319d69ff
2013-01-29 01:59:03 +01:00
|
|
|
|
|
|
|
void vp9_convolve_copy(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int filter_x_stride,
|
|
|
|
const int16_t *filter_y, int filter_y_stride,
|
|
|
|
int w, int h) {
|
2013-02-09 02:49:44 +01:00
|
|
|
if (w == 16 && h == 16) {
|
Convert subpixel filters to use convolve framework
Update the code to call the new convolution functions to do subpixel
prediction rather than the existing functions. Remove the old C and
assembly code, since it is unused. This causes a 50% performance
reduction on the decoder, but that will be resolved when the asm for
the new functions is available.
There is no consensus for whether 6-tap or 2-tap predictors will be
supported in the final codec, so these filters are implemented in
terms of the 8-tap code, so that quality testing of these modes
can continue. Implementing the lower complexity algorithms is a
simple exercise, should it be necessary.
This code produces slightly better results in the EIGHTTAP_SMOOTH
case, since the filter is now applied in only one direction when
the subpel motion is only in one direction. Like the previous code,
the filtering is skipped entirely on full-pel MVs. This combination
seems to give the best quality gains, but this may be indicative of a
bug in the encoder's filter selection, since the encoder could
achieve the result of skipping the filtering on full-pel by selecting
one of the other filters. This should be revisited.
Quality gains on derf positive on almost all clips. The only clip
that seemed to be hurt at all datarates was football
(-0.115% PSNR average, -0.587% min). Overall averages 0.375% PSNR,
0.347% SSIM.
Change-Id: I7d469716091b1d89b4b08adde5863999319d69ff
2013-01-29 01:59:03 +01:00
|
|
|
vp9_copy_mem16x16(src, src_stride, dst, dst_stride);
|
2013-02-09 02:49:44 +01:00
|
|
|
} else if (w == 8 && h == 8) {
|
Convert subpixel filters to use convolve framework
Update the code to call the new convolution functions to do subpixel
prediction rather than the existing functions. Remove the old C and
assembly code, since it is unused. This causes a 50% performance
reduction on the decoder, but that will be resolved when the asm for
the new functions is available.
There is no consensus for whether 6-tap or 2-tap predictors will be
supported in the final codec, so these filters are implemented in
terms of the 8-tap code, so that quality testing of these modes
can continue. Implementing the lower complexity algorithms is a
simple exercise, should it be necessary.
This code produces slightly better results in the EIGHTTAP_SMOOTH
case, since the filter is now applied in only one direction when
the subpel motion is only in one direction. Like the previous code,
the filtering is skipped entirely on full-pel MVs. This combination
seems to give the best quality gains, but this may be indicative of a
bug in the encoder's filter selection, since the encoder could
achieve the result of skipping the filtering on full-pel by selecting
one of the other filters. This should be revisited.
Quality gains on derf positive on almost all clips. The only clip
that seemed to be hurt at all datarates was football
(-0.115% PSNR average, -0.587% min). Overall averages 0.375% PSNR,
0.347% SSIM.
Change-Id: I7d469716091b1d89b4b08adde5863999319d69ff
2013-01-29 01:59:03 +01:00
|
|
|
vp9_copy_mem8x8(src, src_stride, dst, dst_stride);
|
2013-02-09 02:49:44 +01:00
|
|
|
} else if (w == 8 && h == 4) {
|
Convert subpixel filters to use convolve framework
Update the code to call the new convolution functions to do subpixel
prediction rather than the existing functions. Remove the old C and
assembly code, since it is unused. This causes a 50% performance
reduction on the decoder, but that will be resolved when the asm for
the new functions is available.
There is no consensus for whether 6-tap or 2-tap predictors will be
supported in the final codec, so these filters are implemented in
terms of the 8-tap code, so that quality testing of these modes
can continue. Implementing the lower complexity algorithms is a
simple exercise, should it be necessary.
This code produces slightly better results in the EIGHTTAP_SMOOTH
case, since the filter is now applied in only one direction when
the subpel motion is only in one direction. Like the previous code,
the filtering is skipped entirely on full-pel MVs. This combination
seems to give the best quality gains, but this may be indicative of a
bug in the encoder's filter selection, since the encoder could
achieve the result of skipping the filtering on full-pel by selecting
one of the other filters. This should be revisited.
Quality gains on derf positive on almost all clips. The only clip
that seemed to be hurt at all datarates was football
(-0.115% PSNR average, -0.587% min). Overall averages 0.375% PSNR,
0.347% SSIM.
Change-Id: I7d469716091b1d89b4b08adde5863999319d69ff
2013-01-29 01:59:03 +01:00
|
|
|
vp9_copy_mem8x4(src, src_stride, dst, dst_stride);
|
|
|
|
} else {
|
|
|
|
int r;
|
|
|
|
|
2013-02-09 02:49:44 +01:00
|
|
|
for (r = h; r > 0; --r) {
|
|
|
|
memcpy(dst, src, w);
|
Convert subpixel filters to use convolve framework
Update the code to call the new convolution functions to do subpixel
prediction rather than the existing functions. Remove the old C and
assembly code, since it is unused. This causes a 50% performance
reduction on the decoder, but that will be resolved when the asm for
the new functions is available.
There is no consensus for whether 6-tap or 2-tap predictors will be
supported in the final codec, so these filters are implemented in
terms of the 8-tap code, so that quality testing of these modes
can continue. Implementing the lower complexity algorithms is a
simple exercise, should it be necessary.
This code produces slightly better results in the EIGHTTAP_SMOOTH
case, since the filter is now applied in only one direction when
the subpel motion is only in one direction. Like the previous code,
the filtering is skipped entirely on full-pel MVs. This combination
seems to give the best quality gains, but this may be indicative of a
bug in the encoder's filter selection, since the encoder could
achieve the result of skipping the filtering on full-pel by selecting
one of the other filters. This should be revisited.
Quality gains on derf positive on almost all clips. The only clip
that seemed to be hurt at all datarates was football
(-0.115% PSNR average, -0.587% min). Overall averages 0.375% PSNR,
0.347% SSIM.
Change-Id: I7d469716091b1d89b4b08adde5863999319d69ff
2013-01-29 01:59:03 +01:00
|
|
|
src += src_stride;
|
|
|
|
dst += dst_stride;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void vp9_convolve_avg(const uint8_t *src, int src_stride,
|
|
|
|
uint8_t *dst, int dst_stride,
|
|
|
|
const int16_t *filter_x, int filter_x_stride,
|
|
|
|
const int16_t *filter_y, int filter_y_stride,
|
|
|
|
int w, int h) {
|
|
|
|
int x, y;
|
|
|
|
|
|
|
|
for (y = 0; y < h; ++y) {
|
|
|
|
for (x = 0; x < w; ++x) {
|
|
|
|
dst[x] = (dst[x] + src[x] + 1) >> 1;
|
|
|
|
}
|
|
|
|
src += src_stride;
|
|
|
|
dst += dst_stride;
|
|
|
|
}
|
|
|
|
}
|