Remove vp9_temporal denoiser

Change-Id: Id1a12e1aa97e1baa68a640d1233bf8ab01058d89
Yaowu Xu 2016-01-20 17:47:06 -08:00
parent b89861a463
commit 0738390c19
9 changed files with 0 additions and 1047 deletions

configure

@@ -56,8 +56,6 @@ Advanced options:
${toggle_postproc_visualizer} macro block / block level visualizers
${toggle_multi_res_encoding} enable multiple-resolution encoding
${toggle_temporal_denoising} enable temporal denoising and disable the spatial denoiser
${toggle_vp9_temporal_denoising} enable vp9 temporal denoising
${toggle_webm_io} enable input from and output to WebM container
${toggle_libyuv} enable libyuv

vp10/common/vp10_rtcd_defs.pl

@@ -287,23 +287,6 @@ if (vpx_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
add_proto qw/void vp10_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
specialize qw/vp10_highbd_convolve8_avg_vert/, "$sse2_x86_64";
#
# post proc
#
if (vpx_config("CONFIG_VP9_POSTPROC") eq "yes") {
add_proto qw/void vp10_highbd_mbpost_proc_down/, "uint16_t *dst, int pitch, int rows, int cols, int flimit";
specialize qw/vp10_highbd_mbpost_proc_down/;
add_proto qw/void vp10_highbd_mbpost_proc_across_ip/, "uint16_t *src, int pitch, int rows, int cols, int flimit";
specialize qw/vp10_highbd_mbpost_proc_across_ip/;
add_proto qw/void vp10_highbd_post_proc_down_and_across/, "const uint16_t *src_ptr, uint16_t *dst_ptr, int src_pixels_per_line, int dst_pixels_per_line, int rows, int cols, int flimit";
specialize qw/vp10_highbd_post_proc_down_and_across/;
add_proto qw/void vp10_highbd_plane_add_noise/, "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch";
specialize qw/vp10_highbd_plane_add_noise/;
}
#
# dct
#
@@ -326,14 +309,6 @@ if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
# ENCODEMB INVOKE
#
# Denoiser
#
if (vpx_config("CONFIG_VP9_TEMPORAL_DENOISING") eq "yes") {
add_proto qw/int vp10_denoiser_filter/, "const uint8_t *sig, int sig_stride, const uint8_t *mc_avg, int mc_avg_stride, uint8_t *avg, int avg_stride, int increase_denoising, BLOCK_SIZE bs, int motion_magnitude";
specialize qw/vp10_denoiser_filter sse2/;
}
if (vpx_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
# the transform coefficients are held in 32-bit
# values, so the assembler code for vp10_block_error can no longer be used.

vp10/encoder/context_tree.h

@@ -58,15 +58,6 @@ typedef struct {
int rate;
int64_t dist;
#if CONFIG_VP9_TEMPORAL_DENOISING
unsigned int newmv_sse;
unsigned int zeromv_sse;
PREDICTION_MODE best_sse_inter_mode;
int_mv best_sse_mv;
MV_REFERENCE_FRAME best_reference_frame;
MV_REFERENCE_FRAME best_zeromv_reference_frame;
#endif
// motion vector cache for adaptive motion search control in partition
// search loop
MV pred_mv[MAX_REF_FRAMES];

vp10/encoder/denoiser.c

@@ -1,500 +0,0 @@
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include <limits.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_scale/yv12config.h"
#include "vpx/vpx_integer.h"
#include "vp10/common/reconinter.h"
#include "vp10/encoder/context_tree.h"
#include "vp10/encoder/denoiser.h"
/* The VP9 denoiser is a work-in-progress. It currently is only designed to work
* with speed 6, though it (inexplicably) seems to also work with speed 5 (one
* would need to modify the source code in vp10_pickmode.c and vp10_encoder.c to
* make the calls to the vp10_denoiser_* functions when in speed 5).
*
* The implementation is very similar to that of the VP8 denoiser. While
* choosing the motion vectors / reference frames, the denoiser is run, and if
* it did not modify the signal too much, the denoised block is copied to the
* signal.
*/
#ifdef OUTPUT_YUV_DENOISED
static void make_grayscale(YV12_BUFFER_CONFIG *yuv);
#endif
static int absdiff_thresh(BLOCK_SIZE bs, int increase_denoising) {
(void)bs;
return 3 + (increase_denoising ? 1 : 0);
}
static int delta_thresh(BLOCK_SIZE bs, int increase_denoising) {
(void)bs;
(void)increase_denoising;
return 4;
}
static int noise_motion_thresh(BLOCK_SIZE bs, int increase_denoising) {
(void)bs;
(void)increase_denoising;
return 625;
}
static unsigned int sse_thresh(BLOCK_SIZE bs, int increase_denoising) {
return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 60 : 40);
}
static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising,
int motion_magnitude) {
if (motion_magnitude >
noise_motion_thresh(bs, increase_denoising)) {
return 0;
} else {
return (1 << num_pels_log2_lookup[bs]) * 20;
}
}
int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising) {
return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
}
static int total_adj_weak_thresh(BLOCK_SIZE bs, int increase_denoising) {
return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
}
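/* For illustration: with BLOCK_16X16 (num_pels_log2_lookup[BLOCK_16X16] == 8,
 * i.e. 256 pixels) and increase_denoising == 0, the thresholds above
 * evaluate to
 *   absdiff_thresh()          -> 3
 *   noise_motion_thresh()     -> 625
 *   sse_thresh()              -> 256 * 40 = 10240
 *   sse_diff_thresh()         -> 256 * 20 = 5120 (when motion is below
 *                                noise_motion_thresh)
 *   total_adj_strong_thresh() -> 256 * 2  = 512
 */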
// TODO(jackychen): If increase_denoising is enabled in the future,
// we might need to update the code for calculating 'total_adj' in
// case the C code is not bit-exact with corresponding sse2 code.
int vp10_denoiser_filter_c(const uint8_t *sig, int sig_stride,
const uint8_t *mc_avg,
int mc_avg_stride,
uint8_t *avg, int avg_stride,
int increase_denoising,
BLOCK_SIZE bs,
int motion_magnitude) {
int r, c;
const uint8_t *sig_start = sig;
const uint8_t *mc_avg_start = mc_avg;
uint8_t *avg_start = avg;
int diff, adj, absdiff, delta;
int adj_val[] = {3, 4, 6};
int total_adj = 0;
int shift_inc = 1;
// If motion_magnitude is small, make the denoiser more aggressive by
// increasing the adjustment for each level. Add another increment for
// blocks that are labeled for increased denoising.
if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
if (increase_denoising) {
shift_inc = 2;
}
adj_val[0] += shift_inc;
adj_val[1] += shift_inc;
adj_val[2] += shift_inc;
}
// First attempt to apply a strong temporal denoising filter.
for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) {
diff = mc_avg[c] - sig[c];
absdiff = abs(diff);
if (absdiff <= absdiff_thresh(bs, increase_denoising)) {
avg[c] = mc_avg[c];
total_adj += diff;
} else {
switch (absdiff) {
case 4: case 5: case 6: case 7:
adj = adj_val[0];
break;
case 8: case 9: case 10: case 11:
case 12: case 13: case 14: case 15:
adj = adj_val[1];
break;
default:
adj = adj_val[2];
}
if (diff > 0) {
avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj);
total_adj += adj;
} else {
avg[c] = VPXMAX(0, sig[c] - adj);
total_adj -= adj;
}
}
}
sig += sig_stride;
avg += avg_stride;
mc_avg += mc_avg_stride;
}
// If the strong filter did not modify the signal too much, we're all set.
if (abs(total_adj) <= total_adj_strong_thresh(bs, increase_denoising)) {
return FILTER_BLOCK;
}
// Otherwise, we try to dampen the filter if the delta is not too high.
delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising))
>> num_pels_log2_lookup[bs]) + 1;
if (delta >= delta_thresh(bs, increase_denoising)) {
return COPY_BLOCK;
}
mc_avg = mc_avg_start;
avg = avg_start;
sig = sig_start;
for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) {
diff = mc_avg[c] - sig[c];
adj = abs(diff);
if (adj > delta) {
adj = delta;
}
if (diff > 0) {
// A positive diff means we made a positive adjustment in the first
// (strong) pass, so now make a negative adjustment to bring the
// denoised signal down.
avg[c] = VPXMAX(0, avg[c] - adj);
total_adj -= adj;
} else {
// A negative diff means we made a negative adjustment in the first
// (strong) pass, so now make a positive adjustment to bring the
// denoised signal up.
avg[c] = VPXMIN(UINT8_MAX, avg[c] + adj);
total_adj += adj;
}
}
sig += sig_stride;
avg += avg_stride;
mc_avg += mc_avg_stride;
}
// We can use the filter if it has been sufficiently dampened
if (abs(total_adj) <= total_adj_weak_thresh(bs, increase_denoising)) {
return FILTER_BLOCK;
}
return COPY_BLOCK;
}
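/* For illustration, with a 16x16 block and increase_denoising == 0: the
 * strong pass above is accepted outright if |total_adj| <= 512; a total_adj
 * of 600 instead gives delta = ((600 - 512) >> 8) + 1 = 1, which is below
 * delta_thresh() (4), so the capped second pass runs and FILTER_BLOCK is
 * returned only if the dampened |total_adj| falls to 512 or less. */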
static uint8_t *block_start(uint8_t *framebuf, int stride,
int mi_row, int mi_col) {
return framebuf + (stride * mi_row * 8) + (mi_col * 8);
}
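/* Note: mi_row and mi_col are in 8-pixel mode-info units, so, for example,
 * block_start(buf, stride, 2, 3) points at pixel row 16, column 24, i.e.
 * buf + 16 * stride + 24. */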
static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser,
MACROBLOCK *mb,
BLOCK_SIZE bs,
int increase_denoising,
int mi_row,
int mi_col,
PICK_MODE_CONTEXT *ctx,
int *motion_magnitude
) {
int mv_col, mv_row;
int sse_diff = ctx->zeromv_sse - ctx->newmv_sse;
MV_REFERENCE_FRAME frame;
MACROBLOCKD *filter_mbd = &mb->e_mbd;
MB_MODE_INFO *mbmi = &filter_mbd->mi[0]->mbmi;
MB_MODE_INFO saved_mbmi;
int i, j;
struct buf_2d saved_dst[MAX_MB_PLANE];
struct buf_2d saved_pre[MAX_MB_PLANE][2]; // 2 pre buffers
mv_col = ctx->best_sse_mv.as_mv.col;
mv_row = ctx->best_sse_mv.as_mv.row;
*motion_magnitude = mv_row * mv_row + mv_col * mv_col;
frame = ctx->best_reference_frame;
saved_mbmi = *mbmi;
// If the best reference frame uses inter-prediction and there is enough of a
// difference in sum-squared-error, use it.
if (frame != INTRA_FRAME &&
sse_diff > sse_diff_thresh(bs, increase_denoising, *motion_magnitude)) {
mbmi->ref_frame[0] = ctx->best_reference_frame;
mbmi->mode = ctx->best_sse_inter_mode;
mbmi->mv[0] = ctx->best_sse_mv;
} else {
// Otherwise, use the zero reference frame.
frame = ctx->best_zeromv_reference_frame;
mbmi->ref_frame[0] = ctx->best_zeromv_reference_frame;
mbmi->mode = ZEROMV;
mbmi->mv[0].as_int = 0;
ctx->best_sse_inter_mode = ZEROMV;
ctx->best_sse_mv.as_int = 0;
ctx->newmv_sse = ctx->zeromv_sse;
}
if (ctx->newmv_sse > sse_thresh(bs, increase_denoising)) {
// Restore everything to its original state
*mbmi = saved_mbmi;
return COPY_BLOCK;
}
if (*motion_magnitude >
(noise_motion_thresh(bs, increase_denoising) << 3)) {
// Restore everything to its original state
*mbmi = saved_mbmi;
return COPY_BLOCK;
}
// We will restore these after motion compensation.
for (i = 0; i < MAX_MB_PLANE; ++i) {
for (j = 0; j < 2; ++j) {
saved_pre[i][j] = filter_mbd->plane[i].pre[j];
}
saved_dst[i] = filter_mbd->plane[i].dst;
}
// Set the pointers in the MACROBLOCKD to point to the buffers in the denoiser
// struct.
for (j = 0; j < 2; ++j) {
filter_mbd->plane[0].pre[j].buf =
block_start(denoiser->running_avg_y[frame].y_buffer,
denoiser->running_avg_y[frame].y_stride,
mi_row, mi_col);
filter_mbd->plane[0].pre[j].stride =
denoiser->running_avg_y[frame].y_stride;
filter_mbd->plane[1].pre[j].buf =
block_start(denoiser->running_avg_y[frame].u_buffer,
denoiser->running_avg_y[frame].uv_stride,
mi_row, mi_col);
filter_mbd->plane[1].pre[j].stride =
denoiser->running_avg_y[frame].uv_stride;
filter_mbd->plane[2].pre[j].buf =
block_start(denoiser->running_avg_y[frame].v_buffer,
denoiser->running_avg_y[frame].uv_stride,
mi_row, mi_col);
filter_mbd->plane[2].pre[j].stride =
denoiser->running_avg_y[frame].uv_stride;
}
filter_mbd->plane[0].dst.buf =
block_start(denoiser->mc_running_avg_y.y_buffer,
denoiser->mc_running_avg_y.y_stride,
mi_row, mi_col);
filter_mbd->plane[0].dst.stride = denoiser->mc_running_avg_y.y_stride;
filter_mbd->plane[1].dst.buf =
block_start(denoiser->mc_running_avg_y.u_buffer,
denoiser->mc_running_avg_y.uv_stride,
mi_row, mi_col);
filter_mbd->plane[1].dst.stride = denoiser->mc_running_avg_y.uv_stride;
filter_mbd->plane[2].dst.buf =
block_start(denoiser->mc_running_avg_y.v_buffer,
denoiser->mc_running_avg_y.uv_stride,
mi_row, mi_col);
filter_mbd->plane[2].dst.stride = denoiser->mc_running_avg_y.uv_stride;
vp10_build_inter_predictors_sby(filter_mbd, mv_row, mv_col, bs);
// Restore everything to its original state
*mbmi = saved_mbmi;
for (i = 0; i < MAX_MB_PLANE; ++i) {
for (j = 0; j < 2; ++j) {
filter_mbd->plane[i].pre[j] = saved_pre[i][j];
}
filter_mbd->plane[i].dst = saved_dst[i];
}
mv_row = ctx->best_sse_mv.as_mv.row;
mv_col = ctx->best_sse_mv.as_mv.col;
return FILTER_BLOCK;
}
void vp10_denoiser_denoise(VP9_DENOISER *denoiser, MACROBLOCK *mb,
int mi_row, int mi_col, BLOCK_SIZE bs,
PICK_MODE_CONTEXT *ctx) {
int motion_magnitude = 0;
VP9_DENOISER_DECISION decision = FILTER_BLOCK;
YV12_BUFFER_CONFIG avg = denoiser->running_avg_y[INTRA_FRAME];
YV12_BUFFER_CONFIG mc_avg = denoiser->mc_running_avg_y;
uint8_t *avg_start = block_start(avg.y_buffer, avg.y_stride, mi_row, mi_col);
uint8_t *mc_avg_start = block_start(mc_avg.y_buffer, mc_avg.y_stride,
mi_row, mi_col);
struct buf_2d src = mb->plane[0].src;
decision = perform_motion_compensation(denoiser, mb, bs,
denoiser->increase_denoising,
mi_row, mi_col, ctx,
&motion_magnitude);
if (decision == FILTER_BLOCK) {
decision = vp10_denoiser_filter(src.buf, src.stride,
mc_avg_start, mc_avg.y_stride,
avg_start, avg.y_stride,
0, bs, motion_magnitude);
}
if (decision == FILTER_BLOCK) {
vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride,
NULL, 0, NULL, 0,
num_4x4_blocks_wide_lookup[bs] << 2,
num_4x4_blocks_high_lookup[bs] << 2);
} else { // COPY_BLOCK
vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride,
NULL, 0, NULL, 0,
num_4x4_blocks_wide_lookup[bs] << 2,
num_4x4_blocks_high_lookup[bs] << 2);
}
}
static void copy_frame(YV12_BUFFER_CONFIG dest, const YV12_BUFFER_CONFIG src) {
int r;
const uint8_t *srcbuf = src.y_buffer;
uint8_t *destbuf = dest.y_buffer;
assert(dest.y_width == src.y_width);
assert(dest.y_height == src.y_height);
for (r = 0; r < dest.y_height; ++r) {
memcpy(destbuf, srcbuf, dest.y_width);
destbuf += dest.y_stride;
srcbuf += src.y_stride;
}
}
static void swap_frame_buffer(YV12_BUFFER_CONFIG *dest,
YV12_BUFFER_CONFIG *src) {
uint8_t *tmp_buf = dest->y_buffer;
assert(dest->y_width == src->y_width);
assert(dest->y_height == src->y_height);
dest->y_buffer = src->y_buffer;
src->y_buffer = tmp_buf;
}
void vp10_denoiser_update_frame_info(VP9_DENOISER *denoiser,
YV12_BUFFER_CONFIG src,
FRAME_TYPE frame_type,
int refresh_alt_ref_frame,
int refresh_golden_frame,
int refresh_last_frame) {
if (frame_type == KEY_FRAME) {
int i;
// Start at 1 so as not to overwrite the INTRA_FRAME
for (i = 1; i < MAX_REF_FRAMES; ++i)
copy_frame(denoiser->running_avg_y[i], src);
return;
}
/* For non key frames */
if (refresh_alt_ref_frame) {
swap_frame_buffer(&denoiser->running_avg_y[ALTREF_FRAME],
&denoiser->running_avg_y[INTRA_FRAME]);
}
if (refresh_golden_frame) {
swap_frame_buffer(&denoiser->running_avg_y[GOLDEN_FRAME],
&denoiser->running_avg_y[INTRA_FRAME]);
}
if (refresh_last_frame) {
swap_frame_buffer(&denoiser->running_avg_y[LAST_FRAME],
&denoiser->running_avg_y[INTRA_FRAME]);
}
}
void vp10_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx) {
ctx->zeromv_sse = UINT_MAX;
ctx->newmv_sse = UINT_MAX;
}
void vp10_denoiser_update_frame_stats(MB_MODE_INFO *mbmi, unsigned int sse,
PREDICTION_MODE mode,
PICK_MODE_CONTEXT *ctx) {
// TODO(tkopp): Use both MVs if possible
if (mbmi->mv[0].as_int == 0 && sse < ctx->zeromv_sse) {
ctx->zeromv_sse = sse;
ctx->best_zeromv_reference_frame = mbmi->ref_frame[0];
}
if (mbmi->mv[0].as_int != 0 && sse < ctx->newmv_sse) {
ctx->newmv_sse = sse;
ctx->best_sse_inter_mode = mode;
ctx->best_sse_mv = mbmi->mv[0];
ctx->best_reference_frame = mbmi->ref_frame[0];
}
}
int vp10_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height,
int ssx, int ssy,
#if CONFIG_VPX_HIGHBITDEPTH
int use_highbitdepth,
#endif
int border) {
int i, fail;
const int legacy_byte_alignment = 0;
assert(denoiser != NULL);
for (i = 0; i < MAX_REF_FRAMES; ++i) {
fail = vpx_alloc_frame_buffer(&denoiser->running_avg_y[i], width, height,
ssx, ssy,
#if CONFIG_VPX_HIGHBITDEPTH
use_highbitdepth,
#endif
border, legacy_byte_alignment);
if (fail) {
vp10_denoiser_free(denoiser);
return 1;
}
#ifdef OUTPUT_YUV_DENOISED
make_grayscale(&denoiser->running_avg_y[i]);
#endif
}
fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height,
ssx, ssy,
#if CONFIG_VPX_HIGHBITDEPTH
use_highbitdepth,
#endif
border, legacy_byte_alignment);
if (fail) {
vp10_denoiser_free(denoiser);
return 1;
}
#ifdef OUTPUT_YUV_DENOISED
make_grayscale(&denoiser->mc_running_avg_y);
#endif
denoiser->increase_denoising = 0;
denoiser->frame_buffer_initialized = 1;
return 0;
}
void vp10_denoiser_free(VP9_DENOISER *denoiser) {
int i;
if (denoiser == NULL) {
return;
}
denoiser->frame_buffer_initialized = 0;
for (i = 0; i < MAX_REF_FRAMES; ++i) {
vpx_free_frame_buffer(&denoiser->running_avg_y[i]);
}
vpx_free_frame_buffer(&denoiser->mc_running_avg_y);
}
#ifdef OUTPUT_YUV_DENOISED
static void make_grayscale(YV12_BUFFER_CONFIG *yuv) {
int r, c;
uint8_t *u = yuv->u_buffer;
uint8_t *v = yuv->v_buffer;
for (r = 0; r < yuv->uv_height; ++r) {
for (c = 0; c < yuv->uv_width; ++c) {
u[c] = UINT8_MAX / 2;
v[c] = UINT8_MAX / 2;
}
u += yuv->uv_stride;
v += yuv->uv_stride;
}
}
#endif

vp10/encoder/denoiser.h

@@ -1,69 +0,0 @@
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_ENCODER_DENOISER_H_
#define VP9_ENCODER_DENOISER_H_
#include "vp10/encoder/block.h"
#include "vpx_scale/yv12config.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MOTION_MAGNITUDE_THRESHOLD (8 * 3)
typedef enum vp10_denoiser_decision {
COPY_BLOCK,
FILTER_BLOCK
} VP9_DENOISER_DECISION;
typedef struct vp10_denoiser {
YV12_BUFFER_CONFIG running_avg_y[MAX_REF_FRAMES];
YV12_BUFFER_CONFIG mc_running_avg_y;
int increase_denoising;
int frame_buffer_initialized;
} VP9_DENOISER;
void vp10_denoiser_update_frame_info(VP9_DENOISER *denoiser,
YV12_BUFFER_CONFIG src,
FRAME_TYPE frame_type,
int refresh_alt_ref_frame,
int refresh_golden_frame,
int refresh_last_frame);
void vp10_denoiser_denoise(VP9_DENOISER *denoiser, MACROBLOCK *mb,
int mi_row, int mi_col, BLOCK_SIZE bs,
PICK_MODE_CONTEXT *ctx);
void vp10_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx);
void vp10_denoiser_update_frame_stats(MB_MODE_INFO *mbmi,
unsigned int sse, PREDICTION_MODE mode,
PICK_MODE_CONTEXT *ctx);
int vp10_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height,
int ssx, int ssy,
#if CONFIG_VPX_HIGHBITDEPTH
int use_highbitdepth,
#endif
int border);
#if CONFIG_VP9_TEMPORAL_DENOISING
int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising);
#endif
void vp10_denoiser_free(VP9_DENOISER *denoiser);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP9_ENCODER_DENOISER_H_
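For orientation, a minimal sketch of how an encoder drives this API. The alloc, frame-info update, and free calls mirror the encoder.c hunks below; the per-block calls belong to the mode search (vp10_pickmode.c), which is not part of this diff, so their placement here is an assumption:
/* Sketch only; cpi, cm, ctx, mb, mbmi, sse, mode, mi_row, mi_col and bs are
 * assumed encoder state, and the CONFIG_VPX_HIGHBITDEPTH argument to
 * vp10_denoiser_alloc is omitted. */
vp10_denoiser_alloc(&cpi->denoiser, cm->width, cm->height,
                    cm->subsampling_x, cm->subsampling_y,
                    VPX_ENC_BORDER_IN_PIXELS);
/* Per block, during mode search: */
vp10_denoiser_reset_frame_stats(ctx);
vp10_denoiser_update_frame_stats(mbmi, sse, mode, ctx); /* per tested mode */
vp10_denoiser_denoise(&cpi->denoiser, mb, mi_row, mi_col, bs, ctx);
/* Once per frame, after the reference buffers are refreshed: */
vp10_denoiser_update_frame_info(&cpi->denoiser, *cpi->Source,
                                cpi->common.frame_type,
                                cpi->refresh_alt_ref_frame,
                                cpi->refresh_golden_frame,
                                cpi->refresh_last_frame);
/* On teardown: */
vp10_denoiser_free(&cpi->denoiser);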

vp10/encoder/encoder.c

@@ -1681,11 +1681,6 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX];
cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp);
#if CONFIG_VP9_TEMPORAL_DENOISING
#ifdef OUTPUT_YUV_DENOISED
yuv_denoised_file = fopen("denoised.yuv", "ab");
#endif
#endif
#ifdef OUTPUT_YUV_SKINMAP
yuv_skinmap_file = fopen("skinmap.yuv", "ab");
#endif
@@ -1923,10 +1918,6 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
#endif
}
#if CONFIG_VP9_TEMPORAL_DENOISING
vp10_denoiser_free(&(cpi->denoiser));
#endif
for (t = 0; t < cpi->num_workers; ++t) {
VPxWorker *const worker = &cpi->workers[t];
EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
@@ -1965,11 +1956,6 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
vp10_free_ref_frame_buffers(cm->buffer_pool);
vpx_free(cpi);
#if CONFIG_VP9_TEMPORAL_DENOISING
#ifdef OUTPUT_YUV_DENOISED
fclose(yuv_denoised_file);
#endif
#endif
#ifdef OUTPUT_YUV_SKINMAP
fclose(yuv_skinmap_file);
#endif
@@ -2630,16 +2616,6 @@ void vp10_update_reference_frames(VP10_COMP *cpi) {
cpi->interp_filter_selected[0],
sizeof(cpi->interp_filter_selected[0]));
}
#if CONFIG_VP9_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity > 0) {
vp10_denoiser_update_frame_info(&cpi->denoiser,
*cpi->Source,
cpi->common.frame_type,
cpi->refresh_alt_ref_frame,
cpi->refresh_golden_frame,
cpi->refresh_last_frame);
}
#endif
}
static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
@@ -3572,14 +3548,6 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
encode_with_recode_loop(cpi, size, dest);
}
#if CONFIG_VP9_TEMPORAL_DENOISING
#ifdef OUTPUT_YUV_DENOISED
if (oxcf->noise_sensitivity > 0) {
vp10_write_yuv_frame_420(&cpi->denoiser.running_avg_y[INTRA_FRAME],
yuv_denoised_file);
}
#endif
#endif
#ifdef OUTPUT_YUV_SKINMAP
if (cpi->common.current_video_frame > 1) {
vp10_compute_skin_map(cpi, yuv_skinmap_file);
@@ -3756,21 +3724,6 @@ static void check_initial_width(VP10_COMP *cpi,
}
}
#if CONFIG_VP9_TEMPORAL_DENOISING
static void setup_denoiser_buffer(VP10_COMP *cpi) {
VP10_COMMON *const cm = &cpi->common;
if (cpi->oxcf.noise_sensitivity > 0 &&
!cpi->denoiser.frame_buffer_initialized) {
vp10_denoiser_alloc(&(cpi->denoiser), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VPX_ENC_BORDER_IN_PIXELS);
}
}
#endif
int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time) {
@@ -3786,9 +3739,6 @@ int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
check_initial_width(cpi, subsampling_x, subsampling_y);
#endif // CONFIG_VPX_HIGHBITDEPTH
#if CONFIG_VP9_TEMPORAL_DENOISING
setup_denoiser_buffer(cpi);
#endif
vpx_usec_timer_start(&timer);
if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
@@ -4318,10 +4268,6 @@ int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
check_initial_width(cpi, 1, 1);
#endif // CONFIG_VPX_HIGHBITDEPTH
#if CONFIG_VP9_TEMPORAL_DENOISING
setup_denoiser_buffer(cpi);
#endif
if (width) {
cm->width = width;
if (cm->width > cpi->initial_width) {

vp10/encoder/encoder.h

@@ -34,10 +34,6 @@
#include "vp10/encoder/speed_features.h"
#include "vp10/encoder/tokenize.h"
#if CONFIG_VP9_TEMPORAL_DENOISING
#include "vp10/encoder/denoiser.h"
#endif
#if CONFIG_INTERNAL_STATS
#include "vpx_dsp/ssim.h"
#endif
@@ -468,9 +464,6 @@ typedef struct VP10_COMP {
int intra_tx_type_costs[EXT_TX_SIZES][TX_TYPES][TX_TYPES];
int inter_tx_type_costs[EXT_TX_SIZES][TX_TYPES];
#if CONFIG_VP9_TEMPORAL_DENOISING
VP9_DENOISER denoiser;
#endif
int resize_pending;
int resize_state;

vp10/encoder/x86/denoiser_sse2.c

@@ -1,375 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <emmintrin.h>
#include "./vpx_config.h"
#include "./vp10_rtcd.h"
#include "vpx_ports/emmintrin_compat.h"
#include "vpx/vpx_integer.h"
#include "vp10/common/reconinter.h"
#include "vp10/encoder/context_tree.h"
#include "vp10/encoder/denoiser.h"
#include "vpx_mem/vpx_mem.h"
// Compute the sum of all pixel differences of this MB.
static INLINE int sum_diff_16x1(__m128i acc_diff) {
const __m128i k_1 = _mm_set1_epi16(1);
const __m128i acc_diff_lo =
_mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
const __m128i acc_diff_hi =
_mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
const __m128i hgfe_dcba =
_mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
const __m128i hgfedcba =
_mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
return _mm_cvtsi128_si32(hgfedcba);
}
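/* Note on the reduction above: unpacking acc_diff with itself and then doing
 * an arithmetic right shift by 8 sign-extends each signed 8-bit accumulator
 * to 16 bits; _mm_madd_epi16 with a vector of ones adds adjacent 16-bit
 * lanes into 32-bit sums, and the two shift-and-add steps complete the
 * horizontal reduction before the low 32 bits are extracted. */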
// Denoise a 16x1 vector.
static INLINE __m128i vp10_denoiser_16x1_sse2(const uint8_t *sig,
const uint8_t *mc_running_avg_y,
uint8_t *running_avg_y,
const __m128i *k_0,
const __m128i *k_4,
const __m128i *k_8,
const __m128i *k_16,
const __m128i *l3,
const __m128i *l32,
const __m128i *l21,
__m128i acc_diff) {
// Calculate differences
const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
const __m128i v_mc_running_avg_y =
_mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
__m128i v_running_avg_y;
const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
// Obtain the sign. FF if diff is negative.
const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0);
// Clamp absolute difference to 16 to be used to get mask. Doing this
// allows us to use _mm_cmpgt_epi8, which operates on signed byte.
const __m128i clamped_absdiff =
_mm_min_epu8(_mm_or_si128(pdiff, ndiff), *k_16);
// Get masks for l2 l1 and l0 adjustments.
const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff);
const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff);
const __m128i mask0 = _mm_cmpgt_epi8(*k_4, clamped_absdiff);
// Get adjustments for l2, l1, and l0.
__m128i adj2 = _mm_and_si128(mask2, *l32);
const __m128i adj1 = _mm_and_si128(mask1, *l21);
const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
__m128i adj, padj, nadj;
// Combine the adjustments and get absolute adjustments.
adj2 = _mm_add_epi8(adj2, adj1);
adj = _mm_sub_epi8(*l3, adj2);
adj = _mm_andnot_si128(mask0, adj);
adj = _mm_or_si128(adj, adj0);
// Restore the sign and get positive and negative adjustments.
padj = _mm_andnot_si128(diff_sign, adj);
nadj = _mm_and_si128(diff_sign, adj);
// Calculate filtered value.
v_running_avg_y = _mm_adds_epu8(v_sig, padj);
v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
_mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);
// Adjustments <=7, and each element in acc_diff can fit in signed
// char.
acc_diff = _mm_adds_epi8(acc_diff, padj);
acc_diff = _mm_subs_epi8(acc_diff, nadj);
return acc_diff;
}
// Denoise a 16x1 vector with a weaker filter.
static INLINE __m128i vp10_denoiser_adj_16x1_sse2(
const uint8_t *sig, const uint8_t *mc_running_avg_y,
uint8_t *running_avg_y, const __m128i k_0,
const __m128i k_delta, __m128i acc_diff) {
__m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0]));
// Calculate differences.
const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
const __m128i v_mc_running_avg_y =
_mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
// Obtain the sign. FF if diff is negative.
const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
// Clamp absolute difference to delta to get the adjustment.
const __m128i adj =
_mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
// Restore the sign and get positive and negative adjustments.
__m128i padj, nadj;
padj = _mm_andnot_si128(diff_sign, adj);
nadj = _mm_and_si128(diff_sign, adj);
// Calculate filtered value.
v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj);
v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj);
_mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);
// Accumulate the adjustments.
acc_diff = _mm_subs_epi8(acc_diff, padj);
acc_diff = _mm_adds_epi8(acc_diff, nadj);
return acc_diff;
}
// Denoiser for 4xM and 8xM blocks.
static int vp10_denoiser_NxM_sse2_small(
const uint8_t *sig, int sig_stride, const uint8_t *mc_running_avg_y,
int mc_avg_y_stride, uint8_t *running_avg_y, int avg_y_stride,
int increase_denoising, BLOCK_SIZE bs, int motion_magnitude, int width) {
int sum_diff_thresh, r, sum_diff = 0;
const int shift_inc = (increase_denoising &&
motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
1 : 0;
uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16];
__m128i acc_diff = _mm_setzero_si128();
const __m128i k_0 = _mm_setzero_si128();
const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
const __m128i k_8 = _mm_set1_epi8(8);
const __m128i k_16 = _mm_set1_epi8(16);
// Modify each level's adjustment according to motion_magnitude.
const __m128i l3 = _mm_set1_epi8(
(motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
// Difference between level 3 and level 2 is 2.
const __m128i l32 = _mm_set1_epi8(2);
// Difference between level 2 and level 1 is 1.
const __m128i l21 = _mm_set1_epi8(1);
const uint8_t shift = (width == 4) ? 2 : 1;
for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) {
memcpy(sig_buffer[r], sig, width);
memcpy(sig_buffer[r] + width, sig + sig_stride, width);
memcpy(mc_running_buffer[r], mc_running_avg_y, width);
memcpy(mc_running_buffer[r] + width,
mc_running_avg_y + mc_avg_y_stride, width);
memcpy(running_buffer[r], running_avg_y, width);
memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
if (width == 4) {
memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width);
memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width);
memcpy(mc_running_buffer[r] + width * 2,
mc_running_avg_y + mc_avg_y_stride * 2, width);
memcpy(mc_running_buffer[r] + width * 3,
mc_running_avg_y + mc_avg_y_stride * 3, width);
memcpy(running_buffer[r] + width * 2,
running_avg_y + avg_y_stride * 2, width);
memcpy(running_buffer[r] + width * 3,
running_avg_y + avg_y_stride * 3, width);
}
acc_diff = vp10_denoiser_16x1_sse2(sig_buffer[r],
mc_running_buffer[r],
running_buffer[r],
&k_0, &k_4, &k_8, &k_16,
&l3, &l32, &l21, acc_diff);
memcpy(running_avg_y, running_buffer[r], width);
memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
if (width == 4) {
memcpy(running_avg_y + avg_y_stride * 2,
running_buffer[r] + width * 2, width);
memcpy(running_avg_y + avg_y_stride * 3,
running_buffer[r] + width * 3, width);
}
// Update pointers for next iteration.
sig += (sig_stride << shift);
mc_running_avg_y += (mc_avg_y_stride << shift);
running_avg_y += (avg_y_stride << shift);
}
{
sum_diff = sum_diff_16x1(acc_diff);
sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
if (abs(sum_diff) > sum_diff_thresh) {
// Before returning to copy the block (i.e., apply no denoising),
// check if we can still apply some (weaker) temporal filtering to
// this block, that would otherwise not be denoised at all. Simplest
// is to apply an additional adjustment to running_avg_y to bring it
// closer to sig. The adjustment is capped by a maximum delta, and
// chosen such that in most cases the resulting sum_diff will be
// within the acceptable range given by sum_diff_thresh.
// The delta is set by the excess of absolute pixel diff over the
// threshold.
const int delta = ((abs(sum_diff) - sum_diff_thresh) >>
num_pels_log2_lookup[bs]) + 1;
// Only apply the adjustment for max delta up to 3.
if (delta < 4) {
const __m128i k_delta = _mm_set1_epi8(delta);
running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]);
for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) {
acc_diff = vp10_denoiser_adj_16x1_sse2(
sig_buffer[r], mc_running_buffer[r], running_buffer[r],
k_0, k_delta, acc_diff);
memcpy(running_avg_y, running_buffer[r], width);
memcpy(running_avg_y + avg_y_stride,
running_buffer[r] + width, width);
if (width == 4) {
memcpy(running_avg_y + avg_y_stride * 2,
running_buffer[r] + width * 2, width);
memcpy(running_avg_y + avg_y_stride * 3,
running_buffer[r] + width * 3, width);
}
// Update pointers for next iteration.
running_avg_y += (avg_y_stride << shift);
}
sum_diff = sum_diff_16x1(acc_diff);
if (abs(sum_diff) > sum_diff_thresh) {
return COPY_BLOCK;
}
} else {
return COPY_BLOCK;
}
}
}
return FILTER_BLOCK;
}
// Denoiser for 16xM, 32xM and 64xM blocks
static int vp10_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride,
const uint8_t *mc_running_avg_y,
int mc_avg_y_stride,
uint8_t *running_avg_y,
int avg_y_stride,
int increase_denoising, BLOCK_SIZE bs,
int motion_magnitude) {
int sum_diff_thresh, r, c, sum_diff = 0;
const int shift_inc = (increase_denoising &&
motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
1 : 0;
__m128i acc_diff[4][4];
const __m128i k_0 = _mm_setzero_si128();
const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
const __m128i k_8 = _mm_set1_epi8(8);
const __m128i k_16 = _mm_set1_epi8(16);
// Modify each level's adjustment according to motion_magnitude.
const __m128i l3 = _mm_set1_epi8(
(motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
// Difference between level 3 and level 2 is 2.
const __m128i l32 = _mm_set1_epi8(2);
// Difference between level 2 and level 1 is 1.
const __m128i l21 = _mm_set1_epi8(1);
for (c = 0; c < 4; ++c) {
for (r = 0; r < 4; ++r) {
acc_diff[c][r] = _mm_setzero_si128();
}
}
for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
acc_diff[c>>4][r>>4] = vp10_denoiser_16x1_sse2(
sig, mc_running_avg_y, running_avg_y, &k_0, &k_4,
&k_8, &k_16, &l3, &l32, &l21, acc_diff[c>>4][r>>4]);
// Update pointers for next iteration.
sig += 16;
mc_running_avg_y += 16;
running_avg_y += 16;
}
if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) {
for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]);
}
}
// Update pointers for next iteration.
sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride;
mc_running_avg_y = mc_running_avg_y -
16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
mc_avg_y_stride;
running_avg_y = running_avg_y -
16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
avg_y_stride;
}
{
sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
if (abs(sum_diff) > sum_diff_thresh) {
const int delta = ((abs(sum_diff) - sum_diff_thresh) >>
num_pels_log2_lookup[bs]) + 1;
// Only apply the adjustment for max delta up to 3.
if (delta < 4) {
const __m128i k_delta = _mm_set1_epi8(delta);
sig -= sig_stride * (4 << b_height_log2_lookup[bs]);
mc_running_avg_y -= mc_avg_y_stride * (4 << b_height_log2_lookup[bs]);
running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]);
sum_diff = 0;
for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
acc_diff[c>>4][r>>4] = vp10_denoiser_adj_16x1_sse2(
sig, mc_running_avg_y, running_avg_y, k_0,
k_delta, acc_diff[c>>4][r>>4]);
// Update pointers for next iteration.
sig += 16;
mc_running_avg_y += 16;
running_avg_y += 16;
}
if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) {
for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]);
}
}
sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride;
mc_running_avg_y = mc_running_avg_y -
16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
mc_avg_y_stride;
running_avg_y = running_avg_y -
16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
avg_y_stride;
}
if (abs(sum_diff) > sum_diff_thresh) {
return COPY_BLOCK;
}
} else {
return COPY_BLOCK;
}
}
}
return FILTER_BLOCK;
}
int vp10_denoiser_filter_sse2(const uint8_t *sig, int sig_stride,
const uint8_t *mc_avg,
int mc_avg_stride,
uint8_t *avg, int avg_stride,
int increase_denoising,
BLOCK_SIZE bs,
int motion_magnitude) {
if (bs == BLOCK_4X4 || bs == BLOCK_4X8) {
return vp10_denoiser_NxM_sse2_small(sig, sig_stride,
mc_avg, mc_avg_stride,
avg, avg_stride,
increase_denoising,
bs, motion_magnitude, 4);
} else if (bs == BLOCK_8X4 || bs == BLOCK_8X8 || bs == BLOCK_8X16) {
return vp10_denoiser_NxM_sse2_small(sig, sig_stride,
mc_avg, mc_avg_stride,
avg, avg_stride,
increase_denoising,
bs, motion_magnitude, 8);
} else if (bs == BLOCK_16X8 || bs == BLOCK_16X16 || bs == BLOCK_16X32 ||
bs == BLOCK_32X16 || bs == BLOCK_32X32 || bs == BLOCK_32X64 ||
bs == BLOCK_64X32 || bs == BLOCK_64X64) {
return vp10_denoiser_NxM_sse2_big(sig, sig_stride,
mc_avg, mc_avg_stride,
avg, avg_stride,
increase_denoising,
bs, motion_magnitude);
} else {
return COPY_BLOCK;
}
}

vp10/vp10cx.mk

@@ -23,8 +23,6 @@ VP10_CX_SRCS-yes += encoder/context_tree.h
VP10_CX_SRCS-yes += encoder/cost.h
VP10_CX_SRCS-yes += encoder/cost.c
VP10_CX_SRCS-yes += encoder/dct.c
VP10_CX_SRCS-$(CONFIG_VP9_TEMPORAL_DENOISING) += encoder/denoiser.c
VP10_CX_SRCS-$(CONFIG_VP9_TEMPORAL_DENOISING) += encoder/denoiser.h
VP10_CX_SRCS-yes += encoder/encodeframe.c
VP10_CX_SRCS-yes += encoder/encodeframe.h
VP10_CX_SRCS-yes += encoder/encodemb.c
@@ -102,10 +100,6 @@ endif
VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.c
VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
ifeq ($(CONFIG_VP9_TEMPORAL_DENOISING),yes)
VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/denoiser_sse2.c
endif
VP10_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)