/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */


#include "onyxc_int.h"
#include "onyx_int.h"
#include "quantize.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/yv12extend.h"
#include "vpx_scale/vpxscale.h"
#include "alloccommon.h"
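// vpx_ports/arm.h supplies the ARM runtime CPU capability flags (e.g. HAS_NEON)
// that are tested below when CONFIG_RUNTIME_CPU_DETECT is enabled.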
#if ARCH_ARM
#include "vpx_ports/arm.h"
#endif

extern void vp8_loop_filter_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, int filt_val);
extern void vp8_loop_filter_frame_yonly(VP8_COMMON *cm, MACROBLOCKD *mbd, int filt_val, int sharpness_lvl);
extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
#if HAVE_ARMV7
extern void vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
#endif

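// When runtime CPU detection is enabled, IF_RTCD() passes the RTCD table
// through so the called code can dispatch at run time; otherwise it expands
// to NULL and the *_INVOKE macros resolve to direct calls, ignoring the argument.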
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif

extern void
(*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc,
                                   YV12_BUFFER_CONFIG *dst_ybc,
                                   int Fraction);

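// Copies a band of roughly y_height / 2^Fraction rows (rounded down to a
// multiple of 16, minimum 16, plus a 16-row margin) starting just above the
// vertical midpoint of the Y plane, so the fast filter search below can work
// on a sub-frame instead of the whole reconstruction.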
void
vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction)
{
    unsigned char *src_y, *dst_y;
    int yheight;
    int ystride;
    int border;
    int yoffset;
    int linestocopy;

    border = src_ybc->border;
    yheight = src_ybc->y_height;
    ystride = src_ybc->y_stride;

    linestocopy = (yheight >> (Fraction + 4));

    if (linestocopy < 1)
        linestocopy = 1;

    linestocopy <<= 4;

    yoffset = ystride * ((yheight >> 5) * 16 - 8);
    src_y = src_ybc->y_buffer + yoffset;
    dst_y = dst_ybc->y_buffer + yoffset;

    vpx_memcpy(dst_y, src_y, ystride * (linestocopy + 16));
}

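// Sums mse16x16 over a band around the vertical midpoint of the Y plane
// (approximately the region handled by the partial copy and partial loop
// filter above) and returns the accumulated squared error.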
static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd)
{
    int i, j;
    int Total = 0;
    int srcoffset, dstoffset;
    unsigned char *src = source->y_buffer;
    unsigned char *dst = dest->y_buffer;

    int linestocopy = (source->y_height >> (Fraction + 4));
    (void)rtcd;

    if (linestocopy < 1)
        linestocopy = 1;

    linestocopy <<= 4;

    srcoffset = source->y_stride * (dest->y_height >> 5) * 16;
    dstoffset = dest->y_stride * (dest->y_height >> 5) * 16;

    src += srcoffset;
    dst += dstoffset;

    // Loop through the Y plane raw and reconstruction data summing (square differences)
    for (i = 0; i < linestocopy; i += 16)
    {
        for (j = 0; j < source->y_width; j += 16)
        {
            unsigned int sse;
            Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
        }

        src += 16 * source->y_stride;
        dst += 16 * dest->y_stride;
    }

    return Total;
}

extern void vp8_loop_filter_partial_frame
(
    VP8_COMMON *cm,
    MACROBLOCKD *mbd,
    int default_filt_lvl,
    int sharpness_lvl,
    int Fraction
);

// Enforce a minimum filter level based upon baseline Q
static int get_min_filter_level(VP8_COMP *cpi, int base_qindex)
{
    int min_filter_level;

    if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && !cpi->common.refresh_alt_ref_frame)
        min_filter_level = 0;
    else
    {
        if (base_qindex <= 6)
            min_filter_level = 0;
        else if (base_qindex <= 16)
            min_filter_level = 1;
        else
            min_filter_level = (base_qindex / 8);
    }

    return min_filter_level;
}

// Enforce a maximum filter level based upon baseline Q
static int get_max_filter_level(VP8_COMP *cpi, int base_qindex)
{
    // PGW August 2006: Highest filter values almost always a bad idea

    // jbb chg: 20100118 - not so any more with this overquant stuff; allow
    // high values when lots of intra is coming in.
    int max_filter_level = MAX_LOOP_FILTER; //* 3 / 4;

    if (cpi->section_intra_rating > 8)
        max_filter_level = MAX_LOOP_FILTER * 3 / 4;

    (void) cpi;
    (void) base_qindex;

    return max_filter_level;
}

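// Fast filter-level selection: filters only a partial frame and walks the
// level linearly downward, then upward, from the previous frame's value,
// keeping the level with the smallest partial-frame squared error.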
void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    int best_err = 0;
    int filt_err = 0;
    int min_filter_level = 0;
    int max_filter_level = MAX_LOOP_FILTER * 3 / 4;   // PGW August 2006: Highest filter values almost always a bad idea
    int filt_val;
    int best_filt_val = cm->filter_level;

    // Make a copy of the unfiltered / processed recon buffer
    //vp8_yv12_copy_frame_ptr( cm->frame_to_show, &cpi->last_frame_uf );
    vp8_yv12_copy_partial_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf, 3);

    if (cm->frame_type == KEY_FRAME)
        cm->sharpness_level = 0;
    else
        cm->sharpness_level = cpi->oxcf.Sharpness;

    // Enforce a minimum filter level based upon Q
    min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
    max_filter_level = get_max_filter_level(cpi, cm->base_qindex);

    // Start the search at the previous frame filter level unless it is now out of range.
    if (cm->filter_level < min_filter_level)
        cm->filter_level = min_filter_level;
    else if (cm->filter_level > max_filter_level)
        cm->filter_level = max_filter_level;

    filt_val = cm->filter_level;
    best_filt_val = filt_val;

    // Set up alternate filter values

    // Get the err using the previous frame's filter value.
    vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val, 0, 3);
    cm->last_frame_type = cm->frame_type;
    cm->last_filter_type = cm->filter_type;
    cm->last_sharpness_level = cm->sharpness_level;

    best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));

    // Re-instate the unfiltered frame
    vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);

    filt_val -= (1 + ((filt_val > 10) ? 1 : 0));

    // Search lower filter levels
    while (filt_val >= min_filter_level)
    {
        // Apply the loop filter
        vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val, 0, 3);
        cm->last_frame_type = cm->frame_type;
        cm->last_filter_type = cm->filter_type;
        cm->last_sharpness_level = cm->sharpness_level;

        // Get the err for the filtered frame
        filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));

        // Re-instate the unfiltered frame
        vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);

        // Update the best case record or exit the loop.
        if (filt_err < best_err)
        {
            best_err = filt_err;
            best_filt_val = filt_val;
        }
        else
            break;

        // Adjust filter level
        filt_val -= (1 + ((filt_val > 10) ? 1 : 0));
    }

    // Search up (note that we have already done filt_val = cm->filter_level)
    filt_val = cm->filter_level + (1 + ((filt_val > 10) ? 1 : 0));

    if (best_filt_val == cm->filter_level)
    {
        // Resist raising the filter level for very small gains
        best_err -= (best_err >> 10);

        while (filt_val < max_filter_level)
        {
            // Apply the loop filter
            vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val, 0, 3);
            cm->last_frame_type = cm->frame_type;
            cm->last_filter_type = cm->filter_type;
            cm->last_sharpness_level = cm->sharpness_level;

            // Get the err for the filtered frame
            filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));

            // Re-instate the unfiltered frame
            vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);

            // Update the best case record or exit the loop.
            if (filt_err < best_err)
            {
                // Do not raise the filter level if the improvement is < 1 part in 1024
                best_err = filt_err - (filt_err >> 10);

                best_filt_val = filt_val;
            }
            else
                break;

            // Adjust filter level
            filt_val += (1 + ((filt_val > 10) ? 1 : 0));
        }
    }

    cm->filter_level = best_filt_val;

    if (cm->filter_level < min_filter_level)
        cm->filter_level = min_filter_level;

    if (cm->filter_level > max_filter_level)
        cm->filter_level = max_filter_level;
}

// Stub function for now Alt LF not used
void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val)
{
    MACROBLOCKD *mbd = &cpi->mb.e_mbd;
    (void) filt_val;

    mbd->segment_feature_data[MB_LVL_ALT_LF][0] = cpi->segment_feature_data[MB_LVL_ALT_LF][0];
    mbd->segment_feature_data[MB_LVL_ALT_LF][1] = cpi->segment_feature_data[MB_LVL_ALT_LF][1];
    mbd->segment_feature_data[MB_LVL_ALT_LF][2] = cpi->segment_feature_data[MB_LVL_ALT_LF][2];
    mbd->segment_feature_data[MB_LVL_ALT_LF][3] = cpi->segment_feature_data[MB_LVL_ALT_LF][3];
}

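// Full filter-level selection: filters the complete Y plane at candidate
// levels and refines the previous frame's level with a halving-step search,
// biased against moving to higher filter values.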
void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    int best_err = 0;
    int filt_err = 0;
    int min_filter_level;
    int max_filter_level;

    int filter_step;
    int filt_high = 0;
    int filt_mid = cm->filter_level;    // Start search at previous frame filter level
    int filt_low = 0;
    int filt_best;
    int filt_direction = 0;

    int Bias = 0;                       // Bias against raising loop filter and in favour of lowering it

    // Make a copy of the unfiltered / processed recon buffer
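    // On ARMv7 builds the NEON copy is used, but when runtime CPU detection is
    // enabled it is only taken if the detected flags report NEON; all other
    // configurations fall back to the generic copy function pointer.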
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
    if (cm->rtcd.flags & HAS_NEON)
#endif
    {
        vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(cm->frame_to_show, &cpi->last_frame_uf);
    }
#if CONFIG_RUNTIME_CPU_DETECT
    else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
    {
        vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf);
    }
#endif

    if (cm->frame_type == KEY_FRAME)
        cm->sharpness_level = 0;
    else
        cm->sharpness_level = cpi->oxcf.Sharpness;

    // Enforce a minimum filter level based upon Q
    min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
    max_filter_level = get_max_filter_level(cpi, cm->base_qindex);

    // Start the search at the previous frame filter level unless it is now out of range.
    filt_mid = cm->filter_level;

    if (filt_mid < min_filter_level)
        filt_mid = min_filter_level;
    else if (filt_mid > max_filter_level)
        filt_mid = max_filter_level;

    // Define the initial step size
    filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;

    // Get baseline error score
    vp8cx_set_alt_lf_level(cpi, filt_mid);
    vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid, 0);
    cm->last_frame_type = cm->frame_type;
    cm->last_filter_type = cm->filter_type;
    cm->last_sharpness_level = cm->sharpness_level;

    best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
    filt_best = filt_mid;

    // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
    if (cm->rtcd.flags & HAS_NEON)
#endif
    {
        vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
    }
#if CONFIG_RUNTIME_CPU_DETECT
    else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
    {
        vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
    }
#endif

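    // Halving-step search: each pass evaluates one step below and/or one step
    // above filt_mid; Bias makes it harder to accept a higher filter level
    // than to accept a lower one.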
    while (filter_step > 0)
    {
        Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images

        // jbb chg: 20100118 - in sections with lots of new material coming in,
        // don't bias as much toward a low filter value.
        if (cpi->section_intra_rating < 20)
            Bias = Bias * cpi->section_intra_rating / 20;

        filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
        filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);

        if ((filt_direction <= 0) && (filt_low != filt_mid))
        {
            // Get Low filter error score
            vp8cx_set_alt_lf_level(cpi, filt_low);
            vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low, 0);
            cm->last_frame_type = cm->frame_type;
            cm->last_filter_type = cm->filter_type;
            cm->last_sharpness_level = cm->sharpness_level;

            filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));

            // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
            if (cm->rtcd.flags & HAS_NEON)
#endif
            {
                vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
            }
#if CONFIG_RUNTIME_CPU_DETECT
            else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
            {
                vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
            }
#endif

            // If value is close to the best so far then bias towards a lower loop filter value.
            if ((filt_err - Bias) < best_err)
            {
                // Was it actually better than the previous best?
                if (filt_err < best_err)
                    best_err = filt_err;

                filt_best = filt_low;
            }
        }

        // Now look at filt_high
        if ((filt_direction >= 0) && (filt_high != filt_mid))
        {
            vp8cx_set_alt_lf_level(cpi, filt_high);
            vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high, 0);
            cm->last_frame_type = cm->frame_type;
            cm->last_filter_type = cm->filter_type;
            cm->last_sharpness_level = cm->sharpness_level;

            filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));

            // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
            if (cm->rtcd.flags & HAS_NEON)
#endif
            {
                vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
            }
#if CONFIG_RUNTIME_CPU_DETECT
            else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
            {
                vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
            }
#endif

            // Was it better than the previous best?
            if (filt_err < (best_err - Bias))
            {
                best_err = filt_err;
                filt_best = filt_high;
            }
        }

        // Halve the step distance if the best filter value was the same as last time
        if (filt_best == filt_mid)
        {
            filter_step = filter_step / 2;
            filt_direction = 0;
        }
        else
        {
            filt_direction = (filt_best < filt_mid) ? -1 : 1;
            filt_mid = filt_best;
        }
    }

    cm->filter_level = filt_best;
    cpi->last_auto_filt_val = filt_best;
    cpi->last_auto_filt_q = cm->base_qindex;

    cpi->frames_since_auto_filter = 0;
}