Nextgen branch cleanup: remove svc code

Spatial/temporal svc code was removed.

Change-Id: Ie25c7a58ee5feb662d4de526406d8cd834d19977
Author: Yunqing Wang 2015-07-06 14:39:03 -07:00
parent 020293588d
commit ca42973ea2
31 changed files with 82 additions and 4867 deletions
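For context, the spatial-SVC example deleted below drove the encoder through the SvcContext helpers in vpx/svc_context.h, which this commit stops installing. The following is a minimal sketch of that (now removed) encode flow; it assumes a libvpx build from before this change, uses illustrative dimensions and layer counts, and omits file I/O and most error handling.

#include <string.h>
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"

/* Sketch only: encode num_frames blank I420 frames with two spatial layers. */
static int svc_encode_sketch(int num_frames) {
  SvcContext svc_ctx;
  vpx_codec_ctx_t codec;
  vpx_codec_enc_cfg_t enc_cfg;
  vpx_image_t raw;
  int pts;
  memset(&svc_ctx, 0, sizeof(svc_ctx));
  svc_ctx.spatial_layers = 2;  /* illustrative layer count */
  if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), &enc_cfg, 0)) return 1;
  enc_cfg.g_w = 640;  /* illustrative resolution */
  enc_cfg.g_h = 360;
  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, enc_cfg.g_w, enc_cfg.g_h, 32))
    return 1;
  if (vpx_svc_init(&svc_ctx, &codec, vpx_codec_vp9_cx(), &enc_cfg) !=
      VPX_CODEC_OK)
    return 1;
  for (pts = 0; pts < num_frames; ++pts) {
    /* A real caller would fill raw with source pixels and then drain the
     * compressed packets with vpx_codec_get_cx_data(). */
    if (vpx_svc_encode(&svc_ctx, &codec, &raw, pts, 1, VPX_DL_GOOD_QUALITY) !=
        VPX_CODEC_OK)
      break;
  }
  /* Flush with a NULL frame, then release the SVC context and codec. */
  vpx_svc_encode(&svc_ctx, &codec, NULL, pts, 1, VPX_DL_GOOD_QUALITY);
  vpx_svc_release(&svc_ctx);
  vpx_codec_destroy(&codec);
  vpx_img_free(&raw);
  return 0;
}

The deleted vp9_spatial_svc_encoder.c below is the full-featured version of this flow, including two-pass rate control and command-line handling.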

configure

@@ -278,7 +278,6 @@ HAVE_LIST="
unistd_h
"
EXPERIMENT_LIST="
spatial_svc
vp9_temporal_denoising
fp_mb_stats
emulate_hardware

@@ -91,29 +91,11 @@ ifeq ($(CONFIG_WEBM_IO),yes)
endif
vpxenc.GUID = 548DEC74-7A15-4B2B-AFC3-AA102E7C25C1
vpxenc.DESCRIPTION = Full featured encoder
ifeq ($(CONFIG_SPATIAL_SVC),yes)
EXAMPLES-$(CONFIG_VP9_ENCODER) += vp9_spatial_svc_encoder.c
vp9_spatial_svc_encoder.SRCS += args.c args.h
vp9_spatial_svc_encoder.SRCS += ivfenc.c ivfenc.h
vp9_spatial_svc_encoder.SRCS += tools_common.c tools_common.h
vp9_spatial_svc_encoder.SRCS += video_common.h
vp9_spatial_svc_encoder.SRCS += video_writer.h video_writer.c
vp9_spatial_svc_encoder.SRCS += vpxstats.c vpxstats.h
vp9_spatial_svc_encoder.GUID = 4A38598D-627D-4505-9C7B-D4020C84100D
vp9_spatial_svc_encoder.DESCRIPTION = VP9 Spatial SVC Encoder
endif
ifneq ($(CONFIG_SHARED),yes)
EXAMPLES-$(CONFIG_VP9_ENCODER) += resize_util.c
endif
EXAMPLES-$(CONFIG_ENCODERS) += vpx_temporal_svc_encoder.c
vpx_temporal_svc_encoder.SRCS += ivfenc.c ivfenc.h
vpx_temporal_svc_encoder.SRCS += tools_common.c tools_common.h
vpx_temporal_svc_encoder.SRCS += video_common.h
vpx_temporal_svc_encoder.SRCS += video_writer.h video_writer.c
vpx_temporal_svc_encoder.GUID = B18C08F2-A439-4502-A78E-849BE3D60947
vpx_temporal_svc_encoder.DESCRIPTION = Temporal SVC Encoder
EXAMPLES-$(CONFIG_DECODERS) += simple_decoder.c
simple_decoder.GUID = D3BBF1E9-2427-450D-BBFF-B2843C1D44CC
simple_decoder.SRCS += ivfdec.h ivfdec.c

@@ -1,439 +0,0 @@
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* This is an example demonstrating how to implement a multi-layer
* VP9 encoding scheme based on spatial scalability for video applications
* that benefit from a scalable bitstream.
*/
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "./args.h"
#include "./tools_common.h"
#include "./video_writer.h"
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
#include "./vpxstats.h"
static const arg_def_t skip_frames_arg =
ARG_DEF("s", "skip-frames", 1, "input frames to skip");
static const arg_def_t frames_arg =
ARG_DEF("f", "frames", 1, "number of frames to encode");
static const arg_def_t width_arg = ARG_DEF("w", "width", 1, "source width");
static const arg_def_t height_arg = ARG_DEF("h", "height", 1, "source height");
static const arg_def_t timebase_arg =
ARG_DEF("t", "timebase", 1, "timebase (num/den)");
static const arg_def_t bitrate_arg = ARG_DEF(
"b", "target-bitrate", 1, "encoding bitrate, in kilobits per second");
static const arg_def_t spatial_layers_arg =
ARG_DEF("sl", "spatial-layers", 1, "number of spatial SVC layers");
static const arg_def_t temporal_layers_arg =
ARG_DEF("tl", "temporal-layers", 1, "number of temporal SVC layers");
static const arg_def_t kf_dist_arg =
ARG_DEF("k", "kf-dist", 1, "number of frames between keyframes");
static const arg_def_t scale_factors_arg =
ARG_DEF("r", "scale-factors", 1, "scale factors (lowest to highest layer)");
static const arg_def_t passes_arg =
ARG_DEF("p", "passes", 1, "Number of passes (1/2)");
static const arg_def_t pass_arg =
ARG_DEF(NULL, "pass", 1, "Pass to execute (1/2)");
static const arg_def_t fpf_name_arg =
ARG_DEF(NULL, "fpf", 1, "First pass statistics file name");
static const arg_def_t min_q_arg =
ARG_DEF(NULL, "min-q", 1, "Minimum quantizer");
static const arg_def_t max_q_arg =
ARG_DEF(NULL, "max-q", 1, "Maximum quantizer");
static const arg_def_t min_bitrate_arg =
ARG_DEF(NULL, "min-bitrate", 1, "Minimum bitrate");
static const arg_def_t max_bitrate_arg =
ARG_DEF(NULL, "max-bitrate", 1, "Maximum bitrate");
#if CONFIG_VP9_HIGHBITDEPTH
static const struct arg_enum_list bitdepth_enum[] = {
{"8", VPX_BITS_8},
{"10", VPX_BITS_10},
{"12", VPX_BITS_12},
{NULL, 0}
};
static const arg_def_t bitdepth_arg =
ARG_DEF_ENUM("d", "bit-depth", 1, "Bit depth for codec 8, 10 or 12. ",
bitdepth_enum);
#endif // CONFIG_VP9_HIGHBITDEPTH
static const arg_def_t *svc_args[] = {
&frames_arg, &width_arg, &height_arg,
&timebase_arg, &bitrate_arg, &skip_frames_arg, &spatial_layers_arg,
&kf_dist_arg, &scale_factors_arg, &passes_arg, &pass_arg,
&fpf_name_arg, &min_q_arg, &max_q_arg, &min_bitrate_arg,
&max_bitrate_arg, &temporal_layers_arg,
#if CONFIG_VP9_HIGHBITDEPTH
&bitdepth_arg,
#endif
NULL
};
static const uint32_t default_frames_to_skip = 0;
static const uint32_t default_frames_to_code = 60 * 60;
static const uint32_t default_width = 1920;
static const uint32_t default_height = 1080;
static const uint32_t default_timebase_num = 1;
static const uint32_t default_timebase_den = 60;
static const uint32_t default_bitrate = 1000;
static const uint32_t default_spatial_layers = 5;
static const uint32_t default_temporal_layers = 1;
static const uint32_t default_kf_dist = 100;
typedef struct {
const char *input_filename;
const char *output_filename;
uint32_t frames_to_code;
uint32_t frames_to_skip;
struct VpxInputContext input_ctx;
stats_io_t rc_stats;
int passes;
int pass;
} AppInput;
static const char *exec_name;
void usage_exit() {
fprintf(stderr, "Usage: %s <options> input_filename output_filename\n",
exec_name);
fprintf(stderr, "Options:\n");
arg_show_usage(stderr, svc_args);
exit(EXIT_FAILURE);
}
static void parse_command_line(int argc, const char **argv_,
AppInput *app_input, SvcContext *svc_ctx,
vpx_codec_enc_cfg_t *enc_cfg) {
struct arg arg = {0};
char **argv = NULL;
char **argi = NULL;
char **argj = NULL;
vpx_codec_err_t res;
int passes = 0;
int pass = 0;
const char *fpf_file_name = NULL;
unsigned int min_bitrate = 0;
unsigned int max_bitrate = 0;
char string_options[1024] = {0};
// initialize SvcContext with parameters that will be passed to vpx_svc_init
svc_ctx->log_level = SVC_LOG_DEBUG;
svc_ctx->spatial_layers = default_spatial_layers;
svc_ctx->temporal_layers = default_temporal_layers;
// start with default encoder configuration
res = vpx_codec_enc_config_default(vpx_codec_vp9_cx(), enc_cfg, 0);
if (res) {
die("Failed to get config: %s\n", vpx_codec_err_to_string(res));
}
// update enc_cfg with app default values
enc_cfg->g_w = default_width;
enc_cfg->g_h = default_height;
enc_cfg->g_timebase.num = default_timebase_num;
enc_cfg->g_timebase.den = default_timebase_den;
enc_cfg->rc_target_bitrate = default_bitrate;
enc_cfg->kf_min_dist = default_kf_dist;
enc_cfg->kf_max_dist = default_kf_dist;
enc_cfg->rc_end_usage = VPX_CQ;
// initialize AppInput with default values
app_input->frames_to_code = default_frames_to_code;
app_input->frames_to_skip = default_frames_to_skip;
// process command line options
argv = argv_dup(argc - 1, argv_ + 1);
for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
arg.argv_step = 1;
if (arg_match(&arg, &frames_arg, argi)) {
app_input->frames_to_code = arg_parse_uint(&arg);
} else if (arg_match(&arg, &width_arg, argi)) {
enc_cfg->g_w = arg_parse_uint(&arg);
} else if (arg_match(&arg, &height_arg, argi)) {
enc_cfg->g_h = arg_parse_uint(&arg);
} else if (arg_match(&arg, &timebase_arg, argi)) {
enc_cfg->g_timebase = arg_parse_rational(&arg);
} else if (arg_match(&arg, &bitrate_arg, argi)) {
enc_cfg->rc_target_bitrate = arg_parse_uint(&arg);
} else if (arg_match(&arg, &skip_frames_arg, argi)) {
app_input->frames_to_skip = arg_parse_uint(&arg);
} else if (arg_match(&arg, &spatial_layers_arg, argi)) {
svc_ctx->spatial_layers = arg_parse_uint(&arg);
} else if (arg_match(&arg, &temporal_layers_arg, argi)) {
svc_ctx->temporal_layers = arg_parse_uint(&arg);
} else if (arg_match(&arg, &kf_dist_arg, argi)) {
enc_cfg->kf_min_dist = arg_parse_uint(&arg);
enc_cfg->kf_max_dist = enc_cfg->kf_min_dist;
} else if (arg_match(&arg, &scale_factors_arg, argi)) {
snprintf(string_options, sizeof(string_options), "%s scale-factors=%s",
string_options, arg.val);
} else if (arg_match(&arg, &passes_arg, argi)) {
passes = arg_parse_uint(&arg);
if (passes < 1 || passes > 2) {
die("Error: Invalid number of passes (%d)\n", passes);
}
} else if (arg_match(&arg, &pass_arg, argi)) {
pass = arg_parse_uint(&arg);
if (pass < 1 || pass > 2) {
die("Error: Invalid pass selected (%d)\n", pass);
}
} else if (arg_match(&arg, &fpf_name_arg, argi)) {
fpf_file_name = arg.val;
} else if (arg_match(&arg, &min_q_arg, argi)) {
snprintf(string_options, sizeof(string_options), "%s min-quantizers=%s",
string_options, arg.val);
} else if (arg_match(&arg, &max_q_arg, argi)) {
snprintf(string_options, sizeof(string_options), "%s max-quantizers=%s",
string_options, arg.val);
} else if (arg_match(&arg, &min_bitrate_arg, argi)) {
min_bitrate = arg_parse_uint(&arg);
} else if (arg_match(&arg, &max_bitrate_arg, argi)) {
max_bitrate = arg_parse_uint(&arg);
#if CONFIG_VP9_HIGHBITDEPTH
} else if (arg_match(&arg, &bitdepth_arg, argi)) {
enc_cfg->g_bit_depth = arg_parse_enum_or_int(&arg);
switch (enc_cfg->g_bit_depth) {
case VPX_BITS_8:
enc_cfg->g_input_bit_depth = 8;
enc_cfg->g_profile = 0;
break;
case VPX_BITS_10:
enc_cfg->g_input_bit_depth = 10;
enc_cfg->g_profile = 2;
break;
case VPX_BITS_12:
enc_cfg->g_input_bit_depth = 12;
enc_cfg->g_profile = 2;
break;
default:
die("Error: Invalid bit depth selected (%d)\n", enc_cfg->g_bit_depth);
break;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
} else {
++argj;
}
}
// There will be a space in front of the string options
if (strlen(string_options) > 0)
vpx_svc_set_options(svc_ctx, string_options + 1);
if (passes == 0 || passes == 1) {
if (pass) {
fprintf(stderr, "pass is ignored since there's only one pass\n");
}
enc_cfg->g_pass = VPX_RC_ONE_PASS;
} else {
if (pass == 0) {
die("pass must be specified when passes is 2\n");
}
if (fpf_file_name == NULL) {
die("fpf must be specified when passes is 2\n");
}
if (pass == 1) {
enc_cfg->g_pass = VPX_RC_FIRST_PASS;
if (!stats_open_file(&app_input->rc_stats, fpf_file_name, 0)) {
fatal("Failed to open statistics store");
}
} else {
enc_cfg->g_pass = VPX_RC_LAST_PASS;
if (!stats_open_file(&app_input->rc_stats, fpf_file_name, 1)) {
fatal("Failed to open statistics store");
}
enc_cfg->rc_twopass_stats_in = stats_get(&app_input->rc_stats);
}
app_input->passes = passes;
app_input->pass = pass;
}
if (enc_cfg->rc_target_bitrate > 0) {
if (min_bitrate > 0) {
enc_cfg->rc_2pass_vbr_minsection_pct =
min_bitrate * 100 / enc_cfg->rc_target_bitrate;
}
if (max_bitrate > 0) {
enc_cfg->rc_2pass_vbr_maxsection_pct =
max_bitrate * 100 / enc_cfg->rc_target_bitrate;
}
}
// Check for unrecognized options
for (argi = argv; *argi; ++argi)
if (argi[0][0] == '-' && strlen(argi[0]) > 1)
die("Error: Unrecognized option %s\n", *argi);
if (argv[0] == NULL || argv[1] == 0) {
usage_exit();
}
app_input->input_filename = argv[0];
app_input->output_filename = argv[1];
free(argv);
if (enc_cfg->g_w < 16 || enc_cfg->g_w % 2 || enc_cfg->g_h < 16 ||
enc_cfg->g_h % 2)
die("Invalid resolution: %d x %d\n", enc_cfg->g_w, enc_cfg->g_h);
printf(
"Codec %s\nframes: %d, skip: %d\n"
"layers: %d\n"
"width %d, height: %d,\n"
"num: %d, den: %d, bitrate: %d,\n"
"gop size: %d\n",
vpx_codec_iface_name(vpx_codec_vp9_cx()), app_input->frames_to_code,
app_input->frames_to_skip,
svc_ctx->spatial_layers, enc_cfg->g_w, enc_cfg->g_h,
enc_cfg->g_timebase.num, enc_cfg->g_timebase.den,
enc_cfg->rc_target_bitrate, enc_cfg->kf_max_dist);
}
int main(int argc, const char **argv) {
AppInput app_input = {0};
VpxVideoWriter *writer = NULL;
VpxVideoInfo info = {0};
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t enc_cfg;
SvcContext svc_ctx;
uint32_t i;
uint32_t frame_cnt = 0;
vpx_image_t raw;
vpx_codec_err_t res;
int pts = 0; /* PTS starts at 0 */
int frame_duration = 1; /* 1 timebase tick per frame */
FILE *infile = NULL;
int end_of_stream = 0;
int frames_received = 0;
memset(&svc_ctx, 0, sizeof(svc_ctx));
svc_ctx.log_print = 1;
exec_name = argv[0];
parse_command_line(argc, argv, &app_input, &svc_ctx, &enc_cfg);
// Allocate image buffer
#if CONFIG_VP9_HIGHBITDEPTH
if (!vpx_img_alloc(&raw, enc_cfg.g_input_bit_depth == 8 ?
VPX_IMG_FMT_I420 : VPX_IMG_FMT_I42016,
enc_cfg.g_w, enc_cfg.g_h, 32)) {
die("Failed to allocate image %dx%d\n", enc_cfg.g_w, enc_cfg.g_h);
}
#else
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, enc_cfg.g_w, enc_cfg.g_h, 32)) {
die("Failed to allocate image %dx%d\n", enc_cfg.g_w, enc_cfg.g_h);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
if (!(infile = fopen(app_input.input_filename, "rb")))
die("Failed to open %s for reading\n", app_input.input_filename);
// Initialize codec
if (vpx_svc_init(&svc_ctx, &codec, vpx_codec_vp9_cx(), &enc_cfg) !=
VPX_CODEC_OK)
die("Failed to initialize encoder\n");
info.codec_fourcc = VP9_FOURCC;
info.time_base.numerator = enc_cfg.g_timebase.num;
info.time_base.denominator = enc_cfg.g_timebase.den;
if (!(app_input.passes == 2 && app_input.pass == 1)) {
// We don't save the bitstream for the 1st pass on two pass rate control
writer = vpx_video_writer_open(app_input.output_filename, kContainerIVF,
&info);
if (!writer)
die("Failed to open %s for writing\n", app_input.output_filename);
}
// skip initial frames
for (i = 0; i < app_input.frames_to_skip; ++i)
vpx_img_read(&raw, infile);
// Encode frames
while (!end_of_stream) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
if (frame_cnt >= app_input.frames_to_code || !vpx_img_read(&raw, infile)) {
// We need one extra vpx_svc_encode call at end of stream to flush
// encoder and get remaining data
end_of_stream = 1;
}
res = vpx_svc_encode(&svc_ctx, &codec, (end_of_stream ? NULL : &raw),
pts, frame_duration, VPX_DL_GOOD_QUALITY);
printf("%s", vpx_svc_get_message(&svc_ctx));
if (res != VPX_CODEC_OK) {
die_codec(&codec, "Failed to encode frame");
}
while ((cx_pkt = vpx_codec_get_cx_data(&codec, &iter)) != NULL) {
switch (cx_pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
if (cx_pkt->data.frame.sz > 0)
vpx_video_writer_write_frame(writer,
cx_pkt->data.frame.buf,
cx_pkt->data.frame.sz,
cx_pkt->data.frame.pts);
printf("SVC frame: %d, kf: %d, size: %d, pts: %d\n", frames_received,
!!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY),
(int)cx_pkt->data.frame.sz, (int)cx_pkt->data.frame.pts);
++frames_received;
break;
}
case VPX_CODEC_STATS_PKT: {
stats_write(&app_input.rc_stats,
cx_pkt->data.twopass_stats.buf,
cx_pkt->data.twopass_stats.sz);
break;
}
default: {
break;
}
}
}
if (!end_of_stream) {
++frame_cnt;
pts += frame_duration;
}
}
printf("Processed %d frames\n", frame_cnt);
fclose(infile);
if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec");
if (app_input.passes == 2)
stats_close(&app_input.rc_stats, 1);
if (writer) {
vpx_video_writer_close(writer);
}
vpx_img_free(&raw);
// display average size, psnr
printf("%s", vpx_svc_dump_statistics(&svc_ctx));
vpx_svc_release(&svc_ctx);
return EXIT_SUCCESS;
}

@@ -1,733 +0,0 @@
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This is an example demonstrating how to implement a multi-layer VPx
// encoding scheme based on temporal scalability for video applications
// that benefit from a scalable bitstream.
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "./vpx_config.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
#include "./tools_common.h"
#include "./video_writer.h"
static const char *exec_name;
void usage_exit() {
exit(EXIT_FAILURE);
}
// Denoiser states, for temporal denoising.
enum denoiserState {
kDenoiserOff,
kDenoiserOnYOnly,
kDenoiserOnYUV,
kDenoiserOnYUVAggressive,
kDenoiserOnAdaptive
};
static int mode_to_num_layers[12] = {1, 2, 2, 3, 3, 3, 3, 5, 2, 3, 3, 3};
// For rate control encoding stats.
struct RateControlMetrics {
// Number of input frames per layer.
int layer_input_frames[VPX_TS_MAX_LAYERS];
// Total (cumulative) number of encoded frames per layer.
int layer_tot_enc_frames[VPX_TS_MAX_LAYERS];
// Number of encoded non-key frames per layer.
int layer_enc_frames[VPX_TS_MAX_LAYERS];
// Framerate per layer (cumulative).
double layer_framerate[VPX_TS_MAX_LAYERS];
// Target average frame size per layer (per-frame-bandwidth per layer).
double layer_pfb[VPX_TS_MAX_LAYERS];
// Actual average frame size per layer.
double layer_avg_frame_size[VPX_TS_MAX_LAYERS];
// Average rate mismatch per layer (|target - actual| / target).
double layer_avg_rate_mismatch[VPX_TS_MAX_LAYERS];
// Actual encoding bitrate per layer (cumulative).
double layer_encoding_bitrate[VPX_TS_MAX_LAYERS];
};
// Note: these rate control metrics assume only 1 key frame in the
// sequence (i.e., first frame only). So for temporal pattern #7
// (which has key frame for every frame on base layer), the metrics
// computation will be off/wrong.
// TODO(marpan): Update these metrics to account for multiple key frames
// in the stream.
static void set_rate_control_metrics(struct RateControlMetrics *rc,
vpx_codec_enc_cfg_t *cfg) {
unsigned int i = 0;
// Set the layer (cumulative) framerate and the target layer (non-cumulative)
// per-frame-bandwidth, for the rate control encoding stats below.
const double framerate = cfg->g_timebase.den / cfg->g_timebase.num;
rc->layer_framerate[0] = framerate / cfg->ts_rate_decimator[0];
rc->layer_pfb[0] = 1000.0 * cfg->ts_target_bitrate[0] /
rc->layer_framerate[0];
for (i = 0; i < cfg->ts_number_layers; ++i) {
if (i > 0) {
rc->layer_framerate[i] = framerate / cfg->ts_rate_decimator[i];
rc->layer_pfb[i] = 1000.0 *
(cfg->ts_target_bitrate[i] - cfg->ts_target_bitrate[i - 1]) /
(rc->layer_framerate[i] - rc->layer_framerate[i - 1]);
}
rc->layer_input_frames[i] = 0;
rc->layer_enc_frames[i] = 0;
rc->layer_tot_enc_frames[i] = 0;
rc->layer_encoding_bitrate[i] = 0.0;
rc->layer_avg_frame_size[i] = 0.0;
rc->layer_avg_rate_mismatch[i] = 0.0;
}
}
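// Worked example (illustrative numbers): with a 1/30 timebase (framerate 30),
// ts_rate_decimator = {2, 1} and cumulative ts_target_bitrate = {200, 400}
// kbps, the code above gives
//   layer_framerate[0] = 30 / 2 = 15, layer_pfb[0] = 1000 * 200 / 15
//                                                  = ~13333 bits/frame,
//   layer_framerate[1] = 30 / 1 = 30, layer_pfb[1] = 1000 * (400 - 200) /
//                                     (30 - 15)    = ~13333 bits/frame,
// i.e. each layer's per-frame-bandwidth target is the bitrate it adds divided
// by the framerate it adds on top of the layer below.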
static void printout_rate_control_summary(struct RateControlMetrics *rc,
vpx_codec_enc_cfg_t *cfg,
int frame_cnt) {
unsigned int i = 0;
int tot_num_frames = 0;
printf("Total number of processed frames: %d\n\n", frame_cnt -1);
printf("Rate control layer stats for %d layer(s):\n\n",
cfg->ts_number_layers);
for (i = 0; i < cfg->ts_number_layers; ++i) {
const int num_dropped = (i > 0) ?
(rc->layer_input_frames[i] - rc->layer_enc_frames[i]) :
(rc->layer_input_frames[i] - rc->layer_enc_frames[i] - 1);
tot_num_frames += rc->layer_input_frames[i];
rc->layer_encoding_bitrate[i] = 0.001 * rc->layer_framerate[i] *
rc->layer_encoding_bitrate[i] / tot_num_frames;
rc->layer_avg_frame_size[i] = rc->layer_avg_frame_size[i] /
rc->layer_enc_frames[i];
rc->layer_avg_rate_mismatch[i] = 100.0 * rc->layer_avg_rate_mismatch[i] /
rc->layer_enc_frames[i];
printf("For layer#: %d \n", i);
printf("Bitrate (target vs actual): %d %f \n", cfg->ts_target_bitrate[i],
rc->layer_encoding_bitrate[i]);
printf("Average frame size (target vs actual): %f %f \n", rc->layer_pfb[i],
rc->layer_avg_frame_size[i]);
printf("Average rate_mismatch: %f \n", rc->layer_avg_rate_mismatch[i]);
printf("Number of input frames, encoded (non-key) frames, "
"and perc dropped frames: %d %d %f \n", rc->layer_input_frames[i],
rc->layer_enc_frames[i],
100.0 * num_dropped / rc->layer_input_frames[i]);
printf("\n");
}
if ((frame_cnt - 1) != tot_num_frames)
die("Error: Number of input frames not equal to output! \n");
}
// Temporal scaling parameters:
// NOTE: The 3 prediction frames cannot be used interchangeably due to
// differences in the way they are handled throughout the code. The
// frames should be allocated to layers in the order LAST, GF, ARF.
// Other combinations work, but may produce slightly inferior results.
static void set_temporal_layer_pattern(int layering_mode,
vpx_codec_enc_cfg_t *cfg,
int *layer_flags,
int *flag_periodicity) {
switch (layering_mode) {
case 0: {
// 1-layer.
int ids[1] = {0};
cfg->ts_periodicity = 1;
*flag_periodicity = 1;
cfg->ts_number_layers = 1;
cfg->ts_rate_decimator[0] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// Update L only.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
break;
}
case 1: {
// 2-layers, 2-frame period.
int ids[2] = {0, 1};
cfg->ts_periodicity = 2;
*flag_periodicity = 2;
cfg->ts_number_layers = 2;
cfg->ts_rate_decimator[0] = 2;
cfg->ts_rate_decimator[1] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
#if 1
// 0=L, 1=GF, Intra-layer prediction enabled.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
layer_flags[1] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_REF_ARF;
#else
// 0=L, 1=GF, Intra-layer prediction disabled.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
layer_flags[1] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_LAST;
#endif
break;
}
case 2: {
// 2-layers, 3-frame period.
int ids[3] = {0, 1, 1};
cfg->ts_periodicity = 3;
*flag_periodicity = 3;
cfg->ts_number_layers = 2;
cfg->ts_rate_decimator[0] = 3;
cfg->ts_rate_decimator[1] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, Intra-layer prediction enabled.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
layer_flags[1] =
layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
break;
}
case 3: {
// 3-layers, 6-frame period.
int ids[6] = {0, 2, 2, 1, 2, 2};
cfg->ts_periodicity = 6;
*flag_periodicity = 6;
cfg->ts_number_layers = 3;
cfg->ts_rate_decimator[0] = 6;
cfg->ts_rate_decimator[1] = 3;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
layer_flags[3] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST;
layer_flags[1] =
layer_flags[2] =
layer_flags[4] =
layer_flags[5] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST;
break;
}
case 4: {
// 3-layers, 4-frame period.
int ids[4] = {0, 2, 1, 2};
cfg->ts_periodicity = 4;
*flag_periodicity = 4;
cfg->ts_number_layers = 3;
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, 2=ARF, Intra-layer prediction disabled.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
layer_flags[1] =
layer_flags[3] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
break;
}
case 5: {
// 3-layers, 4-frame period.
int ids[4] = {0, 2, 1, 2};
cfg->ts_periodicity = 4;
*flag_periodicity = 4;
cfg->ts_number_layers = 3;
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled in layer 1, disabled
// in layer 2.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
layer_flags[2] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
layer_flags[1] =
layer_flags[3] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
break;
}
case 6: {
// 3-layers, 4-frame period.
int ids[4] = {0, 2, 1, 2};
cfg->ts_periodicity = 4;
*flag_periodicity = 4;
cfg->ts_number_layers = 3;
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
layer_flags[2] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
layer_flags[1] =
layer_flags[3] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF;
break;
}
case 7: {
// NOTE: Probably of academic interest only.
// 5-layers, 16-frame period.
int ids[16] = {0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4};
cfg->ts_periodicity = 16;
*flag_periodicity = 16;
cfg->ts_number_layers = 5;
cfg->ts_rate_decimator[0] = 16;
cfg->ts_rate_decimator[1] = 8;
cfg->ts_rate_decimator[2] = 4;
cfg->ts_rate_decimator[3] = 2;
cfg->ts_rate_decimator[4] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
layer_flags[0] = VPX_EFLAG_FORCE_KF;
layer_flags[1] =
layer_flags[3] =
layer_flags[5] =
layer_flags[7] =
layer_flags[9] =
layer_flags[11] =
layer_flags[13] =
layer_flags[15] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
layer_flags[2] =
layer_flags[6] =
layer_flags[10] =
layer_flags[14] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
layer_flags[4] =
layer_flags[12] = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_ARF;
layer_flags[8] = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF;
break;
}
case 8: {
// 2-layers, with sync point at first frame of layer 1.
int ids[2] = {0, 1};
cfg->ts_periodicity = 2;
*flag_periodicity = 8;
cfg->ts_number_layers = 2;
cfg->ts_rate_decimator[0] = 2;
cfg->ts_rate_decimator[1] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF.
// ARF is used as predictor for all frames, and is only updated on
// key frame. Sync point every 8 frames.
// Layer 0: predict from L and ARF, update L and G.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_ARF;
// Layer 1: sync point: predict from L and ARF, and update G.
layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
// Layer 0, predict from L and ARF, update L.
layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
// Layer 1: predict from L, G and ARF, and update G.
layer_flags[3] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY;
// Layer 0.
layer_flags[4] = layer_flags[2];
// Layer 1.
layer_flags[5] = layer_flags[3];
// Layer 0.
layer_flags[6] = layer_flags[4];
// Layer 1.
layer_flags[7] = layer_flags[5];
break;
}
case 9: {
// 3-layers: Sync points for layer 1 and 2 every 8 frames.
int ids[4] = {0, 2, 1, 2};
cfg->ts_periodicity = 4;
*flag_periodicity = 8;
cfg->ts_number_layers = 3;
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, 2=ARF.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF;
layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF;
layer_flags[3] =
layer_flags[5] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF;
layer_flags[4] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF |
VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
layer_flags[6] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
layer_flags[7] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_ENTROPY;
break;
}
case 10: {
// 3-layers structure where ARF is used as predictor for all frames,
// and is only updated on key frame.
// Sync points for layer 1 and 2 every 8 frames.
int ids[4] = {0, 2, 1, 2};
cfg->ts_periodicity = 4;
*flag_periodicity = 8;
cfg->ts_number_layers = 3;
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, 2=ARF.
// Layer 0: predict from L and ARF; update L and G.
layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF;
// Layer 2: sync point: predict from L and ARF; update none.
layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY;
// Layer 1: sync point: predict from L and ARF; update G.
layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST;
// Layer 2: predict from L, G, ARF; update none.
layer_flags[3] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY;
// Layer 0: predict from L and ARF; update L.
layer_flags[4] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF;
// Layer 2: predict from L, G, ARF; update none.
layer_flags[5] = layer_flags[3];
// Layer 1: predict from L, G, ARF; update G.
layer_flags[6] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
// Layer 2: predict from L, G, ARF; update none.
layer_flags[7] = layer_flags[3];
break;
}
case 11:
default: {
// 3-layers structure as in case 10, but no sync/refresh points for
// layer 1 and 2.
int ids[4] = {0, 2, 1, 2};
cfg->ts_periodicity = 4;
*flag_periodicity = 8;
cfg->ts_number_layers = 3;
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
memcpy(cfg->ts_layer_id, ids, sizeof(ids));
// 0=L, 1=GF, 2=ARF.
// Layer 0: predict from L and ARF; update L.
layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF;
layer_flags[4] = layer_flags[0];
// Layer 1: predict from L, G, ARF; update G.
layer_flags[2] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
layer_flags[6] = layer_flags[2];
// Layer 2: predict from L, G, ARF; update none.
layer_flags[1] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY;
layer_flags[3] = layer_flags[1];
layer_flags[5] = layer_flags[1];
layer_flags[7] = layer_flags[1];
break;
}
}
}
int main(int argc, char **argv) {
VpxVideoWriter *outfile[VPX_TS_MAX_LAYERS] = {NULL};
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
int frame_cnt = 0;
vpx_image_t raw;
vpx_codec_err_t res;
unsigned int width;
unsigned int height;
int speed;
int frame_avail;
int got_data = 0;
int flags = 0;
unsigned int i;
int pts = 0; // PTS starts at 0.
int frame_duration = 1; // 1 timebase tick per frame.
int layering_mode = 0;
int layer_flags[VPX_TS_MAX_PERIODICITY] = {0};
int flag_periodicity = 1;
vpx_svc_layer_id_t layer_id = {0, 0};
const VpxInterface *encoder = NULL;
FILE *infile = NULL;
struct RateControlMetrics rc;
int64_t cx_time = 0;
const int min_args_base = 11;
#if CONFIG_VP9_HIGHBITDEPTH
vpx_bit_depth_t bit_depth = VPX_BITS_8;
int input_bit_depth = 8;
const int min_args = min_args_base + 1;
#else
const int min_args = min_args_base;
#endif // CONFIG_VP9_HIGHBITDEPTH
exec_name = argv[0];
// Check usage and arguments.
if (argc < min_args) {
#if CONFIG_VP9_HIGHBITDEPTH
die("Usage: %s <infile> <outfile> <codec_type(vp8/vp9)> <width> <height> "
"<rate_num> <rate_den> <speed> <frame_drop_threshold> <mode> "
"<Rate_0> ... <Rate_nlayers-1> <bit-depth> \n", argv[0]);
#else
die("Usage: %s <infile> <outfile> <codec_type(vp8/vp9)> <width> <height> "
"<rate_num> <rate_den> <speed> <frame_drop_threshold> <mode> "
"<Rate_0> ... <Rate_nlayers-1> \n", argv[0]);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
encoder = get_vpx_encoder_by_name(argv[3]);
if (!encoder)
die("Unsupported codec.");
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
width = strtol(argv[4], NULL, 0);
height = strtol(argv[5], NULL, 0);
if (width < 16 || width % 2 || height < 16 || height % 2) {
die("Invalid resolution: %d x %d", width, height);
}
layering_mode = strtol(argv[10], NULL, 0);
if (layering_mode < 0 || layering_mode > 11) {
die("Invalid layering mode (0..11) %s", argv[10]);
}
if (argc != min_args + mode_to_num_layers[layering_mode]) {
die("Invalid number of arguments");
}
#if CONFIG_VP9_HIGHBITDEPTH
switch (strtol(argv[argc-1], NULL, 0)) {
case 8:
bit_depth = VPX_BITS_8;
input_bit_depth = 8;
break;
case 10:
bit_depth = VPX_BITS_10;
input_bit_depth = 10;
break;
case 12:
bit_depth = VPX_BITS_12;
input_bit_depth = 12;
break;
default:
die("Invalid bit depth (8, 10, 12) %s", argv[argc-1]);
}
if (!vpx_img_alloc(&raw,
bit_depth == VPX_BITS_8 ? VPX_IMG_FMT_I420 :
VPX_IMG_FMT_I42016,
width, height, 32)) {
die("Failed to allocate image", width, height);
}
#else
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, width, height, 32)) {
die("Failed to allocate image", width, height);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
// Populate encoder configuration.
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res) {
printf("Failed to get config: %s\n", vpx_codec_err_to_string(res));
return EXIT_FAILURE;
}
// Update the default configuration with our settings.
cfg.g_w = width;
cfg.g_h = height;
#if CONFIG_VP9_HIGHBITDEPTH
if (bit_depth != VPX_BITS_8) {
cfg.g_bit_depth = bit_depth;
cfg.g_input_bit_depth = input_bit_depth;
cfg.g_profile = 2;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
// Timebase format, e.g., 30 fps: numerator = 1, denominator = 30.
cfg.g_timebase.num = strtol(argv[6], NULL, 0);
cfg.g_timebase.den = strtol(argv[7], NULL, 0);
speed = strtol(argv[8], NULL, 0);
if (speed < 0) {
die("Invalid speed setting: must be positive");
}
for (i = min_args_base;
(int)i < min_args_base + mode_to_num_layers[layering_mode];
++i) {
cfg.ts_target_bitrate[i - 11] = strtol(argv[i], NULL, 0);
}
// Real time parameters.
cfg.rc_dropframe_thresh = strtol(argv[9], NULL, 0);
cfg.rc_end_usage = VPX_CBR;
cfg.rc_resize_allowed = 0;
cfg.rc_min_quantizer = 2;
cfg.rc_max_quantizer = 56;
cfg.rc_undershoot_pct = 50;
cfg.rc_overshoot_pct = 50;
cfg.rc_buf_initial_sz = 500;
cfg.rc_buf_optimal_sz = 600;
cfg.rc_buf_sz = 1000;
// Enable error resilient mode.
cfg.g_error_resilient = 1;
cfg.g_lag_in_frames = 0;
cfg.kf_mode = VPX_KF_AUTO;
// Effectively disable periodic keyframes by using a very large interval.
cfg.kf_min_dist = cfg.kf_max_dist = 3000;
set_temporal_layer_pattern(layering_mode,
&cfg,
layer_flags,
&flag_periodicity);
set_rate_control_metrics(&rc, &cfg);
// Target bandwidth for the whole stream.
// Set to ts_target_bitrate for highest layer (total bitrate).
cfg.rc_target_bitrate = cfg.ts_target_bitrate[cfg.ts_number_layers - 1];
// Open input file.
if (!(infile = fopen(argv[1], "rb"))) {
die("Failed to open %s for reading", argv[1]);
}
// Open an output file for each stream.
for (i = 0; i < cfg.ts_number_layers; ++i) {
char file_name[PATH_MAX];
VpxVideoInfo info;
info.codec_fourcc = encoder->fourcc;
info.frame_width = cfg.g_w;
info.frame_height = cfg.g_h;
info.time_base.numerator = cfg.g_timebase.num;
info.time_base.denominator = cfg.g_timebase.den;
snprintf(file_name, sizeof(file_name), "%s_%d.ivf", argv[2], i);
outfile[i] = vpx_video_writer_open(file_name, kContainerIVF, &info);
if (!outfile[i])
die("Failed to open %s for writing", file_name);
assert(outfile[i] != NULL);
}
// No spatial layers in this encoder.
cfg.ss_number_layers = 1;
// Initialize codec.
#if CONFIG_VP9_HIGHBITDEPTH
if (vpx_codec_enc_init(
&codec, encoder->codec_interface(), &cfg,
bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH))
#else
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
#endif // CONFIG_VP9_HIGHBITDEPTH
die_codec(&codec, "Failed to initialize encoder");
if (strncmp(encoder->name, "vp8", 3) == 0) {
vpx_codec_control(&codec, VP8E_SET_CPUUSED, -speed);
vpx_codec_control(&codec, VP8E_SET_NOISE_SENSITIVITY, kDenoiserOnYOnly);
} else if (strncmp(encoder->name, "vp9", 3) == 0) {
vpx_codec_control(&codec, VP8E_SET_CPUUSED, speed);
vpx_codec_control(&codec, VP9E_SET_AQ_MODE, 3);
vpx_codec_control(&codec, VP9E_SET_FRAME_PERIODIC_BOOST, 0);
vpx_codec_control(&codec, VP9E_SET_NOISE_SENSITIVITY, 0);
if (vpx_codec_control(&codec, VP9E_SET_SVC, 1)) {
die_codec(&codec, "Failed to set SVC");
}
}
vpx_codec_control(&codec, VP8E_SET_STATIC_THRESHOLD, 1);
vpx_codec_control(&codec, VP8E_SET_TOKEN_PARTITIONS, 1);
// This controls the maximum target size of the key frame.
// For generating smaller key frames, use a smaller max_intra_size_pct
// value, like 100 or 200.
{
const int max_intra_size_pct = 200;
vpx_codec_control(&codec, VP8E_SET_MAX_INTRA_BITRATE_PCT,
max_intra_size_pct);
}
frame_avail = 1;
while (frame_avail || got_data) {
struct vpx_usec_timer timer;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt;
// Update the temporal layer_id. No spatial layers in this test.
layer_id.spatial_layer_id = 0;
layer_id.temporal_layer_id =
cfg.ts_layer_id[frame_cnt % cfg.ts_periodicity];
if (strncmp(encoder->name, "vp9", 3) == 0) {
vpx_codec_control(&codec, VP9E_SET_SVC_LAYER_ID, &layer_id);
}
flags = layer_flags[frame_cnt % flag_periodicity];
frame_avail = vpx_img_read(&raw, infile);
if (frame_avail)
++rc.layer_input_frames[layer_id.temporal_layer_id];
vpx_usec_timer_start(&timer);
if (vpx_codec_encode(&codec, frame_avail? &raw : NULL, pts, 1, flags,
VPX_DL_REALTIME)) {
die_codec(&codec, "Failed to encode frame");
}
vpx_usec_timer_mark(&timer);
cx_time += vpx_usec_timer_elapsed(&timer);
// Reset KF flag.
if (layering_mode != 7) {
layer_flags[0] &= ~VPX_EFLAG_FORCE_KF;
}
got_data = 0;
while ( (pkt = vpx_codec_get_cx_data(&codec, &iter)) ) {
got_data = 1;
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT:
for (i = cfg.ts_layer_id[frame_cnt % cfg.ts_periodicity];
i < cfg.ts_number_layers; ++i) {
vpx_video_writer_write_frame(outfile[i], pkt->data.frame.buf,
pkt->data.frame.sz, pts);
++rc.layer_tot_enc_frames[i];
rc.layer_encoding_bitrate[i] += 8.0 * pkt->data.frame.sz;
// Keep count of rate control stats per layer (for non-key frames).
if (i == cfg.ts_layer_id[frame_cnt % cfg.ts_periodicity] &&
!(pkt->data.frame.flags & VPX_FRAME_IS_KEY)) {
rc.layer_avg_frame_size[i] += 8.0 * pkt->data.frame.sz;
rc.layer_avg_rate_mismatch[i] +=
fabs(8.0 * pkt->data.frame.sz - rc.layer_pfb[i]) /
rc.layer_pfb[i];
++rc.layer_enc_frames[i];
}
}
break;
default:
break;
}
}
++frame_cnt;
pts += frame_duration;
}
fclose(infile);
printout_rate_control_summary(&rc, &cfg, frame_cnt);
printf("\n");
printf("Frame cnt and encoding time/FPS stats for encoding: %d %f %f \n",
frame_cnt,
1000 * (float)cx_time / (double)(frame_cnt * 1000000),
1000000 * (double)frame_cnt / (double)cx_time);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec");
// Try to rewrite the output file headers with the actual frame count.
for (i = 0; i < cfg.ts_number_layers; ++i)
vpx_video_writer_close(outfile[i]);
vpx_img_free(&raw);
return EXIT_SUCCESS;
}

@ -115,7 +115,6 @@ ifeq ($(CONFIG_VP9_ENCODER),yes)
CODEC_EXPORTS-yes += $(addprefix $(VP9_PREFIX),$(VP9_CX_EXPORTS))
CODEC_SRCS-yes += $(VP9_PREFIX)vp9cx.mk vpx/vp8.h vpx/vp8cx.h
INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8cx.h
INSTALL-LIBS-$(CONFIG_SPATIAL_SVC) += include/vpx/svc_context.h
INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP9_PREFIX)/%
CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8cx.h
CODEC_DOC_SECTIONS += vp9 vp9_encoder

@ -218,442 +218,5 @@ TEST_P(DatarateTestLarge, ChangingDropFrameThresh) {
}
}
class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
public:
DatarateTestVP9Large() : EncoderTest(GET_PARAM(0)) {}
protected:
virtual ~DatarateTestVP9Large() {}
virtual void SetUp() {
InitializeConfig();
SetMode(GET_PARAM(1));
set_cpu_used_ = GET_PARAM(2);
ResetModel();
}
virtual void ResetModel() {
last_pts_ = 0;
bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
frame_number_ = 0;
tot_frame_number_ = 0;
first_drop_ = 0;
num_drops_ = 0;
// Denoiser is off by default.
denoiser_on_ = 0;
// For testing up to 3 layers.
for (int i = 0; i < 3; ++i) {
bits_total_[i] = 0;
}
}
//
// Frame flags and layer id for temporal layers.
//
// For two layers, test pattern is:
// 1 3
// 0 2 .....
// For three layers, test pattern is:
// 1 3 5 7
// 2 6
// 0 4 ....
// LAST is always updated on the base layer (layer 0), GOLDEN is updated on layer 1.
// For this 3 layer example, the 2nd enhancement layer (layer 2) does not
// update any reference frames.
int SetFrameFlags(int frame_num, int num_temp_layers) {
int frame_flags = 0;
if (num_temp_layers == 2) {
if (frame_num % 2 == 0) {
// Layer 0: predict from L and ARF, update L.
frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
} else {
// Layer 1: predict from L, G and ARF, and update G.
frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY;
}
} else if (num_temp_layers == 3) {
if (frame_num % 4 == 0) {
// Layer 0: predict from L and ARF; update L.
frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF;
} else if ((frame_num - 2) % 4 == 0) {
// Layer 1: predict from L, G, ARF; update G.
frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
} else if ((frame_num - 1) % 2 == 0) {
// Layer 2: predict from L, G, ARF; update none.
frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST;
}
}
return frame_flags;
}
int SetLayerId(int frame_num, int num_temp_layers) {
int layer_id = 0;
if (num_temp_layers == 2) {
if (frame_num % 2 == 0) {
layer_id = 0;
} else {
layer_id = 1;
}
} else if (num_temp_layers == 3) {
if (frame_num % 4 == 0) {
layer_id = 0;
} else if ((frame_num - 2) % 4 == 0) {
layer_id = 1;
} else if ((frame_num - 1) % 2 == 0) {
layer_id = 2;
}
}
return layer_id;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
}
if (cfg_.ts_number_layers > 1) {
if (video->frame() == 1) {
encoder->Control(VP9E_SET_SVC, 1);
}
vpx_svc_layer_id_t layer_id = {0, 0};
layer_id.spatial_layer_id = 0;
frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
layer_id.temporal_layer_id = SetLayerId(video->frame(),
cfg_.ts_number_layers);
if (video->frame() > 0) {
encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
}
}
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
duration_ = 0;
}
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
// Time since last timestamp = duration.
vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
if (duration > 1) {
// If first drop not set and we have a drop set it to this time.
if (!first_drop_)
first_drop_ = last_pts_ + 1;
// Update the number of frame drops.
num_drops_ += static_cast<int>(duration - 1);
// Update counter for total number of frames (#frames input to encoder).
// Needed for setting the proper layer_id below.
tot_frame_number_ += static_cast<int>(duration - 1);
}
int layer = SetLayerId(tot_frame_number_, cfg_.ts_number_layers);
// Add to the buffer the bits we'd expect from a constant bitrate server.
bits_in_buffer_model_ += static_cast<int64_t>(
duration * timebase_ * cfg_.rc_target_bitrate * 1000);
// Buffer should not go negative.
ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
<< pkt->data.frame.pts;
const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
// Update the total encoded bits. For temporal layers, update the cumulative
// encoded bits per layer.
for (int i = layer; i < static_cast<int>(cfg_.ts_number_layers); ++i) {
bits_total_[i] += frame_size_in_bits;
}
// Update the most recent pts.
last_pts_ = pkt->data.frame.pts;
++frame_number_;
++tot_frame_number_;
}
virtual void EndPassHook(void) {
for (int layer = 0; layer < static_cast<int>(cfg_.ts_number_layers);
++layer) {
duration_ = (last_pts_ + 1) * timebase_;
if (bits_total_[layer]) {
// Effective file datarate:
effective_datarate_[layer] = (bits_total_[layer] / 1000.0) / duration_;
}
}
}
vpx_codec_pts_t last_pts_;
double timebase_;
int frame_number_; // Counter for number of non-dropped/encoded frames.
int tot_frame_number_; // Counter for total number of input frames.
int64_t bits_total_[3];
double duration_;
double effective_datarate_[3];
int set_cpu_used_;
int64_t bits_in_buffer_model_;
vpx_codec_pts_t first_drop_;
int num_drops_;
int denoiser_on_;
};
// Check basic rate targeting.
TEST_P(DatarateTestVP9Large, BasicRateTargeting) {
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_min_quantizer = 0;
cfg_.rc_max_quantizer = 63;
cfg_.rc_end_usage = VPX_CBR;
cfg_.g_lag_in_frames = 0;
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 140);
for (int i = 150; i < 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
<< " The datarate for the file is lower than target by too much!";
ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
<< " The datarate for the file is greater than target by too much!";
}
}
// Check basic rate targeting.
TEST_P(DatarateTestVP9Large, BasicRateTargeting444) {
::libvpx_test::Y4mVideoSource video("rush_hour_444.y4m", 0, 140);
cfg_.g_profile = 1;
cfg_.g_timebase = video.timebase();
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_min_quantizer = 0;
cfg_.rc_max_quantizer = 63;
cfg_.rc_end_usage = VPX_CBR;
for (int i = 250; i < 900; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
ASSERT_GE(static_cast<double>(cfg_.rc_target_bitrate),
effective_datarate_[0] * 0.85)
<< " The datarate for the file exceeds the target by too much!";
ASSERT_LE(static_cast<double>(cfg_.rc_target_bitrate),
effective_datarate_[0] * 1.15)
<< " The datarate for the file missed the target!"
<< cfg_.rc_target_bitrate << " "<< effective_datarate_;
}
}
// Check that (1) the first dropped frame gets earlier and earlier
// as the drop frame threshold is increased, and (2) that the total number of
// frame drops does not decrease as we increase frame drop threshold.
// Use a lower qp-max to force some frame drops.
TEST_P(DatarateTestVP9Large, ChangingDropFrameThresh) {
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
cfg_.rc_undershoot_pct = 20;
cfg_.rc_dropframe_thresh = 10;
cfg_.rc_min_quantizer = 0;
cfg_.rc_max_quantizer = 50;
cfg_.rc_end_usage = VPX_CBR;
cfg_.rc_target_bitrate = 200;
cfg_.g_lag_in_frames = 0;
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 140);
const int kDropFrameThreshTestStep = 30;
vpx_codec_pts_t last_drop = 140;
int last_num_drops = 0;
for (int i = 10; i < 100; i += kDropFrameThreshTestStep) {
cfg_.rc_dropframe_thresh = i;
ResetModel();
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
<< " The datarate for the file is lower than target by too much!";
ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
<< " The datarate for the file is greater than target by too much!";
ASSERT_LE(first_drop_, last_drop)
<< " The first dropped frame for drop_thresh " << i
<< " > first dropped frame for drop_thresh "
<< i - kDropFrameThreshTestStep;
ASSERT_GE(num_drops_, last_num_drops)
<< " The number of dropped frames for drop_thresh " << i
<< " < number of dropped frames for drop_thresh "
<< i - kDropFrameThreshTestStep;
last_drop = first_drop_;
last_num_drops = num_drops_;
}
}
// Check basic rate targeting for 2 temporal layers.
TEST_P(DatarateTestVP9Large, BasicRateTargeting2TemporalLayers) {
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_min_quantizer = 0;
cfg_.rc_max_quantizer = 63;
cfg_.rc_end_usage = VPX_CBR;
cfg_.g_lag_in_frames = 0;
// 2 Temporal layers, no spatial layers: Framerate decimation (2, 1).
cfg_.ss_number_layers = 1;
cfg_.ts_number_layers = 2;
cfg_.ts_rate_decimator[0] = 2;
cfg_.ts_rate_decimator[1] = 1;
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 60-40 bitrate allocation for 2 temporal layers.
cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
}
}
// Check basic rate targeting for 3 temporal layers.
TEST_P(DatarateTestVP9Large, BasicRateTargeting3TemporalLayers) {
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_min_quantizer = 0;
cfg_.rc_max_quantizer = 63;
cfg_.rc_end_usage = VPX_CBR;
cfg_.g_lag_in_frames = 0;
// 3 Temporal layers, no spatial layers: Framerate decimation (4, 2, 1).
cfg_.ss_number_layers = 1;
cfg_.ts_number_layers = 3;
cfg_.ts_rate_decimator[0] = 4;
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
// TODO(yaowu): Work out a more stable rc control strategy and adjust the
// thresholds to be tighter than 0.75.
ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.75)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
// TODO(yaowu): Work out a more stable rc control strategy and adjust the
// thresholds to be tighter than 1.25.
ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.25)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
}
}
// Check basic rate targeting for 3 temporal layers, with frame dropping.
// Only for one (low) bitrate with lower max_quantizer, and somewhat higher
// frame drop threshold, to force frame dropping.
TEST_P(DatarateTestVP9Large, BasicRateTargeting3TemporalLayersFrameDropping) {
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
// Set frame drop threshold and rc_max_quantizer to force some frame drops.
cfg_.rc_dropframe_thresh = 20;
cfg_.rc_max_quantizer = 45;
cfg_.rc_min_quantizer = 0;
cfg_.rc_end_usage = VPX_CBR;
cfg_.g_lag_in_frames = 0;
// 3 Temporal layers, no spatial layers: Framerate decimation (4, 2, 1).
cfg_.ss_number_layers = 1;
cfg_.ts_number_layers = 3;
cfg_.ts_rate_decimator[0] = 4;
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
cfg_.rc_target_bitrate = 200;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
// Expect some frame drops in this test: for this 200-frame test,
// expect at least 10% and not more than about 65% drops.
ASSERT_GE(num_drops_, 20);
ASSERT_LE(num_drops_, 130);
}
}
#if CONFIG_VP9_TEMPORAL_DENOISING
// Check basic datarate targeting, for a single bitrate, when denoiser is on.
TEST_P(DatarateTestVP9Large, DenoiserLevels) {
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_min_quantizer = 2;
cfg_.rc_max_quantizer = 56;
cfg_.rc_end_usage = VPX_CBR;
cfg_.g_lag_in_frames = 0;
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 140);
// For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING), there is
// currently only one denoiser mode: denoiserYonly (which is 1); more modes
// may be added in the future.
cfg_.rc_target_bitrate = 300;
ResetModel();
// Turn on the denoiser.
denoiser_on_ = 1;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
<< " The datarate for the file is lower than target by too much!";
ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
<< " The datarate for the file is greater than target by too much!";
}
#endif // CONFIG_VP9_TEMPORAL_DENOISING
VP8_INSTANTIATE_TEST_CASE(DatarateTestLarge, ALL_TEST_MODES);
VP9_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
::testing::Values(::libvpx_test::kOnePassGood,
::libvpx_test::kRealTime),
::testing::Range(2, 7));
} // namespace

@ -126,11 +126,6 @@ class Encoder {
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
void Control(int ctrl_id, struct vpx_svc_layer_id *arg) {
const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER
void Control(int ctrl_id, vpx_active_map_t *arg) {
const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);

@ -1,740 +0,0 @@
/*
* Copyright (c) 2013 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/i420_video_source.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
namespace {
using libvpx_test::CodecFactory;
using libvpx_test::Decoder;
using libvpx_test::DxDataIterator;
using libvpx_test::VP9CodecFactory;
class SvcTest : public ::testing::Test {
protected:
static const uint32_t kWidth = 352;
static const uint32_t kHeight = 288;
SvcTest()
: codec_iface_(0),
test_file_name_("hantro_collage_w352h288.yuv"),
codec_initialized_(false),
decoder_(0) {
memset(&svc_, 0, sizeof(svc_));
memset(&codec_, 0, sizeof(codec_));
memset(&codec_enc_, 0, sizeof(codec_enc_));
}
virtual ~SvcTest() {}
virtual void SetUp() {
svc_.log_level = SVC_LOG_DEBUG;
svc_.log_print = 0;
codec_iface_ = vpx_codec_vp9_cx();
const vpx_codec_err_t res =
vpx_codec_enc_config_default(codec_iface_, &codec_enc_, 0);
EXPECT_EQ(VPX_CODEC_OK, res);
codec_enc_.g_w = kWidth;
codec_enc_.g_h = kHeight;
codec_enc_.g_timebase.num = 1;
codec_enc_.g_timebase.den = 60;
codec_enc_.kf_min_dist = 100;
codec_enc_.kf_max_dist = 100;
vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
VP9CodecFactory codec_factory;
decoder_ = codec_factory.CreateDecoder(dec_cfg, 0);
}
virtual void TearDown() {
ReleaseEncoder();
delete decoder_;
}
void InitializeEncoder() {
const vpx_codec_err_t res =
vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_OK, res);
vpx_codec_control(&codec_, VP8E_SET_CPUUSED, 4); // Make the test faster
codec_initialized_ = true;
}
void ReleaseEncoder() {
vpx_svc_release(&svc_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
codec_initialized_ = false;
}
void GetStatsData(std::string *const stats_buf) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
cx_pkt->data.twopass_stats.sz);
}
}
}
void Pass1EncodeNFrames(const int n, const int layers,
std::string *const stats_buf) {
vpx_codec_err_t res;
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.g_pass = VPX_RC_FIRST_PASS;
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
video.Next();
}
// Flush encoder and test EOS packet.
res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
ReleaseEncoder();
}
void StoreFrames(const size_t max_frame_received,
struct vpx_fixed_buf *const outputs,
size_t *const frame_received) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const size_t frame_size = cx_pkt->data.frame.sz;
EXPECT_GT(frame_size, 0U);
ASSERT_TRUE(cx_pkt->data.frame.buf != NULL);
ASSERT_LT(*frame_received, max_frame_received);
if (*frame_received == 0)
EXPECT_EQ(1, !!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY));
outputs[*frame_received].buf = malloc(frame_size + 16);
ASSERT_TRUE(outputs[*frame_received].buf != NULL);
memcpy(outputs[*frame_received].buf, cx_pkt->data.frame.buf,
frame_size);
outputs[*frame_received].sz = frame_size;
++(*frame_received);
}
}
}
void Pass2EncodeNFrames(std::string *const stats_buf,
const int n, const int layers,
struct vpx_fixed_buf *const outputs) {
vpx_codec_err_t res;
size_t frame_received = 0;
ASSERT_TRUE(outputs != NULL);
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.rc_target_bitrate = 500;
if (codec_enc_.g_pass == VPX_RC_LAST_PASS) {
ASSERT_TRUE(stats_buf != NULL);
ASSERT_GT(stats_buf->size(), 0U);
codec_enc_.rc_twopass_stats_in.buf = &(*stats_buf)[0];
codec_enc_.rc_twopass_stats_in.sz = stats_buf->size();
}
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
video.Next();
}
// Flush encoder.
res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
video.duration(), VPX_DL_GOOD_QUALITY);
EXPECT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
EXPECT_EQ(frame_received, static_cast<size_t>(n));
ReleaseEncoder();
}
void DecodeNFrames(const struct vpx_fixed_buf *const inputs, const int n) {
int decoded_frames = 0;
int received_frames = 0;
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
const vpx_codec_err_t res_dec =
decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
inputs[i].sz);
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
++decoded_frames;
DxDataIterator dec_iter = decoder_->GetDxData();
while (dec_iter.Next() != NULL) {
++received_frames;
}
}
EXPECT_EQ(decoded_frames, n);
EXPECT_EQ(received_frames, n);
}
void DropEnhancementLayers(struct vpx_fixed_buf *const inputs,
const int num_super_frames,
const int remained_spatial_layers) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(num_super_frames, 0);
ASSERT_GT(remained_spatial_layers, 0);
for (int i = 0; i < num_super_frames; ++i) {
uint32_t frame_sizes[8] = {0};
int frame_count = 0;
int frames_found = 0;
int frame;
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
vpx_codec_err_t res =
vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
inputs[i].sz, frame_sizes, &frame_count,
NULL, NULL);
ASSERT_EQ(VPX_CODEC_OK, res);
if (frame_count == 0) {
// There's no super frame but only a single frame.
ASSERT_EQ(1, remained_spatial_layers);
} else {
// Found a super frame.
uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
uint8_t *frame_start = frame_data;
for (frame = 0; frame < frame_count; ++frame) {
// Looking for a visible frame.
if (frame_data[0] & 0x02) {
++frames_found;
if (frames_found == remained_spatial_layers)
break;
}
frame_data += frame_sizes[frame];
}
ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
<< "remained_spatial_layers: " << remained_spatial_layers
<< " super_frame: " << i;
if (frame == frame_count - 1)
continue;
frame_data += frame_sizes[frame];
// We need to add one more frame for multiple frame contexts.
uint8_t marker =
static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
const uint32_t mag = ((marker >> 3) & 0x3) + 1;
const size_t index_sz = 2 + mag * frame_count;
const size_t new_index_sz = 2 + mag * (frame + 1);
marker &= 0x0f8;
marker |= frame;
// Copy existing frame sizes.
memmove(frame_data + 1, frame_start + inputs[i].sz - index_sz + 1,
new_index_sz - 2);
// New marker.
frame_data[0] = marker;
frame_data += (mag * (frame + 1) + 1);
*frame_data++ = marker;
inputs[i].sz = frame_data - frame_start;
}
}
}
void FreeBitstreamBuffers(struct vpx_fixed_buf *const inputs, const int n) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
free(inputs[i].buf);
inputs[i].buf = NULL;
inputs[i].sz = 0;
}
}
SvcContext svc_;
vpx_codec_ctx_t codec_;
struct vpx_codec_enc_cfg codec_enc_;
vpx_codec_iface_t *codec_iface_;
std::string test_file_name_;
bool codec_initialized_;
Decoder *decoder_;
};
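DropEnhancementLayers() above relies on the VP9 superframe index layout: the last byte of a chunk is a marker whose low three bits store the frame count minus one and whose next two bits store the per-frame size-field width minus one, so the trailing index occupies 2 + mag * frames bytes. A small decoding sketch, assuming that standard layout (the struct and function names are illustrative):

#include <stddef.h>
#include <stdint.h>

struct SuperframeIndex {
  int frames;       // number of frames packed in the superframe
  int mag;          // bytes used to store each frame size (1-4)
  size_t index_sz;  // total size of the trailing index, in bytes
};

// Decodes the trailing marker byte of a VP9 chunk. Returns false if the
// chunk does not end in a superframe index.
static bool ParseSuperframeMarker(const uint8_t *data, size_t sz,
                                  SuperframeIndex *out) {
  if (sz == 0) return false;
  const uint8_t marker = data[sz - 1];
  if ((marker & 0xe0) != 0xc0) return false;   // 110xxxxx signals an index
  out->frames = (marker & 0x7) + 1;
  out->mag = ((marker >> 3) & 0x3) + 1;
  out->index_sz = 2 + out->mag * out->frames;  // marker + sizes + marker
  return true;
}

DropEnhancementLayers() rewrites both copies of that marker and truncates the size list so only the retained layers survive in each superframe.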
TEST_F(SvcTest, SvcInit) {
// test missing parameters
vpx_codec_err_t res = vpx_svc_init(NULL, &codec_, codec_iface_, &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_init(&svc_, NULL, codec_iface_, &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_init(&svc_, &codec_, NULL, &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_init(&svc_, &codec_, codec_iface_, NULL);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
svc_.spatial_layers = 6; // too many layers
res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
svc_.spatial_layers = 0; // use default layers
InitializeEncoder();
EXPECT_EQ(VPX_SS_DEFAULT_LAYERS, svc_.spatial_layers);
}
TEST_F(SvcTest, InitTwoLayers) {
svc_.spatial_layers = 2;
InitializeEncoder();
}
TEST_F(SvcTest, InvalidOptions) {
vpx_codec_err_t res = vpx_svc_set_options(&svc_, NULL);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "not-an-option=1");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
}
TEST_F(SvcTest, SetLayersOption) {
vpx_codec_err_t res = vpx_svc_set_options(&svc_, "spatial-layers=3");
EXPECT_EQ(VPX_CODEC_OK, res);
InitializeEncoder();
EXPECT_EQ(3, svc_.spatial_layers);
}
TEST_F(SvcTest, SetMultipleOptions) {
vpx_codec_err_t res =
vpx_svc_set_options(&svc_, "spatial-layers=2 scale-factors=1/3,2/3");
EXPECT_EQ(VPX_CODEC_OK, res);
InitializeEncoder();
EXPECT_EQ(2, svc_.spatial_layers);
}
TEST_F(SvcTest, SetScaleFactorsOption) {
svc_.spatial_layers = 2;
vpx_codec_err_t res =
vpx_svc_set_options(&svc_, "scale-factors=not-scale-factors");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "scale-factors=1/3, 3*3");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "scale-factors=1/3");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "scale-factors=1/3,2/3");
EXPECT_EQ(VPX_CODEC_OK, res);
InitializeEncoder();
}
TEST_F(SvcTest, SetQuantizersOption) {
svc_.spatial_layers = 2;
vpx_codec_err_t res = vpx_svc_set_options(&svc_, "max-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "min-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "max-quantizers=40");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "min-quantizers=40");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "max-quantizers=30,30 min-quantizers=40,40");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "max-quantizers=40,40 min-quantizers=30,30");
InitializeEncoder();
}
TEST_F(SvcTest, SetAutoAltRefOption) {
svc_.spatial_layers = 5;
vpx_codec_err_t res = vpx_svc_set_options(&svc_, "auto-alt-refs=none");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
res = vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1,1,0");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
InitializeEncoder();
}
// Test that decoder can handle an SVC frame as the first frame in a sequence.
TEST_F(SvcTest, OnePassEncodeOneFrame) {
codec_enc_.g_pass = VPX_RC_ONE_PASS;
vpx_fixed_buf output = {0};
Pass2EncodeNFrames(NULL, 1, 2, &output);
DecodeNFrames(&output, 1);
FreeBitstreamBuffers(&output, 1);
}
TEST_F(SvcTest, OnePassEncodeThreeFrames) {
codec_enc_.g_pass = VPX_RC_ONE_PASS;
vpx_fixed_buf outputs[3];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(NULL, 3, 2, &outputs[0]);
DecodeNFrames(&outputs[0], 3);
FreeBitstreamBuffers(&outputs[0], 3);
}
TEST_F(SvcTest, TwoPassEncode10Frames) {
// First pass encode
std::string stats_buf;
Pass1EncodeNFrames(10, 2, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest, TwoPassEncode20FramesWithAltRef) {
// First pass encode
std::string stats_buf;
Pass1EncodeNFrames(20, 2, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
vpx_fixed_buf outputs[20];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
DecodeNFrames(&outputs[0], 20);
FreeBitstreamBuffers(&outputs[0], 20);
}
TEST_F(SvcTest, TwoPassEncode2SpatialLayersDecodeBaseLayerOnly) {
// First pass encode
std::string stats_buf;
Pass1EncodeNFrames(10, 2, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
DropEnhancementLayers(&outputs[0], 10, 1);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest, TwoPassEncode5SpatialLayersDecode54321Layers) {
// First pass encode
std::string stats_buf;
Pass1EncodeNFrames(10, 5, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 5, &outputs[0]);
DecodeNFrames(&outputs[0], 10);
DropEnhancementLayers(&outputs[0], 10, 4);
DecodeNFrames(&outputs[0], 10);
DropEnhancementLayers(&outputs[0], 10, 3);
DecodeNFrames(&outputs[0], 10);
DropEnhancementLayers(&outputs[0], 10, 2);
DecodeNFrames(&outputs[0], 10);
DropEnhancementLayers(&outputs[0], 10, 1);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest, TwoPassEncode2SNRLayers) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
Pass1EncodeNFrames(20, 2, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
vpx_svc_set_options(&svc_,
"auto-alt-refs=1,1 scale-factors=1/1,1/1");
vpx_fixed_buf outputs[20];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
DecodeNFrames(&outputs[0], 20);
FreeBitstreamBuffers(&outputs[0], 20);
}
TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
Pass1EncodeNFrames(20, 3, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
vpx_svc_set_options(&svc_,
"auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1");
vpx_fixed_buf outputs[20];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 20, 3, &outputs[0]);
DecodeNFrames(&outputs[0], 20);
DropEnhancementLayers(&outputs[0], 20, 2);
DecodeNFrames(&outputs[0], 20);
DropEnhancementLayers(&outputs[0], 20, 1);
DecodeNFrames(&outputs[0], 20);
FreeBitstreamBuffers(&outputs[0], 20);
}
TEST_F(SvcTest, SetMultipleFrameContextsOption) {
svc_.spatial_layers = 5;
vpx_codec_err_t res =
vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
svc_.spatial_layers = 2;
res = vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
InitializeEncoder();
}
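The layer limit exercised here follows from the frame-context mapping in the encoder code removed later in this commit (see the frame_context_idx computation in vp9_encoder.c): each (spatial, temporal) layer pair gets its own frame context, with the last context reserved for the empty frame. That is presumably why five spatial layers are rejected while two are accepted. A hedged sketch of that capacity check (the helper name is illustrative; VP9 defines four frame contexts):

enum { FRAME_CONTEXTS = 4 };  // VP9 provides four frame contexts.

// With multi-frame-contexts enabled, every (spatial, temporal) layer pair
// needs its own context and one context is kept for the empty frame, so
// the product of the layer counts must fit in FRAME_CONTEXTS - 1.
static bool LayersFitFrameContexts(int spatial_layers, int temporal_layers) {
  return spatial_layers * temporal_layers <= FRAME_CONTEXTS - 1;
}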
TEST_F(SvcTest, TwoPassEncode2SpatialLayersWithMultipleFrameContexts) {
// First pass encode
std::string stats_buf;
Pass1EncodeNFrames(10, 2, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
codec_enc_.g_error_resilient = 0;
vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest,
TwoPassEncode2SpatialLayersWithMultipleFrameContextsDecodeBaselayer) {
// First pass encode
std::string stats_buf;
Pass1EncodeNFrames(10, 2, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
codec_enc_.g_error_resilient = 0;
vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
DropEnhancementLayers(&outputs[0], 10, 1);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest, TwoPassEncode2SNRLayersWithMultipleFrameContexts) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
Pass1EncodeNFrames(10, 2, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
codec_enc_.g_error_resilient = 0;
vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1 "
"multi-frame-contexts=1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest,
TwoPassEncode3SNRLayersWithMultipleFrameContextsDecode321Layer) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
Pass1EncodeNFrames(10, 3, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
codec_enc_.g_error_resilient = 0;
vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 "
"multi-frame-contexts=1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 3, &outputs[0]);
DecodeNFrames(&outputs[0], 10);
DropEnhancementLayers(&outputs[0], 10, 2);
DecodeNFrames(&outputs[0], 10);
DropEnhancementLayers(&outputs[0], 10, 1);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest, TwoPassEncode2TemporalLayers) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1");
svc_.temporal_layers = 2;
Pass1EncodeNFrames(10, 1, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
svc_.temporal_layers = 2;
vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContexts) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1");
svc_.temporal_layers = 2;
Pass1EncodeNFrames(10, 1, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
svc_.temporal_layers = 2;
codec_enc_.g_error_resilient = 0;
vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
"multi-frame-contexts=1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
DecodeNFrames(&outputs[0], 10);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest, TwoPassEncode2TemporalLayersDecodeBaseLayer) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1");
svc_.temporal_layers = 2;
Pass1EncodeNFrames(10, 1, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
svc_.temporal_layers = 2;
vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
vpx_fixed_buf base_layer[5];
for (int i = 0; i < 5; ++i)
base_layer[i] = outputs[i * 2];
DecodeNFrames(&base_layer[0], 5);
FreeBitstreamBuffers(&outputs[0], 10);
}
TEST_F(SvcTest,
TwoPassEncode2TemporalLayersWithMultipleFrameContextsDecodeBaseLayer) {
// First pass encode
std::string stats_buf;
vpx_svc_set_options(&svc_, "scale-factors=1/1");
svc_.temporal_layers = 2;
Pass1EncodeNFrames(10, 1, &stats_buf);
// Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
svc_.temporal_layers = 2;
codec_enc_.g_error_resilient = 0;
vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
"multi-frame-contexts=1");
vpx_fixed_buf outputs[10];
memset(&outputs[0], 0, sizeof(outputs));
Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
vpx_fixed_buf base_layer[5];
for (int i = 0; i < 5; ++i)
base_layer[i] = outputs[i * 2];
DecodeNFrames(&base_layer[0], 5);
FreeBitstreamBuffers(&outputs[0], 10);
}
} // namespace
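The two DecodeBaseLayer tests above recover the base temporal layer by keeping every other encoded superframe, which is the pattern for two dyadic temporal layers. A hedged generalization, assuming the usual dyadic layering where the base layer repeats every 2^(layers-1) frames (the helper name is illustrative):

#include <vector>
#include "vpx/vpx_encoder.h"

// Extracts base-temporal-layer superframes from a full sequence of outputs,
// assuming dyadic temporal layering: the base layer is every
// 2^(temporal_layers - 1)-th frame, so two layers keep frames 0, 2, 4, ...
static std::vector<vpx_fixed_buf> ExtractBaseTemporalLayer(
    const vpx_fixed_buf *outputs, int n, int temporal_layers) {
  std::vector<vpx_fixed_buf> base;
  const int period = 1 << (temporal_layers - 1);
  for (int i = 0; i < n; i += period) base.push_back(outputs[i]);
  return base;
}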

View File

@ -140,10 +140,6 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += quantize_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += error_block_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9) += vp9_intrapred_test.cc
ifeq ($(CONFIG_VP9_ENCODER),yes)
LIBVPX_TEST_SRCS-$(CONFIG_SPATIAL_SVC) += svc_test.cc
endif
ifeq ($(CONFIG_VP9_ENCODER)$(CONFIG_VP9_TEMPORAL_DENOISING),yesyes)
LIBVPX_TEST_SRCS-$(HAVE_SSE2) += vp9_denoiser_sse2_test.cc
endif

View File

@ -1,72 +0,0 @@
#!/bin/sh
##
## Copyright (c) 2014 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
## This file tests the libvpx vp9_spatial_svc_encoder example. To add new
## tests to this file, do the following:
## 1. Write a shell function (this is your test).
## 2. Add the function to vp9_spatial_svc_tests (on a new line).
##
. $(dirname $0)/tools_common.sh
# Environment check: $YUV_RAW_INPUT is required.
vp9_spatial_svc_encoder_verify_environment() {
if [ ! -e "${YUV_RAW_INPUT}" ]; then
echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
return 1
fi
}
# Runs vp9_spatial_svc_encoder. $1 is the test name.
vp9_spatial_svc_encoder() {
local readonly \
encoder="${LIBVPX_BIN_PATH}/vp9_spatial_svc_encoder${VPX_TEST_EXE_SUFFIX}"
local readonly test_name="$1"
local readonly \
output_file="${VPX_TEST_OUTPUT_DIR}/vp9_ssvc_encoder${test_name}.ivf"
local readonly frames_to_encode=10
local readonly max_kf=9999
shift
if [ ! -x "${encoder}" ]; then
elog "${encoder} does not exist or is not executable."
return 1
fi
eval "${VPX_TEST_PREFIX}" "${encoder}" -w "${YUV_RAW_INPUT_WIDTH}" \
-h "${YUV_RAW_INPUT_HEIGHT}" -k "${max_kf}" -f "${frames_to_encode}" \
"$@" "${YUV_RAW_INPUT}" "${output_file}" ${devnull}
[ -e "${output_file}" ] || return 1
}
# Each test is run with layer count 1-$vp9_ssvc_test_layers.
vp9_ssvc_test_layers=5
vp9_spatial_svc() {
if [ "$(vp9_encode_available)" = "yes" ]; then
local readonly test_name="vp9_spatial_svc"
for layers in $(seq 1 ${vp9_ssvc_test_layers}); do
vp9_spatial_svc_encoder "${test_name}" -l ${layers}
done
fi
}
readonly vp9_spatial_svc_tests="DISABLED_vp9_spatial_svc_mode_i
DISABLED_vp9_spatial_svc_mode_altip
DISABLED_vp9_spatial_svc_mode_ip
DISABLED_vp9_spatial_svc_mode_gf
vp9_spatial_svc"
if [ "$(vpx_config_option_enabled CONFIG_SPATIAL_SVC)" = "yes" ]; then
run_tests \
vp9_spatial_svc_encoder_verify_environment \
"${vp9_spatial_svc_tests}"
fi

View File

@ -1,290 +0,0 @@
#!/bin/sh
##
## Copyright (c) 2014 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
## This file tests the libvpx vpx_temporal_svc_encoder example. To add new
## tests to this file, do the following:
## 1. Write a shell function (this is your test).
## 2. Add the function to vpx_tsvc_encoder_tests (on a new line).
##
. $(dirname $0)/tools_common.sh
# Environment check: $YUV_RAW_INPUT is required.
vpx_tsvc_encoder_verify_environment() {
if [ ! -e "${YUV_RAW_INPUT}" ]; then
echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
return 1
fi
if [ "$(vpx_config_option_enabled CONFIG_TEMPORAL_DENOISING)" != "yes" ]; then
elog "Warning: Temporal denoising is disabled! Spatial denoising will be " \
"used instead, which is probably not what you want for this test."
fi
}
# Runs vpx_temporal_svc_encoder using the codec specified by $1 and output file
# name by $2. Additional positional parameters are passed directly to
# vpx_temporal_svc_encoder.
vpx_tsvc_encoder() {
local encoder="${LIBVPX_BIN_PATH}/vpx_temporal_svc_encoder"
encoder="${encoder}${VPX_TEST_EXE_SUFFIX}"
local codec="$1"
local output_file_base="$2"
local output_file="${VPX_TEST_OUTPUT_DIR}/${output_file_base}"
local timebase_num="1"
local timebase_den="1000"
local speed="6"
local frame_drop_thresh="30"
shift 2
if [ ! -x "${encoder}" ]; then
elog "${encoder} does not exist or is not executable."
return 1
fi
eval "${VPX_TEST_PREFIX}" "${encoder}" "${YUV_RAW_INPUT}" "${output_file}" \
"${codec}" "${YUV_RAW_INPUT_WIDTH}" "${YUV_RAW_INPUT_HEIGHT}" \
"${timebase_num}" "${timebase_den}" "${speed}" "${frame_drop_thresh}" \
"$@" \
${devnull}
}
# Confirms that all expected output files exist given the output file name
# passed to vpx_temporal_svc_encoder.
# The file name passed to vpx_temporal_svc_encoder is joined with the stream
# number and the extension .ivf to produce per stream output files. Here $1 is
# the file name, and $2 is the expected number of files.
files_exist() {
local file_name="${VPX_TEST_OUTPUT_DIR}/$1"
local num_files="$(($2 - 1))"
for stream_num in $(seq 0 ${num_files}); do
[ -e "${file_name}_${stream_num}.ivf" ] || return 1
done
}
# Run vpx_temporal_svc_encoder in all supported modes for vp8 and vp9.
vpx_tsvc_encoder_vp8_mode_0() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 0 200 || return 1
# Mode 0 produces 1 stream
files_exist "${FUNCNAME}" 1 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_1() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 1 200 400 || return 1
# Mode 1 produces 2 streams
files_exist "${FUNCNAME}" 2 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_2() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 2 200 400 || return 1
# Mode 2 produces 2 streams
files_exist "${FUNCNAME}" 2 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_3() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 3 200 400 600 || return 1
# Mode 3 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_4() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 4 200 400 600 || return 1
# Mode 4 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_5() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 5 200 400 600 || return 1
# Mode 5 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_6() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 6 200 400 600 || return 1
# Mode 6 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_7() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 7 200 400 600 800 1000 || return 1
# Mode 7 produces 5 streams
files_exist "${FUNCNAME}" 5 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_8() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 8 200 400 || return 1
# Mode 8 produces 2 streams
files_exist "${FUNCNAME}" 2 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_9() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 9 200 400 600 || return 1
# Mode 9 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_10() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 10 200 400 600 || return 1
# Mode 10 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp8_mode_11() {
if [ "$(vp8_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp8 "${FUNCNAME}" 11 200 400 600 || return 1
# Mode 11 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_0() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 0 200 || return 1
# Mode 0 produces 1 stream
files_exist "${FUNCNAME}" 1 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_1() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 1 200 400 || return 1
# Mode 1 produces 2 streams
files_exist "${FUNCNAME}" 2 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_2() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 2 200 400 || return 1
# Mode 2 produces 2 streams
files_exist "${FUNCNAME}" 2 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_3() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 3 200 400 600 || return 1
# Mode 3 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_4() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 4 200 400 600 || return 1
# Mode 4 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_5() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 5 200 400 600 || return 1
# Mode 5 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_6() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 6 200 400 600 || return 1
# Mode 6 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_7() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 7 200 400 600 800 1000 || return 1
# Mode 7 produces 5 streams
files_exist "${FUNCNAME}" 5 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_8() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 8 200 400 || return 1
# Mode 8 produces 2 streams
files_exist "${FUNCNAME}" 2 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_9() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 9 200 400 600 || return 1
# Mode 9 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_10() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 10 200 400 600 || return 1
# Mode 10 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_vp9_mode_11() {
if [ "$(vp9_encode_available)" = "yes" ]; then
vpx_tsvc_encoder vp9 "${FUNCNAME}" 11 200 400 600 || return 1
# Mode 11 produces 3 streams
files_exist "${FUNCNAME}" 3 || return 1
fi
}
vpx_tsvc_encoder_tests="vpx_tsvc_encoder_vp8_mode_0
vpx_tsvc_encoder_vp8_mode_1
vpx_tsvc_encoder_vp8_mode_2
vpx_tsvc_encoder_vp8_mode_3
vpx_tsvc_encoder_vp8_mode_4
vpx_tsvc_encoder_vp8_mode_5
vpx_tsvc_encoder_vp8_mode_6
vpx_tsvc_encoder_vp8_mode_7
vpx_tsvc_encoder_vp8_mode_8
vpx_tsvc_encoder_vp8_mode_9
vpx_tsvc_encoder_vp8_mode_10
vpx_tsvc_encoder_vp8_mode_11
vpx_tsvc_encoder_vp9_mode_0
vpx_tsvc_encoder_vp9_mode_1
vpx_tsvc_encoder_vp9_mode_2
vpx_tsvc_encoder_vp9_mode_3
vpx_tsvc_encoder_vp9_mode_4
vpx_tsvc_encoder_vp9_mode_5
vpx_tsvc_encoder_vp9_mode_6
vpx_tsvc_encoder_vp9_mode_7
vpx_tsvc_encoder_vp9_mode_8
vpx_tsvc_encoder_vp9_mode_9
vpx_tsvc_encoder_vp9_mode_10
vpx_tsvc_encoder_vp9_mode_11"
run_tests vpx_tsvc_encoder_verify_environment "${vpx_tsvc_encoder_tests}"

View File

@ -183,10 +183,8 @@ void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) {
struct segmentation *const seg = &cm->seg;
unsigned char *const seg_map = cpi->segmentation_map;
const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc);
// Don't apply refresh on key frame or enhancement layer frames.
if (!apply_cyclic_refresh ||
(cm->frame_type == KEY_FRAME) ||
(cpi->svc.temporal_layer_id > 0)) {
// Don't apply refresh on key frame.
if (!apply_cyclic_refresh || cm->frame_type == KEY_FRAME) {
// Set segmentation map to 0 and disable.
vpx_memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
vp9_disable_segmentation(&cm->seg);

View File

@ -2159,18 +2159,6 @@ static void write_frame_size_with_refs(VP9_COMP *cpi,
found = cm->width == cfg->y_crop_width &&
cm->height == cfg->y_crop_height;
// Set "found" to 0 for temporal svc and for spatial svc key frame
if (cpi->use_svc &&
((cpi->svc.number_temporal_layers > 1 &&
cpi->oxcf.rc_mode == VPX_CBR) ||
(cpi->svc.number_spatial_layers > 1 &&
cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) ||
(is_two_pass_svc(cpi) &&
cpi->svc.encode_empty_frame_state == ENCODING &&
cpi->svc.layer_context[0].frames_from_key_frame <
cpi->svc.number_temporal_layers + 1))) {
found = 0;
}
vp9_wb_write_bit(wb, found);
if (found) {
break;
@ -2252,14 +2240,6 @@ static void write_uncompressed_header(VP9_COMP *cpi,
write_bitdepth_colorspace_sampling(cm, wb);
write_frame_size(cm, wb);
} else {
// In spatial svc if it's not error_resilient_mode then we need to code all
// visible frames as invisible. But we need to keep the show_frame flag so
// that the publisher could know whether it is supposed to be visible.
// So we will code the show_frame flag as it is. Then code the intra_only
// bit here. This will make the bitstream incompatible. In the player we
// will change the show_frame flag to 0, then add a one-byte frame with
// show_existing_frame flag which tells the decoder which frame we want to
// show.
if (!cm->show_frame)
vp9_wb_write_bit(wb, cm->intra_only);

View File

@ -24,12 +24,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size);
static INLINE int vp9_preserve_existing_gf(VP9_COMP *cpi) {
return !cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
cpi->rc.is_src_frame_alt_ref &&
(!cpi->use_svc || // Add spatial svc base layer case here
(is_two_pass_svc(cpi) &&
cpi->svc.spatial_layer_id == 0 &&
cpi->svc.layer_context[0].gold_ref_idx >=0 &&
cpi->oxcf.ss_enable_auto_arf[0]));
cpi->rc.is_src_frame_alt_ref;
}
#ifdef __cplusplus

View File

@ -48,7 +48,6 @@
#endif
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vp9/encoder/vp9_resize.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
void vp9_coef_tree_initialize();
@ -124,13 +123,11 @@ static void setup_frame(VP9_COMP *cpi) {
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
vp9_setup_past_independence(cm);
} else {
if (!cpi->use_svc)
cm->frame_context_idx = cpi->refresh_alt_ref_frame;
cm->frame_context_idx = cpi->refresh_alt_ref_frame;
}
if (cm->frame_type == KEY_FRAME) {
if (!is_two_pass_svc(cpi))
cpi->refresh_golden_frame = 1;
cpi->refresh_golden_frame = 1;
cpi->refresh_alt_ref_frame = 1;
vp9_zero(cpi->interp_filter_selected);
} else {
@ -158,7 +155,6 @@ void vp9_initialize_enc() {
static void dealloc_compressor_data(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
int i;
// Delete segmentation map
vpx_free(cpi->segmentation_map);
@ -218,26 +214,10 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
vp9_free_pc_tree(cpi);
for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
LAYER_CONTEXT *const lc = &cpi->svc.layer_context[i];
vpx_free(lc->rc_twopass_stats_in.buf);
lc->rc_twopass_stats_in.buf = NULL;
lc->rc_twopass_stats_in.sz = 0;
}
if (cpi->source_diff_var != NULL) {
vpx_free(cpi->source_diff_var);
cpi->source_diff_var = NULL;
}
for (i = 0; i < MAX_LAG_BUFFERS; ++i) {
vp9_free_frame_buffer(&cpi->svc.scaled_frames[i]);
}
vpx_memset(&cpi->svc.scaled_frames[0], 0,
MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
vp9_free_frame_buffer(&cpi->svc.empty_frame.img);
vpx_memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
}
static void save_coding_context(VP9_COMP *cpi) {
@ -550,18 +530,6 @@ static void update_frame_size(VP9_COMP *cpi) {
vp9_set_mb_mi(cm, cm->width, cm->height);
vp9_init_context_buffers(cm);
init_macroblockd(cm, xd);
if (is_two_pass_svc(cpi)) {
if (vp9_realloc_frame_buffer(&cpi->alt_ref_buffer,
cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to reallocate alt_ref_buffer");
}
}
void vp9_new_framerate(VP9_COMP *cpi, double framerate) {
@ -632,18 +600,6 @@ static void init_config(struct VP9_COMP *cpi, VP9EncoderConfig *oxcf) {
cm->height = oxcf->height;
vp9_alloc_compressor_data(cpi);
// Spatial scalability.
cpi->svc.number_spatial_layers = oxcf->ss_number_layers;
// Temporal scalability.
cpi->svc.number_temporal_layers = oxcf->ts_number_layers;
if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
((cpi->svc.number_temporal_layers > 1 ||
cpi->svc.number_spatial_layers > 1) &&
cpi->oxcf.pass == 2)) {
vp9_init_layer_context(cpi);
}
// change includes all joint functionality
vp9_change_config(cpi, oxcf);
@ -1556,15 +1512,6 @@ void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) {
}
update_frame_size(cpi);
if ((cpi->svc.number_temporal_layers > 1 &&
cpi->oxcf.rc_mode == VPX_CBR) ||
((cpi->svc.number_temporal_layers > 1 ||
cpi->svc.number_spatial_layers > 1) &&
cpi->oxcf.pass == 2)) {
vp9_update_layer_context_change_config(cpi,
(int)cpi->oxcf.target_bandwidth);
}
cpi->alt_ref_source = NULL;
rc->is_src_frame_alt_ref = 0;
@ -1656,8 +1603,6 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf) {
cm->error.setjmp = 1;
cpi->use_svc = 0;
init_config(cpi, oxcf);
vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
@ -1803,63 +1748,24 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
if (cpi->svc.number_spatial_layers > 1
|| cpi->svc.number_temporal_layers > 1) {
FIRSTPASS_STATS *const stats = oxcf->two_pass_stats_in.buf;
FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = {0};
int i;
for (i = 0; i < oxcf->ss_number_layers; ++i) {
FIRSTPASS_STATS *const last_packet_for_layer =
&stats[packets - oxcf->ss_number_layers + i];
const int layer_id = (int)last_packet_for_layer->spatial_layer_id;
const int packets_in_layer = (int)last_packet_for_layer->count + 1;
if (layer_id >= 0 && layer_id < oxcf->ss_number_layers) {
LAYER_CONTEXT *const lc = &cpi->svc.layer_context[layer_id];
vpx_free(lc->rc_twopass_stats_in.buf);
lc->rc_twopass_stats_in.sz = packets_in_layer * packet_sz;
CHECK_MEM_ERROR(cm, lc->rc_twopass_stats_in.buf,
vpx_malloc(lc->rc_twopass_stats_in.sz));
lc->twopass.stats_in_start = lc->rc_twopass_stats_in.buf;
lc->twopass.stats_in = lc->twopass.stats_in_start;
lc->twopass.stats_in_end = lc->twopass.stats_in_start
+ packets_in_layer - 1;
stats_copy[layer_id] = lc->rc_twopass_stats_in.buf;
}
}
for (i = 0; i < packets; ++i) {
const int layer_id = (int)stats[i].spatial_layer_id;
if (layer_id >= 0 && layer_id < oxcf->ss_number_layers
&& stats_copy[layer_id] != NULL) {
*stats_copy[layer_id] = stats[i];
++stats_copy[layer_id];
}
}
vp9_init_second_pass_spatial_svc(cpi);
} else {
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
const size_t psz = cpi->common.MBs * sizeof(uint8_t);
const int ps = (int)(oxcf->firstpass_mb_stats_in.sz / psz);
if (cpi->use_fp_mb_stats) {
const size_t psz = cpi->common.MBs * sizeof(uint8_t);
const int ps = (int)(oxcf->firstpass_mb_stats_in.sz / psz);
cpi->twopass.firstpass_mb_stats.mb_stats_start =
oxcf->firstpass_mb_stats_in.buf;
cpi->twopass.firstpass_mb_stats.mb_stats_end =
cpi->twopass.firstpass_mb_stats.mb_stats_start +
(ps - 1) * cpi->common.MBs * sizeof(uint8_t);
}
cpi->twopass.firstpass_mb_stats.mb_stats_start =
oxcf->firstpass_mb_stats_in.buf;
cpi->twopass.firstpass_mb_stats.mb_stats_end =
cpi->twopass.firstpass_mb_stats.mb_stats_start +
(ps - 1) * cpi->common.MBs * sizeof(uint8_t);
}
#endif
cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
vp9_init_second_pass(cpi);
}
vp9_init_second_pass(cpi);
}
vp9_set_speed_features(cpi);
@ -2321,10 +2227,7 @@ static void generate_psnr_packet(VP9_COMP *cpi) {
pkt.data.psnr.psnr[i] = psnr.psnr[i];
}
pkt.kind = VPX_CODEC_PSNR_PKT;
if (is_two_pass_svc(cpi))
cpi->svc.layer_context[cpi->svc.spatial_layer_id].psnr_pkt = pkt.data.psnr;
else
vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
int vp9_use_as_reference(VP9_COMP *cpi, int ref_frame_flags) {
@ -2638,11 +2541,6 @@ void vp9_update_reference_frames(VP9_COMP *cpi) {
tmp = cpi->alt_fb_idx;
cpi->alt_fb_idx = cpi->gld_fb_idx;
cpi->gld_fb_idx = tmp;
if (is_two_pass_svc(cpi)) {
cpi->svc.layer_context[0].gold_ref_idx = cpi->gld_fb_idx;
cpi->svc.layer_context[0].alt_ref_idx = cpi->alt_fb_idx;
}
} else { /* For non key/golden frames */
if (cpi->refresh_alt_ref_frame) {
int arf_idx = cpi->alt_fb_idx;
@ -3122,7 +3020,7 @@ static int get_ref_frame_flags(const VP9_COMP *cpi) {
if (gold_is_last)
flags &= ~VP9_GOLD_FLAG;
if (cpi->rc.frames_till_gf_update_due == INT_MAX && !is_two_pass_svc(cpi))
if (cpi->rc.frames_till_gf_update_due == INT_MAX)
flags &= ~VP9_GOLD_FLAG;
if (alt_is_last)
@ -3172,9 +3070,7 @@ static int is_skippable_frame(const VP9_COMP *cpi) {
// first pass, and so do its previous and forward frames, then this frame
// can be skipped for partition check, and the partition size is assigned
// according to the variance
const SVC *const svc = &cpi->svc;
const TWO_PASS *const twopass = is_two_pass_svc(cpi) ?
&svc->layer_context[svc->spatial_layer_id].twopass : &cpi->twopass;
const TWO_PASS *const twopass = &cpi->twopass;
return (!frame_is_intra_only(&cpi->common) &&
twopass->stats_in - 2 > twopass->stats_in_start &&
@ -3319,37 +3215,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
cm->reset_frame_context = 2;
}
}
if (is_two_pass_svc(cpi) && cm->error_resilient_mode == 0) {
// Use the last frame context for the empty frame.
cm->frame_context_idx =
(cpi->svc.encode_empty_frame_state == ENCODING) ? FRAME_CONTEXTS - 1 :
cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers +
cpi->svc.temporal_layer_id;
// The probs will be updated based on the frame type of its previous
// frame if frame_parallel_decoding_mode is 0. The type may vary for
// the frame after a key frame in base layer since we may drop enhancement
// layers. So set frame_parallel_decoding_mode to 1 in this case.
if (cpi->svc.number_temporal_layers == 1) {
if (cpi->svc.spatial_layer_id == 0 &&
cpi->svc.layer_context[0].last_frame_type == KEY_FRAME)
cm->frame_parallel_decoding_mode = 1;
else
cm->frame_parallel_decoding_mode = 0;
} else if (cpi->svc.spatial_layer_id == 0) {
// Find the 2nd frame in temporal base layer and 1st frame in temporal
// enhancement layers from the key frame.
int i;
for (i = 0; i < cpi->svc.number_temporal_layers; ++i) {
if (cpi->svc.layer_context[0].frames_from_key_frame == 1 << i) {
cm->frame_parallel_decoding_mode = 1;
break;
}
}
if (i == cpi->svc.number_temporal_layers)
cm->frame_parallel_decoding_mode = 0;
}
}
// Configure experimental use of segmentation for enhanced coding of
// static regions if indicated.
@ -3360,8 +3225,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// Check if the current frame is skippable for the partition search in the
// second pass according to the first pass stats
if (cpi->sf.allow_partition_search_skip && oxcf->pass == 2 &&
(!cpi->use_svc || is_two_pass_svc(cpi))) {
if (cpi->sf.allow_partition_search_skip && oxcf->pass == 2) {
cpi->partition_search_skippable_frame = is_skippable_frame(cpi);
}
@ -3517,8 +3381,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
cm->lf.last_bilateral_level = 0;
#endif
if (!(is_two_pass_svc(cpi) && cpi->svc.encode_empty_frame_state == ENCODING))
vp9_rc_postencode_update(cpi, *size);
vp9_rc_postencode_update(cpi, *size);
#if 0
output_frame_level_debug_stats(cpi);
@ -3551,19 +3414,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// Don't increment frame counters if this was an altref buffer
// update, not a real frame
++cm->current_video_frame;
if (cpi->use_svc)
vp9_inc_frame_in_layer(cpi);
}
if (is_two_pass_svc(cpi))
cpi->svc.layer_context[cpi->svc.spatial_layer_id].last_frame_type =
cm->frame_type;
}
static void SvcEncode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
unsigned int *frame_flags) {
vp9_rc_get_svc_params(cpi);
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
}
static void Pass0Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
@ -3580,9 +3431,7 @@ static void Pass2Encode(VP9_COMP *cpi, size_t *size,
uint8_t *dest, unsigned int *frame_flags) {
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
if (!(is_two_pass_svc(cpi) && cpi->svc.encode_empty_frame_state == ENCODING))
vp9_twopass_postencode_update(cpi);
vp9_twopass_postencode_update(cpi);
}
static void init_motion_estimation(VP9_COMP *cpi) {
@ -3767,25 +3616,12 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
MV_REFERENCE_FRAME ref_frame;
int arf_src_index;
if (is_two_pass_svc(cpi)) {
#if CONFIG_SPATIAL_SVC
vp9_svc_start_frame(cpi);
// Use a small empty frame instead of a real frame
if (cpi->svc.encode_empty_frame_state == ENCODING)
source = &cpi->svc.empty_frame;
#endif
if (oxcf->pass == 2)
vp9_restore_layer_context(cpi);
}
vpx_usec_timer_start(&cmptimer);
vp9_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
// Is multi-arf enabled.
// Note that at the moment multi_arf is only configured for 2 pass VBR and
// will not work properly with svc.
if ((oxcf->pass == 2) && !cpi->use_svc &&
if ((oxcf->pass == 2) &&
(cpi->oxcf.enable_auto_arf > 1) && (cpi->oxcf.rc_mode == VPX_VBR))
cpi->multi_arf_allowed = 1;
else
@ -3802,30 +3638,12 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
// Should we encode an arf frame.
arf_src_index = get_arf_src_index(cpi);
// Skip alt frame if we encode the empty frame
if (is_two_pass_svc(cpi) && source != NULL)
arf_src_index = 0;
if (arf_src_index) {
assert(arf_src_index <= rc->frames_to_key);
if ((source = vp9_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
cpi->alt_ref_source = source;
#if CONFIG_SPATIAL_SVC
if (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0) {
int i;
// Reference a hidden frame from a lower layer
for (i = cpi->svc.spatial_layer_id - 1; i >= 0; --i) {
if (oxcf->ss_enable_auto_arf[i]) {
cpi->gld_fb_idx = cpi->svc.layer_context[i].alt_ref_idx;
break;
}
}
}
cpi->svc.layer_context[cpi->svc.spatial_layer_id].has_alt_frame = 1;
#endif
if (oxcf->arnr_max_frames > 0) {
// Produce the filtered ARF frame.
vp9_temporal_filter(cpi, arf_src_index);
@ -3852,11 +3670,6 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
}
// Read in the source frame.
#if CONFIG_SPATIAL_SVC
if (is_two_pass_svc(cpi))
source = vp9_svc_lookahead_pop(cpi, cpi->lookahead, flush);
else
#endif
source = vp9_lookahead_pop(cpi->lookahead, flush);
if (source != NULL) {
cm->show_frame = 1;
@ -3899,12 +3712,6 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
adjust_frame_rate(cpi, source);
}
if (cpi->svc.number_temporal_layers > 1 &&
oxcf->rc_mode == VPX_CBR) {
vp9_update_temporal_layer_framerate(cpi);
vp9_restore_layer_context(cpi);
}
// start with a 0 size frame
*size = 0;
@ -3916,14 +3723,11 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
// For two pass encodes analyse the first pass stats and determine
// the bit allocation and other parameters for this frame / group of frames.
if ((oxcf->pass == 2) &&
(!cpi->use_svc ||
(is_two_pass_svc(cpi) &&
cpi->svc.encode_empty_frame_state != ENCODING))) {
if (oxcf->pass == 2) {
vp9_rc_get_second_pass_params(cpi);
}
if (!cpi->use_svc && cpi->multi_arf_allowed) {
if (cpi->multi_arf_allowed) {
if (cm->frame_type == KEY_FRAME) {
init_buffer_indices(cpi);
} else if (oxcf->pass == 2) {
@ -3982,8 +3786,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
vp9_vaq_init();
}
if (oxcf->pass == 1 &&
(!cpi->use_svc || is_two_pass_svc(cpi))) {
if (oxcf->pass == 1) {
const int lossless = is_lossless_requested(oxcf);
#if CONFIG_VP9_HIGHBITDEPTH
if (cpi->oxcf.use_highbitdepth)
@ -3997,11 +3800,8 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
#endif // CONFIG_VP9_HIGHBITDEPTH
cpi->mb.itxm_add = lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
vp9_first_pass(cpi, source);
} else if (oxcf->pass == 2 &&
(!cpi->use_svc || is_two_pass_svc(cpi))) {
} else if (oxcf->pass == 2) {
Pass2Encode(cpi, size, dest, frame_flags);
} else if (cpi->use_svc) {
SvcEncode(cpi, size, dest, frame_flags);
} else {
// One pass encode
Pass0Encode(cpi, size, dest, frame_flags);
@ -4019,15 +3819,6 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
cpi->droppable = !frame_is_reference(cpi);
}
// Save layer specific state.
if ((cpi->svc.number_temporal_layers > 1 &&
oxcf->rc_mode == VPX_CBR) ||
((cpi->svc.number_temporal_layers > 1 ||
cpi->svc.number_spatial_layers > 1) &&
oxcf->pass == 2)) {
vp9_save_layer_context(cpi);
}
vpx_usec_timer_mark(&cmptimer);
cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
@ -4145,22 +3936,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
}
}
}
#endif
if (is_two_pass_svc(cpi)) {
if (cpi->svc.encode_empty_frame_state == ENCODING)
cpi->svc.encode_empty_frame_state = ENCODED;
if (cm->show_frame) {
++cpi->svc.spatial_layer_to_encode;
if (cpi->svc.spatial_layer_to_encode >= cpi->svc.number_spatial_layers)
cpi->svc.spatial_layer_to_encode = 0;
// May need the empty frame after a visible frame.
cpi->svc.encode_empty_frame_state = NEED_TO_ENCODE;
}
}
return 0;
}
@ -4271,11 +4047,6 @@ int vp9_set_size_literal(VP9_COMP *cpi, unsigned int width,
return 0;
}
void vp9_set_svc(VP9_COMP *cpi, int use_svc) {
cpi->use_svc = use_svc;
return;
}
int vp9_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);

View File

@ -32,7 +32,6 @@
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_speed_features.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_variance.h"
#if CONFIG_VP9_TEMPORAL_DENOISING
@ -176,16 +175,6 @@ typedef struct VP9EncoderConfig {
// END DATARATE CONTROL OPTIONS
// ----------------------------------------------------------------
// Spatial and temporal scalability.
int ss_number_layers; // Number of spatial layers.
int ts_number_layers; // Number of temporal layers.
// Bitrate allocation for spatial layers.
int ss_target_bitrate[VPX_SS_MAX_LAYERS];
int ss_enable_auto_arf[VPX_SS_MAX_LAYERS];
// Bitrate allocation (CBR mode) and framerate factor, for temporal layers.
int ts_target_bitrate[VPX_TS_MAX_LAYERS];
int ts_rate_decimator[VPX_TS_MAX_LAYERS];
int enable_auto_arf;
int encode_breakout; // early breakout : for video conf recommend 800
@ -384,10 +373,6 @@ typedef struct VP9_COMP {
int initial_width;
int initial_height;
int use_svc;
SVC svc;
// Store frame variance info in SOURCE_VAR_BASED_PARTITION search type.
diff *source_diff_var;
// The threshold used in SOURCE_VAR_BASED_PARTITION search type.
@ -486,8 +471,6 @@ int vp9_set_internal_size(VP9_COMP *cpi,
int vp9_set_size_literal(VP9_COMP *cpi, unsigned int width,
unsigned int height);
void vp9_set_svc(VP9_COMP *cpi, int use_svc);
int vp9_get_quantizer(struct VP9_COMP *cpi);
static INLINE int get_ref_frame_idx(const VP9_COMP *cpi,
@ -547,18 +530,9 @@ YV12_BUFFER_CONFIG *vp9_scale_if_required(VP9_COMMON *cm,
void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags);
static INLINE int is_two_pass_svc(const struct VP9_COMP *const cpi) {
return cpi->use_svc &&
(cpi->svc.number_temporal_layers > 1 ||
cpi->svc.number_spatial_layers > 1) &&
(cpi->oxcf.pass == 1 || cpi->oxcf.pass == 2);
}
static INLINE int is_altref_enabled(const VP9_COMP *const cpi) {
return cpi->oxcf.mode != REALTIME && cpi->oxcf.lag_in_frames > 0 &&
(cpi->oxcf.enable_auto_arf &&
(!is_two_pass_svc(cpi) ||
cpi->oxcf.ss_enable_auto_arf[cpi->svc.spatial_layer_id]));
cpi->oxcf.enable_auto_arf;
}
static INLINE void set_ref_ptrs(VP9_COMMON *cm, MACROBLOCKD *xd,

View File

@ -52,7 +52,6 @@
#define MIN_GF_INTERVAL 4
#define MIN_KF_BOOST 300
#define NEW_MV_MODE_PENALTY 32
#define SVC_FACTOR_PT_LOW 0.45
#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
@ -162,13 +161,11 @@ static void zero_stats(FIRSTPASS_STATS *section) {
section->new_mv_count = 0.0;
section->count = 0.0;
section->duration = 1.0;
section->spatial_layer_id = 0;
}
static void accumulate_stats(FIRSTPASS_STATS *section,
const FIRSTPASS_STATS *frame) {
section->frame += frame->frame;
section->spatial_layer_id = frame->spatial_layer_id;
section->intra_error += frame->intra_error;
section->coded_error += frame->coded_error;
section->sr_coded_error += frame->sr_coded_error;
@ -243,15 +240,7 @@ void vp9_init_first_pass(VP9_COMP *cpi) {
}
void vp9_end_first_pass(VP9_COMP *cpi) {
if (is_two_pass_svc(cpi)) {
int i;
for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
output_stats(&cpi->svc.layer_context[i].twopass.total_stats,
cpi->output_pkt_list);
}
} else {
output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list);
}
output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list);
}
static vp9_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
@ -478,8 +467,6 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
TWO_PASS *twopass = &cpi->twopass;
const MV zero_mv = {0, 0};
const YV12_BUFFER_CONFIG *first_ref_buf = lst_yv12;
LAYER_CONTEXT *const lc = is_two_pass_svc(cpi) ?
&cpi->svc.layer_context[cpi->svc.spatial_layer_id] : NULL;
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
@ -492,57 +479,6 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
set_first_pass_params(cpi);
vp9_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
if (lc != NULL) {
twopass = &lc->twopass;
cpi->lst_fb_idx = cpi->svc.spatial_layer_id;
cpi->ref_frame_flags = VP9_LAST_FLAG;
if (cpi->svc.number_spatial_layers + cpi->svc.spatial_layer_id <
REF_FRAMES) {
cpi->gld_fb_idx =
cpi->svc.number_spatial_layers + cpi->svc.spatial_layer_id;
cpi->ref_frame_flags |= VP9_GOLD_FLAG;
cpi->refresh_golden_frame = (lc->current_video_frame_in_layer == 0);
} else {
cpi->refresh_golden_frame = 0;
}
if (lc->current_video_frame_in_layer == 0)
cpi->ref_frame_flags = 0;
vp9_scale_references(cpi);
// Use either last frame or alt frame for motion search.
if (cpi->ref_frame_flags & VP9_LAST_FLAG) {
first_ref_buf = vp9_get_scaled_ref_frame(cpi, LAST_FRAME);
if (first_ref_buf == NULL)
first_ref_buf = get_ref_frame_buffer(cpi, LAST_FRAME);
}
if (cpi->ref_frame_flags & VP9_GOLD_FLAG) {
const int ref_idx =
cm->ref_frame_map[get_ref_frame_idx(cpi, GOLDEN_FRAME)];
const int scaled_idx = cpi->scaled_ref_idx[GOLDEN_FRAME - 1];
gld_yv12 = (scaled_idx != ref_idx) ? &cm->frame_bufs[scaled_idx].buf :
get_ref_frame_buffer(cpi, GOLDEN_FRAME);
} else {
gld_yv12 = NULL;
}
recon_y_stride = new_yv12->y_stride;
recon_uv_stride = new_yv12->uv_stride;
uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height);
set_ref_ptrs(cm, xd,
(cpi->ref_frame_flags & VP9_LAST_FLAG) ? LAST_FRAME: NONE,
(cpi->ref_frame_flags & VP9_GOLD_FLAG) ? GOLDEN_FRAME : NONE);
cpi->Source = vp9_scale_if_required(cm, cpi->un_scaled_source,
&cpi->scaled_source);
}
vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
vp9_setup_src_planes(x, cpi->Source, 0, 0);
@ -669,8 +605,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + BORDER_MV_PIXELS_B16;
// Other than for the first frame do a motion search.
if ((lc == NULL && cm->current_video_frame > 0) ||
(lc != NULL && lc->current_video_frame_in_layer > 0)) {
if (cm->current_video_frame > 0) {
int tmp_err, motion_error, raw_motion_error;
// Assume 0,0 motion with no mv overhead.
MV mv = {0, 0} , tmp_mv = {0, 0};
@ -711,7 +646,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
#endif // CONFIG_VP9_HIGHBITDEPTH
// TODO(pengchong): Replace the hard-coded threshold
if (raw_motion_error > 25 || lc != NULL) {
if (raw_motion_error > 25) {
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search.
first_pass_motion_search(cpi, x, &best_ref_mv, &mv, &motion_error);
@ -737,9 +672,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
}
// Search in an older reference frame.
if (((lc == NULL && cm->current_video_frame > 1) ||
(lc != NULL && lc->current_video_frame_in_layer > 1))
&& gld_yv12 != NULL) {
if (cm->current_video_frame > 1 && gld_yv12 != NULL) {
// Assume 0,0 motion with no mv overhead.
int gf_motion_error;
@ -946,7 +879,6 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
const double min_err = 200 * sqrt(cm->MBs);
fps.frame = cm->current_video_frame;
fps.spatial_layer_id = cpi->svc.spatial_layer_id;
fps.coded_error = (double)(coded_error >> 8) + min_err;
fps.sr_coded_error = (double)(sr_coded_error >> 8) + min_err;
fps.intra_error = (double)(intra_error >> 8) + min_err;
@ -1011,16 +943,12 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
vp9_extend_frame_borders(new_yv12);
if (lc != NULL) {
vp9_update_reference_frames(cpi);
} else {
// Swap frame pointers so last frame refers to the frame we just compressed.
swap_yv12(lst_yv12, new_yv12);
}
// Swap frame pointers so last frame refers to the frame we just compressed.
swap_yv12(lst_yv12, new_yv12);
// Special case for the first frame. Copy into the GF buffer as a second
// reference.
if (cm->current_video_frame == 0 && gld_yv12 != NULL && lc == NULL) {
if (cm->current_video_frame == 0 && gld_yv12 != NULL) {
vp8_yv12_copy_frame(lst_yv12, gld_yv12);
}
@ -1041,8 +969,6 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
}
++cm->current_video_frame;
if (cpi->use_svc)
vp9_inc_frame_in_layer(cpi);
}
static double calc_correction_factor(double err_per_mb,
@ -1087,16 +1013,12 @@ static int get_twopass_worst_quality(const VP9_COMP *cpi,
BPER_MB_NORMBITS) / num_mbs;
int q;
int is_svc_upper_layer = 0;
if (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0)
is_svc_upper_layer = 1;
// Try and pick a max Q that will be high enough to encode the
// content at the given rate.
for (q = rc->best_quality; q < rc->worst_quality; ++q) {
const double factor =
calc_correction_factor(err_per_mb, ERR_DIVISOR - ediv_size_correction,
is_svc_upper_layer ? SVC_FACTOR_PT_LOW :
FACTOR_PT_LOW, FACTOR_PT_HIGH, q,
cpi->common.bit_depth);
const int bits_per_mb = vp9_rc_bits_per_mb(INTER_FRAME, q,
@ -1116,12 +1038,8 @@ static int get_twopass_worst_quality(const VP9_COMP *cpi,
extern void vp9_new_framerate(VP9_COMP *cpi, double framerate);
void vp9_init_second_pass(VP9_COMP *cpi) {
SVC *const svc = &cpi->svc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
const int is_two_pass_svc = (svc->number_spatial_layers > 1) ||
(svc->number_temporal_layers > 1);
TWO_PASS *const twopass = is_two_pass_svc ?
&svc->layer_context[svc->spatial_layer_id].twopass : &cpi->twopass;
TWO_PASS *const twopass = &cpi->twopass;
double frame_rate;
FIRSTPASS_STATS *stats;
@ -1142,17 +1060,9 @@ void vp9_init_second_pass(VP9_COMP *cpi) {
// encoded in the second pass is a guess. However, the sum duration is not.
// It is calculated based on the actual durations of all frames from the
// first pass.
if (is_two_pass_svc) {
vp9_update_spatial_layer_framerate(cpi, frame_rate);
twopass->bits_left = (int64_t)(stats->duration *
svc->layer_context[svc->spatial_layer_id].target_bandwidth /
10000000.0);
} else {
vp9_new_framerate(cpi, frame_rate);
twopass->bits_left = (int64_t)(stats->duration * oxcf->target_bandwidth /
10000000.0);
}
vp9_new_framerate(cpi, frame_rate);
twopass->bits_left = (int64_t)(stats->duration * oxcf->target_bandwidth /
10000000.0);
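For a sense of scale, and assuming (as the 10000000.0 divisor suggests) that stats->duration is expressed in 1/10,000,000-second ticks: a 60-second clip gives duration = 600,000,000, so at a 500,000 bps target bits_left = 600,000,000 * 500,000 / 10,000,000 = 30,000,000 bits, i.e. 60 seconds' worth of the target rate.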
// This variable monitors how far behind the second ref update is lagging.
twopass->sr_update_lag = 1;
@ -1527,15 +1437,8 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits,
int mid_frame_idx;
unsigned char arf_buffer_indices[MAX_ACTIVE_ARFS];
int alt_frame_index = frame_index;
int has_temporal_layers = is_two_pass_svc(cpi) &&
cpi->svc.number_temporal_layers > 1;
// Only encode alt reference frame in temporal base layer.
if (has_temporal_layers)
alt_frame_index = cpi->svc.number_temporal_layers;
key_frame = cpi->common.frame_type == KEY_FRAME ||
vp9_is_upper_layer_key_frame(cpi);
key_frame = cpi->common.frame_type == KEY_FRAME;
get_arf_buffer_indices(arf_buffer_indices);
@ -1572,20 +1475,14 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits,
gf_group->rf_level[alt_frame_index] = GF_ARF_STD;
gf_group->bit_allocation[alt_frame_index] = gf_arf_bits;
if (has_temporal_layers)
gf_group->arf_src_offset[alt_frame_index] =
(unsigned char)(rc->baseline_gf_interval -
cpi->svc.number_temporal_layers);
else
gf_group->arf_src_offset[alt_frame_index] =
(unsigned char)(rc->baseline_gf_interval - 1);
gf_group->arf_src_offset[alt_frame_index] =
(unsigned char)(rc->baseline_gf_interval - 1);
gf_group->arf_update_idx[alt_frame_index] = arf_buffer_indices[0];
gf_group->arf_ref_idx[alt_frame_index] =
arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
rc->source_alt_ref_active];
if (!has_temporal_layers)
++frame_index;
++frame_index;
if (cpi->multi_arf_enabled) {
// Set aside a slot for a level 1 arf.
@ -1608,10 +1505,6 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits,
if (EOF == input_stats(twopass, &frame_stats))
break;
if (has_temporal_layers && frame_index == alt_frame_index) {
++frame_index;
}
modified_err = calculate_modified_err(twopass, oxcf, &frame_stats);
if (group_error > 0)
@ -1830,21 +1723,6 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
else
rc->baseline_gf_interval = i;
// Only encode alt reference frame in temporal base layer. So
// baseline_gf_interval should be multiple of a temporal layer group
// (typically the frame distance between two base layer frames)
if (is_two_pass_svc(cpi) && cpi->svc.number_temporal_layers > 1) {
int count = (1 << (cpi->svc.number_temporal_layers - 1)) - 1;
int new_gf_interval = (rc->baseline_gf_interval + count) & (~count);
int j;
for (j = 0; j < new_gf_interval - rc->baseline_gf_interval; ++j) {
if (EOF == input_stats(twopass, this_frame))
break;
gf_group_err += calculate_modified_err(twopass, oxcf, this_frame);
}
rc->baseline_gf_interval = new_gf_interval;
}
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
// Should we use the alternate reference frame.
@ -2110,18 +1988,6 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
rc->next_key_frame_forced = 0;
}
if (is_two_pass_svc(cpi) && cpi->svc.number_temporal_layers > 1) {
int count = (1 << (cpi->svc.number_temporal_layers - 1)) - 1;
int new_frame_to_key = (rc->frames_to_key + count) & (~count);
int j;
for (j = 0; j < new_frame_to_key - rc->frames_to_key; ++j) {
if (EOF == input_stats(twopass, this_frame))
break;
kf_group_err += calculate_modified_err(twopass, oxcf, this_frame);
}
rc->frames_to_key = new_frame_to_key;
}
// Special case for the last key frame of the file.
if (twopass->stats_in >= twopass->stats_in_end) {
// Accumulate kf group error.
@ -2287,16 +2153,6 @@ void configure_buffer_updates(VP9_COMP *cpi) {
assert(0);
break;
}
if (is_two_pass_svc(cpi)) {
if (cpi->svc.temporal_layer_id > 0) {
cpi->refresh_last_frame = 0;
cpi->refresh_golden_frame = 0;
}
if (cpi->svc.layer_context[cpi->svc.spatial_layer_id].gold_ref_idx < 0)
cpi->refresh_golden_frame = 0;
if (cpi->alt_ref_source == NULL)
cpi->refresh_alt_ref_frame = 0;
}
}
@ -2310,16 +2166,9 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
FIRSTPASS_STATS this_frame_copy;
int target_rate;
LAYER_CONTEXT *const lc = is_two_pass_svc(cpi) ?
&cpi->svc.layer_context[cpi->svc.spatial_layer_id] : 0;
if (lc != NULL) {
frames_left = (int)(twopass->total_stats.count -
lc->current_video_frame_in_layer);
} else {
frames_left = (int)(twopass->total_stats.count -
cm->current_video_frame);
}
frames_left = (int)(twopass->total_stats.count -
cm->current_video_frame);
if (!twopass->stats_in)
return;
@ -2340,17 +2189,6 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
vp9_rc_set_frame_target(cpi, target_rate);
cm->frame_type = INTER_FRAME;
if (lc != NULL) {
if (cpi->svc.spatial_layer_id == 0) {
lc->is_key_frame = 0;
} else {
lc->is_key_frame = cpi->svc.layer_context[0].is_key_frame;
if (lc->is_key_frame)
cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
}
}
return;
}
@ -2358,8 +2196,7 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
if (cpi->oxcf.rc_mode == VPX_Q) {
twopass->active_worst_quality = cpi->oxcf.cq_level;
} else if (cm->current_video_frame == 0 ||
(lc != NULL && lc->current_video_frame_in_layer == 0)) {
} else if (cm->current_video_frame == 0) {
// Special case code for first frame.
const int section_target_bandwidth = (int)(twopass->bits_left /
frames_left);
@ -2389,35 +2226,11 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
cm->frame_type = INTER_FRAME;
}
if (lc != NULL) {
if (cpi->svc.spatial_layer_id == 0) {
lc->is_key_frame = (cm->frame_type == KEY_FRAME);
if (lc->is_key_frame) {
cpi->ref_frame_flags &=
(~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG);
lc->frames_from_key_frame = 0;
// Reset the empty frame resolution since we have a key frame.
cpi->svc.empty_frame_width = cm->width;
cpi->svc.empty_frame_height = cm->height;
}
} else {
cm->frame_type = INTER_FRAME;
lc->is_key_frame = cpi->svc.layer_context[0].is_key_frame;
if (lc->is_key_frame) {
cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
lc->frames_from_key_frame = 0;
}
}
}
// Define a new GF/ARF group. (Should always enter here for key frames).
if (rc->frames_till_gf_update_due == 0) {
define_gf_group(cpi, &this_frame_copy);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
if (lc != NULL)
cpi->refresh_golden_frame = 1;
#if ARF_STATS_OUTPUT
{
@ -2477,8 +2290,7 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) {
rc->rate_error_estimate = 0;
}
if (cpi->common.frame_type != KEY_FRAME &&
!vp9_is_upper_layer_key_frame(cpi)) {
if (cpi->common.frame_type != KEY_FRAME) {
twopass->kf_group_bits -= bits_used;
twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
}


@ -60,7 +60,6 @@ typedef struct {
double new_mv_count;
double duration;
double count;
int64_t spatial_layer_id;
} FIRSTPASS_STATS;
typedef enum {


@ -14,11 +14,6 @@
#include "vpx_scale/yv12config.h"
#include "vpx/vpx_integer.h"
#if CONFIG_SPATIAL_SVC
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif


@ -226,25 +226,6 @@ int vp9_rc_clamp_iframe_target_size(const VP9_COMP *const cpi, int target) {
return target;
}
// Update the buffer level for higher layers, given the encoded current layer.
static void update_layer_buffer_level(SVC *svc, int encoded_frame_size) {
int temporal_layer = 0;
int current_temporal_layer = svc->temporal_layer_id;
for (temporal_layer = current_temporal_layer + 1;
temporal_layer < svc->number_temporal_layers; ++temporal_layer) {
LAYER_CONTEXT *lc = &svc->layer_context[temporal_layer];
RATE_CONTROL *lrc = &lc->rc;
int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
encoded_frame_size);
lrc->bits_off_target += bits_off_for_this_layer;
// Clip buffer level to maximum buffer size for the layer.
lrc->bits_off_target = MIN(lrc->bits_off_target, lrc->maximum_buffer_size);
lrc->buffer_level = lrc->bits_off_target;
}
}
// Update the buffer level: leaky bucket model.
static void update_buffer_level(VP9_COMP *cpi, int encoded_frame_size) {
const VP9_COMMON *const cm = &cpi->common;
@ -260,10 +241,6 @@ static void update_buffer_level(VP9_COMP *cpi, int encoded_frame_size) {
// Clip the buffer level to the maximum specified buffer size.
rc->bits_off_target = MIN(rc->bits_off_target, rc->maximum_buffer_size);
rc->buffer_level = rc->bits_off_target;
if (cpi->use_svc && cpi->oxcf.rc_mode == VPX_CBR) {
update_layer_buffer_level(&cpi->svc, encoded_frame_size);
}
}
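The per-layer bookkeeping that the removed update_layer_buffer_level() performed follows the same leaky-bucket model as update_buffer_level(): credit each layer's per-frame budget, debit the bits actually spent, and clip to the layer's maximum buffer size. A minimal standalone sketch of that model (hypothetical toy_* names, not the libvpx API):

#include <stdio.h>

typedef struct {
  long long bits_off_target;    /* accumulated over/under-shoot, in bits */
  long long maximum_buffer_size;
  long long target_bandwidth;   /* bits per second for this layer */
  double framerate;             /* frames per second for this layer */
} ToyLayerRC;

static void toy_update_buffer_level(ToyLayerRC *lrc, int encoded_frame_size) {
  const long long budget = (long long)(lrc->target_bandwidth / lrc->framerate);
  lrc->bits_off_target += budget - encoded_frame_size;
  /* Clip the buffer level to the maximum buffer size, as above. */
  if (lrc->bits_off_target > lrc->maximum_buffer_size)
    lrc->bits_off_target = lrc->maximum_buffer_size;
}

int main(void) {
  ToyLayerRC lrc = { 0, 600000, 400000, 30.0 };  /* 400 kbps at 30 fps */
  toy_update_buffer_level(&lrc, 20000);  /* frame larger than the 13333-bit budget */
  printf("bits_off_target = %lld\n", lrc.bits_off_target);  /* prints -6667 */
  return 0;
}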
void vp9_rc_init(const VP9EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
@ -363,8 +340,7 @@ static double get_rate_correction_factor(const VP9_COMP *cpi) {
return rc->rate_correction_factors[rf_lvl];
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
!rc->is_src_frame_alt_ref &&
!(cpi->use_svc && cpi->oxcf.rc_mode == VPX_CBR))
!rc->is_src_frame_alt_ref)
return rc->rate_correction_factors[GF_ARF_STD];
else
return rc->rate_correction_factors[INTER_NORMAL];
@ -382,8 +358,7 @@ static void set_rate_correction_factor(VP9_COMP *cpi, double factor) {
rc->rate_correction_factors[rf_lvl] = factor;
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
!rc->is_src_frame_alt_ref &&
!(cpi->use_svc && cpi->oxcf.rc_mode == VPX_CBR))
!rc->is_src_frame_alt_ref)
rc->rate_correction_factors[GF_ARF_STD] = factor;
else
rc->rate_correction_factors[INTER_NORMAL] = factor;
@ -645,7 +620,6 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi,
cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
!cpi->use_svc &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
// Use the lower of active_worst_quality and recent
// average Q as basis for GF/ARF best Q limit unless last frame was
@ -897,7 +871,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
int *inter_minq;
ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
if (frame_is_intra_only(cm) || vp9_is_upper_layer_key_frame(cpi)) {
if (frame_is_intra_only(cm)) {
// Handle the special case for key frames forced when we have reached
// the maximum key frame interval. Here force the Q to a range
// based on the ambient Q to reduce the risk of popping.
@ -1009,7 +983,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
vp9_clear_system_state();
// Static forced key frames Q restrictions dealt with elsewhere.
if (!((frame_is_intra_only(cm) || vp9_is_upper_layer_key_frame(cpi))) ||
if (!(frame_is_intra_only(cm)) ||
!rc->this_key_frame_forced ||
(cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
@ -1039,8 +1013,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
if (oxcf->rc_mode == VPX_Q) {
q = active_best_quality;
// Special case code to try and match quality with forced key frames.
} else if ((frame_is_intra_only(cm) || vp9_is_upper_layer_key_frame(cpi)) &&
rc->this_key_frame_forced) {
} else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
// If static since last kf use better of last boosted and last kf q.
if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
q = MIN(rc->last_kf_qindex, rc->last_boosted_qindex);
@ -1176,8 +1149,7 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) {
ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[KEY_FRAME] + qindex, 2);
} else {
if (rc->is_src_frame_alt_ref ||
!(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) ||
(cpi->use_svc && oxcf->rc_mode == VPX_CBR)) {
!(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
rc->last_q[INTER_FRAME] = qindex;
rc->avg_frame_qindex[INTER_FRAME] =
ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
@ -1315,21 +1287,11 @@ void vp9_rc_get_one_pass_vbr_params(VP9_COMP *cpi) {
static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) {
const VP9EncoderConfig *oxcf = &cpi->oxcf;
const RATE_CONTROL *rc = &cpi->rc;
const SVC *const svc = &cpi->svc;
const int64_t diff = rc->optimal_buffer_level - rc->buffer_level;
const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100;
int min_frame_target = MAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS);
int target = rc->avg_frame_bandwidth;
if (svc->number_temporal_layers > 1 &&
oxcf->rc_mode == VPX_CBR) {
// Note that for layers, avg_frame_bandwidth is the cumulative
// per-frame-bandwidth. For the target size of this frame, use the
// layer average frame size (i.e., non-cumulative per-frame-bw).
int current_temporal_layer = svc->temporal_layer_id;
const LAYER_CONTEXT *lc = &svc->layer_context[current_temporal_layer];
target = lc->avg_frame_size;
min_frame_target = MAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS);
}
if (diff > 0) {
// Lower the target bandwidth for this frame.
const int pct_low = (int)MIN(diff / one_pct_bits, oxcf->under_shoot_pct);
@ -1344,8 +1306,6 @@ static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) {
static int calc_iframe_target_size_one_pass_cbr(const VP9_COMP *cpi) {
const RATE_CONTROL *rc = &cpi->rc;
const VP9EncoderConfig *oxcf = &cpi->oxcf;
const SVC *const svc = &cpi->svc;
int target;
if (cpi->common.current_video_frame == 0) {
target = ((rc->starting_buffer_level / 2) > INT_MAX)
@ -1353,12 +1313,6 @@ static int calc_iframe_target_size_one_pass_cbr(const VP9_COMP *cpi) {
} else {
int kf_boost = 32;
double framerate = cpi->framerate;
if (svc->number_temporal_layers > 1 &&
oxcf->rc_mode == VPX_CBR) {
// Use the layer framerate for temporal layers CBR mode.
const LAYER_CONTEXT *lc = &svc->layer_context[svc->temporal_layer_id];
framerate = lc->framerate;
}
kf_boost = MAX(kf_boost, (int)(2 * framerate - 16));
if (rc->frames_since_key < framerate / 2) {
kf_boost = (int)(kf_boost * rc->frames_since_key /
@ -1369,50 +1323,6 @@ static int calc_iframe_target_size_one_pass_cbr(const VP9_COMP *cpi) {
return vp9_rc_clamp_iframe_target_size(cpi, target);
}
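The one-pass CBR frame target computed in calc_pframe_target_size_one_pass_cbr() above nudges the average per-frame budget up or down by a percentage derived from how far the buffer level has drifted from its optimal level. A rough standalone sketch of that adjustment (the /200 scaling and the overshoot branch are assumptions for illustration, and the names are hypothetical):

#include <stdio.h>

static int toy_cbr_frame_target(int avg_frame_bandwidth,
                                long long optimal_buffer_level,
                                long long buffer_level,
                                int under_shoot_pct, int over_shoot_pct) {
  const long long diff = optimal_buffer_level - buffer_level;
  const long long one_pct_bits = 1 + optimal_buffer_level / 100;
  int target = avg_frame_bandwidth;
  if (diff > 0) {
    /* Buffer below optimal: lower the target bandwidth for this frame. */
    const int pct_low = (int)(diff / one_pct_bits < under_shoot_pct
                                  ? diff / one_pct_bits : under_shoot_pct);
    target -= (target * pct_low) / 200;
  } else if (diff < 0) {
    /* Buffer above optimal: allow a somewhat larger frame. */
    const int pct_high = (int)(-diff / one_pct_bits < over_shoot_pct
                                   ? -diff / one_pct_bits : over_shoot_pct);
    target += (target * pct_high) / 200;
  }
  return target;
}

int main(void) {
  /* 13333-bit average frame, buffer about 10% below its optimal level. */
  printf("target = %d bits\n", toy_cbr_frame_target(13333, 500000, 450000, 50, 50));
  return 0;
}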
void vp9_rc_get_svc_params(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target = rc->avg_frame_bandwidth;
if ((cm->current_video_frame == 0) ||
(cpi->frame_flags & FRAMEFLAGS_KEY) ||
(cpi->oxcf.auto_key && (rc->frames_since_key %
cpi->oxcf.key_freq == 0))) {
cm->frame_type = KEY_FRAME;
rc->source_alt_ref_active = 0;
if (is_two_pass_svc(cpi)) {
cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame = 1;
cpi->ref_frame_flags &=
(~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG);
}
if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR) {
target = calc_iframe_target_size_one_pass_cbr(cpi);
}
} else {
cm->frame_type = INTER_FRAME;
if (is_two_pass_svc(cpi)) {
LAYER_CONTEXT *lc = &cpi->svc.layer_context[cpi->svc.spatial_layer_id];
if (cpi->svc.spatial_layer_id == 0) {
lc->is_key_frame = 0;
} else {
lc->is_key_frame = cpi->svc.layer_context[0].is_key_frame;
if (lc->is_key_frame)
cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
}
cpi->ref_frame_flags &= (~VP9_ALT_FLAG);
}
if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR) {
target = calc_pframe_target_size_one_pass_cbr(cpi);
}
}
vp9_rc_set_frame_target(cpi, target);
rc->frames_till_gf_update_due = INT_MAX;
rc->baseline_gf_interval = INT_MAX;
}
void vp9_rc_get_one_pass_cbr_params(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;


@ -117,7 +117,6 @@ void vp9_rc_init_minq_luts();
// First call per frame, one of:
// vp9_rc_get_one_pass_vbr_params()
// vp9_rc_get_one_pass_cbr_params()
// vp9_rc_get_svc_params()
// vp9_rc_get_first_pass_params()
// vp9_rc_get_second_pass_params()
// depending on the usage to set the rate control encode parameters desired.
@ -138,7 +137,6 @@ void vp9_rc_init_minq_luts();
// encode_frame_to_data_rate() function.
void vp9_rc_get_one_pass_vbr_params(struct VP9_COMP *cpi);
void vp9_rc_get_one_pass_cbr_params(struct VP9_COMP *cpi);
void vp9_rc_get_svc_params(struct VP9_COMP *cpi);
// Post encode update of the rate control parameters based
// on bytes used


@ -18,8 +18,7 @@
static int frame_is_boosted(const VP9_COMP *cpi) {
return frame_is_intra_only(&cpi->common) ||
cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref) ||
vp9_is_upper_layer_key_frame(cpi);
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
}


@ -1,406 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <math.h>
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
#include "vp9/encoder/vp9_extend.h"
#define SMALL_FRAME_FB_IDX 7
void vp9_init_layer_context(VP9_COMP *const cpi) {
SVC *const svc = &cpi->svc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
int layer;
int layer_end;
int alt_ref_idx = svc->number_spatial_layers;
svc->spatial_layer_id = 0;
svc->temporal_layer_id = 0;
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) {
layer_end = svc->number_temporal_layers;
} else {
layer_end = svc->number_spatial_layers;
if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.pass == 2) {
if (vp9_realloc_frame_buffer(&cpi->svc.empty_frame.img,
cpi->common.width, cpi->common.height,
cpi->common.subsampling_x,
cpi->common.subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cpi->common.use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL))
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate empty frame for multiple frame "
"contexts");
vpx_memset(cpi->svc.empty_frame.img.buffer_alloc, 0x80,
cpi->svc.empty_frame.img.buffer_alloc_sz);
cpi->svc.empty_frame_width = cpi->common.width;
cpi->svc.empty_frame_height = cpi->common.height;
}
}
for (layer = 0; layer < layer_end; ++layer) {
LAYER_CONTEXT *const lc = &svc->layer_context[layer];
RATE_CONTROL *const lrc = &lc->rc;
int i;
lc->current_video_frame_in_layer = 0;
lc->layer_size = 0;
lc->frames_from_key_frame = 0;
lc->last_frame_type = FRAME_TYPES;
lrc->ni_av_qi = oxcf->worst_allowed_q;
lrc->total_actual_bits = 0;
lrc->total_target_vs_actual = 0;
lrc->ni_tot_qi = 0;
lrc->tot_q = 0.0;
lrc->avg_q = 0.0;
lrc->ni_frames = 0;
lrc->decimation_count = 0;
lrc->decimation_factor = 0;
for (i = 0; i < RATE_FACTOR_LEVELS; ++i) {
lrc->rate_correction_factors[i] = 1.0;
}
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) {
lc->target_bandwidth = oxcf->ts_target_bitrate[layer];
lrc->last_q[INTER_FRAME] = oxcf->worst_allowed_q;
lrc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q;
} else {
lc->target_bandwidth = oxcf->ss_target_bitrate[layer];
lrc->last_q[KEY_FRAME] = oxcf->best_allowed_q;
lrc->last_q[INTER_FRAME] = oxcf->best_allowed_q;
lrc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q +
oxcf->best_allowed_q) / 2;
lrc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q +
oxcf->best_allowed_q) / 2;
if (oxcf->ss_enable_auto_arf[layer])
lc->alt_ref_idx = alt_ref_idx++;
else
lc->alt_ref_idx = -1;
lc->gold_ref_idx = -1;
}
lrc->buffer_level = oxcf->starting_buffer_level_ms *
lc->target_bandwidth / 1000;
lrc->bits_off_target = lrc->buffer_level;
}
// Still have extra buffer for base layer golden frame
if (!(svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR)
&& alt_ref_idx < REF_FRAMES)
svc->layer_context[0].gold_ref_idx = alt_ref_idx;
}
// Update the layer context from a change_config() call.
void vp9_update_layer_context_change_config(VP9_COMP *const cpi,
const int target_bandwidth) {
SVC *const svc = &cpi->svc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
const RATE_CONTROL *const rc = &cpi->rc;
int layer;
int layer_end;
float bitrate_alloc = 1.0;
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) {
layer_end = svc->number_temporal_layers;
} else {
layer_end = svc->number_spatial_layers;
}
for (layer = 0; layer < layer_end; ++layer) {
LAYER_CONTEXT *const lc = &svc->layer_context[layer];
RATE_CONTROL *const lrc = &lc->rc;
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) {
lc->target_bandwidth = oxcf->ts_target_bitrate[layer];
} else {
lc->target_bandwidth = oxcf->ss_target_bitrate[layer];
}
bitrate_alloc = (float)lc->target_bandwidth / target_bandwidth;
// Update buffer-related quantities.
lrc->starting_buffer_level =
(int64_t)(rc->starting_buffer_level * bitrate_alloc);
lrc->optimal_buffer_level =
(int64_t)(rc->optimal_buffer_level * bitrate_alloc);
lrc->maximum_buffer_size =
(int64_t)(rc->maximum_buffer_size * bitrate_alloc);
lrc->bits_off_target = MIN(lrc->bits_off_target, lrc->maximum_buffer_size);
lrc->buffer_level = MIN(lrc->buffer_level, lrc->maximum_buffer_size);
// Update framerate-related quantities.
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) {
lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[layer];
} else {
lc->framerate = cpi->framerate;
}
lrc->avg_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate);
lrc->max_frame_bandwidth = rc->max_frame_bandwidth;
// Update qp-related quantities.
lrc->worst_quality = rc->worst_quality;
lrc->best_quality = rc->best_quality;
}
}
static LAYER_CONTEXT *get_layer_context(VP9_COMP *const cpi) {
return (cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ?
&cpi->svc.layer_context[cpi->svc.temporal_layer_id] :
&cpi->svc.layer_context[cpi->svc.spatial_layer_id];
}
void vp9_update_temporal_layer_framerate(VP9_COMP *const cpi) {
SVC *const svc = &cpi->svc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
LAYER_CONTEXT *const lc = get_layer_context(cpi);
RATE_CONTROL *const lrc = &lc->rc;
const int layer = svc->temporal_layer_id;
lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[layer];
lrc->avg_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate);
lrc->max_frame_bandwidth = cpi->rc.max_frame_bandwidth;
// Update the average layer frame size (non-cumulative per-frame-bw).
if (layer == 0) {
lc->avg_frame_size = lrc->avg_frame_bandwidth;
} else {
const double prev_layer_framerate =
cpi->framerate / oxcf->ts_rate_decimator[layer - 1];
const int prev_layer_target_bandwidth = oxcf->ts_target_bitrate[layer - 1];
lc->avg_frame_size =
(int)((lc->target_bandwidth - prev_layer_target_bandwidth) /
(lc->framerate - prev_layer_framerate));
}
}
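As a concrete illustration (hypothetical numbers): with cumulative temporal-layer targets of 100 kbps for the base layer and 400 kbps for the enhancement layer, at layer framerates of 15 fps and 30 fps, the base layer's average frame size is 100000 / 15, about 6667 bits, while the enhancement layer's comes out as (400000 - 100000) / (30 - 15) = 20000 bits per frame, since only its non-cumulative share of the bandwidth is attributed to it.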
void vp9_update_spatial_layer_framerate(VP9_COMP *const cpi, double framerate) {
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
LAYER_CONTEXT *const lc = get_layer_context(cpi);
RATE_CONTROL *const lrc = &lc->rc;
lc->framerate = framerate;
lrc->avg_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate);
lrc->min_frame_bandwidth = (int)(lrc->avg_frame_bandwidth *
oxcf->two_pass_vbrmin_section / 100);
lrc->max_frame_bandwidth = (int)(((int64_t)lrc->avg_frame_bandwidth *
oxcf->two_pass_vbrmax_section) / 100);
vp9_rc_set_gf_max_interval(cpi, lrc);
}
void vp9_restore_layer_context(VP9_COMP *const cpi) {
LAYER_CONTEXT *const lc = get_layer_context(cpi);
const int old_frame_since_key = cpi->rc.frames_since_key;
const int old_frame_to_key = cpi->rc.frames_to_key;
cpi->rc = lc->rc;
cpi->twopass = lc->twopass;
cpi->oxcf.target_bandwidth = lc->target_bandwidth;
cpi->alt_ref_source = lc->alt_ref_source;
// Reset the frames_since_key and frames_to_key counters to their values
// before the layer restore. Keep these defined for the stream (not layer).
if (cpi->svc.number_temporal_layers > 1) {
cpi->rc.frames_since_key = old_frame_since_key;
cpi->rc.frames_to_key = old_frame_to_key;
}
}
void vp9_save_layer_context(VP9_COMP *const cpi) {
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
LAYER_CONTEXT *const lc = get_layer_context(cpi);
lc->rc = cpi->rc;
lc->twopass = cpi->twopass;
lc->target_bandwidth = (int)oxcf->target_bandwidth;
lc->alt_ref_source = cpi->alt_ref_source;
}
void vp9_init_second_pass_spatial_svc(VP9_COMP *cpi) {
SVC *const svc = &cpi->svc;
int i;
for (i = 0; i < svc->number_spatial_layers; ++i) {
TWO_PASS *const twopass = &svc->layer_context[i].twopass;
svc->spatial_layer_id = i;
vp9_init_second_pass(cpi);
twopass->total_stats.spatial_layer_id = i;
twopass->total_left_stats.spatial_layer_id = i;
}
svc->spatial_layer_id = 0;
}
void vp9_inc_frame_in_layer(VP9_COMP *const cpi) {
LAYER_CONTEXT *const lc =
(cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ?
&cpi->svc.layer_context[cpi->svc.temporal_layer_id] :
&cpi->svc.layer_context[cpi->svc.spatial_layer_id];
++lc->current_video_frame_in_layer;
++lc->frames_from_key_frame;
}
int vp9_is_upper_layer_key_frame(const VP9_COMP *const cpi) {
return is_two_pass_svc(cpi) &&
cpi->svc.spatial_layer_id > 0 &&
cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame;
}
#if CONFIG_SPATIAL_SVC
static void get_layer_resolution(const int width_org, const int height_org,
const int num, const int den,
int *width_out, int *height_out) {
int w, h;
if (width_out == NULL || height_out == NULL || den == 0)
return;
w = width_org * num / den;
h = height_org * num / den;
// make height and width even to make chrome player happy
w += w % 2;
h += h % 2;
*width_out = w;
*height_out = h;
}
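Usage of the arithmetic above is straightforward: scale each dimension by num/den with integer division, then round up to even. A small self-contained restatement (illustrative only):

#include <stdio.h>

static void toy_layer_resolution(int w_org, int h_org, int num, int den,
                                 int *w_out, int *h_out) {
  int w = w_org * num / den;
  int h = h_org * num / den;
  *w_out = w + (w % 2);  /* keep width even */
  *h_out = h + (h % 2);  /* keep height even */
}

int main(void) {
  int w, h;
  toy_layer_resolution(1920, 1080, 5, 16, &w, &h);
  printf("%dx%d\n", w, h);  /* prints 600x338; 337 is rounded up to 338 */
  return 0;
}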
int vp9_svc_start_frame(VP9_COMP *const cpi) {
int width = 0, height = 0;
LAYER_CONTEXT *lc;
int count = 1 << (cpi->svc.number_temporal_layers - 1);
cpi->svc.spatial_layer_id = cpi->svc.spatial_layer_to_encode;
lc = &cpi->svc.layer_context[cpi->svc.spatial_layer_id];
cpi->svc.temporal_layer_id = 0;
while ((lc->current_video_frame_in_layer % count) != 0) {
++cpi->svc.temporal_layer_id;
count >>= 1;
}
cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
cpi->lst_fb_idx = cpi->svc.spatial_layer_id;
if (cpi->svc.spatial_layer_id == 0)
cpi->gld_fb_idx = (lc->gold_ref_idx >= 0) ?
lc->gold_ref_idx : cpi->lst_fb_idx;
else
cpi->gld_fb_idx = cpi->svc.spatial_layer_id - 1;
if (lc->current_video_frame_in_layer == 0) {
if (cpi->svc.spatial_layer_id >= 2) {
cpi->alt_fb_idx = cpi->svc.spatial_layer_id - 2;
} else {
cpi->alt_fb_idx = cpi->lst_fb_idx;
cpi->ref_frame_flags &= (~VP9_LAST_FLAG & ~VP9_ALT_FLAG);
}
} else {
if (cpi->oxcf.ss_enable_auto_arf[cpi->svc.spatial_layer_id]) {
cpi->alt_fb_idx = lc->alt_ref_idx;
if (!lc->has_alt_frame)
cpi->ref_frame_flags &= (~VP9_ALT_FLAG);
} else {
// Find a proper alt_fb_idx for layers that don't have alt ref frame
if (cpi->svc.spatial_layer_id == 0) {
cpi->alt_fb_idx = cpi->lst_fb_idx;
} else {
LAYER_CONTEXT *lc_lower =
&cpi->svc.layer_context[cpi->svc.spatial_layer_id - 1];
if (cpi->oxcf.ss_enable_auto_arf[cpi->svc.spatial_layer_id - 1] &&
lc_lower->alt_ref_source != NULL)
cpi->alt_fb_idx = lc_lower->alt_ref_idx;
else if (cpi->svc.spatial_layer_id >= 2)
cpi->alt_fb_idx = cpi->svc.spatial_layer_id - 2;
else
cpi->alt_fb_idx = cpi->lst_fb_idx;
}
}
}
get_layer_resolution(cpi->oxcf.width, cpi->oxcf.height,
lc->scaling_factor_num, lc->scaling_factor_den,
&width, &height);
// Workaround for multiple frame contexts. In some frames we can't use prev_mi
// since its previous frame could be changed during decoding time. The idea is
// we put an empty invisible frame in front of them, then we will not use
// prev_mi when encoding these frames.
if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.pass == 2 &&
cpi->svc.encode_empty_frame_state == NEED_TO_ENCODE) {
if ((cpi->svc.number_temporal_layers > 1 &&
cpi->svc.temporal_layer_id < cpi->svc.number_temporal_layers - 1) ||
(cpi->svc.number_spatial_layers > 1 &&
cpi->svc.spatial_layer_id == 0)) {
struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead, 0);
if (buf != NULL) {
cpi->svc.empty_frame.ts_start = buf->ts_start;
cpi->svc.empty_frame.ts_end = buf->ts_end;
cpi->svc.encode_empty_frame_state = ENCODING;
cpi->common.show_frame = 0;
cpi->ref_frame_flags = 0;
cpi->common.frame_type = INTER_FRAME;
cpi->lst_fb_idx =
cpi->gld_fb_idx = cpi->alt_fb_idx = SMALL_FRAME_FB_IDX;
// Gradually make the empty frame smaller to save bits. Make it half of
// its previous size because of the scaling factor restriction.
cpi->svc.empty_frame_width >>= 1;
cpi->svc.empty_frame_width = (cpi->svc.empty_frame_width + 1) & ~1;
if (cpi->svc.empty_frame_width < 16)
cpi->svc.empty_frame_width = 16;
cpi->svc.empty_frame_height >>= 1;
cpi->svc.empty_frame_height = (cpi->svc.empty_frame_height + 1) & ~1;
if (cpi->svc.empty_frame_height < 16)
cpi->svc.empty_frame_height = 16;
width = cpi->svc.empty_frame_width;
height = cpi->svc.empty_frame_height;
}
}
}
if (vp9_set_size_literal(cpi, width, height) != 0)
return VPX_CODEC_INVALID_PARAM;
cpi->oxcf.worst_allowed_q = vp9_quantizer_to_qindex(lc->max_q);
cpi->oxcf.best_allowed_q = vp9_quantizer_to_qindex(lc->min_q);
vp9_change_config(cpi, &cpi->oxcf);
vp9_set_high_precision_mv(cpi, 1);
cpi->alt_ref_source = get_layer_context(cpi)->alt_ref_source;
return 0;
}
struct lookahead_entry *vp9_svc_lookahead_pop(VP9_COMP *const cpi,
struct lookahead_ctx *ctx,
int drain) {
struct lookahead_entry *buf = NULL;
if (ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
buf = vp9_lookahead_peek(ctx, 0);
if (buf != NULL) {
// Only remove the buffer when popping the highest layer.
if (cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1) {
vp9_lookahead_pop(ctx, drain);
}
}
}
return buf;
}
#endif


@ -1,118 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_ENCODER_VP9_SVC_LAYERCONTEXT_H_
#define VP9_ENCODER_VP9_SVC_LAYERCONTEXT_H_
#include "vpx/vpx_encoder.h"
#include "vp9/encoder/vp9_ratectrl.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
RATE_CONTROL rc;
int target_bandwidth;
double framerate;
int avg_frame_size;
int max_q;
int min_q;
int scaling_factor_num;
int scaling_factor_den;
TWO_PASS twopass;
vpx_fixed_buf_t rc_twopass_stats_in;
unsigned int current_video_frame_in_layer;
int is_key_frame;
int frames_from_key_frame;
FRAME_TYPE last_frame_type;
struct lookahead_entry *alt_ref_source;
int alt_ref_idx;
int gold_ref_idx;
int has_alt_frame;
size_t layer_size;
struct vpx_psnr_pkt psnr_pkt;
} LAYER_CONTEXT;
typedef struct {
int spatial_layer_id;
int temporal_layer_id;
int number_spatial_layers;
int number_temporal_layers;
int spatial_layer_to_encode;
// Workaround for multiple frame contexts
enum {
ENCODED = 0,
ENCODING,
NEED_TO_ENCODE
}encode_empty_frame_state;
struct lookahead_entry empty_frame;
int empty_frame_width;
int empty_frame_height;
// Store scaled source frames to be used for temporal filter to generate
// an alt ref frame.
YV12_BUFFER_CONFIG scaled_frames[MAX_LAG_BUFFERS];
// Layer context used for rate control in one pass temporal CBR mode or
// two pass spatial mode. Defined for temporal or spatial layers for now.
// Does not support temporal combined with spatial RC.
LAYER_CONTEXT layer_context[MAX(VPX_TS_MAX_LAYERS, VPX_SS_MAX_LAYERS)];
} SVC;
struct VP9_COMP;
// Initialize layer context data from init_config().
void vp9_init_layer_context(struct VP9_COMP *const cpi);
// Update the layer context from a change_config() call.
void vp9_update_layer_context_change_config(struct VP9_COMP *const cpi,
const int target_bandwidth);
// Prior to encoding the frame, update framerate-related quantities
// for the current temporal layer.
void vp9_update_temporal_layer_framerate(struct VP9_COMP *const cpi);
// Update framerate-related quantities for the current spatial layer.
void vp9_update_spatial_layer_framerate(struct VP9_COMP *const cpi,
double framerate);
// Prior to encoding the frame, set the layer context, for the current layer
// to be encoded, to the cpi struct.
void vp9_restore_layer_context(struct VP9_COMP *const cpi);
// Save the layer context after encoding the frame.
void vp9_save_layer_context(struct VP9_COMP *const cpi);
// Initialize second pass rc for spatial svc.
void vp9_init_second_pass_spatial_svc(struct VP9_COMP *cpi);
// Increment number of video frames in layer
void vp9_inc_frame_in_layer(struct VP9_COMP *const cpi);
// Check if current layer is key frame in spatial upper layer
int vp9_is_upper_layer_key_frame(const struct VP9_COMP *const cpi);
// Get the next source buffer to encode
struct lookahead_entry *vp9_svc_lookahead_pop(struct VP9_COMP *const cpi,
struct lookahead_ctx *ctx,
int drain);
// Start a frame and initialize svc parameters
int vp9_svc_start_frame(struct VP9_COMP *const cpi);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP9_ENCODER_VP9_SVC_LAYERCONTEXT_


@ -651,7 +651,6 @@ static void adjust_arnr_filter(VP9_COMP *cpi,
}
void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int frame;
int frames_to_blur;
@ -679,66 +678,21 @@ void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
if (frames_to_blur > 0) {
// Setup scaling factors. Scaling on each of the arnr frames is not
// supported.
if (is_two_pass_svc(cpi)) {
// In spatial svc the scaling factors might be less than 1/2.
// So we will use non-normative scaling.
int frame_used = 0;
// ARF is produced at the native frame size and resized when coded.
#if CONFIG_VP9_HIGHBITDEPTH
vp9_setup_scale_factors_for_frame(
&sf,
get_frame_new_buffer(cm)->y_crop_width,
get_frame_new_buffer(cm)->y_crop_height,
get_frame_new_buffer(cm)->y_crop_width,
get_frame_new_buffer(cm)->y_crop_height,
cm->use_highbitdepth);
vp9_setup_scale_factors_for_frame(&sf,
frames[0]->y_crop_width,
frames[0]->y_crop_height,
frames[0]->y_crop_width,
frames[0]->y_crop_height,
cpi->common.use_highbitdepth);
#else
vp9_setup_scale_factors_for_frame(
&sf,
get_frame_new_buffer(cm)->y_crop_width,
get_frame_new_buffer(cm)->y_crop_height,
get_frame_new_buffer(cm)->y_crop_width,
get_frame_new_buffer(cm)->y_crop_height);
vp9_setup_scale_factors_for_frame(&sf,
frames[0]->y_crop_width,
frames[0]->y_crop_height,
frames[0]->y_crop_width,
frames[0]->y_crop_height);
#endif // CONFIG_VP9_HIGHBITDEPTH
for (frame = 0; frame < frames_to_blur; ++frame) {
if (cm->mi_cols * MI_SIZE != frames[frame]->y_width ||
cm->mi_rows * MI_SIZE != frames[frame]->y_height) {
if (vp9_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used],
cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, NULL, NULL,
NULL)) {
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to reallocate alt_ref_buffer");
}
frames[frame] = vp9_scale_if_required(
cm, frames[frame], &cpi->svc.scaled_frames[frame_used]);
++frame_used;
}
}
cm->mi = cm->mip + cm->mi_stride + 1;
cpi->mb.e_mbd.mi = cm->mi;
cpi->mb.e_mbd.mi[0].src_mi = &cpi->mb.e_mbd.mi[0];
} else {
// ARF is produced at the native frame size and resized when coded.
#if CONFIG_VP9_HIGHBITDEPTH
vp9_setup_scale_factors_for_frame(&sf,
frames[0]->y_crop_width,
frames[0]->y_crop_height,
frames[0]->y_crop_width,
frames[0]->y_crop_height,
cm->use_highbitdepth);
#else
vp9_setup_scale_factors_for_frame(&sf,
frames[0]->y_crop_width,
frames[0]->y_crop_height,
frames[0]->y_crop_width,
frames[0]->y_crop_height);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
temporal_filter_iterate_c(cpi, frames, frames_to_blur,


@ -164,38 +164,6 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
RANGE_CHECK(cfg, rc_scaled_height, 1, cfg->g_h);
}
RANGE_CHECK(cfg, ss_number_layers, 1, VPX_SS_MAX_LAYERS);
RANGE_CHECK(cfg, ts_number_layers, 1, VPX_TS_MAX_LAYERS);
if (cfg->ts_number_layers > 1) {
unsigned int i;
for (i = 1; i < cfg->ts_number_layers; ++i)
if (cfg->ts_target_bitrate[i] < cfg->ts_target_bitrate[i - 1])
ERROR("ts_target_bitrate entries are not increasing");
RANGE_CHECK(cfg, ts_rate_decimator[cfg->ts_number_layers - 1], 1, 1);
for (i = cfg->ts_number_layers - 2; i > 0; --i)
if (cfg->ts_rate_decimator[i - 1] != 2 * cfg->ts_rate_decimator[i])
ERROR("ts_rate_decimator factors are not powers of 2");
}
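The decimator check above expects each lower temporal layer to run at half the rate of the one above it, with the top layer's decimator equal to 1. For example, a valid 3-layer setting is ts_rate_decimator = {4, 2, 1}; a sketch of how frames would then map to layers (the mapping itself is chosen by the calling application, so this is illustrative only):

#include <stdio.h>

int main(void) {
  const int ts_rate_decimator[3] = {4, 2, 1};  /* must halve at every step */
  int f;
  for (f = 0; f < 8; ++f) {
    int layer = 0;
    /* Assign the frame to the lowest layer whose decimator divides it. */
    while (layer < 2 && f % ts_rate_decimator[layer] != 0)
      ++layer;
    printf("frame %d -> temporal layer %d\n", f, layer);
  }
  return 0;  /* pattern: 0 2 1 2 0 2 1 2 */
}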
#if CONFIG_SPATIAL_SVC
if ((cfg->ss_number_layers > 1 || cfg->ts_number_layers > 1) &&
cfg->g_pass == VPX_RC_LAST_PASS) {
unsigned int i, alt_ref_sum = 0;
for (i = 0; i < cfg->ss_number_layers; ++i) {
if (cfg->ss_enable_auto_alt_ref[i])
++alt_ref_sum;
}
if (alt_ref_sum > REF_FRAMES - cfg->ss_number_layers)
ERROR("Not enough ref buffers for svc alt ref frames");
if (cfg->ss_number_layers * cfg->ts_number_layers > 3 &&
cfg->g_error_resilient == 0)
ERROR("Multiple frame context are not supported for more than 3 layers");
}
#endif
// VP9 does not support a lower bound on the keyframe interval in
// automatic keyframe placement mode.
if (cfg->kf_mode != VPX_KF_DISABLED &&
@ -238,44 +206,14 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
if (cfg->rc_twopass_stats_in.sz % packet_sz)
ERROR("rc_twopass_stats_in.sz indicates truncated packet.");
if (cfg->ss_number_layers > 1 || cfg->ts_number_layers > 1) {
int i;
unsigned int n_packets_per_layer[VPX_SS_MAX_LAYERS] = {0};
if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz)
ERROR("rc_twopass_stats_in requires at least two packets.");
stats = cfg->rc_twopass_stats_in.buf;
for (i = 0; i < n_packets; ++i) {
const int layer_id = (int)stats[i].spatial_layer_id;
if (layer_id >= 0 && layer_id < (int)cfg->ss_number_layers) {
++n_packets_per_layer[layer_id];
}
}
stats =
(const FIRSTPASS_STATS *)cfg->rc_twopass_stats_in.buf + n_packets - 1;
for (i = 0; i < (int)cfg->ss_number_layers; ++i) {
unsigned int layer_id;
if (n_packets_per_layer[i] < 2) {
ERROR("rc_twopass_stats_in requires at least two packets for each "
"layer.");
}
stats = (const FIRSTPASS_STATS *)cfg->rc_twopass_stats_in.buf +
n_packets - cfg->ss_number_layers + i;
layer_id = (int)stats->spatial_layer_id;
if (layer_id >= cfg->ss_number_layers
||(unsigned int)(stats->count + 0.5) !=
n_packets_per_layer[layer_id] - 1)
ERROR("rc_twopass_stats_in missing EOS stats packet");
}
} else {
if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz)
ERROR("rc_twopass_stats_in requires at least two packets.");
stats =
(const FIRSTPASS_STATS *)cfg->rc_twopass_stats_in.buf + n_packets - 1;
if ((int)(stats->count + 0.5) != n_packets - 1)
ERROR("rc_twopass_stats_in missing EOS stats packet");
}
if ((int)(stats->count + 0.5) != n_packets - 1)
ERROR("rc_twopass_stats_in missing EOS stats packet");
}
#if !CONFIG_VP9_HIGHBITDEPTH
@ -446,36 +384,6 @@ static vpx_codec_err_t set_encoder_config(
oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
oxcf->ss_number_layers = cfg->ss_number_layers;
if (oxcf->ss_number_layers > 1) {
int i;
for (i = 0; i < VPX_SS_MAX_LAYERS; ++i) {
oxcf->ss_target_bitrate[i] = 1000 * cfg->ss_target_bitrate[i];
#if CONFIG_SPATIAL_SVC
oxcf->ss_enable_auto_arf[i] = cfg->ss_enable_auto_alt_ref[i];
#endif
}
} else if (oxcf->ss_number_layers == 1) {
oxcf->ss_target_bitrate[0] = (int)oxcf->target_bandwidth;
#if CONFIG_SPATIAL_SVC
oxcf->ss_enable_auto_arf[0] = extra_cfg->enable_auto_alt_ref;
#endif
}
oxcf->ts_number_layers = cfg->ts_number_layers;
if (oxcf->ts_number_layers > 1) {
int i;
for (i = 0; i < VPX_TS_MAX_LAYERS; ++i) {
oxcf->ts_target_bitrate[i] = 1000 * cfg->ts_target_bitrate[i];
oxcf->ts_rate_decimator[i] = cfg->ts_rate_decimator[i];
}
} else if (oxcf->ts_number_layers == 1) {
oxcf->ts_target_bitrate[0] = (int)oxcf->target_bandwidth;
oxcf->ts_rate_decimator[0] = 1;
}
/*
printf("Current VP9 Settings: \n");
printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
@ -847,11 +755,7 @@ static vpx_codec_frame_flags_t get_frame_pkt_flags(const VP9_COMP *cpi,
unsigned int lib_flags) {
vpx_codec_frame_flags_t flags = lib_flags << 16;
if (lib_flags & FRAMEFLAGS_KEY
#if CONFIG_SPATIAL_SVC
|| (is_two_pass_svc(cpi) && cpi->svc.layer_context[0].is_key_frame)
#endif
)
if (lib_flags & FRAMEFLAGS_KEY)
flags |= VPX_FRAME_IS_KEY;
if (cpi->droppable)
@ -961,18 +865,8 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
if (size) {
vpx_codec_cx_pkt_t pkt;
#if CONFIG_SPATIAL_SVC
if (is_two_pass_svc(cpi))
cpi->svc.layer_context[cpi->svc.spatial_layer_id].layer_size += size;
#endif
// Pack invisible frames with the next visible frame
if (!cpi->common.show_frame
#if CONFIG_SPATIAL_SVC
|| (is_two_pass_svc(cpi) &&
cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)
#endif
) {
if (!cpi->common.show_frame) {
if (ctx->pending_cx_data == 0)
ctx->pending_cx_data = cx_data;
ctx->pending_cx_data_sz += size;
@ -1010,24 +904,6 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
cx_data += size;
cx_data_sz -= size;
#if CONFIG_SPATIAL_SVC
if (is_two_pass_svc(cpi)) {
vpx_codec_cx_pkt_t pkt_sizes, pkt_psnr;
int i;
vp9_zero(pkt_sizes);
vp9_zero(pkt_psnr);
pkt_sizes.kind = VPX_CODEC_SPATIAL_SVC_LAYER_SIZES;
pkt_psnr.kind = VPX_CODEC_SPATIAL_SVC_LAYER_PSNR;
for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->svc.layer_context[i];
pkt_sizes.data.layer_sizes[i] = lc->layer_size;
pkt_psnr.data.layer_psnr[i] = lc->psnr_pkt;
lc->layer_size = 0;
}
vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt_sizes);
vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt_psnr);
}
#endif
}
}
}
@ -1187,62 +1063,6 @@ static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx,
}
}
static vpx_codec_err_t ctrl_set_svc(vpx_codec_alg_priv_t *ctx, va_list args) {
int data = va_arg(args, int);
const vpx_codec_enc_cfg_t *cfg = &ctx->cfg;
vp9_set_svc(ctx->cpi, data);
// CBR or two pass mode for SVC with both temporal and spatial layers
// not yet supported.
if (data == 1 &&
(cfg->rc_end_usage == VPX_CBR ||
cfg->g_pass == VPX_RC_FIRST_PASS ||
cfg->g_pass == VPX_RC_LAST_PASS) &&
cfg->ss_number_layers > 1 &&
cfg->ts_number_layers > 1) {
return VPX_CODEC_INVALID_PARAM;
}
return VPX_CODEC_OK;
}
static vpx_codec_err_t ctrl_set_svc_layer_id(vpx_codec_alg_priv_t *ctx,
va_list args) {
vpx_svc_layer_id_t *const data = va_arg(args, vpx_svc_layer_id_t *);
VP9_COMP *const cpi = (VP9_COMP *)ctx->cpi;
SVC *const svc = &cpi->svc;
svc->spatial_layer_id = data->spatial_layer_id;
svc->temporal_layer_id = data->temporal_layer_id;
// Checks on valid layer_id input.
if (svc->temporal_layer_id < 0 ||
svc->temporal_layer_id >= (int)ctx->cfg.ts_number_layers) {
return VPX_CODEC_INVALID_PARAM;
}
if (svc->spatial_layer_id < 0 ||
svc->spatial_layer_id >= (int)ctx->cfg.ss_number_layers) {
return VPX_CODEC_INVALID_PARAM;
}
return VPX_CODEC_OK;
}
static vpx_codec_err_t ctrl_set_svc_parameters(vpx_codec_alg_priv_t *ctx,
va_list args) {
VP9_COMP *const cpi = ctx->cpi;
vpx_svc_extra_cfg_t *const params = va_arg(args, vpx_svc_extra_cfg_t *);
int i;
for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->svc.layer_context[i];
lc->max_q = params->max_quantizers[i];
lc->min_q = params->min_quantizers[i];
lc->scaling_factor_num = params->scaling_factor_num[i];
lc->scaling_factor_den = params->scaling_factor_den[i];
}
return VPX_CODEC_OK;
}
static vpx_codec_err_t ctrl_set_tune_content(vpx_codec_alg_priv_t *ctx,
va_list args) {
struct vp9_extracfg extra_cfg = ctx->extra_cfg;
@ -1285,9 +1105,6 @@ static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
{VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode},
{VP9E_SET_AQ_MODE, ctrl_set_aq_mode},
{VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost},
{VP9E_SET_SVC, ctrl_set_svc},
{VP9E_SET_SVC_PARAMETERS, ctrl_set_svc_parameters},
{VP9E_SET_SVC_LAYER_ID, ctrl_set_svc_layer_id},
{VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content},
{VP9E_SET_COLOR_SPACE, ctrl_set_color_space},
{VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity},
@ -1352,10 +1169,13 @@ static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
0, // kf_min_dist
9999, // kf_max_dist
VPX_SS_DEFAULT_LAYERS, // ss_number_layers
// TODO(yunqingwang): Spatial/temporal scalability is not supported
// in Nextgen. The following 8 parameters are unused and should
// be removed later.
0, // ss_number_layers
{0},
{0}, // ss_target_bitrate
1, // ts_number_layers
0, // ts_number_layers
{0}, // ts_target_bitrate
{0}, // ts_rate_decimator
0, // ts_periodicity


@ -51,7 +51,6 @@ VP9_CX_SRCS-yes += encoder/vp9_quantize.h
VP9_CX_SRCS-yes += encoder/vp9_ratectrl.h
VP9_CX_SRCS-yes += encoder/vp9_rd.h
VP9_CX_SRCS-yes += encoder/vp9_rdopt.h
VP9_CX_SRCS-yes += encoder/vp9_svc_layercontext.h
VP9_CX_SRCS-yes += encoder/vp9_tokenize.h
VP9_CX_SRCS-yes += encoder/vp9_treewriter.h
VP9_CX_SRCS-yes += encoder/vp9_variance.h
@ -70,7 +69,6 @@ VP9_CX_SRCS-yes += encoder/vp9_speed_features.c
VP9_CX_SRCS-yes += encoder/vp9_speed_features.h
VP9_CX_SRCS-yes += encoder/vp9_subexp.c
VP9_CX_SRCS-yes += encoder/vp9_subexp.h
VP9_CX_SRCS-yes += encoder/vp9_svc_layercontext.c
VP9_CX_SRCS-yes += encoder/vp9_resize.c
VP9_CX_SRCS-yes += encoder/vp9_resize.h
VP9_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += encoder/vp9_ransac.c


@ -1,608 +0,0 @@
/*
* Copyright (c) 2013 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/**
* @file
* VP9 SVC encoding support via libvpx
*/
#include <assert.h>
#include <math.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define VPX_DISABLE_CTRL_TYPECHECKS 1
#include "./vpx_config.h"
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "vp9/common/vp9_onyxc_int.h"
#ifdef __MINGW32__
#define strtok_r strtok_s
#ifndef MINGW_HAS_SECURE_API
// proto from /usr/x86_64-w64-mingw32/include/sec_api/string_s.h
_CRTIMP char *__cdecl strtok_s(char *str, const char *delim, char **context);
#endif /* MINGW_HAS_SECURE_API */
#endif /* __MINGW32__ */
#ifdef _MSC_VER
#define strdup _strdup
#define strtok_r strtok_s
#endif
#define SVC_REFERENCE_FRAMES 8
#define SUPERFRAME_SLOTS (8)
#define SUPERFRAME_BUFFER_SIZE (SUPERFRAME_SLOTS * sizeof(uint32_t) + 2)
#define OPTION_BUFFER_SIZE 1024
#define COMPONENTS 4 // psnr & sse statistics maintained for total, y, u, v
#define MAX_QUANTIZER 63
static const int DEFAULT_SCALE_FACTORS_NUM[VPX_SS_MAX_LAYERS] = {
4, 5, 7, 11, 16
};
static const int DEFAULT_SCALE_FACTORS_DEN[VPX_SS_MAX_LAYERS] = {
16, 16, 16, 16, 16
};
typedef enum {
QUANTIZER = 0,
BITRATE,
SCALE_FACTOR,
AUTO_ALT_REF,
ALL_OPTION_TYPES
} LAYER_OPTION_TYPE;
static const int option_max_values[ALL_OPTION_TYPES] = {
63, INT_MAX, INT_MAX, 1
};
static const int option_min_values[ALL_OPTION_TYPES] = {
0, 0, 1, 0
};
// One encoded frame
typedef struct FrameData {
void *buf; // compressed data buffer
size_t size; // length of compressed data
vpx_codec_frame_flags_t flags; /**< flags for this frame */
struct FrameData *next;
} FrameData;
typedef struct SvcInternal {
char options[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_options
// values extracted from option, quantizers
vpx_svc_extra_cfg_t svc_params;
int enable_auto_alt_ref[VPX_SS_MAX_LAYERS];
int bitrates[VPX_SS_MAX_LAYERS];
// accumulated statistics
double psnr_sum[VPX_SS_MAX_LAYERS][COMPONENTS]; // total/Y/U/V
uint64_t sse_sum[VPX_SS_MAX_LAYERS][COMPONENTS];
uint32_t bytes_sum[VPX_SS_MAX_LAYERS];
// codec encoding values
int width; // width of highest layer
int height; // height of highest layer
int kf_dist; // distance between keyframes
// state variables
int psnr_pkt_received;
int layer;
int use_multiple_frame_contexts;
char message_buffer[2048];
vpx_codec_ctx_t *codec_ctx;
} SvcInternal;
static SvcInternal *get_svc_internal(SvcContext *svc_ctx) {
if (svc_ctx == NULL) return NULL;
if (svc_ctx->internal == NULL) {
SvcInternal *const si = (SvcInternal *)malloc(sizeof(*si));
if (si != NULL) {
memset(si, 0, sizeof(*si));
}
svc_ctx->internal = si;
}
return (SvcInternal *)svc_ctx->internal;
}
static const SvcInternal *get_const_svc_internal(const SvcContext *svc_ctx) {
if (svc_ctx == NULL) return NULL;
return (const SvcInternal *)svc_ctx->internal;
}
static void svc_log_reset(SvcContext *svc_ctx) {
SvcInternal *const si = (SvcInternal *)svc_ctx->internal;
si->message_buffer[0] = '\0';
}
static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level,
const char *fmt, ...) {
char buf[512];
int retval = 0;
va_list ap;
SvcInternal *const si = get_svc_internal(svc_ctx);
if (level > svc_ctx->log_level) {
return retval;
}
va_start(ap, fmt);
retval = vsnprintf(buf, sizeof(buf), fmt, ap);
va_end(ap);
if (svc_ctx->log_print) {
printf("%s", buf);
} else {
strncat(si->message_buffer, buf,
sizeof(si->message_buffer) - strlen(si->message_buffer) - 1);
}
if (level == SVC_LOG_ERROR) {
si->codec_ctx->err_detail = si->message_buffer;
}
return retval;
}
static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type,
char *input,
int *value0,
int *value1) {
if (type == SCALE_FACTOR) {
*value0 = strtol(input, &input, 10);
if (*input++ != '/')
return VPX_CODEC_INVALID_PARAM;
*value1 = strtol(input, &input, 10);
if (*value0 < option_min_values[SCALE_FACTOR] ||
*value1 < option_min_values[SCALE_FACTOR] ||
*value0 > option_max_values[SCALE_FACTOR] ||
*value1 > option_max_values[SCALE_FACTOR] ||
*value0 > *value1) // num shouldn't be greater than den
return VPX_CODEC_INVALID_PARAM;
} else {
*value0 = atoi(input);
if (*value0 < option_min_values[type] ||
*value0 > option_max_values[type])
return VPX_CODEC_INVALID_PARAM;
}
return VPX_CODEC_OK;
}
static vpx_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx,
LAYER_OPTION_TYPE type,
const char *input,
int *option0,
int *option1) {
int i;
vpx_codec_err_t res = VPX_CODEC_OK;
char *input_string;
char *token;
const char *delim = ",";
char *save_ptr;
if (input == NULL || option0 == NULL ||
(option1 == NULL && type == SCALE_FACTOR))
return VPX_CODEC_INVALID_PARAM;
input_string = strdup(input);
token = strtok_r(input_string, delim, &save_ptr);
for (i = 0; i < svc_ctx->spatial_layers; ++i) {
if (token != NULL) {
res = extract_option(type, token, option0 + i, option1 + i);
if (res != VPX_CODEC_OK)
break;
token = strtok_r(NULL, delim, &save_ptr);
} else {
break;
}
}
if (res == VPX_CODEC_OK && i != svc_ctx->spatial_layers) {
svc_log(svc_ctx, SVC_LOG_ERROR,
"svc: layer params type: %d %d values required, "
"but only %d specified\n", type, svc_ctx->spatial_layers, i);
res = VPX_CODEC_INVALID_PARAM;
}
free(input_string);
return res;
}
/**
* Parse SVC encoding options
* Format: encoding-mode=<svc_mode>,layers=<layer_count>
* scale-factors=<n1>/<d1>,<n2>/<d2>,...
* quantizers=<q1>,<q2>,...
* svc_mode = [i|ip|alt_ip|gf]
*/
static vpx_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) {
char *input_string;
char *option_name;
char *option_value;
char *input_ptr;
SvcInternal *const si = get_svc_internal(svc_ctx);
vpx_codec_err_t res = VPX_CODEC_OK;
int i, alt_ref_enabled = 0;
if (options == NULL) return VPX_CODEC_OK;
input_string = strdup(options);
// parse option name
option_name = strtok_r(input_string, "=", &input_ptr);
while (option_name != NULL) {
// parse option value
option_value = strtok_r(NULL, " ", &input_ptr);
if (option_value == NULL) {
svc_log(svc_ctx, SVC_LOG_ERROR, "option missing value: %s\n",
option_name);
res = VPX_CODEC_INVALID_PARAM;
break;
}
if (strcmp("spatial-layers", option_name) == 0) {
svc_ctx->spatial_layers = atoi(option_value);
} else if (strcmp("temporal-layers", option_name) == 0) {
svc_ctx->temporal_layers = atoi(option_value);
} else if (strcmp("scale-factors", option_name) == 0) {
res = parse_layer_options_from_string(svc_ctx, SCALE_FACTOR, option_value,
si->svc_params.scaling_factor_num,
si->svc_params.scaling_factor_den);
if (res != VPX_CODEC_OK) break;
} else if (strcmp("max-quantizers", option_name) == 0) {
res = parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value,
si->svc_params.max_quantizers,
NULL);
if (res != VPX_CODEC_OK) break;
} else if (strcmp("min-quantizers", option_name) == 0) {
res = parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value,
si->svc_params.min_quantizers,
NULL);
if (res != VPX_CODEC_OK) break;
} else if (strcmp("auto-alt-refs", option_name) == 0) {
res = parse_layer_options_from_string(svc_ctx, AUTO_ALT_REF, option_value,
si->enable_auto_alt_ref, NULL);
if (res != VPX_CODEC_OK) break;
} else if (strcmp("bitrates", option_name) == 0) {
res = parse_layer_options_from_string(svc_ctx, BITRATE, option_value,
si->bitrates, NULL);
if (res != VPX_CODEC_OK) break;
} else if (strcmp("multi-frame-contexts", option_name) == 0) {
si->use_multiple_frame_contexts = atoi(option_value);
} else {
svc_log(svc_ctx, SVC_LOG_ERROR, "invalid option: %s\n", option_name);
res = VPX_CODEC_INVALID_PARAM;
break;
}
option_name = strtok_r(NULL, "=", &input_ptr);
}
free(input_string);
for (i = 0; i < svc_ctx->spatial_layers; ++i) {
if (si->svc_params.max_quantizers[i] > MAX_QUANTIZER ||
si->svc_params.max_quantizers[i] < 0 ||
si->svc_params.min_quantizers[i] > si->svc_params.max_quantizers[i] ||
si->svc_params.min_quantizers[i] < 0)
res = VPX_CODEC_INVALID_PARAM;
}
if (si->use_multiple_frame_contexts &&
(svc_ctx->spatial_layers > 3 ||
svc_ctx->spatial_layers * svc_ctx->temporal_layers > 4))
res = VPX_CODEC_INVALID_PARAM;
for (i = 0; i < svc_ctx->spatial_layers; ++i)
alt_ref_enabled += si->enable_auto_alt_ref[i];
if (alt_ref_enabled > REF_FRAMES - svc_ctx->spatial_layers) {
svc_log(svc_ctx, SVC_LOG_ERROR,
"svc: auto alt ref: Maxinum %d(REF_FRAMES - layers) layers could"
"enabled auto alt reference frame, but % layers are enabled\n",
REF_FRAMES - svc_ctx->spatial_layers, alt_ref_enabled);
res = VPX_CODEC_INVALID_PARAM;
}
return res;
}
vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options) {
SvcInternal *const si = get_svc_internal(svc_ctx);
if (svc_ctx == NULL || options == NULL || si == NULL) {
return VPX_CODEC_INVALID_PARAM;
}
strncpy(si->options, options, sizeof(si->options));
si->options[sizeof(si->options) - 1] = '\0';
return VPX_CODEC_OK;
}
void assign_layer_bitrates(const SvcContext *svc_ctx,
vpx_codec_enc_cfg_t *const enc_cfg) {
int i;
const SvcInternal *const si = get_const_svc_internal(svc_ctx);
if (si->bitrates[0] != 0) {
enc_cfg->rc_target_bitrate = 0;
for (i = 0; i < svc_ctx->spatial_layers; ++i) {
enc_cfg->ss_target_bitrate[i] = (unsigned int)si->bitrates[i];
enc_cfg->rc_target_bitrate += si->bitrates[i];
}
} else {
float total = 0;
float alloc_ratio[VPX_SS_MAX_LAYERS] = {0};
for (i = 0; i < svc_ctx->spatial_layers; ++i) {
if (si->svc_params.scaling_factor_den[i] > 0) {
alloc_ratio[i] = (float)(si->svc_params.scaling_factor_num[i] * 1.0 /
si->svc_params.scaling_factor_den[i]);
alloc_ratio[i] *= alloc_ratio[i];
total += alloc_ratio[i];
}
}
for (i = 0; i < VPX_SS_MAX_LAYERS; ++i) {
if (total > 0) {
enc_cfg->ss_target_bitrate[i] = (unsigned int)
(enc_cfg->rc_target_bitrate * alloc_ratio[i] / total);
}
}
}
}
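When no explicit per-layer bitrates are given, the allocation above weights each spatial layer by the square of its scale factor. A worked example with hypothetical numbers (scale factors 4/16, 8/16, 16/16 and a 1000 kbps total):

#include <stdio.h>

int main(void) {
  const int num[3] = {4, 8, 16}, den[3] = {16, 16, 16};
  const unsigned int total_kbps = 1000;
  float ratio[3], total = 0.0f;
  int i;
  for (i = 0; i < 3; ++i) {
    ratio[i] = (float)num[i] / den[i];
    ratio[i] *= ratio[i];  /* weight ~ (spatial scale)^2 */
    total += ratio[i];
  }
  for (i = 0; i < 3; ++i)  /* prints roughly 47, 190 and 761 kbps */
    printf("layer %d: %u kbps\n", i,
           (unsigned int)(total_kbps * ratio[i] / total));
  return 0;
}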
vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
vpx_codec_iface_t *iface,
vpx_codec_enc_cfg_t *enc_cfg) {
vpx_codec_err_t res;
int i;
SvcInternal *const si = get_svc_internal(svc_ctx);
if (svc_ctx == NULL || codec_ctx == NULL || iface == NULL ||
enc_cfg == NULL) {
return VPX_CODEC_INVALID_PARAM;
}
if (si == NULL) return VPX_CODEC_MEM_ERROR;
si->codec_ctx = codec_ctx;
si->width = enc_cfg->g_w;
si->height = enc_cfg->g_h;
if (enc_cfg->kf_max_dist < 2) {
svc_log(svc_ctx, SVC_LOG_ERROR, "key frame distance too small: %d\n",
enc_cfg->kf_max_dist);
return VPX_CODEC_INVALID_PARAM;
}
si->kf_dist = enc_cfg->kf_max_dist;
if (svc_ctx->spatial_layers == 0)
svc_ctx->spatial_layers = VPX_SS_DEFAULT_LAYERS;
if (svc_ctx->spatial_layers < 1 ||
svc_ctx->spatial_layers > VPX_SS_MAX_LAYERS) {
svc_log(svc_ctx, SVC_LOG_ERROR, "spatial layers: invalid value: %d\n",
svc_ctx->spatial_layers);
return VPX_CODEC_INVALID_PARAM;
}
for (i = 0; i < VPX_SS_MAX_LAYERS; ++i) {
si->svc_params.max_quantizers[i] = MAX_QUANTIZER;
si->svc_params.min_quantizers[i] = 0;
si->svc_params.scaling_factor_num[i] = DEFAULT_SCALE_FACTORS_NUM[i];
si->svc_params.scaling_factor_den[i] = DEFAULT_SCALE_FACTORS_DEN[i];
}
// Parse aggregate command line options. Options must start with
// "layers=xx", followed by other options.
res = parse_options(svc_ctx, si->options);
if (res != VPX_CODEC_OK) return res;
if (svc_ctx->spatial_layers < 1)
svc_ctx->spatial_layers = 1;
if (svc_ctx->spatial_layers > VPX_SS_MAX_LAYERS)
svc_ctx->spatial_layers = VPX_SS_MAX_LAYERS;
if (svc_ctx->temporal_layers < 1)
svc_ctx->temporal_layers = 1;
if (svc_ctx->temporal_layers > VPX_TS_MAX_LAYERS)
svc_ctx->temporal_layers = VPX_TS_MAX_LAYERS;
assign_layer_bitrates(svc_ctx, enc_cfg);
#if CONFIG_SPATIAL_SVC
for (i = 0; i < svc_ctx->spatial_layers; ++i)
enc_cfg->ss_enable_auto_alt_ref[i] = si->enable_auto_alt_ref[i];
#endif
if (svc_ctx->temporal_layers > 1) {
int i;
for (i = 0; i < svc_ctx->temporal_layers; ++i) {
enc_cfg->ts_target_bitrate[i] = enc_cfg->rc_target_bitrate /
svc_ctx->temporal_layers;
enc_cfg->ts_rate_decimator[i] = 1 << (svc_ctx->temporal_layers - 1 - i);
}
}
// modify encoder configuration
enc_cfg->ss_number_layers = svc_ctx->spatial_layers;
enc_cfg->ts_number_layers = svc_ctx->temporal_layers;
if (enc_cfg->g_error_resilient == 0 && si->use_multiple_frame_contexts == 0)
enc_cfg->g_error_resilient = 1;
// Initialize codec
res = vpx_codec_enc_init(codec_ctx, iface, enc_cfg, VPX_CODEC_USE_PSNR);
if (res != VPX_CODEC_OK) {
svc_log(svc_ctx, SVC_LOG_ERROR, "svc_enc_init error\n");
return res;
}
vpx_codec_control(codec_ctx, VP9E_SET_SVC, 1);
vpx_codec_control(codec_ctx, VP9E_SET_SVC_PARAMETERS, &si->svc_params);
return VPX_CODEC_OK;
}
/**
* Encode a frame into multiple layers
* Create a superframe containing the individual layers
*/
vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
struct vpx_image *rawimg, vpx_codec_pts_t pts,
int64_t duration, int deadline) {
vpx_codec_err_t res;
vpx_codec_iter_t iter;
const vpx_codec_cx_pkt_t *cx_pkt;
SvcInternal *const si = get_svc_internal(svc_ctx);
if (svc_ctx == NULL || codec_ctx == NULL || si == NULL) {
return VPX_CODEC_INVALID_PARAM;
}
svc_log_reset(svc_ctx);
res = vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0,
deadline);
if (res != VPX_CODEC_OK) {
return res;
}
// save compressed data
iter = NULL;
while ((cx_pkt = vpx_codec_get_cx_data(codec_ctx, &iter))) {
switch (cx_pkt->kind) {
#if CONFIG_SPATIAL_SVC
case VPX_CODEC_SPATIAL_SVC_LAYER_PSNR: {
int i;
for (i = 0; i < svc_ctx->spatial_layers; ++i) {
int j;
svc_log(svc_ctx, SVC_LOG_DEBUG,
"SVC frame: %d, layer: %d, PSNR(Total/Y/U/V): "
"%2.3f %2.3f %2.3f %2.3f \n",
si->psnr_pkt_received, i,
cx_pkt->data.layer_psnr[i].psnr[0],
cx_pkt->data.layer_psnr[i].psnr[1],
cx_pkt->data.layer_psnr[i].psnr[2],
cx_pkt->data.layer_psnr[i].psnr[3]);
svc_log(svc_ctx, SVC_LOG_DEBUG,
"SVC frame: %d, layer: %d, SSE(Total/Y/U/V): "
"%2.3f %2.3f %2.3f %2.3f \n",
si->psnr_pkt_received, i,
cx_pkt->data.layer_psnr[i].sse[0],
cx_pkt->data.layer_psnr[i].sse[1],
cx_pkt->data.layer_psnr[i].sse[2],
cx_pkt->data.layer_psnr[i].sse[3]);
for (j = 0; j < COMPONENTS; ++j) {
si->psnr_sum[i][j] +=
cx_pkt->data.layer_psnr[i].psnr[j];
si->sse_sum[i][j] += cx_pkt->data.layer_psnr[i].sse[j];
}
}
++si->psnr_pkt_received;
break;
}
case VPX_CODEC_SPATIAL_SVC_LAYER_SIZES: {
int i;
for (i = 0; i < svc_ctx->spatial_layers; ++i)
si->bytes_sum[i] += cx_pkt->data.layer_sizes[i];
break;
}
#endif
default: {
break;
}
}
}
return VPX_CODEC_OK;
}
const char *vpx_svc_get_message(const SvcContext *svc_ctx) {
const SvcInternal *const si = get_const_svc_internal(svc_ctx);
if (svc_ctx == NULL || si == NULL) return NULL;
return si->message_buffer;
}
static double calc_psnr(double d) {
if (d == 0) return 100;
return -10.0 * log(d) / log(10.0);
}
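// With d = SSE / (width * height * 255^2 * frames), this is the standard
// PSNR = 10 * log10(255^2 / MSE); 100 dB is used as a cap when the error is 0.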
// dump accumulated statistics and reset accumulated values
const char *vpx_svc_dump_statistics(SvcContext *svc_ctx) {
int number_of_frames;
int i, j;
uint32_t bytes_total = 0;
double scale[COMPONENTS];
double psnr[COMPONENTS];
double mse[COMPONENTS];
double y_scale;
SvcInternal *const si = get_svc_internal(svc_ctx);
if (svc_ctx == NULL || si == NULL) return NULL;
svc_log_reset(svc_ctx);
number_of_frames = si->psnr_pkt_received;
if (number_of_frames <= 0) return vpx_svc_get_message(svc_ctx);
svc_log(svc_ctx, SVC_LOG_INFO, "\n");
for (i = 0; i < svc_ctx->spatial_layers; ++i) {
svc_log(svc_ctx, SVC_LOG_INFO,
"Layer %d Average PSNR=[%2.3f, %2.3f, %2.3f, %2.3f], Bytes=[%u]\n",
i, (double)si->psnr_sum[i][0] / number_of_frames,
(double)si->psnr_sum[i][1] / number_of_frames,
(double)si->psnr_sum[i][2] / number_of_frames,
(double)si->psnr_sum[i][3] / number_of_frames, si->bytes_sum[i]);
// the following psnr calculation is deduced from ffmpeg.c#print_report
y_scale = si->width * si->height * 255.0 * 255.0 * number_of_frames;
scale[1] = y_scale;
scale[2] = scale[3] = y_scale / 4; // U or V
scale[0] = y_scale * 1.5; // total
for (j = 0; j < COMPONENTS; j++) {
psnr[j] = calc_psnr(si->sse_sum[i][j] / scale[j]);
mse[j] = si->sse_sum[i][j] * 255.0 * 255.0 / scale[j];
}
svc_log(svc_ctx, SVC_LOG_INFO,
"Layer %d Overall PSNR=[%2.3f, %2.3f, %2.3f, %2.3f]\n", i, psnr[0],
psnr[1], psnr[2], psnr[3]);
svc_log(svc_ctx, SVC_LOG_INFO,
"Layer %d Overall MSE=[%2.3f, %2.3f, %2.3f, %2.3f]\n", i, mse[0],
mse[1], mse[2], mse[3]);
bytes_total += si->bytes_sum[i];
// clear sums for next time
si->bytes_sum[i] = 0;
for (j = 0; j < COMPONENTS; ++j) {
si->psnr_sum[i][j] = 0;
si->sse_sum[i][j] = 0;
}
}
// only display statistics once
si->psnr_pkt_received = 0;
svc_log(svc_ctx, SVC_LOG_INFO, "Total Bytes=[%u]\n", bytes_total);
return vpx_svc_get_message(svc_ctx);
}
void vpx_svc_release(SvcContext *svc_ctx) {
SvcInternal *si;
if (svc_ctx == NULL) return;
// do not use get_svc_internal as it will unnecessarily allocate an
// SvcInternal if it was not already allocated
si = (SvcInternal *)svc_ctx->internal;
if (si != NULL) {
free(si);
svc_ctx->internal = NULL;
}
}
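For reference, the removed helpers above compose into a small encode loop. The
following is a minimal sketch, not taken from the tree: the two-layer setup,
the option values, and the caller-supplied frame source are assumptions, and
error handling is omitted.

#include <stdio.h>
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"

static void encode_two_spatial_layers(vpx_image_t *raw) {
  SvcContext svc = {0};
  vpx_codec_ctx_t codec;
  vpx_codec_enc_cfg_t cfg;
  vpx_codec_pts_t pts = 0;
  int frame;

  svc.log_level = SVC_LOG_INFO;

  vpx_codec_enc_config_default(vpx_codec_vp9_cx(), &cfg, 0);
  cfg.g_w = 640;
  cfg.g_h = 360;
  cfg.g_timebase.num = 1;
  cfg.g_timebase.den = 30;
  cfg.rc_target_bitrate = 800;  // kbit/s, split across the spatial layers
  cfg.kf_max_dist = 100;        // must be >= 2, see vpx_svc_init()

  // Option names follow parse_options() above; values are illustrative.
  vpx_svc_set_options(&svc,
                      "layers=2 scale-factors=1/2,1/1 "
                      "max-quantizers=52,52 min-quantizers=0,0");
  vpx_svc_init(&svc, &codec, vpx_codec_vp9_cx(), &cfg);

  for (frame = 0; frame < 100; ++frame) {
    // A real caller would refill 'raw' with the next source frame here.
    vpx_svc_encode(&svc, &codec, raw, pts, 1, VPX_DL_GOOD_QUALITY);
    ++pts;
  }
  vpx_svc_encode(&svc, &codec, NULL, pts, 0, VPX_DL_GOOD_QUALITY);  // flush

  printf("%s", vpx_svc_dump_statistics(&svc));
  vpx_codec_destroy(&codec);
  vpx_svc_release(&svc);
}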


@ -1,86 +0,0 @@
/*
* Copyright (c) 2013 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/**
* SvcContext - input parameters and state to encode a multi-layered
* spatial SVC frame
*/
#ifndef VPX_SVC_CONTEXT_H_
#define VPX_SVC_CONTEXT_H_
#include "./vp8cx.h"
#include "./vpx_encoder.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum SVC_LOG_LEVEL {
SVC_LOG_ERROR,
SVC_LOG_INFO,
SVC_LOG_DEBUG
} SVC_LOG_LEVEL;
typedef struct {
// public interface to svc_command options
int spatial_layers; // number of spatial layers
int temporal_layers; // number of temporal layers
SVC_LOG_LEVEL log_level; // amount of information to display
int log_print; // when set, print log messages with printf() instead of
               // returning them via vpx_svc_get_message()
// private storage for vpx_svc_encode
void *internal;
} SvcContext;
/**
* Set SVC options
* options are supplied as a single string separated by spaces
* Format: encoding-mode=<i|ip|alt-ip|gf>
* layers=<layer_count>
* scaling-factors=<n1>/<d1>,<n2>/<d2>,...
* quantizers=<q1>,<q2>,...
*/
vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options);
/**
* initialize SVC encoding
*/
vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
vpx_codec_iface_t *iface,
vpx_codec_enc_cfg_t *cfg);
/**
* encode a frame of video with multiple layers
*/
vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
struct vpx_image *rawimg, vpx_codec_pts_t pts,
int64_t duration, int deadline);
/**
* finished with svc encoding, release allocated resources
*/
void vpx_svc_release(SvcContext *svc_ctx);
/**
* dump accumulated statistics and reset accumulated values
*/
const char *vpx_svc_dump_statistics(SvcContext *svc_ctx);
/**
* get status message from previous encode
*/
const char *vpx_svc_get_message(const SvcContext *svc_ctx);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VPX_SVC_CONTEXT_H_


@ -207,17 +207,7 @@ enum vp8e_enc_control_id {
* 0: off, 1: OnYOnly
*/
VP9E_SET_NOISE_SENSITIVITY,
VP9E_SET_SVC,
VP9E_SET_SVC_PARAMETERS,
/*!\brief control function to set the svc spatial and temporal layer ids.
* \note Valid ranges: 0..#vpx_codec_enc_cfg::ss_number_layers for spatial
* layer and 0..#vpx_codec_enc_cfg::ts_number_layers for
* temporal layer.
*/
VP9E_SET_SVC_LAYER_ID,
VP9E_SET_TUNE_CONTENT,
VP9E_GET_SVC_LAYER_ID,
VP9E_REGISTER_CX_CALLBACK,
/*!\brief control function to set color space info.
@ -320,18 +310,6 @@ typedef enum {
VP8_TUNE_SSIM
} vp8e_tuning;
/*!\brief vp9 svc layer parameters
*
* This defines the spatial and temporal layer id numbers for svc encoding.
* This is used with the #VP9E_SET_SVC_LAYER_ID control to set the spatial and
* temporal layer id for the current frame.
*
*/
typedef struct vpx_svc_layer_id {
int spatial_layer_id; /**< Spatial layer id number. */
int temporal_layer_id; /**< Temporal layer id number. */
} vpx_svc_layer_id_t;
/*!\brief VP8 encoder control function parameter type
*
* Defines the data types that VP8E control functions take. Note that
@ -351,10 +329,6 @@ VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *)
VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *)
VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *)
VPX_CTRL_USE_TYPE(VP9E_SET_SVC, int)
VPX_CTRL_USE_TYPE(VP9E_SET_SVC_PARAMETERS, void *)
VPX_CTRL_USE_TYPE(VP9E_SET_SVC_LAYER_ID, vpx_svc_layer_id_t *)
VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int)
VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOALTREF, unsigned int)
VPX_CTRL_USE_TYPE(VP8E_SET_NOISE_SENSITIVITY, unsigned int)
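Before this cleanup, callers selected the target layer per frame through the
control and struct removed above. A minimal sketch, assuming an encoder
context already initialized with SVC enabled; the helper name and the layer
indices are illustrative.

#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"

// Pick the spatial/temporal layer ids that the next encoded frame belongs to.
static void select_svc_layer(vpx_codec_ctx_t *codec, int spatial, int temporal) {
  vpx_svc_layer_id_t layer_id;
  layer_id.spatial_layer_id = spatial;    // 0 .. ss_number_layers - 1
  layer_id.temporal_layer_id = temporal;  // 0 .. ts_number_layers - 1
  vpx_codec_control(codec, VP9E_SET_SVC_LAYER_ID, &layer_id);
}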


@ -15,10 +15,6 @@ API_SRCS-$(CONFIG_VP8_ENCODER) += vp8.h
API_SRCS-$(CONFIG_VP8_ENCODER) += vp8cx.h
API_DOC_SRCS-$(CONFIG_VP8_ENCODER) += vp8.h
API_DOC_SRCS-$(CONFIG_VP8_ENCODER) += vp8cx.h
ifeq ($(CONFIG_VP9_ENCODER),yes)
API_SRCS-$(CONFIG_SPATIAL_SVC) += src/svc_encodeframe.c
API_SRCS-$(CONFIG_SPATIAL_SVC) += svc_context.h
endif
API_SRCS-$(CONFIG_VP8_DECODER) += vp8.h
API_SRCS-$(CONFIG_VP8_DECODER) += vp8dx.h