Merge "Revert "Merge branch 'frame-parallel' to enable frame parallel decode in master branch.""
Commit: 3e466cea98
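The diff below removes the frame-parallel decode path: the shared decoder buffer pool, the frame-worker progress tracking, and the tests that exercised them. For context, a minimal sketch (not part of this change; the helper name is hypothetical) of how a caller requests frame-parallel decoding through the public API, using the same VPX_CODEC_USE_FRAME_THREADING flag that the deleted tests set:

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    /* Sketch: initialize a VP9 decoder that may pipeline whole frames across
     * worker threads; cfg.threads picks how many threads the decoder may use. */
    static int init_frame_parallel_decoder(vpx_codec_ctx_t *codec, int threads) {
      vpx_codec_dec_cfg_t cfg = {0};
      cfg.threads = threads;
      return vpx_codec_dec_init(codec, vpx_codec_vp9_dx(), &cfg,
                                VPX_CODEC_USE_FRAME_THREADING) == VPX_CODEC_OK
                 ? 0 : -1;
    }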
@ -554,8 +554,6 @@ LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-06-bilinear.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-06-bilinear.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel-1.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel-1.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x1.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x1.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x4.webm
@ -662,10 +660,6 @@ LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv440.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv440.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv444.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv444.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-01.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-01.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-02.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-02.webm.md5
ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-10bit-yuv420.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-10bit-yuv420.webm.md5
@ -718,9 +712,6 @@ LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-12-droppable_1.ivf.s738
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp91-2-mixedrefcsp-444to420.ivf
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp91-2-mixedrefcsp-444to420.ivf.res
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-1.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-2.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-3.webm

ifeq ($(CONFIG_DECODE_PERF_TESTS),yes)
# NewEncode Test

@ -728,12 +728,3 @@ b03c408cf23158638da18dbc3323b99a1635c68a invalid-vp90-2-12-droppable_1.ivf.s367
a61774cf03fc584bd9f0904fc145253bb8ea6c4c invalid-vp91-2-mixedrefcsp-444to420.ivf.res
812d05a64a0d83c1b504d0519927ddc5a2cdb273 invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf
1e472baaf5f6113459f0399a38a5a5e68d17799d invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
f97088c7359fc8d3d5aa5eafe57bc7308b3ee124 vp90-2-20-big_superframe-01.webm
47d7d409785afa33b123376de0c907336e6c7bd7 vp90-2-20-big_superframe-01.webm.md5
65ade6d2786209582c50d34cfe22b3cdb033abaf vp90-2-20-big_superframe-02.webm
7c0ed8d04c4d06c5411dd2e5de2411d37f092db5 vp90-2-20-big_superframe-02.webm.md5
667ec8718c982aef6be07eb94f083c2efb9d2d16 vp90-2-07-frame_parallel-1.webm
bfc82bf848e9c05020d61e3ffc1e62f25df81d19 vp90-2-07-frame_parallel-1.webm.md5
efd5a51d175cfdacd169ed23477729dc558030dc invalid-vp90-2-07-frame_parallel-1.webm
9f912712ec418be69adb910e2ca886a63c4cec08 invalid-vp90-2-07-frame_parallel-2.webm
445f5a53ca9555341852997ccdd480a51540bd14 invalid-vp90-2-07-frame_parallel-3.webm
@ -35,7 +35,6 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += byte_alignment_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += external_frame_buffer_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += invalid_file_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += user_priv_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += vp9_frame_parallel_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += active_map_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += borders_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += cpu_speed_test.cc
@ -12,7 +12,6 @@
#include <cstdlib>
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "../tools_common.h"
#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
@ -27,24 +26,10 @@

namespace {

enum DecodeMode {
kSerialMode,
kFrameParallMode
};

const int kDecodeMode = 0;
const int kThreads = 1;
const int kFileName = 2;

typedef std::tr1::tuple<int, int, const char*> DecodeParam;

class TestVectorTest : public ::libvpx_test::DecoderTest,
public ::libvpx_test::CodecTestWithParam<DecodeParam> {
public ::libvpx_test::CodecTestWithParam<const char*> {
protected:
TestVectorTest()
: DecoderTest(GET_PARAM(0)),
md5_file_(NULL) {
}
TestVectorTest() : DecoderTest(GET_PARAM(0)), md5_file_(NULL) {}

virtual ~TestVectorTest() {
if (md5_file_)
@ -86,25 +71,8 @@ class TestVectorTest : public ::libvpx_test::DecoderTest,
// checksums match the correct md5 data, then the test is passed. Otherwise,
// the test failed.
TEST_P(TestVectorTest, MD5Match) {
const DecodeParam input = GET_PARAM(1);
const std::string filename = std::tr1::get<kFileName>(input);
const int threads = std::tr1::get<kThreads>(input);
const int mode = std::tr1::get<kDecodeMode>(input);
const std::string filename = GET_PARAM(1);
libvpx_test::CompressedVideoSource *video = NULL;
vpx_codec_flags_t flags = 0;
vpx_codec_dec_cfg_t cfg = {0};
char str[256];

if (mode == kFrameParallMode) {
flags |= VPX_CODEC_USE_FRAME_THREADING;
}

cfg.threads = threads;

snprintf(str, sizeof(str) / sizeof(str[0]) - 1,
"file: %s mode: %s threads: %d",
filename.c_str(), mode == 0 ? "Serial" : "Parallel", threads);
SCOPED_TRACE(str);

// Open compressed video file.
if (filename.substr(filename.length() - 3, 3) == "ivf") {
@ -124,53 +92,18 @@ TEST_P(TestVectorTest, MD5Match) {
const std::string md5_filename = filename + ".md5";
OpenMD5File(md5_filename);

// Set decode config and flags.
set_cfg(cfg);
set_flags(flags);

// Decode frame, and check the md5 matching.
ASSERT_NO_FATAL_FAILURE(RunLoop(video));
delete video;
}

// Test VP8 decode in serial mode with single thread.
// NOTE: VP8 only support serial mode.
INSTANTIATE_TEST_CASE_P(
VP8, TestVectorTest,
::testing::Combine(
::testing::Values(
static_cast<const libvpx_test::CodecFactory *>(&libvpx_test::kVP8)),
::testing::Combine(
::testing::Values(0), // Serial Mode.
::testing::Values(1), // Single thread.
::testing::ValuesIn(libvpx_test::kVP8TestVectors,
libvpx_test::kVP8TestVectors +
libvpx_test::kNumVP8TestVectors))));
VP8_INSTANTIATE_TEST_CASE(TestVectorTest,
::testing::ValuesIn(libvpx_test::kVP8TestVectors,
libvpx_test::kVP8TestVectors +
libvpx_test::kNumVP8TestVectors));
VP9_INSTANTIATE_TEST_CASE(TestVectorTest,
::testing::ValuesIn(libvpx_test::kVP9TestVectors,
libvpx_test::kVP9TestVectors +
libvpx_test::kNumVP9TestVectors));

// Test VP9 decode in serial mode with single thread.
INSTANTIATE_TEST_CASE_P(
VP9, TestVectorTest,
::testing::Combine(
::testing::Values(
static_cast<const libvpx_test::CodecFactory *>(&libvpx_test::kVP9)),
::testing::Combine(
::testing::Values(0), // Serial Mode.
::testing::Values(1), // Single thread.
::testing::ValuesIn(libvpx_test::kVP9TestVectors,
libvpx_test::kVP9TestVectors +
libvpx_test::kNumVP9TestVectors))));


// Test VP9 decode in frame parallel mode with different number of threads.
INSTANTIATE_TEST_CASE_P(
VP9MultiThreadedFrameParallel, TestVectorTest,
::testing::Combine(
::testing::Values(
static_cast<const libvpx_test::CodecFactory *>(&libvpx_test::kVP9)),
::testing::Combine(
::testing::Values(1), // Frame Parallel mode.
::testing::Range(2, 9), // With 2 ~ 8 threads.
::testing::ValuesIn(libvpx_test::kVP9TestVectors,
libvpx_test::kVP9TestVectors +
libvpx_test::kNumVP9TestVectors))));
} // namespace
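Note: with the revert applied, TestVectorTest is again parameterized by file name alone. The deleted VP9MultiThreadedFrameParallel instantiation above had expanded every VP9 test vector with ::testing::Range(2, 9), i.e. thread counts 2 through 8, seven extra frame-parallel decode runs per vector.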
@ -191,7 +191,6 @@ const char *const kVP9TestVectors[] = {
"vp93-2-20-10bit-yuv440.webm", "vp93-2-20-12bit-yuv440.webm",
"vp93-2-20-10bit-yuv444.webm", "vp93-2-20-12bit-yuv444.webm",
#endif // CONFIG_VP9_HIGHBITDEPTH
"vp90-2-20-big_superframe-01.webm", "vp90-2-20-big_superframe-02.webm",
};
const int kNumVP9TestVectors = NELEMENTS(kVP9TestVectors);
#endif // CONFIG_VP9_DECODER
@ -163,9 +163,7 @@ class Vp9EncoderParmsGetToDecoder
EncodeParameters encode_parms;
};

// TODO(hkuang): This test conflicts with frame parallel decode. So disable it
// for now until fix.
TEST_P(Vp9EncoderParmsGetToDecoder, DISABLED_BitstreamParms) {
TEST_P(Vp9EncoderParmsGetToDecoder, BitstreamParms) {
init_flags_ = VPX_CODEC_USE_PSNR;

libvpx_test::VideoSource *video;
@ -1,209 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <string>
|
||||
#include "third_party/googletest/src/include/gtest/gtest.h"
|
||||
#include "./vpx_config.h"
|
||||
#include "test/codec_factory.h"
|
||||
#include "test/decode_test_driver.h"
|
||||
#include "test/ivf_video_source.h"
|
||||
#include "test/md5_helper.h"
|
||||
#include "test/util.h"
|
||||
#if CONFIG_WEBM_IO
|
||||
#include "test/webm_video_source.h"
|
||||
#endif
|
||||
#include "vpx_mem/vpx_mem.h"
|
||||
|
||||
namespace {
|
||||
|
||||
using std::string;
|
||||
|
||||
#if CONFIG_WEBM_IO
|
||||
|
||||
struct FileList {
|
||||
const char *name;
|
||||
// md5 sum for decoded frames which does not include skipped frames.
|
||||
const char *expected_md5;
|
||||
const int pause_frame_num;
|
||||
};
|
||||
|
||||
// Decodes |filename| with |num_threads|. Pause at the specified frame_num,
|
||||
// seek to next key frame and then continue decoding until the end. Return
|
||||
// the md5 of the decoded frames which does not include skipped frames.
|
||||
string DecodeFile(const string &filename, int num_threads, int pause_num) {
|
||||
libvpx_test::WebMVideoSource video(filename);
|
||||
video.Init();
|
||||
int in_frames = 0;
|
||||
int out_frames = 0;
|
||||
|
||||
vpx_codec_dec_cfg_t cfg = {0};
|
||||
cfg.threads = num_threads;
|
||||
vpx_codec_flags_t flags = 0;
|
||||
flags |= VPX_CODEC_USE_FRAME_THREADING;
|
||||
libvpx_test::VP9Decoder decoder(cfg, flags, 0);
|
||||
|
||||
libvpx_test::MD5 md5;
|
||||
video.Begin();
|
||||
|
||||
do {
|
||||
++in_frames;
|
||||
const vpx_codec_err_t res =
|
||||
decoder.DecodeFrame(video.cxdata(), video.frame_size());
|
||||
if (res != VPX_CODEC_OK) {
|
||||
EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
|
||||
break;
|
||||
}
|
||||
|
||||
// Pause at specified frame number.
|
||||
if (in_frames == pause_num) {
|
||||
// Flush the decoder and then seek to next key frame.
|
||||
decoder.DecodeFrame(NULL, 0);
|
||||
video.SeekToNextKeyFrame();
|
||||
} else {
|
||||
video.Next();
|
||||
}
|
||||
|
||||
// Flush the decoder at the end of the video.
|
||||
if (!video.cxdata())
|
||||
decoder.DecodeFrame(NULL, 0);
|
||||
|
||||
libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
|
||||
const vpx_image_t *img;
|
||||
|
||||
// Get decompressed data
|
||||
while ((img = dec_iter.Next())) {
|
||||
++out_frames;
|
||||
md5.Add(img);
|
||||
}
|
||||
} while (video.cxdata() != NULL);
|
||||
|
||||
EXPECT_EQ(in_frames, out_frames) <<
|
||||
"Input frame count does not match output frame count";
|
||||
|
||||
return string(md5.Get());
|
||||
}
|
||||
|
||||
void DecodeFiles(const FileList files[]) {
|
||||
for (const FileList *iter = files; iter->name != NULL; ++iter) {
|
||||
SCOPED_TRACE(iter->name);
|
||||
for (int t = 2; t <= 8; ++t) {
|
||||
EXPECT_EQ(iter->expected_md5,
|
||||
DecodeFile(iter->name, t, iter->pause_frame_num))
|
||||
<< "threads = " << t;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST(VP9MultiThreadedFrameParallel, PauseSeekResume) {
|
||||
// vp90-2-07-frame_parallel-1.webm is a 40 frame video file with
|
||||
// one key frame for every ten frames.
|
||||
static const FileList files[] = {
|
||||
{ "vp90-2-07-frame_parallel-1.webm",
|
||||
"6ea7c3875d67252e7caf2bc6e75b36b1", 6},
|
||||
{ "vp90-2-07-frame_parallel-1.webm",
|
||||
"4bb634160c7356a8d7d4299b6dc83a45", 12},
|
||||
{ "vp90-2-07-frame_parallel-1.webm",
|
||||
"89772591e6ef461f9fa754f916c78ed8", 26},
|
||||
{ NULL, NULL, 0},
|
||||
};
|
||||
DecodeFiles(files);
|
||||
}
|
||||
|
||||
struct InvalidFileList {
|
||||
const char *name;
|
||||
// md5 sum for decoded frames which does not include corrupted frames.
|
||||
const char *expected_md5;
|
||||
// Expected number of decoded frames which does not include corrupted frames.
|
||||
const int expected_frame_count;
|
||||
};
|
||||
|
||||
// Decodes |filename| with |num_threads|. Return the md5 of the decoded
|
||||
// frames which does not include corrupted frames.
|
||||
string DecodeInvalidFile(const string &filename, int num_threads,
|
||||
int expected_frame_count) {
|
||||
libvpx_test::WebMVideoSource video(filename);
|
||||
video.Init();
|
||||
|
||||
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
|
||||
cfg.threads = num_threads;
|
||||
const vpx_codec_flags_t flags = VPX_CODEC_USE_FRAME_THREADING;
|
||||
libvpx_test::VP9Decoder decoder(cfg, flags, 0);
|
||||
|
||||
libvpx_test::MD5 md5;
|
||||
video.Begin();
|
||||
|
||||
int out_frames = 0;
|
||||
do {
|
||||
const vpx_codec_err_t res =
|
||||
decoder.DecodeFrame(video.cxdata(), video.frame_size());
|
||||
// TODO(hkuang): frame parallel mode should return an error on corruption.
|
||||
if (res != VPX_CODEC_OK) {
|
||||
EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
|
||||
break;
|
||||
}
|
||||
|
||||
video.Next();
|
||||
|
||||
// Flush the decoder at the end of the video.
|
||||
if (!video.cxdata())
|
||||
decoder.DecodeFrame(NULL, 0);
|
||||
|
||||
libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
|
||||
const vpx_image_t *img;
|
||||
|
||||
// Get decompressed data
|
||||
while ((img = dec_iter.Next())) {
|
||||
++out_frames;
|
||||
md5.Add(img);
|
||||
}
|
||||
} while (video.cxdata() != NULL);
|
||||
|
||||
EXPECT_EQ(expected_frame_count, out_frames) <<
|
||||
"Input frame count does not match expected output frame count";
|
||||
|
||||
return string(md5.Get());
|
||||
}
|
||||
|
||||
void DecodeInvalidFiles(const InvalidFileList files[]) {
|
||||
for (const InvalidFileList *iter = files; iter->name != NULL; ++iter) {
|
||||
SCOPED_TRACE(iter->name);
|
||||
for (int t = 2; t <= 8; ++t) {
|
||||
EXPECT_EQ(iter->expected_md5,
|
||||
DecodeInvalidFile(iter->name, t, iter->expected_frame_count))
|
||||
<< "threads = " << t;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST(VP9MultiThreadedFrameParallel, InvalidFileTest) {
|
||||
static const InvalidFileList files[] = {
|
||||
// invalid-vp90-2-07-frame_parallel-1.webm is a 40 frame video file with
|
||||
// one key frame for every ten frames. The 11th frame has corrupted data.
|
||||
{ "invalid-vp90-2-07-frame_parallel-1.webm",
|
||||
"0549d0f45f60deaef8eb708e6c0eb6cb", 30},
|
||||
// invalid-vp90-2-07-frame_parallel-2.webm is a 40 frame video file with
|
||||
// one key frame for every ten frames. The 1st and 31st frames have
|
||||
// corrupted data.
|
||||
{ "invalid-vp90-2-07-frame_parallel-2.webm",
|
||||
"6a1f3cf6f9e7a364212fadb9580d525e", 20},
|
||||
// invalid-vp90-2-07-frame_parallel-3.webm is a 40 frame video file with
|
||||
// one key frame for every ten frames. The 5th and 13th frames have
|
||||
// corrupted data.
|
||||
{ "invalid-vp90-2-07-frame_parallel-3.webm",
|
||||
"8256544308de926b0681e04685b98677", 27},
|
||||
{ NULL, NULL, 0},
|
||||
};
|
||||
DecodeInvalidFiles(files);
|
||||
}
|
||||
|
||||
#endif // CONFIG_WEBM_IO
|
||||
} // namespace
|
@ -69,18 +69,6 @@ class WebMVideoSource : public CompressedVideoSource {
|
||||
}
|
||||
}
|
||||
|
||||
void SeekToNextKeyFrame() {
|
||||
ASSERT_TRUE(vpx_ctx_->file != NULL);
|
||||
do {
|
||||
const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_, &buf_sz_);
|
||||
ASSERT_GE(status, 0) << "webm_read_frame failed";
|
||||
++frame_;
|
||||
if (status == 1) {
|
||||
end_of_file_ = true;
|
||||
}
|
||||
} while (!webm_ctx_->is_key_frame && !end_of_file_);
|
||||
}
|
||||
|
||||
virtual const uint8_t *cxdata() const {
|
||||
return end_of_file_ ? NULL : buf_;
|
||||
}
|
||||
|
@ -17,24 +17,6 @@
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_systemdependent.h"

// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
void lock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
pthread_mutex_lock(&pool->pool_mutex);
#else
(void)pool;
#endif
}

void unlock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
pthread_mutex_unlock(&pool->pool_mutex);
#else
(void)pool;
#endif
}
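The two pool-locking helpers deleted above guarded ref-count updates on the shared frame-buffer pool. A minimal usage sketch (the function below is hypothetical, mirroring the get_free_fb() pattern shown later in this diff; BufferPool and the helpers come from vp9_onyxc_int.h as changed in this commit):

    /* Sketch: any change to a slot's ref_count happens with the pool mutex
     * held, so workers decoding other frames do not race on the same slot. */
    static void release_frame_ref(BufferPool *const pool, int idx) {
      lock_buffer_pool(pool);
      if (pool->frame_bufs[idx].ref_count > 0)
        --pool->frame_bufs[idx].ref_count;
      unlock_buffer_pool(pool);
    }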
void vp9_set_mb_mi(VP9_COMMON *cm, int width, int height) {
|
||||
const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
|
||||
const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
|
||||
@ -48,56 +30,18 @@ void vp9_set_mb_mi(VP9_COMMON *cm, int width, int height) {
|
||||
cm->MBs = cm->mb_rows * cm->mb_cols;
|
||||
}
|
||||
|
||||
static int alloc_seg_map(VP9_COMMON *cm, int seg_map_size) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
|
||||
cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
|
||||
if (cm->seg_map_array[i] == NULL)
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Init the index.
|
||||
cm->seg_map_idx = 0;
|
||||
cm->prev_seg_map_idx = 1;
|
||||
|
||||
cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
|
||||
|
||||
if (!cm->frame_parallel_decode) {
|
||||
cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_seg_map(VP9_COMMON *cm) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
|
||||
vpx_free(cm->seg_map_array[i]);
|
||||
cm->seg_map_array[i] = NULL;
|
||||
}
|
||||
|
||||
cm->current_frame_seg_map = NULL;
|
||||
|
||||
if (!cm->frame_parallel_decode) {
|
||||
cm->last_frame_seg_map = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void vp9_free_ref_frame_buffers(VP9_COMMON *cm) {
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < FRAME_BUFFERS; ++i) {
|
||||
if (pool->frame_bufs[i].ref_count > 0 &&
|
||||
pool->frame_bufs[i].raw_frame_buffer.data != NULL) {
|
||||
pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
|
||||
pool->frame_bufs[i].ref_count = 0;
|
||||
if (cm->frame_bufs[i].ref_count > 0 &&
|
||||
cm->frame_bufs[i].raw_frame_buffer.data != NULL) {
|
||||
cm->release_fb_cb(cm->cb_priv, &cm->frame_bufs[i].raw_frame_buffer);
|
||||
cm->frame_bufs[i].ref_count = 0;
|
||||
}
|
||||
vpx_free(pool->frame_bufs[i].mvs);
|
||||
pool->frame_bufs[i].mvs = NULL;
|
||||
vp9_free_frame_buffer(&pool->frame_bufs[i].buf);
|
||||
vpx_free(cm->frame_bufs[i].mvs);
|
||||
cm->frame_bufs[i].mvs = NULL;
|
||||
vp9_free_frame_buffer(&cm->frame_bufs[i].buf);
|
||||
}
|
||||
|
||||
#if CONFIG_VP9_POSTPROC
|
||||
@ -108,7 +52,8 @@ void vp9_free_ref_frame_buffers(VP9_COMMON *cm) {
|
||||
|
||||
void vp9_free_context_buffers(VP9_COMMON *cm) {
|
||||
cm->free_mi(cm);
|
||||
free_seg_map(cm);
|
||||
vpx_free(cm->last_frame_seg_map);
|
||||
cm->last_frame_seg_map = NULL;
|
||||
vpx_free(cm->above_context);
|
||||
cm->above_context = NULL;
|
||||
vpx_free(cm->above_seg_context);
|
||||
@ -122,10 +67,8 @@ int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) {
|
||||
if (cm->alloc_mi(cm, cm->mi_stride * calc_mi_size(cm->mi_rows)))
|
||||
goto fail;
|
||||
|
||||
// Create the segmentation map structure and set to 0.
|
||||
free_seg_map(cm);
|
||||
if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols))
|
||||
goto fail;
|
||||
cm->last_frame_seg_map = (uint8_t *)vpx_calloc(cm->mi_rows * cm->mi_cols, 1);
|
||||
if (!cm->last_frame_seg_map) goto fail;
|
||||
|
||||
cm->above_context = (ENTROPY_CONTEXT *)vpx_calloc(
|
||||
2 * mi_cols_aligned_to_sb(cm->mi_cols) * MAX_MB_PLANE,
|
||||
@ -144,15 +87,14 @@ int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) {
|
||||
}
|
||||
|
||||
static void init_frame_bufs(VP9_COMMON *cm) {
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
int i;
|
||||
|
||||
cm->new_fb_idx = FRAME_BUFFERS - 1;
|
||||
pool->frame_bufs[cm->new_fb_idx].ref_count = 1;
|
||||
cm->frame_bufs[cm->new_fb_idx].ref_count = 1;
|
||||
|
||||
for (i = 0; i < REF_FRAMES; ++i) {
|
||||
cm->ref_frame_map[i] = i;
|
||||
pool->frame_bufs[i].ref_count = 1;
|
||||
cm->frame_bufs[i].ref_count = 1;
|
||||
}
|
||||
}
|
||||
|
||||
@ -164,9 +106,8 @@ int vp9_alloc_ref_frame_buffers(VP9_COMMON *cm, int width, int height) {
|
||||
vp9_free_ref_frame_buffers(cm);
|
||||
|
||||
for (i = 0; i < FRAME_BUFFERS; ++i) {
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
pool->frame_bufs[i].ref_count = 0;
|
||||
if (vp9_alloc_frame_buffer(&pool->frame_bufs[i].buf, width, height,
|
||||
cm->frame_bufs[i].ref_count = 0;
|
||||
if (vp9_alloc_frame_buffer(&cm->frame_bufs[i].buf, width, height,
|
||||
ss_x, ss_y,
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
cm->use_highbitdepth,
|
||||
@ -174,15 +115,15 @@ int vp9_alloc_ref_frame_buffers(VP9_COMMON *cm, int width, int height) {
|
||||
VP9_ENC_BORDER_IN_PIXELS,
|
||||
cm->byte_alignment) < 0)
|
||||
goto fail;
|
||||
if (pool->frame_bufs[i].mvs == NULL) {
|
||||
pool->frame_bufs[i].mvs =
|
||||
if (cm->frame_bufs[i].mvs == NULL) {
|
||||
cm->frame_bufs[i].mvs =
|
||||
(MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
|
||||
sizeof(*pool->frame_bufs[i].mvs));
|
||||
if (pool->frame_bufs[i].mvs == NULL)
|
||||
sizeof(*cm->frame_bufs[i].mvs));
|
||||
if (cm->frame_bufs[i].mvs == NULL)
|
||||
goto fail;
|
||||
|
||||
pool->frame_bufs[i].mi_rows = cm->mi_rows;
|
||||
pool->frame_bufs[i].mi_cols = cm->mi_cols;
|
||||
cm->frame_bufs[i].mi_rows = cm->mi_rows;
|
||||
cm->frame_bufs[i].mi_cols = cm->mi_cols;
|
||||
}
|
||||
}
|
||||
|
||||
@ -208,6 +149,7 @@ int vp9_alloc_ref_frame_buffers(VP9_COMMON *cm, int width, int height) {
|
||||
void vp9_remove_common(VP9_COMMON *cm) {
|
||||
vp9_free_ref_frame_buffers(cm);
|
||||
vp9_free_context_buffers(cm);
|
||||
vp9_free_internal_frame_buffers(&cm->int_frame_buffers);
|
||||
|
||||
vpx_free(cm->fc);
|
||||
cm->fc = NULL;
|
||||
@ -220,13 +162,3 @@ void vp9_init_context_buffers(VP9_COMMON *cm) {
|
||||
if (cm->last_frame_seg_map)
|
||||
vpx_memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
|
||||
}
|
||||
|
||||
void vp9_swap_current_and_last_seg_map(VP9_COMMON *cm) {
|
||||
// Swap indices.
|
||||
const int tmp = cm->seg_map_idx;
|
||||
cm->seg_map_idx = cm->prev_seg_map_idx;
|
||||
cm->prev_seg_map_idx = tmp;
|
||||
|
||||
cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
|
||||
cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
|
||||
}
|
||||
|
@ -32,8 +32,6 @@ void vp9_free_state_buffers(struct VP9Common *cm);
|
||||
|
||||
void vp9_set_mb_mi(struct VP9Common *cm, int width, int height);
|
||||
|
||||
void vp9_swap_current_and_last_seg_map(struct VP9Common *cm);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
@ -439,13 +439,9 @@ void vp9_setup_past_independence(VP9_COMMON *cm) {
|
||||
int i;
|
||||
vp9_clearall_segfeatures(&cm->seg);
|
||||
cm->seg.abs_delta = SEGMENT_DELTADATA;
|
||||
|
||||
if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
|
||||
if (cm->last_frame_seg_map)
|
||||
vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
|
||||
|
||||
if (cm->current_frame_seg_map)
|
||||
vpx_memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
|
||||
|
||||
// Reset the mode ref deltas for loop filter
|
||||
vp9_zero(lf->last_ref_deltas);
|
||||
vp9_zero(lf->last_mode_deltas);
|
||||
@ -470,7 +466,7 @@ void vp9_setup_past_independence(VP9_COMMON *cm) {
|
||||
}
|
||||
|
||||
// prev_mip will only be allocated in encoder.
|
||||
if (frame_is_intra_only(cm) && cm->prev_mip && !cm->frame_parallel_decode)
|
||||
if (frame_is_intra_only(cm) && cm->prev_mip)
|
||||
vpx_memset(cm->prev_mip, 0, cm->mi_stride * (cm->mi_rows + 1) *
|
||||
sizeof(*cm->prev_mip));
|
||||
|
||||
|
@ -17,8 +17,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
|
||||
const TileInfo *const tile,
|
||||
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
|
||||
int_mv *mv_ref_list,
|
||||
int block, int mi_row, int mi_col,
|
||||
find_mv_refs_sync sync, void *const data) {
|
||||
int block, int mi_row, int mi_col) {
|
||||
const int *ref_sign_bias = cm->ref_frame_sign_bias;
|
||||
int i, refmv_count = 0;
|
||||
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
|
||||
@ -69,11 +68,6 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
|
||||
}
|
||||
}
|
||||
|
||||
// Synchronize here for frame parallel decode if sync function is provided.
|
||||
if (sync != NULL) {
|
||||
sync(data, mi_row);
|
||||
}
|
||||
|
||||
// Check the last frame's mode and mv info.
|
||||
if (cm->use_prev_frame_mvs) {
|
||||
if (prev_frame_mvs->ref_frame[0] == ref_frame) {
|
||||
@ -139,10 +133,9 @@ void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
|
||||
const TileInfo *const tile,
|
||||
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
|
||||
int_mv *mv_ref_list,
|
||||
int mi_row, int mi_col,
|
||||
find_mv_refs_sync sync, void *const data) {
|
||||
int mi_row, int mi_col) {
|
||||
find_mv_refs_idx(cm, xd, tile, mi, ref_frame, mv_ref_list, -1,
|
||||
mi_row, mi_col, sync, data);
|
||||
mi_row, mi_col);
|
||||
}
|
||||
|
||||
static void lower_mv_precision(MV *mv, int allow_hp) {
|
||||
@ -179,7 +172,7 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
|
||||
assert(MAX_MV_REF_CANDIDATES == 2);
|
||||
|
||||
find_mv_refs_idx(cm, xd, tile, mi, mi->mbmi.ref_frame[ref], mv_list, block,
|
||||
mi_row, mi_col, NULL, NULL);
|
||||
mi_row, mi_col);
|
||||
|
||||
near->as_int = 0;
|
||||
switch (block) {
|
||||
|
@ -207,12 +207,10 @@ static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
|
||||
xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
|
||||
}
|
||||
|
||||
typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
|
||||
void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
|
||||
const TileInfo *const tile,
|
||||
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
|
||||
int_mv *mv_ref_list, int mi_row, int mi_col,
|
||||
find_mv_refs_sync sync, void *const data);
|
||||
int_mv *mv_ref_list, int mi_row, int mi_col);
|
||||
|
||||
// check a list of motion vectors by sad score using a number rows of pixels
|
||||
// above and a number cols of pixels in the left to select the one with best
|
||||
|
@ -20,7 +20,6 @@
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_frame_buffers.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_thread.h"
#include "vp9/common/vp9_tile_common.h"

#if CONFIG_VP9_POSTPROC
@ -36,19 +35,14 @@ extern "C" {
#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)

// 4 scratch frames for the new frames to support a maximum of 4 cores decoding
// in parallel, 3 for scaled references on the encoder.
// TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number
// of framebuffers.
// 1 scratch frame for the new frame, 3 for scaled references on the encoder
// TODO(jkoleszar): These 3 extra references could probably come from the
// normal reference pool.
#define FRAME_BUFFERS (REF_FRAMES + 7)
#define FRAME_BUFFERS (REF_FRAMES + 4)
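Since REF_FRAMES_LOG2 is 3, REF_FRAMES is 1 << 3 = 8, so this hunk shrinks the pool from 8 + 4 + 3 = 15 buffers (four scratch frames for up to four frames decoding in parallel, plus three scaled encoder references) back to 8 + 1 + 3 = 12.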
#define FRAME_CONTEXTS_LOG2 2
#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)

#define NUM_PING_PONG_BUFFERS 2

extern const struct {
PARTITION_CONTEXT above;
PARTITION_CONTEXT left;
@ -74,40 +68,8 @@ typedef struct {
int mi_cols;
vpx_codec_frame_buffer_t raw_frame_buffer;
YV12_BUFFER_CONFIG buf;

// The Following variables will only be used in frame parallel decode.

// frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
// that no FrameWorker owns, or is decoding, this buffer.
VP9Worker *frame_worker_owner;

// row and col indicate which position frame has been decoded to in real
// pixel unit. They are reset to -1 when decoding begins and set to INT_MAX
// when the frame is fully decoded.
int row;
int col;
} RefCntBuffer;
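The fields removed from RefCntBuffer carried the owning worker and its decode progress. A hypothetical consumer-side check (not code from the tree) illustrating how the row field, which the removed decode loop published via vp9_frameworker_broadcast(), was meant to gate readers of a still-decoding reference:

    /* Sketch: a worker needing reference rows up to |required_row| may read
     * once no worker owns the buffer, or the owner has decoded past that row
     * (row is -1 at the start of decode and INT_MAX once the frame is done). */
    static int ref_rows_available(const RefCntBuffer *ref, int required_row) {
      return ref->frame_worker_owner == NULL || ref->row >= required_row;
    }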

typedef struct {
// Protect BufferPool from being accessed by several FrameWorkers at
// the same time during frame parallel decode.
// TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
#if CONFIG_MULTITHREAD
pthread_mutex_t pool_mutex;
#endif

// Private data associated with the frame buffer callbacks.
void *cb_priv;

vpx_get_frame_buffer_cb_fn_t get_fb_cb;
vpx_release_frame_buffer_cb_fn_t release_fb_cb;

RefCntBuffer frame_bufs[FRAME_BUFFERS];

// Frame buffers allocated internally by the codec.
InternalFrameBufferList int_frame_buffers;
} BufferPool;
|
||||
|
||||
typedef struct VP9Common {
|
||||
struct vpx_internal_error_info error;
|
||||
|
||||
@ -134,6 +96,7 @@ typedef struct VP9Common {
|
||||
#endif
|
||||
|
||||
YV12_BUFFER_CONFIG *frame_to_show;
|
||||
RefCntBuffer frame_bufs[FRAME_BUFFERS];
|
||||
RefCntBuffer *prev_frame;
|
||||
|
||||
// TODO(hkuang): Combine this with cur_buf in macroblockd.
|
||||
@ -141,10 +104,6 @@ typedef struct VP9Common {
|
||||
|
||||
int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */
|
||||
|
||||
// Prepare ref_frame_map for the next frame.
|
||||
// Only used in frame parallel decode.
|
||||
int next_ref_frame_map[REF_FRAMES];
|
||||
|
||||
// TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
|
||||
// roll new_fb_idx into it.
|
||||
|
||||
@ -211,12 +170,7 @@ typedef struct VP9Common {
|
||||
int use_prev_frame_mvs;
|
||||
|
||||
// Persistent mb segment id map used in prediction.
|
||||
int seg_map_idx;
|
||||
int prev_seg_map_idx;
|
||||
|
||||
uint8_t *seg_map_array[NUM_PING_PONG_BUFFERS];
|
||||
uint8_t *last_frame_seg_map;
|
||||
uint8_t *current_frame_seg_map;
|
||||
unsigned char *last_frame_seg_map;
|
||||
|
||||
INTERP_FILTER interp_filter;
|
||||
|
||||
@ -229,10 +183,6 @@ typedef struct VP9Common {
|
||||
struct loopfilter lf;
|
||||
struct segmentation seg;
|
||||
|
||||
// TODO(hkuang): Remove this as it is the same as frame_parallel_decode
|
||||
// in pbi.
|
||||
int frame_parallel_decode; // frame-based threading.
|
||||
|
||||
// Context probabilities for reference frame prediction
|
||||
MV_REFERENCE_FRAME comp_fixed_ref;
|
||||
MV_REFERENCE_FRAME comp_var_ref[2];
|
||||
@ -268,43 +218,31 @@ typedef struct VP9Common {
|
||||
// Handles memory for the codec.
|
||||
InternalFrameBufferList int_frame_buffers;
|
||||
|
||||
// External BufferPool passed from outside.
|
||||
BufferPool *buffer_pool;
|
||||
|
||||
PARTITION_CONTEXT *above_seg_context;
|
||||
ENTROPY_CONTEXT *above_context;
|
||||
} VP9_COMMON;
|
||||
|
||||
// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
|
||||
// frame reference count.
|
||||
void lock_buffer_pool(BufferPool *const pool);
|
||||
void unlock_buffer_pool(BufferPool *const pool);
|
||||
|
||||
static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP9_COMMON *cm, int index) {
|
||||
if (index < 0 || index >= REF_FRAMES)
|
||||
return NULL;
|
||||
if (cm->ref_frame_map[index] < 0)
|
||||
return NULL;
|
||||
assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
|
||||
return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
|
||||
return &cm->frame_bufs[cm->ref_frame_map[index]].buf;
|
||||
}
|
||||
|
||||
static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) {
|
||||
return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
|
||||
return &cm->frame_bufs[cm->new_fb_idx].buf;
|
||||
}
|
||||
|
||||
static INLINE int get_free_fb(VP9_COMMON *cm) {
|
||||
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
|
||||
int i;
|
||||
|
||||
lock_buffer_pool(cm->buffer_pool);
|
||||
for (i = 0; i < FRAME_BUFFERS; ++i)
|
||||
if (frame_bufs[i].ref_count == 0)
|
||||
for (i = 0; i < FRAME_BUFFERS; i++)
|
||||
if (cm->frame_bufs[i].ref_count == 0)
|
||||
break;
|
||||
|
||||
assert(i < FRAME_BUFFERS);
|
||||
frame_bufs[i].ref_count = 1;
|
||||
unlock_buffer_pool(cm->buffer_pool);
|
||||
cm->frame_bufs[i].ref_count = 1;
|
||||
return i;
|
||||
}
|
||||
|
||||
|
@ -20,7 +20,97 @@
|
||||
#include "vp9/common/vp9_reconinter.h"
|
||||
#include "vp9/common/vp9_reconintra.h"
|
||||
|
||||
void inter_predictor(const uint8_t *src, int src_stride,
|
||||
static void build_mc_border(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
int x, int y, int b_w, int b_h, int w, int h) {
|
||||
// Get a pointer to the start of the real data for this row.
|
||||
const uint8_t *ref_row = src - x - y * src_stride;
|
||||
|
||||
if (y >= h)
|
||||
ref_row += (h - 1) * src_stride;
|
||||
else if (y > 0)
|
||||
ref_row += y * src_stride;
|
||||
|
||||
do {
|
||||
int right = 0, copy;
|
||||
int left = x < 0 ? -x : 0;
|
||||
|
||||
if (left > b_w)
|
||||
left = b_w;
|
||||
|
||||
if (x + b_w > w)
|
||||
right = x + b_w - w;
|
||||
|
||||
if (right > b_w)
|
||||
right = b_w;
|
||||
|
||||
copy = b_w - left - right;
|
||||
|
||||
if (left)
|
||||
memset(dst, ref_row[0], left);
|
||||
|
||||
if (copy)
|
||||
memcpy(dst + left, ref_row + x + left, copy);
|
||||
|
||||
if (right)
|
||||
memset(dst + left + copy, ref_row[w - 1], right);
|
||||
|
||||
dst += dst_stride;
|
||||
++y;
|
||||
|
||||
if (y > 0 && y < h)
|
||||
ref_row += src_stride;
|
||||
} while (--b_h);
|
||||
}
|
||||
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
static void high_build_mc_border(const uint8_t *src8, int src_stride,
|
||||
uint16_t *dst, int dst_stride,
|
||||
int x, int y, int b_w, int b_h,
|
||||
int w, int h) {
|
||||
// Get a pointer to the start of the real data for this row.
|
||||
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
|
||||
const uint16_t *ref_row = src - x - y * src_stride;
|
||||
|
||||
if (y >= h)
|
||||
ref_row += (h - 1) * src_stride;
|
||||
else if (y > 0)
|
||||
ref_row += y * src_stride;
|
||||
|
||||
do {
|
||||
int right = 0, copy;
|
||||
int left = x < 0 ? -x : 0;
|
||||
|
||||
if (left > b_w)
|
||||
left = b_w;
|
||||
|
||||
if (x + b_w > w)
|
||||
right = x + b_w - w;
|
||||
|
||||
if (right > b_w)
|
||||
right = b_w;
|
||||
|
||||
copy = b_w - left - right;
|
||||
|
||||
if (left)
|
||||
vpx_memset16(dst, ref_row[0], left);
|
||||
|
||||
if (copy)
|
||||
memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
|
||||
|
||||
if (right)
|
||||
vpx_memset16(dst + left + copy, ref_row[w - 1], right);
|
||||
|
||||
dst += dst_stride;
|
||||
++y;
|
||||
|
||||
if (y > 0 && y < h)
|
||||
ref_row += src_stride;
|
||||
} while (--b_h);
|
||||
}
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
|
||||
static void inter_predictor(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
const int subpel_x,
|
||||
const int subpel_y,
|
||||
@ -33,8 +123,29 @@ void inter_predictor(const uint8_t *src, int src_stride,
|
||||
kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
|
||||
}
|
||||
|
||||
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
const MV *src_mv,
|
||||
const struct scale_factors *sf,
|
||||
int w, int h, int ref,
|
||||
const InterpKernel *kernel,
|
||||
enum mv_precision precision,
|
||||
int x, int y) {
|
||||
const int is_q4 = precision == MV_PRECISION_Q4;
|
||||
const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
|
||||
is_q4 ? src_mv->col : src_mv->col * 2 };
|
||||
MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
|
||||
const int subpel_x = mv.col & SUBPEL_MASK;
|
||||
const int subpel_y = mv.row & SUBPEL_MASK;
|
||||
|
||||
src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
|
||||
|
||||
inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
|
||||
sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
|
||||
}
|
||||
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
void high_inter_predictor(const uint8_t *src, int src_stride,
|
||||
static void high_inter_predictor(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
const int subpel_x,
|
||||
const int subpel_y,
|
||||
@ -69,27 +180,6 @@ void vp9_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
|
||||
}
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
|
||||
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
const MV *src_mv,
|
||||
const struct scale_factors *sf,
|
||||
int w, int h, int ref,
|
||||
const InterpKernel *kernel,
|
||||
enum mv_precision precision,
|
||||
int x, int y) {
|
||||
const int is_q4 = precision == MV_PRECISION_Q4;
|
||||
const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
|
||||
is_q4 ? src_mv->col : src_mv->col * 2 };
|
||||
MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
|
||||
const int subpel_x = mv.col & SUBPEL_MASK;
|
||||
const int subpel_y = mv.row & SUBPEL_MASK;
|
||||
|
||||
src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
|
||||
|
||||
inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
|
||||
sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
|
||||
}
|
||||
|
||||
static INLINE int round_mv_comp_q4(int value) {
|
||||
return (value < 0 ? value - 2 : value + 2) / 4;
|
||||
}
|
||||
@ -144,8 +234,8 @@ MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
|
||||
return clamped_mv;
|
||||
}
|
||||
|
||||
MV average_split_mvs(const struct macroblockd_plane *pd,
|
||||
const MODE_INFO *mi, int ref, int block) {
|
||||
static MV average_split_mvs(const struct macroblockd_plane *pd,
|
||||
const MODE_INFO *mi, int ref, int block) {
|
||||
const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
|
||||
MV res = {0, 0};
|
||||
switch (ss_idx) {
|
||||
@ -167,7 +257,7 @@ MV average_split_mvs(const struct macroblockd_plane *pd,
|
||||
return res;
|
||||
}
|
||||
|
||||
void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
|
||||
static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
|
||||
int bw, int bh,
|
||||
int x, int y, int w, int h,
|
||||
int mi_x, int mi_y) {
|
||||
@ -275,6 +365,213 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
|
||||
MAX_MB_PLANE - 1);
|
||||
}
|
||||
|
||||
// TODO(jingning): This function serves as a placeholder for decoder prediction
|
||||
// using on demand border extension. It should be moved to /decoder/ directory.
|
||||
static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
|
||||
int bw, int bh,
|
||||
int x, int y, int w, int h,
|
||||
int mi_x, int mi_y) {
|
||||
struct macroblockd_plane *const pd = &xd->plane[plane];
|
||||
const MODE_INFO *mi = xd->mi[0].src_mi;
|
||||
const int is_compound = has_second_ref(&mi->mbmi);
|
||||
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
|
||||
int ref;
|
||||
|
||||
for (ref = 0; ref < 1 + is_compound; ++ref) {
|
||||
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
|
||||
struct buf_2d *const pre_buf = &pd->pre[ref];
|
||||
struct buf_2d *const dst_buf = &pd->dst;
|
||||
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
|
||||
const MV mv = mi->mbmi.sb_type < BLOCK_8X8
|
||||
? average_split_mvs(pd, mi, ref, block)
|
||||
: mi->mbmi.mv[ref].as_mv;
|
||||
|
||||
const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
|
||||
pd->subsampling_x,
|
||||
pd->subsampling_y);
|
||||
|
||||
MV32 scaled_mv;
|
||||
int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
|
||||
subpel_x, subpel_y;
|
||||
uint8_t *ref_frame, *buf_ptr;
|
||||
const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf;
|
||||
const int is_scaled = vp9_is_scaled(sf);
|
||||
|
||||
// Get reference frame pointer, width and height.
|
||||
if (plane == 0) {
|
||||
frame_width = ref_buf->y_crop_width;
|
||||
frame_height = ref_buf->y_crop_height;
|
||||
ref_frame = ref_buf->y_buffer;
|
||||
} else {
|
||||
frame_width = ref_buf->uv_crop_width;
|
||||
frame_height = ref_buf->uv_crop_height;
|
||||
ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer;
|
||||
}
|
||||
|
||||
if (is_scaled) {
|
||||
// Co-ordinate of containing block to pixel precision.
|
||||
int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
|
||||
int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
|
||||
|
||||
// Co-ordinate of the block to 1/16th pixel precision.
|
||||
x0_16 = (x_start + x) << SUBPEL_BITS;
|
||||
y0_16 = (y_start + y) << SUBPEL_BITS;
|
||||
|
||||
// Co-ordinate of current block in reference frame
|
||||
// to 1/16th pixel precision.
|
||||
x0_16 = sf->scale_value_x(x0_16, sf);
|
||||
y0_16 = sf->scale_value_y(y0_16, sf);
|
||||
|
||||
// Map the top left corner of the block into the reference frame.
|
||||
x0 = sf->scale_value_x(x_start + x, sf);
|
||||
y0 = sf->scale_value_y(y_start + y, sf);
|
||||
|
||||
// Scale the MV and incorporate the sub-pixel offset of the block
|
||||
// in the reference frame.
|
||||
scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
|
||||
xs = sf->x_step_q4;
|
||||
ys = sf->y_step_q4;
|
||||
} else {
|
||||
// Co-ordinate of containing block to pixel precision.
|
||||
x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
|
||||
y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
|
||||
|
||||
// Co-ordinate of the block to 1/16th pixel precision.
|
||||
x0_16 = x0 << SUBPEL_BITS;
|
||||
y0_16 = y0 << SUBPEL_BITS;
|
||||
|
||||
scaled_mv.row = mv_q4.row;
|
||||
scaled_mv.col = mv_q4.col;
|
||||
xs = ys = 16;
|
||||
}
|
||||
subpel_x = scaled_mv.col & SUBPEL_MASK;
|
||||
subpel_y = scaled_mv.row & SUBPEL_MASK;
|
||||
|
||||
// Calculate the top left corner of the best matching block in the
|
||||
// reference frame.
|
||||
x0 += scaled_mv.col >> SUBPEL_BITS;
|
||||
y0 += scaled_mv.row >> SUBPEL_BITS;
|
||||
x0_16 += scaled_mv.col;
|
||||
y0_16 += scaled_mv.row;
|
||||
|
||||
// Get reference block pointer.
|
||||
buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
|
||||
buf_stride = pre_buf->stride;
|
||||
|
||||
// Do border extension if there is motion or the
|
||||
// width/height is not a multiple of 8 pixels.
|
||||
if (is_scaled || scaled_mv.col || scaled_mv.row ||
|
||||
(frame_width & 0x7) || (frame_height & 0x7)) {
|
||||
// Get reference block bottom right coordinate.
|
||||
int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
|
||||
int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
|
||||
int x_pad = 0, y_pad = 0;
|
||||
|
||||
if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
|
||||
x0 -= VP9_INTERP_EXTEND - 1;
|
||||
x1 += VP9_INTERP_EXTEND;
|
||||
x_pad = 1;
|
||||
}
|
||||
|
||||
if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
|
||||
y0 -= VP9_INTERP_EXTEND - 1;
|
||||
y1 += VP9_INTERP_EXTEND;
|
||||
y_pad = 1;
|
||||
}
|
||||
|
||||
// Skip border extension if block is inside the frame.
|
||||
if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
|
||||
y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
|
||||
uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
|
||||
// Extend the border.
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
||||
high_build_mc_border(buf_ptr1,
|
||||
pre_buf->stride,
|
||||
xd->mc_buf_high,
|
||||
x1 - x0 + 1,
|
||||
x0,
|
||||
y0,
|
||||
x1 - x0 + 1,
|
||||
y1 - y0 + 1,
|
||||
frame_width,
|
||||
frame_height);
|
||||
buf_stride = x1 - x0 + 1;
|
||||
buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
|
||||
y_pad * 3 * buf_stride + x_pad * 3;
|
||||
} else {
|
||||
build_mc_border(buf_ptr1,
|
||||
pre_buf->stride,
|
||||
xd->mc_buf,
|
||||
x1 - x0 + 1,
|
||||
x0,
|
||||
y0,
|
||||
x1 - x0 + 1,
|
||||
y1 - y0 + 1,
|
||||
frame_width,
|
||||
frame_height);
|
||||
buf_stride = x1 - x0 + 1;
|
||||
buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
|
||||
}
|
||||
#else
|
||||
build_mc_border(buf_ptr1,
|
||||
pre_buf->stride,
|
||||
xd->mc_buf,
|
||||
x1 - x0 + 1,
|
||||
x0,
|
||||
y0,
|
||||
x1 - x0 + 1,
|
||||
y1 - y0 + 1,
|
||||
frame_width,
|
||||
frame_height);
|
||||
buf_stride = x1 - x0 + 1;
|
||||
buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
}
|
||||
}
|
||||
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
||||
high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
|
||||
subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
|
||||
} else {
|
||||
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
|
||||
subpel_y, sf, w, h, ref, kernel, xs, ys);
|
||||
}
|
||||
#else
|
||||
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
|
||||
subpel_y, sf, w, h, ref, kernel, xs, ys);
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
}
|
||||
}
|
||||
|
||||
void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
|
||||
BLOCK_SIZE bsize) {
|
||||
int plane;
|
||||
const int mi_x = mi_col * MI_SIZE;
|
||||
const int mi_y = mi_row * MI_SIZE;
|
||||
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
|
||||
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
|
||||
&xd->plane[plane]);
|
||||
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
|
||||
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
|
||||
const int bw = 4 * num_4x4_w;
|
||||
const int bh = 4 * num_4x4_h;
|
||||
|
||||
if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
|
||||
int i = 0, x, y;
|
||||
assert(bsize == BLOCK_8X8);
|
||||
for (y = 0; y < num_4x4_h; ++y)
|
||||
for (x = 0; x < num_4x4_w; ++x)
|
||||
dec_build_inter_predictors(xd, plane, i++, bw, bh,
|
||||
4 * x, 4 * y, 4, 4, mi_x, mi_y);
|
||||
} else {
|
||||
dec_build_inter_predictors(xd, plane, 0, bw, bh,
|
||||
0, 0, bw, bh, mi_x, mi_y);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
|
||||
const YV12_BUFFER_CONFIG *src,
|
||||
int mi_row, int mi_col) {
|
||||
|
@ -18,37 +18,6 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
void inter_predictor(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
const int subpel_x,
|
||||
const int subpel_y,
|
||||
const struct scale_factors *sf,
|
||||
int w, int h, int ref,
|
||||
const InterpKernel *kernel,
|
||||
int xs, int ys);
|
||||
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
void high_inter_predictor(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
const int subpel_x,
|
||||
const int subpel_y,
|
||||
const struct scale_factors *sf,
|
||||
int w, int h, int ref,
|
||||
const InterpKernel *kernel,
|
||||
int xs, int ys, int bd);
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
|
||||
MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi,
|
||||
int ref, int block);
|
||||
|
||||
MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
|
||||
int bw, int bh, int ss_x, int ss_y);
|
||||
|
||||
void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
|
||||
int bw, int bh,
|
||||
int x, int y, int w, int h,
|
||||
int mi_x, int mi_y);
|
||||
|
||||
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
|
||||
BLOCK_SIZE bsize);
|
||||
|
||||
@ -58,6 +27,9 @@ void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
|
||||
void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
|
||||
BLOCK_SIZE bsize);
|
||||
|
||||
void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
|
||||
BLOCK_SIZE bsize);
|
||||
|
||||
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
|
||||
uint8_t *dst, int dst_stride,
|
||||
const MV *mv_q3,
|
||||
|
@ -23,7 +23,6 @@
|
||||
#include "vp9/common/vp9_entropy.h"
|
||||
#include "vp9/common/vp9_entropymode.h"
|
||||
#include "vp9/common/vp9_idct.h"
|
||||
#include "vp9/common/vp9_loopfilter_thread.h"
|
||||
#include "vp9/common/vp9_pred_common.h"
|
||||
#include "vp9/common/vp9_quant_common.h"
|
||||
#include "vp9/common/vp9_reconintra.h"
|
||||
@ -384,14 +383,13 @@ static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
|
||||
return &xd->mi[0].mbmi;
|
||||
}
|
||||
|
||||
static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
|
||||
static void decode_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
|
||||
const TileInfo *const tile,
|
||||
int mi_row, int mi_col,
|
||||
vp9_reader *r, BLOCK_SIZE bsize) {
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
const int less8x8 = bsize < BLOCK_8X8;
|
||||
MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
|
||||
vp9_read_mode_info(pbi, xd, tile, mi_row, mi_col, r);
|
||||
vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);
|
||||
|
||||
if (less8x8)
|
||||
bsize = BLOCK_8X8;
|
||||
@ -410,7 +408,7 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
|
||||
predict_and_reconstruct_intra_block, &arg);
|
||||
} else {
|
||||
// Prediction
|
||||
vp9_dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col, bsize);
|
||||
vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
|
||||
|
||||
// Reconstruction
|
||||
if (!mbmi->skip) {
|
||||
@ -449,11 +447,10 @@ static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs,
|
||||
return p;
|
||||
}
|
||||
|
||||
static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
|
||||
static void decode_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
|
||||
const TileInfo *const tile,
|
||||
int mi_row, int mi_col,
|
||||
vp9_reader* r, BLOCK_SIZE bsize) {
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
|
||||
PARTITION_TYPE partition;
|
||||
BLOCK_SIZE subsize, uv_subsize;
|
||||
@ -468,27 +465,27 @@ static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
|
||||
vpx_internal_error(xd->error_info,
|
||||
VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
|
||||
if (subsize < BLOCK_8X8) {
|
||||
decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
|
||||
decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
|
||||
} else {
|
||||
switch (partition) {
|
||||
case PARTITION_NONE:
|
||||
decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
|
||||
decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
|
||||
break;
|
||||
case PARTITION_HORZ:
|
||||
decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
|
||||
decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
|
||||
if (mi_row + hbs < cm->mi_rows)
|
||||
decode_block(pbi, xd, tile, mi_row + hbs, mi_col, r, subsize);
|
||||
decode_block(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
|
||||
break;
|
||||
case PARTITION_VERT:
|
||||
decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
|
||||
decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
|
||||
if (mi_col + hbs < cm->mi_cols)
|
||||
decode_block(pbi, xd, tile, mi_row, mi_col + hbs, r, subsize);
|
||||
decode_block(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
|
||||
break;
|
||||
case PARTITION_SPLIT:
|
||||
decode_partition(pbi, xd, tile, mi_row, mi_col, r, subsize);
|
||||
decode_partition(pbi, xd, tile, mi_row, mi_col + hbs, r, subsize);
|
||||
decode_partition(pbi, xd, tile, mi_row + hbs, mi_col, r, subsize);
|
||||
decode_partition(pbi, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
|
||||
decode_partition(cm, xd, tile, mi_row, mi_col, r, subsize);
|
||||
decode_partition(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
|
||||
decode_partition(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
|
||||
decode_partition(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
|
||||
break;
|
||||
default:
|
||||
assert(0 && "Invalid partition type");
|
||||
@ -710,12 +707,10 @@ static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
|
||||
|
||||
static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
|
||||
int width, height;
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
vp9_read_frame_size(rb, &width, &height);
|
||||
resize_context_buffers(cm, width, height);
|
||||
setup_display_size(cm, rb);
|
||||
|
||||
lock_buffer_pool(pool);
|
||||
if (vp9_realloc_frame_buffer(
|
||||
get_frame_new_buffer(cm), cm->width, cm->height,
|
||||
cm->subsampling_x, cm->subsampling_y,
|
||||
@ -724,17 +719,16 @@ static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
|
||||
#endif
|
||||
VP9_DEC_BORDER_IN_PIXELS,
|
||||
cm->byte_alignment,
|
||||
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
|
||||
pool->cb_priv)) {
|
||||
unlock_buffer_pool(pool);
|
||||
&cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
|
||||
cm->cb_priv)) {
|
||||
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
|
||||
"Failed to allocate frame buffer");
|
||||
}
|
||||
unlock_buffer_pool(pool);
|
||||
|
||||
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
|
||||
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
|
||||
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
|
||||
cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
|
||||
cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
|
||||
cm->frame_bufs[cm->new_fb_idx].buf.color_space =
|
||||
(vpx_color_space_t)cm->color_space;
|
||||
cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
|
||||
}
|
||||
|
||||
static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
|
||||
@ -750,7 +744,6 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
|
||||
int width, height;
|
||||
int found = 0, i;
|
||||
int has_valid_ref_frame = 0;
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
for (i = 0; i < REFS_PER_FRAME; ++i) {
|
||||
if (vp9_rb_read_bit(rb)) {
|
||||
YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
|
||||
@ -795,7 +788,6 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
|
||||
resize_context_buffers(cm, width, height);
|
||||
setup_display_size(cm, rb);
|
||||
|
||||
lock_buffer_pool(pool);
|
||||
if (vp9_realloc_frame_buffer(
|
||||
get_frame_new_buffer(cm), cm->width, cm->height,
|
||||
cm->subsampling_x, cm->subsampling_y,
|
||||
@ -804,17 +796,14 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm,
|
||||
#endif
|
||||
VP9_DEC_BORDER_IN_PIXELS,
|
||||
cm->byte_alignment,
|
||||
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
|
||||
pool->cb_priv)) {
|
||||
unlock_buffer_pool(pool);
|
||||
&cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
|
||||
cm->cb_priv)) {
|
||||
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
|
||||
"Failed to allocate frame buffer");
|
||||
}
|
||||
unlock_buffer_pool(pool);
|
||||
|
||||
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
|
||||
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
|
||||
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
|
||||
cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
|
||||
cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
|
||||
cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
|
||||
}
|
||||
|
||||
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
|
||||
@ -983,7 +972,7 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi,
|
||||
vp9_zero(tile_data->xd.left_seg_context);
|
||||
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
|
||||
mi_col += MI_BLOCK_SIZE) {
|
||||
decode_partition(pbi, &tile_data->xd, &tile, mi_row, mi_col,
|
||||
decode_partition(tile_data->cm, &tile_data->xd, &tile, mi_row, mi_col,
|
||||
&tile_data->bit_reader, BLOCK_64X64);
|
||||
}
|
||||
pbi->mb.corrupted |= tile_data->xd.corrupted;
|
||||
@ -1011,12 +1000,6 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi,
|
||||
winterface->execute(&pbi->lf_worker);
|
||||
}
|
||||
}
|
||||
// After loopfiltering, the last 7 pixel rows in each superblock row may still
// be changed by the longest loopfilter of the next superblock row.
|
||||
if (pbi->frame_parallel_decode)
|
||||
vp9_frameworker_broadcast(pbi->cur_buf,
|
||||
mi_row << MI_BLOCK_SIZE_LOG2);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1032,8 +1015,6 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi,
|
||||
// Get last tile data.
|
||||
tile_data = pbi->tile_data + tile_cols * tile_rows - 1;
|
||||
|
||||
if (pbi->frame_parallel_decode)
|
||||
vp9_frameworker_broadcast(pbi->cur_buf, INT_MAX);
|
||||
return vp9_reader_find_end(&tile_data->bit_reader);
|
||||
}
|
||||
|
||||
@ -1056,7 +1037,7 @@ static int tile_worker_hook(TileWorkerData *const tile_data,
|
||||
vp9_zero(tile_data->xd.left_seg_context);
|
||||
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
|
||||
mi_col += MI_BLOCK_SIZE) {
|
||||
decode_partition(tile_data->pbi, &tile_data->xd, tile,
|
||||
decode_partition(tile_data->cm, &tile_data->xd, tile,
|
||||
mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64);
|
||||
}
|
||||
}
|
||||
@ -1171,10 +1152,10 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
|
||||
TileInfo *const tile = (TileInfo*)worker->data2;
|
||||
TileBuffer *const buf = &tile_buffers[0][n];
|
||||
|
||||
tile_data->pbi = pbi;
|
||||
tile_data->cm = cm;
|
||||
tile_data->xd = pbi->mb;
|
||||
tile_data->xd.corrupted = 0;
|
||||
vp9_tile_init(tile, &pbi->common, 0, buf->col);
|
||||
vp9_tile_init(tile, tile_data->cm, 0, buf->col);
|
||||
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
|
||||
&tile_data->bit_reader, pbi->decrypt_cb,
|
||||
pbi->decrypt_state);
|
||||
@ -1278,10 +1259,8 @@ static void read_bitdepth_colorspace_sampling(
|
||||
static size_t read_uncompressed_header(VP9Decoder *pbi,
|
||||
struct vp9_read_bit_buffer *rb) {
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
|
||||
BufferPool *const pool = pbi->common.buffer_pool;
|
||||
int i, mask, ref_index = 0;
|
||||
size_t sz;
|
||||
int i;
|
||||
|
||||
cm->last_frame_type = cm->frame_type;
|
||||
|
||||
@ -1299,24 +1278,16 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
|
||||
if (cm->show_existing_frame) {
|
||||
// Show an existing frame directly.
|
||||
const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
|
||||
lock_buffer_pool(pool);
|
||||
if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
|
||||
unlock_buffer_pool(pool);
|
||||
|
||||
if (frame_to_show < 0 || cm->frame_bufs[frame_to_show].ref_count < 1)
|
||||
vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
|
||||
"Buffer %d does not contain a decoded frame",
|
||||
frame_to_show);
|
||||
}
|
||||
|
||||
ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
|
||||
unlock_buffer_pool(pool);
|
||||
ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show);
|
||||
pbi->refresh_frame_flags = 0;
|
||||
cm->lf.filter_level = 0;
|
||||
cm->show_frame = 1;
|
||||
|
||||
if (pbi->frame_parallel_decode) {
|
||||
for (i = 0; i < REF_FRAMES; ++i)
|
||||
cm->next_ref_frame_map[i] = cm->ref_frame_map[i];
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1338,10 +1309,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
|
||||
}
|
||||
|
||||
setup_frame_size(cm, rb);
|
||||
if (pbi->need_resync) {
|
||||
vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
|
||||
pbi->need_resync = 0;
|
||||
}
|
||||
pbi->need_resync = 0;
|
||||
} else {
|
||||
cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);
|
||||
|
||||
@ -1369,18 +1337,15 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
|
||||
|
||||
pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
|
||||
setup_frame_size(cm, rb);
|
||||
if (pbi->need_resync) {
|
||||
vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
|
||||
pbi->need_resync = 0;
|
||||
}
|
||||
} else if (pbi->need_resync != 1) { /* Skip if need resync */
|
||||
pbi->need_resync = 0;
|
||||
} else {
|
||||
pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
|
||||
for (i = 0; i < REFS_PER_FRAME; ++i) {
|
||||
const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
|
||||
const int idx = cm->ref_frame_map[ref];
|
||||
RefBuffer *const ref_frame = &cm->frame_refs[i];
|
||||
ref_frame->idx = idx;
|
||||
ref_frame->buf = &frame_bufs[idx].buf;
|
||||
ref_frame->buf = &cm->frame_bufs[idx].buf;
|
||||
cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
|
||||
}
|
||||
|
||||
@ -1430,30 +1395,6 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
|
||||
// below, forcing the use of context 0 for those frame types.
|
||||
cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
|
||||
|
||||
// Generate next_ref_frame_map.
|
||||
lock_buffer_pool(pool);
|
||||
for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
|
||||
if (mask & 1) {
|
||||
cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
|
||||
++frame_bufs[cm->new_fb_idx].ref_count;
|
||||
} else {
|
||||
cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
|
||||
}
|
||||
// Current thread holds the reference frame.
|
||||
if (cm->ref_frame_map[ref_index] >= 0)
|
||||
++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
|
||||
++ref_index;
|
||||
}
|
||||
|
||||
for (; ref_index < REF_FRAMES; ++ref_index) {
|
||||
cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
|
||||
// Current thread holds the reference frame.
|
||||
if (cm->ref_frame_map[ref_index] >= 0)
|
||||
++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
|
||||
}
|
||||
unlock_buffer_pool(pool);
|
||||
pbi->hold_ref_buf = 1;
|
||||
|
||||
if (frame_is_intra_only(cm) || cm->error_resilient_mode)
|
||||
vp9_setup_past_independence(cm);
|
||||
|
||||
@ -1599,7 +1540,7 @@ void vp9_decode_frame(VP9Decoder *pbi,
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
MACROBLOCKD *const xd = &pbi->mb;
|
||||
struct vp9_read_bit_buffer rb = { NULL, NULL, 0, NULL, 0};
|
||||
int context_updated = 0;
|
||||
|
||||
uint8_t clear_data[MAX_VP9_HEADER_SIZE];
|
||||
const size_t first_partition_size = read_uncompressed_header(pbi,
|
||||
init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
|
||||
@ -1641,28 +1582,6 @@ void vp9_decode_frame(VP9Decoder *pbi,
|
||||
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
|
||||
"Decode failed. Frame data header is corrupted.");
|
||||
|
||||
if (cm->lf.filter_level) {
|
||||
vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
|
||||
}
|
||||
|
||||
// If encoded in frame parallel mode, frame context is ready after decoding
|
||||
// the frame header.
|
||||
if (pbi->frame_parallel_decode && cm->frame_parallel_decoding_mode) {
|
||||
VP9Worker *const worker = pbi->frame_worker_owner;
|
||||
FrameWorkerData *const frame_worker_data = worker->data1;
|
||||
if (cm->refresh_frame_context) {
|
||||
context_updated = 1;
|
||||
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
|
||||
}
|
||||
vp9_frameworker_lock_stats(worker);
|
||||
pbi->cur_buf->row = -1;
|
||||
pbi->cur_buf->col = -1;
|
||||
frame_worker_data->frame_context_ready = 1;
|
||||
// Signal the main thread that context is ready.
|
||||
vp9_frameworker_signal_stats(worker);
|
||||
vp9_frameworker_unlock_stats(worker);
|
||||
}
|
||||
|
||||
// TODO(jzern): remove frame_parallel_decoding_mode restriction for
|
||||
// single-frame tile decoding.
|
||||
if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
|
||||
@ -1683,7 +1602,9 @@ void vp9_decode_frame(VP9Decoder *pbi,
|
||||
*p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
|
||||
}
|
||||
|
||||
if (!xd->corrupted) {
|
||||
new_fb->corrupted |= xd->corrupted;
|
||||
|
||||
if (!new_fb->corrupted) {
|
||||
if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
|
||||
vp9_adapt_coef_probs(cm);
|
||||
|
||||
@ -1699,324 +1620,6 @@ void vp9_decode_frame(VP9Decoder *pbi,
|
||||
"Decode failed. Frame data is corrupted.");
|
||||
}
|
||||
|
||||
// Non frame parallel update frame context here.
|
||||
if (cm->refresh_frame_context && !context_updated)
|
||||
if (cm->refresh_frame_context)
|
||||
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
|
||||
}
|
||||
|
||||
static void build_mc_border(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
int x, int y, int b_w, int b_h, int w, int h) {
// Get a pointer to the start of the real data for this row.
const uint8_t *ref_row = src - x - y * src_stride;

if (y >= h)
ref_row += (h - 1) * src_stride;
else if (y > 0)
ref_row += y * src_stride;

do {
int right = 0, copy;
int left = x < 0 ? -x : 0;

if (left > b_w)
left = b_w;

if (x + b_w > w)
right = x + b_w - w;

if (right > b_w)
right = b_w;

copy = b_w - left - right;

if (left)
memset(dst, ref_row[0], left);

if (copy)
memcpy(dst + left, ref_row + x + left, copy);

if (right)
memset(dst + left + copy, ref_row[w - 1], right);

dst += dst_stride;
++y;

if (y > 0 && y < h)
ref_row += src_stride;
} while (--b_h);
}

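As an illustrative aside (not libvpx code, and not part of this change), the left/copy/right split computed above can be checked with a tiny standalone C program; the block position, block width and frame width used here are made-up values:

#include <stdio.h>

/* One row of a 16-pixel-wide predictor block starting at x = -5 in a frame
 * that is 20 pixels wide: how much comes from left-edge replication, from the
 * frame itself, and from right-edge replication. Mirrors the arithmetic in
 * build_mc_border above. */
int main(void) {
  const int x = -5, b_w = 16, w = 20;
  int right = 0;
  int left = x < 0 ? -x : 0;
  if (left > b_w) left = b_w;
  if (x + b_w > w) right = x + b_w - w;
  if (right > b_w) right = b_w;
  printf("left=%d copy=%d right=%d\n", left, b_w - left - right, right);
  /* Prints: left=5 copy=11 right=0. */
  return 0;
}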
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
static void high_build_mc_border(const uint8_t *src8, int src_stride,
|
||||
uint16_t *dst, int dst_stride,
|
||||
int x, int y, int b_w, int b_h,
|
||||
int w, int h) {
|
||||
// Get a pointer to the start of the real data for this row.
|
||||
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
|
||||
const uint16_t *ref_row = src - x - y * src_stride;
|
||||
|
||||
if (y >= h)
|
||||
ref_row += (h - 1) * src_stride;
|
||||
else if (y > 0)
|
||||
ref_row += y * src_stride;
|
||||
|
||||
do {
|
||||
int right = 0, copy;
|
||||
int left = x < 0 ? -x : 0;
|
||||
|
||||
if (left > b_w)
|
||||
left = b_w;
|
||||
|
||||
if (x + b_w > w)
|
||||
right = x + b_w - w;
|
||||
|
||||
if (right > b_w)
|
||||
right = b_w;
|
||||
|
||||
copy = b_w - left - right;
|
||||
|
||||
if (left)
|
||||
vpx_memset16(dst, ref_row[0], left);
|
||||
|
||||
if (copy)
|
||||
memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
|
||||
|
||||
if (right)
|
||||
vpx_memset16(dst + left + copy, ref_row[w - 1], right);
|
||||
|
||||
dst += dst_stride;
|
||||
++y;
|
||||
|
||||
if (y > 0 && y < h)
|
||||
ref_row += src_stride;
|
||||
} while (--b_h);
|
||||
}
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
|
||||
void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd,
|
||||
int plane, int block, int bw, int bh, int x,
|
||||
int y, int w, int h, int mi_x, int mi_y) {
|
||||
struct macroblockd_plane *const pd = &xd->plane[plane];
|
||||
const MODE_INFO *mi = xd->mi[0].src_mi;
|
||||
const int is_compound = has_second_ref(&mi->mbmi);
|
||||
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
|
||||
int ref;
|
||||
|
||||
for (ref = 0; ref < 1 + is_compound; ++ref) {
|
||||
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
|
||||
struct buf_2d *const pre_buf = &pd->pre[ref];
|
||||
struct buf_2d *const dst_buf = &pd->dst;
|
||||
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
|
||||
const MV mv = mi->mbmi.sb_type < BLOCK_8X8
|
||||
? average_split_mvs(pd, mi, ref, block)
|
||||
: mi->mbmi.mv[ref].as_mv;
|
||||
|
||||
|
||||
// TODO(jkoleszar): This clamping is done in the incorrect place for the
|
||||
// scaling case. It needs to be done on the scaled MV, not the pre-scaling
|
||||
// MV. Note however that it performs the subsampling aware scaling so
|
||||
// that the result is always q4.
|
||||
// mv_precision precision is MV_PRECISION_Q4.
|
||||
const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
|
||||
pd->subsampling_x,
|
||||
pd->subsampling_y);
|
||||
|
||||
MV32 scaled_mv;
|
||||
int xs, ys, x0, y0, x0_16, y0_16, y1, frame_width, frame_height,
|
||||
buf_stride, subpel_x, subpel_y;
|
||||
uint8_t *ref_frame, *buf_ptr;
|
||||
const int idx = xd->block_refs[ref]->idx;
|
||||
BufferPool *const pool = pbi->common.buffer_pool;
|
||||
RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
|
||||
|
||||
// Get reference frame pointer, width and height.
|
||||
if (plane == 0) {
|
||||
frame_width = ref_frame_buf->buf.y_crop_width;
|
||||
frame_height = ref_frame_buf->buf.y_crop_height;
|
||||
ref_frame = ref_frame_buf->buf.y_buffer;
|
||||
} else {
|
||||
frame_width = ref_frame_buf->buf.uv_crop_width;
|
||||
frame_height = ref_frame_buf->buf.uv_crop_height;
|
||||
ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
|
||||
: ref_frame_buf->buf.v_buffer;
|
||||
}
|
||||
|
||||
if (vp9_is_scaled(sf)) {
|
||||
// Co-ordinate of containing block to pixel precision.
|
||||
int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
|
||||
int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
|
||||
|
||||
// Co-ordinate of the block to 1/16th pixel precision.
|
||||
x0_16 = (x_start + x) << SUBPEL_BITS;
|
||||
y0_16 = (y_start + y) << SUBPEL_BITS;
|
||||
|
||||
// Co-ordinate of current block in reference frame
|
||||
// to 1/16th pixel precision.
|
||||
x0_16 = sf->scale_value_x(x0_16, sf);
|
||||
y0_16 = sf->scale_value_y(y0_16, sf);
|
||||
|
||||
// Map the top left corner of the block into the reference frame.
|
||||
x0 = sf->scale_value_x(x_start + x, sf);
|
||||
y0 = sf->scale_value_y(y_start + y, sf);
|
||||
|
||||
// Scale the MV and incorporate the sub-pixel offset of the block
|
||||
// in the reference frame.
|
||||
scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
|
||||
xs = sf->x_step_q4;
|
||||
ys = sf->y_step_q4;
|
||||
} else {
|
||||
// Co-ordinate of containing block to pixel precision.
|
||||
x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
|
||||
y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
|
||||
|
||||
// Co-ordinate of the block to 1/16th pixel precision.
|
||||
x0_16 = x0 << SUBPEL_BITS;
|
||||
y0_16 = y0 << SUBPEL_BITS;
|
||||
|
||||
scaled_mv.row = mv_q4.row;
|
||||
scaled_mv.col = mv_q4.col;
|
||||
xs = ys = 16;
|
||||
}
|
||||
subpel_x = scaled_mv.col & SUBPEL_MASK;
|
||||
subpel_y = scaled_mv.row & SUBPEL_MASK;
|
||||
|
||||
// Calculate the top left corner of the best matching block in the
|
||||
// reference frame.
|
||||
x0 += scaled_mv.col >> SUBPEL_BITS;
|
||||
y0 += scaled_mv.row >> SUBPEL_BITS;
|
||||
x0_16 += scaled_mv.col;
|
||||
y0_16 += scaled_mv.row;
|
||||
|
||||
// Get reference block pointer.
|
||||
buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
|
||||
buf_stride = pre_buf->stride;
|
||||
|
||||
// Get reference block bottom right vertical coordinate.
|
||||
y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
|
||||
|
||||
// Do border extension if there is motion or the
|
||||
// width/height is not a multiple of 8 pixels.
|
||||
if (scaled_mv.col || scaled_mv.row ||
|
||||
(frame_width & 0x7) || (frame_height & 0x7)) {
|
||||
int x_pad = 0, y_pad = 0;
|
||||
|
||||
// Get reference block bottom right horizontal coordinate.
|
||||
int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
|
||||
|
||||
if (subpel_x || (sf->x_step_q4 & SUBPEL_MASK)) {
|
||||
x0 -= VP9_INTERP_EXTEND - 1;
|
||||
x1 += VP9_INTERP_EXTEND;
|
||||
x_pad = 1;
|
||||
}
|
||||
|
||||
if (subpel_y || (sf->y_step_q4 & SUBPEL_MASK)) {
|
||||
y0 -= VP9_INTERP_EXTEND - 1;
|
||||
y1 += VP9_INTERP_EXTEND;
|
||||
y_pad = 1;
|
||||
}
|
||||
|
||||
// Wait until the reference block is ready. Pad 7 more pixels because the last
// 7 pixels of each superblock row can be changed by the next superblock row.
|
||||
if (pbi->frame_parallel_decode)
|
||||
vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
|
||||
(y1 + 7) << (plane == 0 ? 0 : 1));
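// Note (illustrative): y1 is in the plane's own pixel rows, while decoding
// progress is tracked in luma rows, so for a 4:2:0 chroma plane the extra
// << 1 converts back to luma rows. For example, chroma y1 = 36 waits for
// luma row (36 + 7) << 1 = 86, whereas a luma block with y1 = 36 waits for
// row 36 + 7 = 43.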
|
||||
|
||||
// Skip border extension if block is inside the frame.
|
||||
if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
|
||||
y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
|
||||
uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
|
||||
// Extend the border.
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
||||
high_build_mc_border(buf_ptr1,
|
||||
pre_buf->stride,
|
||||
xd->mc_buf_high,
|
||||
x1 - x0 + 1,
|
||||
x0,
|
||||
y0,
|
||||
x1 - x0 + 1,
|
||||
y1 - y0 + 1,
|
||||
frame_width,
|
||||
frame_height);
|
||||
buf_stride = x1 - x0 + 1;
|
||||
buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
|
||||
y_pad * 3 * buf_stride + x_pad * 3;
|
||||
} else {
|
||||
build_mc_border(buf_ptr1,
|
||||
pre_buf->stride,
|
||||
xd->mc_buf,
|
||||
x1 - x0 + 1,
|
||||
x0,
|
||||
y0,
|
||||
x1 - x0 + 1,
|
||||
y1 - y0 + 1,
|
||||
frame_width,
|
||||
frame_height);
|
||||
buf_stride = x1 - x0 + 1;
|
||||
buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
|
||||
}
|
||||
#else
|
||||
build_mc_border(buf_ptr1,
|
||||
pre_buf->stride,
|
||||
xd->mc_buf,
|
||||
x1 - x0 + 1,
|
||||
x0,
|
||||
y0,
|
||||
x1 - x0 + 1,
|
||||
y1 - y0 + 1,
|
||||
frame_width,
|
||||
frame_height);
|
||||
buf_stride = x1 - x0 + 1;
|
||||
buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
}
|
||||
} else {
|
||||
// Wait until the reference block is ready. Pad 7 more pixels because the last
// 7 pixels of each superblock row can be changed by the next superblock row.
|
||||
if (pbi->frame_parallel_decode)
|
||||
vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
|
||||
(y1 + 7) << (plane == 0 ? 0 : 1));
|
||||
}
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
||||
high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
|
||||
subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
|
||||
} else {
|
||||
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
|
||||
subpel_y, sf, w, h, ref, kernel, xs, ys);
|
||||
}
|
||||
#else
|
||||
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
|
||||
subpel_y, sf, w, h, ref, kernel, xs, ys);
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
}
|
||||
}
|
||||
|
||||
void vp9_dec_build_inter_predictors_sb(VP9Decoder *const pbi, MACROBLOCKD *xd,
|
||||
int mi_row, int mi_col,
|
||||
BLOCK_SIZE bsize) {
|
||||
int plane;
|
||||
const int mi_x = mi_col * MI_SIZE;
|
||||
const int mi_y = mi_row * MI_SIZE;
|
||||
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
|
||||
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
|
||||
&xd->plane[plane]);
|
||||
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
|
||||
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
|
||||
const int bw = 4 * num_4x4_w;
|
||||
const int bh = 4 * num_4x4_h;
|
||||
|
||||
if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
|
||||
int i = 0, x, y;
|
||||
assert(bsize == BLOCK_8X8);
|
||||
for (y = 0; y < num_4x4_h; ++y)
|
||||
for (x = 0; x < num_4x4_w; ++x)
|
||||
dec_build_inter_predictors(pbi, xd, plane, i++, bw, bh,
|
||||
4 * x, 4 * y, 4, 4, mi_x, mi_y);
|
||||
} else {
|
||||
dec_build_inter_predictors(pbi, xd, plane, 0, bw, bh,
|
||||
0, 0, bw, bh, mi_x, mi_y);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -31,9 +31,6 @@ void vp9_read_frame_size(struct vp9_read_bit_buffer *rb,
|
||||
int *width, int *height);
|
||||
BITSTREAM_PROFILE vp9_read_profile(struct vp9_read_bit_buffer *rb);
|
||||
|
||||
void vp9_dec_build_inter_predictors_sb(struct VP9Decoder *const pbi,
|
||||
MACROBLOCKD *xd, int mi_row, int mi_col,
|
||||
BLOCK_SIZE bsize);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
@ -98,24 +98,7 @@ static void set_segment_id(VP9_COMMON *cm, BLOCK_SIZE bsize,
|
||||
|
||||
for (y = 0; y < ymis; y++)
|
||||
for (x = 0; x < xmis; x++)
|
||||
cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
|
||||
}
|
||||
|
||||
static void copy_segment_id(const VP9_COMMON *cm,
|
||||
const uint8_t *last_segment_ids,
|
||||
uint8_t *current_segment_ids,
|
||||
BLOCK_SIZE bsize, int mi_row, int mi_col) {
|
||||
const int mi_offset = mi_row * cm->mi_cols + mi_col;
|
||||
const int bw = num_8x8_blocks_wide_lookup[bsize];
|
||||
const int bh = num_8x8_blocks_high_lookup[bsize];
|
||||
const int xmis = MIN(cm->mi_cols - mi_col, bw);
|
||||
const int ymis = MIN(cm->mi_rows - mi_row, bh);
|
||||
int x, y;
|
||||
|
||||
for (y = 0; y < ymis; y++)
|
||||
for (x = 0; x < xmis; x++)
|
||||
current_segment_ids[mi_offset + y * cm->mi_cols + x] =
|
||||
last_segment_ids[mi_offset + y * cm->mi_cols + x];
|
||||
cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
|
||||
}
|
||||
|
||||
static int read_intra_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
|
||||
@ -128,11 +111,8 @@ static int read_intra_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
|
||||
if (!seg->enabled)
|
||||
return 0; // Default for disabled segmentation
|
||||
|
||||
if (!seg->update_map) {
|
||||
copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
|
||||
bsize, mi_row, mi_col);
|
||||
if (!seg->update_map)
|
||||
return 0;
|
||||
}
|
||||
|
||||
segment_id = read_segment_id(r, seg);
|
||||
set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
|
||||
@ -151,11 +131,8 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
|
||||
|
||||
predicted_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
|
||||
bsize, mi_row, mi_col);
|
||||
if (!seg->update_map) {
|
||||
copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
|
||||
bsize, mi_row, mi_col);
|
||||
if (!seg->update_map)
|
||||
return predicted_segment_id;
|
||||
}
|
||||
|
||||
if (seg->temporal_update) {
|
||||
const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
|
||||
@ -442,18 +419,11 @@ static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
|
||||
}
|
||||
}
|
||||
|
||||
static void fpm_sync(void *const data, int mi_row) {
|
||||
VP9Decoder *const pbi = (VP9Decoder *)data;
|
||||
vp9_frameworker_wait(pbi->frame_worker_owner, pbi->prev_buf,
|
||||
mi_row << MI_BLOCK_SIZE_LOG2);
|
||||
}
|
||||
|
||||
static void read_inter_block_mode_info(VP9Decoder *const pbi,
|
||||
static void read_inter_block_mode_info(VP9_COMMON *const cm,
|
||||
MACROBLOCKD *const xd,
|
||||
const TileInfo *const tile,
|
||||
MODE_INFO *const mi,
|
||||
int mi_row, int mi_col, vp9_reader *r) {
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
MB_MODE_INFO *const mbmi = &mi->mbmi;
|
||||
const BLOCK_SIZE bsize = mbmi->sb_type;
|
||||
const int allow_hp = cm->allow_high_precision_mv;
|
||||
@ -473,7 +443,7 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
|
||||
vp9_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
|
||||
&ref_buf->sf);
|
||||
vp9_find_mv_refs(cm, xd, tile, mi, frame, mbmi->ref_mvs[frame],
|
||||
mi_row, mi_col, fpm_sync, (void *)pbi);
|
||||
mi_row, mi_col);
|
||||
}
|
||||
|
||||
inter_mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
|
||||
@ -547,11 +517,10 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
|
||||
}
|
||||
}
|
||||
|
||||
static void read_inter_frame_mode_info(VP9Decoder *const pbi,
|
||||
static void read_inter_frame_mode_info(VP9_COMMON *const cm,
|
||||
MACROBLOCKD *const xd,
|
||||
const TileInfo *const tile,
|
||||
int mi_row, int mi_col, vp9_reader *r) {
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
MODE_INFO *const mi = xd->mi[0].src_mi;
|
||||
MB_MODE_INFO *const mbmi = &mi->mbmi;
|
||||
int inter_block;
|
||||
@ -564,15 +533,14 @@ static void read_inter_frame_mode_info(VP9Decoder *const pbi,
|
||||
mbmi->tx_size = read_tx_size(cm, xd, !mbmi->skip || !inter_block, r);
|
||||
|
||||
if (inter_block)
|
||||
read_inter_block_mode_info(pbi, xd, tile, mi, mi_row, mi_col, r);
|
||||
read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
|
||||
else
|
||||
read_intra_block_mode_info(cm, mi, r);
|
||||
}
|
||||
|
||||
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
|
||||
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
|
||||
const TileInfo *const tile,
|
||||
int mi_row, int mi_col, vp9_reader *r) {
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
MODE_INFO *const mi = xd->mi[0].src_mi;
|
||||
const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
|
||||
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
|
||||
@ -584,7 +552,7 @@ void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
|
||||
if (frame_is_intra_only(cm))
|
||||
read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r);
|
||||
else
|
||||
read_inter_frame_mode_info(pbi, xd, tile, mi_row, mi_col, r);
|
||||
read_inter_frame_mode_info(cm, xd, tile, mi_row, mi_col, r);
|
||||
|
||||
for (h = 0; h < y_mis; ++h) {
|
||||
MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
|
||||
|
@ -11,7 +11,6 @@
|
||||
#ifndef VP9_DECODER_VP9_DECODEMV_H_
|
||||
#define VP9_DECODER_VP9_DECODEMV_H_
|
||||
|
||||
#include "vp9/decoder/vp9_decoder.h"
|
||||
#include "vp9/decoder/vp9_reader.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
@ -20,7 +19,7 @@ extern "C" {
|
||||
|
||||
struct TileInfo;
|
||||
|
||||
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
|
||||
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
|
||||
const struct TileInfo *const tile,
|
||||
int mi_row, int mi_col, vp9_reader *r);
|
||||
|
||||
|
@ -28,7 +28,6 @@
|
||||
#include "vp9/common/vp9_quant_common.h"
|
||||
#include "vp9/common/vp9_reconintra.h"
|
||||
#include "vp9/common/vp9_systemdependent.h"
|
||||
#include "vp9/common/vp9_thread.h"
|
||||
|
||||
#include "vp9/decoder/vp9_decodeframe.h"
|
||||
#include "vp9/decoder/vp9_decoder.h"
|
||||
@ -62,7 +61,7 @@ static void vp9_dec_free_mi(VP9_COMMON *cm) {
|
||||
cm->mip = NULL;
|
||||
}
|
||||
|
||||
VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
|
||||
VP9Decoder *vp9_decoder_create() {
|
||||
VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
|
||||
VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
|
||||
|
||||
@ -90,12 +89,9 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
|
||||
|
||||
// Initialize the references to not point to any frame buffers.
|
||||
vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
|
||||
vpx_memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));
|
||||
|
||||
cm->current_video_frame = 0;
|
||||
pbi->ready_for_new_data = 1;
|
||||
pbi->common.buffer_pool = pool;
|
||||
|
||||
cm->bit_depth = VPX_BITS_8;
|
||||
cm->dequant_bit_depth = VPX_BITS_8;
|
||||
|
||||
@ -118,6 +114,7 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
|
||||
}
|
||||
|
||||
void vp9_decoder_remove(VP9Decoder *pbi) {
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
int i;
|
||||
|
||||
vp9_get_worker_interface()->end(&pbi->lf_worker);
|
||||
@ -135,6 +132,7 @@ void vp9_decoder_remove(VP9Decoder *pbi) {
|
||||
vp9_loop_filter_dealloc(&pbi->lf_row_sync);
|
||||
}
|
||||
|
||||
vp9_remove_common(cm);
|
||||
vpx_free(pbi);
|
||||
}
|
||||
|
||||
@ -179,7 +177,6 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
|
||||
VP9_REFFRAME ref_frame_flag,
|
||||
YV12_BUFFER_CONFIG *sd) {
|
||||
RefBuffer *ref_buf = NULL;
|
||||
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
|
||||
|
||||
// TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
|
||||
// encoder is using the frame buffers for. This is just a stub to keep the
|
||||
@ -207,11 +204,11 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
|
||||
const int free_fb = get_free_fb(cm);
|
||||
// Decrease ref_count since it will be increased again in
|
||||
// ref_cnt_fb() below.
|
||||
--frame_bufs[free_fb].ref_count;
|
||||
cm->frame_bufs[free_fb].ref_count--;
|
||||
|
||||
// Manage the reference counters and copy image.
|
||||
ref_cnt_fb(frame_bufs, ref_fb_ptr, free_fb);
|
||||
ref_buf->buf = &frame_bufs[*ref_fb_ptr].buf;
|
||||
ref_cnt_fb(cm->frame_bufs, ref_fb_ptr, free_fb);
|
||||
ref_buf->buf = &cm->frame_bufs[*ref_fb_ptr].buf;
|
||||
vp8_yv12_copy_frame(sd, ref_buf->buf);
|
||||
}
|
||||
|
||||
@ -222,49 +219,30 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
|
||||
static void swap_frame_buffers(VP9Decoder *pbi) {
|
||||
int ref_index = 0, mask;
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
|
||||
|
||||
lock_buffer_pool(pool);
|
||||
for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
|
||||
const int old_idx = cm->ref_frame_map[ref_index];
|
||||
// Current thread releases the holding of reference frame.
|
||||
decrease_ref_count(old_idx, frame_bufs, pool);
|
||||
|
||||
// Release the reference frame in reference map.
|
||||
if ((mask & 1) && old_idx >= 0) {
|
||||
decrease_ref_count(old_idx, frame_bufs, pool);
|
||||
if (mask & 1) {
|
||||
const int old_idx = cm->ref_frame_map[ref_index];
|
||||
ref_cnt_fb(cm->frame_bufs, &cm->ref_frame_map[ref_index],
|
||||
cm->new_fb_idx);
|
||||
if (old_idx >= 0 && cm->frame_bufs[old_idx].ref_count == 0)
|
||||
cm->release_fb_cb(cm->cb_priv,
|
||||
&cm->frame_bufs[old_idx].raw_frame_buffer);
|
||||
}
|
||||
cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
|
||||
++ref_index;
|
||||
}
|
||||
|
||||
// Current thread releases the holding of reference frame.
|
||||
for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
|
||||
const int old_idx = cm->ref_frame_map[ref_index];
|
||||
decrease_ref_count(old_idx, frame_bufs, pool);
|
||||
cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
|
||||
}
|
||||
unlock_buffer_pool(pool);
|
||||
pbi->hold_ref_buf = 0;
|
||||
cm->frame_to_show = get_frame_new_buffer(cm);
|
||||
|
||||
if (!pbi->frame_parallel_decode || !cm->show_frame) {
|
||||
lock_buffer_pool(pool);
|
||||
--frame_bufs[cm->new_fb_idx].ref_count;
|
||||
unlock_buffer_pool(pool);
|
||||
}
|
||||
cm->frame_bufs[cm->new_fb_idx].ref_count--;
|
||||
|
||||
// Invalidate these references until the next frame starts.
|
||||
for (ref_index = 0; ref_index < 3; ref_index++)
|
||||
cm->frame_refs[ref_index].idx = INT_MAX;
|
||||
cm->frame_refs[ref_index].idx = -1;
|
||||
}
|
||||
|
||||
int vp9_receive_compressed_data(VP9Decoder *pbi,
|
||||
size_t size, const uint8_t **psource) {
|
||||
VP9_COMMON *volatile const cm = &pbi->common;
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
|
||||
const uint8_t *source = *psource;
|
||||
int retcode = 0;
|
||||
|
||||
@ -286,38 +264,20 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
|
||||
pbi->ready_for_new_data = 0;
|
||||
|
||||
// Check if the previous frame was a frame without any references to it.
|
||||
// Release frame buffer if not decoding in frame parallel mode.
|
||||
if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0
|
||||
&& frame_bufs[cm->new_fb_idx].ref_count == 0)
|
||||
pool->release_fb_cb(pool->cb_priv,
|
||||
&frame_bufs[cm->new_fb_idx].raw_frame_buffer);
|
||||
if (cm->new_fb_idx >= 0 && cm->frame_bufs[cm->new_fb_idx].ref_count == 0)
|
||||
cm->release_fb_cb(cm->cb_priv,
|
||||
&cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer);
|
||||
cm->new_fb_idx = get_free_fb(cm);
|
||||
|
||||
// Assign a MV array to the frame buffer.
|
||||
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
|
||||
|
||||
pbi->hold_ref_buf = 0;
|
||||
if (pbi->frame_parallel_decode) {
|
||||
VP9Worker *const worker = pbi->frame_worker_owner;
|
||||
vp9_frameworker_lock_stats(worker);
|
||||
frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
|
||||
// Reset decoding progress.
|
||||
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
|
||||
pbi->cur_buf->row = -1;
|
||||
pbi->cur_buf->col = -1;
|
||||
vp9_frameworker_unlock_stats(worker);
|
||||
} else {
|
||||
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
|
||||
}
|
||||
|
||||
cm->cur_frame = &cm->frame_bufs[cm->new_fb_idx];
|
||||
|
||||
if (setjmp(cm->error.jmp)) {
|
||||
const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
|
||||
VP9_COMMON *const cm = &pbi->common;
|
||||
int i;
|
||||
|
||||
pbi->need_resync = 1;
|
||||
cm->error.setjmp = 0;
|
||||
pbi->ready_for_new_data = 1;
|
||||
|
||||
// Synchronize all threads immediately as a subsequent decode call may
|
||||
// cause a resize invalidating some allocations.
|
||||
@ -326,36 +286,11 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
|
||||
winterface->sync(&pbi->tile_workers[i]);
|
||||
}
|
||||
|
||||
lock_buffer_pool(pool);
|
||||
// Release all the reference buffers if worker thread is holding them.
|
||||
if (pbi->hold_ref_buf == 1) {
|
||||
int ref_index = 0, mask;
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
|
||||
for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
|
||||
const int old_idx = cm->ref_frame_map[ref_index];
|
||||
// Current thread releases the holding of reference frame.
|
||||
decrease_ref_count(old_idx, frame_bufs, pool);
|
||||
|
||||
// Release the reference frame in reference map.
|
||||
if ((mask & 1) && old_idx >= 0) {
|
||||
decrease_ref_count(old_idx, frame_bufs, pool);
|
||||
}
|
||||
++ref_index;
|
||||
}
|
||||
|
||||
// Current thread releases the holding of reference frame.
|
||||
for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
|
||||
const int old_idx = cm->ref_frame_map[ref_index];
|
||||
decrease_ref_count(old_idx, frame_bufs, pool);
|
||||
}
|
||||
pbi->hold_ref_buf = 0;
|
||||
}
|
||||
// Release current frame.
|
||||
decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
|
||||
unlock_buffer_pool(pool);
|
||||
|
||||
vp9_clear_system_state();
|
||||
|
||||
if (cm->new_fb_idx > 0 && cm->frame_bufs[cm->new_fb_idx].ref_count > 0)
|
||||
cm->frame_bufs[cm->new_fb_idx].ref_count--;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -375,31 +310,8 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
|
||||
cm->prev_frame = cm->cur_frame;
|
||||
}
|
||||
|
||||
// Update progress in frame parallel decode.
|
||||
if (pbi->frame_parallel_decode) {
|
||||
// Need to lock the mutex here as another thread may
|
||||
// be accessing this buffer.
|
||||
VP9Worker *const worker = pbi->frame_worker_owner;
|
||||
FrameWorkerData *const frame_worker_data = worker->data1;
|
||||
vp9_frameworker_lock_stats(worker);
|
||||
|
||||
if (cm->show_frame) {
|
||||
cm->current_video_frame++;
|
||||
}
|
||||
vp9_swap_current_and_last_seg_map(cm);
|
||||
frame_worker_data->frame_decoded = 1;
|
||||
frame_worker_data->frame_context_ready = 1;
|
||||
vp9_frameworker_signal_stats(worker);
|
||||
vp9_frameworker_unlock_stats(worker);
|
||||
} else {
|
||||
cm->last_width = cm->width;
|
||||
cm->last_height = cm->height;
|
||||
if (cm->show_frame) {
|
||||
cm->current_video_frame++;
|
||||
}
|
||||
|
||||
vp9_swap_current_and_last_seg_map(cm);
|
||||
}
|
||||
if (cm->show_frame)
|
||||
cm->current_video_frame++;
|
||||
|
||||
cm->error.setjmp = 0;
|
||||
return retcode;
|
||||
@ -422,8 +334,6 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
|
||||
if (!cm->show_frame)
|
||||
return ret;
|
||||
|
||||
pbi->ready_for_new_data = 1;
|
||||
|
||||
#if CONFIG_VP9_POSTPROC
|
||||
if (!cm->show_existing_frame) {
|
||||
ret = vp9_post_proc_frame(cm, sd, flags);
|
||||
|
@ -19,7 +19,6 @@
|
||||
#include "vp9/common/vp9_onyxc_int.h"
|
||||
#include "vp9/common/vp9_ppflags.h"
|
||||
#include "vp9/common/vp9_thread.h"
|
||||
#include "vp9/decoder/vp9_dthread.h"
|
||||
#include "vp9/decoder/vp9_reader.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
@ -34,7 +33,7 @@ typedef struct TileData {
|
||||
} TileData;
|
||||
|
||||
typedef struct TileWorkerData {
|
||||
struct VP9Decoder *pbi;
|
||||
VP9_COMMON *cm;
|
||||
vp9_reader bit_reader;
|
||||
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
|
||||
struct vpx_internal_error_info error_info;
|
||||
@ -51,12 +50,6 @@ typedef struct VP9Decoder {
|
||||
|
||||
int frame_parallel_decode; // frame-based threading.
|
||||
|
||||
// TODO(hkuang): Combine this with cur_buf in macroblockd as they are
|
||||
// the same.
|
||||
RefCntBuffer *cur_buf; // Current decoding frame buffer.
|
||||
RefCntBuffer *prev_buf; // Previous decoding frame buffer.
|
||||
|
||||
VP9Worker *frame_worker_owner; // frame_worker that owns this pbi.
|
||||
VP9Worker lf_worker;
|
||||
VP9Worker *tile_workers;
|
||||
TileWorkerData *tile_worker_data;
|
||||
@ -73,8 +66,7 @@ typedef struct VP9Decoder {
|
||||
|
||||
int max_threads;
|
||||
int inv_tile_order;
|
||||
int need_resync; // wait for key/intra-only frame.
|
||||
int hold_ref_buf; // hold the reference buffer.
|
||||
int need_resync; // wait for key/intra-only frame
|
||||
} VP9Decoder;
|
||||
|
||||
int vp9_receive_compressed_data(struct VP9Decoder *pbi,
|
||||
@ -91,6 +83,10 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
|
||||
VP9_REFFRAME ref_frame_flag,
|
||||
YV12_BUFFER_CONFIG *sd);
|
||||
|
||||
struct VP9Decoder *vp9_decoder_create();
|
||||
|
||||
void vp9_decoder_remove(struct VP9Decoder *pbi);
|
||||
|
||||
static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
|
||||
void *decrypt_state,
|
||||
const uint8_t *data) {
|
||||
@ -110,25 +106,6 @@ vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
|
||||
vpx_decrypt_cb decrypt_cb,
|
||||
void *decrypt_state);
|
||||
|
||||
struct VP9Decoder *vp9_decoder_create(BufferPool *const pool);
|
||||
|
||||
void vp9_decoder_remove(struct VP9Decoder *pbi);
|
||||
|
||||
static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
BufferPool *const pool) {
if (idx >= 0) {
--frame_bufs[idx].ref_count;
// A worker may only get a free framebuffer index when calling get_free_fb,
// but the private buffer is not set up until the header has been decoded.
// So if an error happens while decoding the header, the frame_bufs entry
// will not have a valid priv buffer.
if (frame_bufs[idx].ref_count == 0 &&
frame_bufs[idx].raw_frame_buffer.priv) {
pool->release_fb_cb(pool->cb_priv, &frame_bufs[idx].raw_frame_buffer);
}
}
}
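As an illustrative aside, the release rule above can be modelled with a few lines of standalone C (ToyBuf, toy_release and toy_decrease_ref_count are made-up names, not libvpx API): a buffer is handed back to its release callback only when its reference count drops to zero and its private storage was actually set up.

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int ref_count;
  void *priv;  /* stands in for raw_frame_buffer.priv */
} ToyBuf;

static void toy_release(ToyBuf *buf) {
  printf("releasing %p\n", (void *)buf);
  free(buf->priv);
  buf->priv = NULL;
}

static void toy_decrease_ref_count(ToyBuf *buf) {
  --buf->ref_count;
  /* Same guard as above: no callback for a buffer whose priv was never set
   * up, e.g. because an error hit before the frame header was decoded. */
  if (buf->ref_count == 0 && buf->priv != NULL)
    toy_release(buf);
}

int main(void) {
  ToyBuf with_priv = { 2, malloc(16) };
  ToyBuf without_priv = { 1, NULL };
  toy_decrease_ref_count(&with_priv);     /* 2 -> 1: kept */
  toy_decrease_ref_count(&with_priv);     /* 1 -> 0: released */
  toy_decrease_ref_count(&without_priv);  /* 0 refs but no priv: no callback */
  return 0;
}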
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
@ -1,187 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "./vpx_config.h"
|
||||
#include "vpx_mem/vpx_mem.h"
|
||||
#include "vp9/common/vp9_reconinter.h"
|
||||
#include "vp9/decoder/vp9_dthread.h"
|
||||
#include "vp9/decoder/vp9_decoder.h"
|
||||
|
||||
// TODO(hkuang): Clean up all the #ifdef in this file.
|
||||
void vp9_frameworker_lock_stats(VP9Worker *const worker) {
|
||||
#if CONFIG_MULTITHREAD
|
||||
FrameWorkerData *const worker_data = worker->data1;
|
||||
pthread_mutex_lock(&worker_data->stats_mutex);
|
||||
#else
|
||||
(void)worker;
|
||||
#endif
|
||||
}
|
||||
|
||||
void vp9_frameworker_unlock_stats(VP9Worker *const worker) {
|
||||
#if CONFIG_MULTITHREAD
|
||||
FrameWorkerData *const worker_data = worker->data1;
|
||||
pthread_mutex_unlock(&worker_data->stats_mutex);
|
||||
#else
|
||||
(void)worker;
|
||||
#endif
|
||||
}
|
||||
|
||||
void vp9_frameworker_signal_stats(VP9Worker *const worker) {
|
||||
#if CONFIG_MULTITHREAD
|
||||
FrameWorkerData *const worker_data = worker->data1;
|
||||
// TODO(hkuang): Investigate using broadcast or signal.
|
||||
pthread_cond_signal(&worker_data->stats_cond);
|
||||
#else
|
||||
(void)worker;
|
||||
#endif
|
||||
}
|
||||
|
||||
// TODO(hkuang): Remove worker parameter as it is only used in debug code.
|
||||
void vp9_frameworker_wait(VP9Worker *const worker, RefCntBuffer *const ref_buf,
|
||||
int row) {
|
||||
#if CONFIG_MULTITHREAD
|
||||
if (!ref_buf)
|
||||
return;
|
||||
|
||||
// Enabling the following line of code gives the best performance but may
// produce a harmless tsan warning.
|
||||
// if (ref_buf->row >= row && ref_buf->buf.corrupted != 1) return;
|
||||
|
||||
{
|
||||
// Find the worker thread that owns the reference frame. If the reference
|
||||
// frame has been fully decoded, it may not have owner.
|
||||
VP9Worker *const ref_worker = ref_buf->frame_worker_owner;
|
||||
FrameWorkerData *const ref_worker_data =
|
||||
(FrameWorkerData *)ref_worker->data1;
|
||||
const VP9Decoder *const pbi = ref_worker_data->pbi;
|
||||
|
||||
#ifdef DEBUG_THREAD
|
||||
{
|
||||
FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
|
||||
printf("%d %p worker is waiting for %d %p worker (%d) ref %d \r\n",
|
||||
worker_data->worker_id, worker, ref_worker_data->worker_id,
|
||||
ref_buf->frame_worker_owner, row, ref_buf->row);
|
||||
}
|
||||
#endif
|
||||
|
||||
vp9_frameworker_lock_stats(ref_worker);
|
||||
while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
|
||||
ref_buf->buf.corrupted != 1) {
|
||||
pthread_cond_wait(&ref_worker_data->stats_cond,
|
||||
&ref_worker_data->stats_mutex);
|
||||
}
|
||||
|
||||
if (ref_buf->buf.corrupted == 1) {
|
||||
FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
|
||||
vp9_frameworker_unlock_stats(ref_worker);
|
||||
vpx_internal_error(&worker_data->pbi->common.error,
|
||||
VPX_CODEC_CORRUPT_FRAME,
|
||||
"Worker %p failed to decode frame", worker);
|
||||
}
|
||||
vp9_frameworker_unlock_stats(ref_worker);
|
||||
}
|
||||
#else
|
||||
(void)worker;
|
||||
(void)ref_buf;
|
||||
(void)row;
|
||||
(void)ref_buf;
|
||||
#endif // CONFIG_MULTITHREAD
|
||||
}
|
||||
|
||||
void vp9_frameworker_broadcast(RefCntBuffer *const buf, int row) {
|
||||
#if CONFIG_MULTITHREAD
|
||||
VP9Worker *worker = buf->frame_worker_owner;
|
||||
|
||||
#ifdef DEBUG_THREAD
|
||||
{
|
||||
FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
|
||||
printf("%d %p worker decode to (%d) \r\n", worker_data->worker_id,
|
||||
buf->frame_worker_owner, row);
|
||||
}
|
||||
#endif
|
||||
|
||||
vp9_frameworker_lock_stats(worker);
|
||||
buf->row = row;
|
||||
vp9_frameworker_signal_stats(worker);
|
||||
vp9_frameworker_unlock_stats(worker);
|
||||
#else
|
||||
(void)buf;
|
||||
(void)row;
|
||||
#endif // CONFIG_MULTITHREAD
|
||||
}
|
||||
|
||||
void vp9_frameworker_copy_context(VP9Worker *const dst_worker,
|
||||
VP9Worker *const src_worker) {
|
||||
#if CONFIG_MULTITHREAD
|
||||
FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
|
||||
FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
|
||||
VP9_COMMON *const src_cm = &src_worker_data->pbi->common;
|
||||
VP9_COMMON *const dst_cm = &dst_worker_data->pbi->common;
|
||||
int i;
|
||||
|
||||
// Wait until source frame's context is ready.
|
||||
vp9_frameworker_lock_stats(src_worker);
|
||||
while (!src_worker_data->frame_context_ready) {
|
||||
pthread_cond_wait(&src_worker_data->stats_cond,
|
||||
&src_worker_data->stats_mutex);
|
||||
}
|
||||
|
||||
// src worker may have already finished decoding a frame and swapped the mi.
|
||||
// TODO(hkuang): Remove the following code after implementing no-ModeInfo decoding.
|
||||
if (src_worker_data->frame_decoded) {
|
||||
dst_cm->prev_mip = src_cm->prev_mip;
|
||||
dst_cm->prev_mi = src_cm->prev_mi;
|
||||
dst_cm->last_frame_seg_map = src_cm->last_frame_seg_map;
|
||||
} else {
|
||||
dst_cm->prev_mip = src_cm->mip;
|
||||
dst_cm->prev_mi = src_cm->mi;
|
||||
dst_cm->last_frame_seg_map = src_cm->current_frame_seg_map;
|
||||
}
|
||||
dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
|
||||
vp9_frameworker_unlock_stats(src_worker);
|
||||
|
||||
dst_worker_data->pbi->prev_buf =
|
||||
src_worker_data->pbi->common.show_existing_frame ?
|
||||
NULL : src_worker_data->pbi->cur_buf;
|
||||
|
||||
dst_cm->last_width = !src_cm->show_existing_frame ?
|
||||
src_cm->width : src_cm->last_width;
|
||||
dst_cm->last_height = !src_cm->show_existing_frame ?
|
||||
src_cm->height : src_cm->last_height;
|
||||
dst_cm->display_width = src_cm->display_width;
|
||||
dst_cm->display_height = src_cm->display_height;
|
||||
dst_cm->subsampling_x = src_cm->subsampling_x;
|
||||
dst_cm->subsampling_y = src_cm->subsampling_y;
|
||||
dst_cm->last_show_frame = !src_cm->show_existing_frame ?
|
||||
src_cm->show_frame : src_cm->last_show_frame;
|
||||
dst_cm->last_frame_type = src_cm->last_frame_type;
|
||||
dst_cm->frame_type = src_cm->frame_type;
|
||||
dst_cm->y_dc_delta_q = src_cm->y_dc_delta_q;
|
||||
dst_cm->uv_dc_delta_q = src_cm->uv_dc_delta_q;
|
||||
dst_cm->uv_ac_delta_q = src_cm->uv_ac_delta_q;
|
||||
dst_cm->base_qindex = src_cm->base_qindex;
|
||||
|
||||
for (i = 0; i < REF_FRAMES; ++i)
|
||||
dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i];
|
||||
|
||||
memcpy(dst_cm->lf_info.lfthr, src_cm->lf_info.lfthr,
|
||||
(MAX_LOOP_FILTER + 1) * sizeof(loop_filter_thresh));
|
||||
dst_cm->lf.last_sharpness_level = src_cm->lf.sharpness_level;
|
||||
dst_cm->lf.filter_level = src_cm->lf.filter_level;
|
||||
memcpy(dst_cm->lf.ref_deltas, src_cm->lf.ref_deltas, MAX_REF_LF_DELTAS);
|
||||
memcpy(dst_cm->lf.mode_deltas, src_cm->lf.mode_deltas, MAX_MODE_LF_DELTAS);
|
||||
dst_cm->seg = src_cm->seg;
|
||||
memcpy(dst_cm->frame_contexts, src_cm->frame_contexts,
|
||||
FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0]));
|
||||
#else
|
||||
(void) dst_worker;
|
||||
(void) src_worker;
|
||||
#endif // CONFIG_MULTITHREAD
|
||||
}
|
@ -1,65 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

#ifndef VP9_DECODER_VP9_DTHREAD_H_
#define VP9_DECODER_VP9_DTHREAD_H_

#include "./vpx_config.h"
#include "vp9/common/vp9_thread.h"
#include "vpx/internal/vpx_codec_internal.h"

struct VP9Common;
struct VP9Decoder;

// WorkerData for the FrameWorker thread. It contains all the information of
// the worker and decode structures for decoding a frame.
typedef struct FrameWorkerData {
struct VP9Decoder *pbi;
const uint8_t *data;
const uint8_t *data_end;
size_t data_size;
void *user_priv;
int result;
int worker_id;

// scratch_buffer is used in frame parallel mode only.
// It is used to make a copy of the compressed data.
uint8_t *scratch_buffer;
size_t scratch_buffer_size;

#if CONFIG_MULTITHREAD
pthread_mutex_t stats_mutex;
pthread_cond_t stats_cond;
#endif

int frame_context_ready; // Current frame's context is ready to read.
int frame_decoded; // Finished decoding current frame.
} FrameWorkerData;

void vp9_frameworker_lock_stats(VP9Worker *const worker);
void vp9_frameworker_unlock_stats(VP9Worker *const worker);
void vp9_frameworker_signal_stats(VP9Worker *const worker);

// Wait until ref_buf has been decoded to row in real pixel unit.
// Note: worker may already finish decoding ref_buf and release it in order to
// start decoding next frame. So need to check whether worker is still decoding
// ref_buf.
void vp9_frameworker_wait(VP9Worker *const worker, RefCntBuffer *const ref_buf,
int row);

// FrameWorker broadcasts its decoding progress so other workers that are
// waiting on it can resume decoding.
void vp9_frameworker_broadcast(RefCntBuffer *const buf, int row);

// Copy necessary decoding context from src worker to dst worker.
void vp9_frameworker_copy_context(VP9Worker *const dst_worker,
VP9Worker *const src_worker);

#endif // VP9_DECODER_VP9_DTHREAD_H_
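As an illustrative aside, the wait/broadcast protocol declared above amounts to a condition variable guarded by a mutex: the decoding worker publishes how many rows of its frame are done, and a dependent worker blocks until the row it needs is available. The standalone sketch below illustrates that pattern only; it is not the libvpx API, and all names and row numbers are made up.

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t cond;
  int rows_done;  /* plays the role of RefCntBuffer::row */
} RowProgress;

/* Producer side: publish decoding progress (cf. vp9_frameworker_broadcast). */
static void progress_broadcast(RowProgress *p, int row) {
  pthread_mutex_lock(&p->mutex);
  p->rows_done = row;
  pthread_cond_broadcast(&p->cond);
  pthread_mutex_unlock(&p->mutex);
}

/* Consumer side: block until the needed row exists (cf. vp9_frameworker_wait). */
static void progress_wait(RowProgress *p, int row) {
  pthread_mutex_lock(&p->mutex);
  while (p->rows_done < row)
    pthread_cond_wait(&p->cond, &p->mutex);
  pthread_mutex_unlock(&p->mutex);
}

static void *producer(void *arg) {
  RowProgress *p = (RowProgress *)arg;
  int row;
  for (row = 8; row <= 64; row += 8)  /* pretend to decode 8 rows at a time */
    progress_broadcast(p, row);
  return NULL;
}

int main(void) {
  RowProgress p;
  pthread_t t;
  p.rows_done = -1;
  pthread_mutex_init(&p.mutex, NULL);
  pthread_cond_init(&p.cond, NULL);
  pthread_create(&t, NULL, producer, &p);
  progress_wait(&p, 32);  /* returns once at least 32 rows are published */
  printf("at least 32 rows decoded\n");
  pthread_join(&t, NULL);
  pthread_cond_destroy(&p.cond);
  pthread_mutex_destroy(&p.mutex);
  return 0;
}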
|
@ -204,6 +204,8 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
|
||||
// Delete segmentation map
|
||||
vpx_free(cpi->segmentation_map);
|
||||
cpi->segmentation_map = NULL;
|
||||
vpx_free(cm->last_frame_seg_map);
|
||||
cm->last_frame_seg_map = NULL;
|
||||
vpx_free(cpi->coding_context.last_frame_seg_map_copy);
|
||||
cpi->coding_context.last_frame_seg_map_copy = NULL;
|
||||
|
||||
@ -1393,8 +1395,7 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
|
||||
}
|
||||
|
||||
|
||||
VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
|
||||
BufferPool *const pool) {
|
||||
VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf) {
|
||||
unsigned int i;
|
||||
VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP));
|
||||
VP9_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
|
||||
@ -1422,7 +1423,6 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
|
||||
sizeof(*cm->frame_contexts)));
|
||||
|
||||
cpi->use_svc = 0;
|
||||
cpi->common.buffer_pool = pool;
|
||||
|
||||
init_config(cpi, oxcf);
|
||||
vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
|
||||
@ -2343,14 +2343,13 @@ static int recode_loop_test(const VP9_COMP *cpi,
|
||||
|
||||
void vp9_update_reference_frames(VP9_COMP *cpi) {
|
||||
VP9_COMMON * const cm = &cpi->common;
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
|
||||
// At this point the new frame has been encoded.
|
||||
// If any buffer copy / swapping is signaled it should be done here.
|
||||
if (cm->frame_type == KEY_FRAME) {
|
||||
ref_cnt_fb(pool->frame_bufs,
|
||||
ref_cnt_fb(cm->frame_bufs,
|
||||
&cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
|
||||
ref_cnt_fb(pool->frame_bufs,
|
||||
ref_cnt_fb(cm->frame_bufs,
|
||||
&cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
|
||||
} else if (vp9_preserve_existing_gf(cpi)) {
|
||||
// We have decided to preserve the previously existing golden frame as our
|
||||
@ -2363,7 +2362,7 @@ void vp9_update_reference_frames(VP9_COMP *cpi) {
|
||||
// slot and, if we're updating the GF, the current frame becomes the new GF.
|
||||
int tmp;
|
||||
|
||||
ref_cnt_fb(pool->frame_bufs,
|
||||
ref_cnt_fb(cm->frame_bufs,
|
||||
&cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
|
||||
|
||||
tmp = cpi->alt_fb_idx;
|
||||
@ -2382,7 +2381,7 @@ void vp9_update_reference_frames(VP9_COMP *cpi) {
|
||||
arf_idx = gf_group->arf_update_idx[gf_group->index];
|
||||
}
|
||||
|
||||
ref_cnt_fb(pool->frame_bufs,
|
||||
ref_cnt_fb(cm->frame_bufs,
|
||||
&cm->ref_frame_map[arf_idx], cm->new_fb_idx);
|
||||
vpx_memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
|
||||
cpi->interp_filter_selected[0],
|
||||
@ -2390,7 +2389,7 @@ void vp9_update_reference_frames(VP9_COMP *cpi) {
|
||||
}
|
||||
|
||||
if (cpi->refresh_golden_frame) {
|
||||
ref_cnt_fb(pool->frame_bufs,
|
||||
ref_cnt_fb(cm->frame_bufs,
|
||||
&cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
|
||||
if (!cpi->rc.is_src_frame_alt_ref)
|
||||
vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
|
||||
@ -2404,7 +2403,7 @@ void vp9_update_reference_frames(VP9_COMP *cpi) {
|
||||
}
|
||||
|
||||
if (cpi->refresh_last_frame) {
|
||||
ref_cnt_fb(pool->frame_bufs,
|
||||
ref_cnt_fb(cm->frame_bufs,
|
||||
&cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
|
||||
if (!cpi->rc.is_src_frame_alt_ref)
|
||||
vpx_memcpy(cpi->interp_filter_selected[LAST_FRAME],
|
||||
@ -2463,45 +2462,44 @@ void vp9_scale_references(VP9_COMP *cpi) {
|
||||
// Need to convert from VP9_REFFRAME to index into ref_mask (subtract 1).
|
||||
if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
|
||||
const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
|
||||
BufferPool *const pool = cm->buffer_pool;
|
||||
const YV12_BUFFER_CONFIG *const ref = &pool->frame_bufs[idx].buf;
|
||||
const YV12_BUFFER_CONFIG *const ref = &cm->frame_bufs[idx].buf;
|
||||
|
||||
#if CONFIG_VP9_HIGHBITDEPTH
|
||||
if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
|
||||
const int new_fb = get_free_fb(cm);
|
||||
cm->cur_frame = &pool->frame_bufs[new_fb];
|
||||
vp9_realloc_frame_buffer(&pool->frame_bufs[new_fb].buf,
|
||||
cm->cur_frame = &cm->frame_bufs[new_fb];
|
||||
vp9_realloc_frame_buffer(&cm->frame_bufs[new_fb].buf,
|
||||
cm->width, cm->height,
|
||||
cm->subsampling_x, cm->subsampling_y,
|
||||
cm->use_highbitdepth,
|
||||
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
|
||||
NULL, NULL, NULL);
|
||||
scale_and_extend_frame(ref, &pool->frame_bufs[new_fb].buf,
|
||||
scale_and_extend_frame(ref, &cm->frame_bufs[new_fb].buf,
|
||||
(int)cm->bit_depth);
|
||||
#else
|
||||
if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
|
||||
const int new_fb = get_free_fb(cm);
|
||||
vp9_realloc_frame_buffer(&pool->frame_bufs[new_fb].buf,
|
||||
vp9_realloc_frame_buffer(&cm->frame_bufs[new_fb].buf,
|
||||
cm->width, cm->height,
|
||||
cm->subsampling_x, cm->subsampling_y,
|
||||
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
|
||||
NULL, NULL, NULL);
|
||||
scale_and_extend_frame(ref, &pool->frame_bufs[new_fb].buf);
|
||||
scale_and_extend_frame(ref, &cm->frame_bufs[new_fb].buf);
|
||||
#endif // CONFIG_VP9_HIGHBITDEPTH
|
||||
cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
|
||||
if (pool->frame_bufs[new_fb].mvs == NULL ||
|
||||
pool->frame_bufs[new_fb].mi_rows < cm->mi_rows ||
|
||||
pool->frame_bufs[new_fb].mi_cols < cm->mi_cols) {
|
||||
vpx_free(pool->frame_bufs[new_fb].mvs);
|
||||
pool->frame_bufs[new_fb].mvs =
|
||||
if (cm->frame_bufs[new_fb].mvs == NULL ||
|
||||
cm->frame_bufs[new_fb].mi_rows < cm->mi_rows ||
|
||||
cm->frame_bufs[new_fb].mi_cols < cm->mi_cols) {
|
||||
vpx_free(cm->frame_bufs[new_fb].mvs);
|
||||
cm->frame_bufs[new_fb].mvs =
|
||||
(MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
|
||||
sizeof(*pool->frame_bufs[new_fb].mvs));
|
||||
pool->frame_bufs[new_fb].mi_rows = cm->mi_rows;
|
||||
pool->frame_bufs[new_fb].mi_cols = cm->mi_cols;
|
||||
sizeof(*cm->frame_bufs[new_fb].mvs));
|
||||
cm->frame_bufs[new_fb].mi_rows = cm->mi_rows;
|
||||
cm->frame_bufs[new_fb].mi_cols = cm->mi_cols;
|
||||
}
|
||||
} else {
|
||||
cpi->scaled_ref_idx[ref_frame - 1] = idx;
|
||||
++pool->frame_bufs[idx].ref_count;
|
||||
++cm->frame_bufs[idx].ref_count;
|
||||
}
|
||||
} else {
|
||||
cpi->scaled_ref_idx[ref_frame - 1] = INVALID_REF_BUFFER_IDX;
|
||||
@ -2514,8 +2512,8 @@ static void release_scaled_references(VP9_COMP *cpi) {
|
||||
int i;
|
||||
for (i = 0; i < MAX_REF_FRAMES; ++i) {
|
||||
const int idx = cpi->scaled_ref_idx[i];
|
||||
RefCntBuffer *const buf = idx != INVALID_REF_BUFFER_IDX ?
|
||||
&cm->buffer_pool->frame_bufs[idx] : NULL;
|
||||
RefCntBuffer *const buf =
|
||||
idx != INVALID_REF_BUFFER_IDX ? &cm->frame_bufs[idx] : NULL;
|
||||
if (buf != NULL) {
|
||||
--buf->ref_count;
|
||||
cpi->scaled_ref_idx[i] = INVALID_REF_BUFFER_IDX;
|
||||
@ -2732,7 +2730,7 @@ void set_frame_size(VP9_COMP *cpi) {

for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[idx].buf;
YV12_BUFFER_CONFIG *const buf = &cm->frame_bufs[idx].buf;
RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - 1];
ref_buf->buf = buf;
ref_buf->idx = idx;
@ -3561,7 +3559,6 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
int64_t *time_stamp, int64_t *time_end, int flush) {
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
VP9_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
RATE_CONTROL *const rc = &cpi->rc;
struct vpx_usec_timer cmptimer;
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
@ -3716,9 +3713,9 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,

// Find a free buffer for the new frame, releasing the reference previously
// held.
pool->frame_bufs[cm->new_fb_idx].ref_count--;
cm->frame_bufs[cm->new_fb_idx].ref_count--;
cm->new_fb_idx = get_free_fb(cm);
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
cm->cur_frame = &cm->frame_bufs[cm->new_fb_idx];

if (!cpi->use_svc && cpi->multi_arf_allowed) {
if (cm->frame_type == KEY_FRAME) {
@ -457,8 +457,7 @@ typedef struct VP9_COMP {

void vp9_initialize_enc(void);

struct VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
BufferPool *const pool);
struct VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf);
void vp9_remove_compressor(VP9_COMP *cpi);

void vp9_change_config(VP9_COMP *cpi, const VP9EncoderConfig *oxcf);
@ -519,9 +518,8 @@ static INLINE int get_ref_frame_idx(const VP9_COMP *cpi,

static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
VP9_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
return &pool->frame_bufs[cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)]]
VP9_COMMON * const cm = &cpi->common;
return &cm->frame_bufs[cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)]]
.buf;
}

@ -532,12 +532,11 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) {
}

if (cpi->ref_frame_flags & VP9_GOLD_FLAG) {
BufferPool *const pool = cm->buffer_pool;
const int ref_idx =
cm->ref_frame_map[get_ref_frame_idx(cpi, GOLDEN_FRAME)];
const int scaled_idx = cpi->scaled_ref_idx[GOLDEN_FRAME - 1];

gld_yv12 = (scaled_idx != ref_idx) ? &pool->frame_bufs[scaled_idx].buf :
gld_yv12 = (scaled_idx != ref_idx) ? &cm->frame_bufs[scaled_idx].buf :
get_ref_frame_buffer(cpi, GOLDEN_FRAME);
} else {
gld_yv12 = NULL;
@ -624,7 +624,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,

if (cm->use_prev_frame_mvs)
vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
candidates, mi_row, mi_col, NULL, NULL);
candidates, mi_row, mi_col);
else
const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
xd->mi[0].src_mi,
@ -988,7 +988,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
sf, sf);
vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
candidates, mi_row, mi_col, NULL, NULL);
candidates, mi_row, mi_col);

vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
&dummy_mv[0], &dummy_mv[1]);

@ -535,8 +535,7 @@ const YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi,
const VP9_COMMON *const cm = &cpi->common;
const int ref_idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
return (scaled_idx != ref_idx) ?
&cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL;
return (scaled_idx != ref_idx) ? &cm->frame_bufs[scaled_idx].buf : NULL;
}

int vp9_get_switchable_rate(const VP9_COMP *cpi, const MACROBLOCKD *const xd) {

@ -2026,8 +2026,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);

// Gets an initial list of candidate vectors from neighbours and orders them
vp9_find_mv_refs(cm, xd, tile, mi, ref_frame, candidates, mi_row, mi_col,
NULL, NULL);
vp9_find_mv_refs(cm, xd, tile, mi, ref_frame, candidates, mi_row, mi_col);

// Candidate refinement carried out at encoder and decoder
vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
@ -88,8 +88,6 @@ struct vpx_codec_alg_priv {
vpx_codec_pkt_list_decl(256) pkt_list;
unsigned int fixed_kf_cntr;
vpx_codec_priv_output_cx_pkt_cb_pair_t output_cx_pkt_cb;
// BufferPool that holds all reference frames.
BufferPool *buffer_pool;
};

static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
@ -739,10 +737,6 @@ static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
ctx->priv->enc.total_encoders = 1;
priv->buffer_pool =
(BufferPool *)vpx_calloc(1, sizeof(BufferPool));
if (priv->buffer_pool == NULL)
return VPX_CODEC_MEM_ERROR;

if (ctx->config.enc) {
// Update the reference to the config structure to an internal copy.
@ -761,7 +755,7 @@ static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
priv->oxcf.use_highbitdepth =
(ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
#endif
priv->cpi = vp9_create_compressor(&priv->oxcf, priv->buffer_pool);
priv->cpi = vp9_create_compressor(&priv->oxcf);
if (priv->cpi == NULL)
res = VPX_CODEC_MEM_ERROR;
else
@ -775,7 +769,6 @@ static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
static vpx_codec_err_t encoder_destroy(vpx_codec_alg_priv_t *ctx) {
free(ctx->cx_data);
vp9_remove_compressor(ctx->cpi);
vpx_free(ctx->buffer_pool);
vpx_free(ctx);
return VPX_CODEC_OK;
}
@ -18,9 +18,7 @@
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_frame_buffers.h"
#include "vp9/common/vp9_thread.h"

#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_decodeframe.h"
@ -32,45 +30,21 @@

typedef vpx_codec_stream_info_t vp9_stream_info_t;

// This limit is due to framebuffer numbers.
// TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames.

typedef struct cache_frame {
int fb_idx;
vpx_image_t img;
} cache_frame;

struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
vpx_codec_dec_cfg_t cfg;
vp9_stream_info_t si;
struct VP9Decoder *pbi;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
void *decrypt_state;
vpx_image_t img;
int img_avail;
int flushed;
int invert_tile_order;
int last_show_frame; // Index of last output frame.

// Frame parallel related.
int frame_parallel_decode; // frame-based threading.
int byte_alignment;
VP9Worker *frame_workers;
int num_frame_workers;
int next_submit_worker_id;
int last_submit_worker_id;
int next_output_worker_id;
int available_threads;
cache_frame frame_cache[FRAME_CACHE_SIZE];
int frame_cache_write;
int frame_cache_read;
int num_cache_frames;

// BufferPool that holds all reference frames. Shared by all the FrameWorkers.
BufferPool *buffer_pool;

// External frame buffer info to save for VP9 common.
void *ext_priv; // Private data associated with the external frame buffers.
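The frame_cache_write/frame_cache_read/num_cache_frames fields removed above implement a small ring buffer of decoded frames. A standalone sketch of that bookkeeping (not libvpx source; cache_push/cache_pop are illustrative names), assuming the same FRAME_CACHE_SIZE of 6:

/* Ring buffer of decoded-frame slots: write index, read index, and a
 * count, all wrapping modulo FRAME_CACHE_SIZE. */
#include <stdio.h>

#define FRAME_CACHE_SIZE 6

typedef struct {
  int fb_idx;  /* which pool slot holds the decoded frame */
} cache_frame_sketch;

typedef struct {
  cache_frame_sketch frames[FRAME_CACHE_SIZE];
  int write, read, count;
} frame_cache_sketch;

static int cache_push(frame_cache_sketch *c, int fb_idx) {
  if (c->count == FRAME_CACHE_SIZE) return 0;  /* cache full */
  c->frames[c->write].fb_idx = fb_idx;
  c->write = (c->write + 1) % FRAME_CACHE_SIZE;
  ++c->count;
  return 1;
}

static int cache_pop(frame_cache_sketch *c, int *fb_idx) {
  if (c->count == 0) return 0;  /* cache empty */
  *fb_idx = c->frames[c->read].fb_idx;
  c->read = (c->read + 1) % FRAME_CACHE_SIZE;
  --c->count;
  return 1;
}

int main(void) {
  frame_cache_sketch c = { { { 0 } }, 0, 0, 0 };
  int idx;
  cache_push(&c, 3);
  cache_push(&c, 7);
  while (cache_pop(&c, &idx))
    printf("output frame from pool slot %d\n", idx);
  return 0;
}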
@ -92,16 +66,12 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,

ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;

priv->si.sz = sizeof(priv->si);
priv->flushed = 0;
priv->frame_parallel_decode =
(ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING);
priv->frame_parallel_decode = 0; // Disable for now
priv->flushed = 0;
// Only do frame parallel decode when threads > 1.
priv->frame_parallel_decode =
(ctx->config.dec && (ctx->config.dec->threads > 1) &&
(ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0;

if (ctx->config.dec) {
priv->cfg = *ctx->config.dec;
@ -113,33 +83,13 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
}

static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
if (ctx->frame_workers != NULL) {
int i;
for (i = 0; i < ctx->num_frame_workers; ++i) {
VP9Worker *const worker = &ctx->frame_workers[i];
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
vp9_get_worker_interface()->end(worker);
vp9_remove_common(&frame_worker_data->pbi->common);
vp9_decoder_remove(frame_worker_data->pbi);
vpx_free(frame_worker_data->scratch_buffer);
#if CONFIG_MULTITHREAD
pthread_mutex_destroy(&frame_worker_data->stats_mutex);
pthread_cond_destroy(&frame_worker_data->stats_cond);
#endif
vpx_free(frame_worker_data);
}
#if CONFIG_MULTITHREAD
pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
#endif
if (ctx->pbi) {
vp9_decoder_remove(ctx->pbi);
ctx->pbi = NULL;
}

if (ctx->buffer_pool)
vp9_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);

vpx_free(ctx->frame_workers);
vpx_free(ctx->buffer_pool);
vpx_free(ctx);

return VPX_CODEC_OK;
}
@ -261,45 +211,33 @@ static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_OK;
}

static void set_error_detail(vpx_codec_alg_priv_t *ctx,
const char *const error) {
ctx->base.err_detail = error;
}

static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
const struct vpx_internal_error_info *error) {
if (error->error_code)
set_error_detail(ctx, error->has_detail ? error->detail : NULL);
ctx->base.err_detail = error->has_detail ? error->detail : NULL;

return error->error_code;
}

static void init_buffer_callbacks(vpx_codec_alg_priv_t *ctx) {
int i;
VP9_COMMON *const cm = &ctx->pbi->common;

for (i = 0; i < ctx->num_frame_workers; ++i) {
VP9Worker *const worker = &ctx->frame_workers[i];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
VP9_COMMON *const cm = &frame_worker_data->pbi->common;
BufferPool *const pool = cm->buffer_pool;
cm->new_fb_idx = -1;
cm->byte_alignment = ctx->byte_alignment;

cm->new_fb_idx = -1;
cm->byte_alignment = ctx->byte_alignment;
if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
cm->get_fb_cb = ctx->get_ext_fb_cb;
cm->release_fb_cb = ctx->release_ext_fb_cb;
cm->cb_priv = ctx->ext_priv;
} else {
cm->get_fb_cb = vp9_get_frame_buffer;
cm->release_fb_cb = vp9_release_frame_buffer;

if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
pool->get_fb_cb = ctx->get_ext_fb_cb;
pool->release_fb_cb = ctx->release_ext_fb_cb;
pool->cb_priv = ctx->ext_priv;
} else {
pool->get_fb_cb = vp9_get_frame_buffer;
pool->release_fb_cb = vp9_release_frame_buffer;
if (vp9_alloc_internal_frame_buffers(&cm->int_frame_buffers))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to initialize internal frame buffers");

if (vp9_alloc_internal_frame_buffers(&pool->int_frame_buffers))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to initialize internal frame buffers");

pool->cb_priv = &pool->int_frame_buffers;
}
cm->cb_priv = &cm->int_frame_buffers;
}
}
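The get_fb_cb/release_fb_cb pointers wired up above come from the public external frame buffer API. A hedged sketch of the application side, assuming the standard vpx_codec_set_frame_buffer_functions() entry point and vpx_codec_frame_buffer_t layout; the malloc-based strategy is purely illustrative:

/* Application-provided frame buffer callbacks. */
#include <stdlib.h>
#include <string.h>
#include "vpx/vpx_frame_buffer.h"

static int my_get_frame_buffer(void *priv, size_t min_size,
                               vpx_codec_frame_buffer_t *fb) {
  (void)priv;
  fb->data = (uint8_t *)malloc(min_size);
  if (fb->data == NULL) return -1;
  memset(fb->data, 0, min_size);
  fb->size = min_size;
  fb->priv = NULL;  /* room for per-buffer bookkeeping if needed */
  return 0;
}

static int my_release_frame_buffer(void *priv, vpx_codec_frame_buffer_t *fb) {
  (void)priv;
  free(fb->data);
  fb->data = NULL;
  fb->size = 0;
  return 0;
}

/* Typical registration, done before the first decode call:
 *   vpx_codec_set_frame_buffer_functions(&codec, my_get_frame_buffer,
 *                                        my_release_frame_buffer, NULL);
 */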
@ -318,124 +256,14 @@ static void set_ppflags(const vpx_codec_alg_priv_t *ctx,
flags->noise_level = ctx->postproc_cfg.noise_level;
}

static int frame_worker_hook(void *arg1, void *arg2) {
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)arg1;
const uint8_t *data = frame_worker_data->data;
(void)arg2;
static void init_decoder(vpx_codec_alg_priv_t *ctx) {
ctx->pbi = vp9_decoder_create();
if (ctx->pbi == NULL)
return;

frame_worker_data->result =
vp9_receive_compressed_data(frame_worker_data->pbi,
frame_worker_data->data_size,
&data);
frame_worker_data->data_end = data;

if (frame_worker_data->pbi->frame_parallel_decode) {
// In frame parallel decoding, a worker thread must successfully decode all
// the compressed data.
if (frame_worker_data->result != 0 ||
frame_worker_data->data + frame_worker_data->data_size - 1 > data) {
VP9Worker *const worker = frame_worker_data->pbi->frame_worker_owner;
BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool;
// Signal all the other threads that are waiting for this frame.
vp9_frameworker_lock_stats(worker);
frame_worker_data->frame_context_ready = 1;
lock_buffer_pool(pool);
frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
unlock_buffer_pool(pool);
frame_worker_data->pbi->need_resync = 1;
vp9_frameworker_signal_stats(worker);
vp9_frameworker_unlock_stats(worker);
return 0;
}
} else if (frame_worker_data->result != 0) {
// Check decode result in serial decode.
frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
frame_worker_data->pbi->need_resync = 1;
}

return !frame_worker_data->result;
}

static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
int i;
const VP9WorkerInterface *const winterface = vp9_get_worker_interface();

ctx->last_show_frame = -1;
ctx->next_submit_worker_id = 0;
ctx->last_submit_worker_id = 0;
ctx->next_output_worker_id = 0;
ctx->frame_cache_read = 0;
ctx->frame_cache_write = 0;
ctx->num_cache_frames = 0;
ctx->num_frame_workers =
(ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
ctx->available_threads = ctx->num_frame_workers;
ctx->flushed = 0;

ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
if (ctx->buffer_pool == NULL)
return VPX_CODEC_MEM_ERROR;

#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
set_error_detail(ctx, "Failed to allocate buffer pool mutex");
return VPX_CODEC_MEM_ERROR;
}
#endif

ctx->frame_workers = (VP9Worker *)
vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
if (ctx->frame_workers == NULL) {
set_error_detail(ctx, "Failed to allocate frame_workers");
return VPX_CODEC_MEM_ERROR;
}

for (i = 0; i < ctx->num_frame_workers; ++i) {
VP9Worker *const worker = &ctx->frame_workers[i];
FrameWorkerData *frame_worker_data = NULL;
winterface->init(worker);
worker->data1 = vpx_memalign(32, sizeof(FrameWorkerData));
if (worker->data1 == NULL) {
set_error_detail(ctx, "Failed to allocate frame_worker_data");
return VPX_CODEC_MEM_ERROR;
}
frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->pbi = vp9_decoder_create(ctx->buffer_pool);
if (frame_worker_data->pbi == NULL) {
set_error_detail(ctx, "Failed to allocate frame_worker_data");
return VPX_CODEC_MEM_ERROR;
}
frame_worker_data->pbi->frame_worker_owner = worker;
frame_worker_data->worker_id = i;
frame_worker_data->scratch_buffer = NULL;
frame_worker_data->scratch_buffer_size = 0;
frame_worker_data->frame_context_ready = 0;
#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&frame_worker_data->stats_mutex, NULL)) {
set_error_detail(ctx, "Failed to allocate frame_worker_data mutex");
return VPX_CODEC_MEM_ERROR;
}

if (pthread_cond_init(&frame_worker_data->stats_cond, NULL)) {
set_error_detail(ctx, "Failed to allocate frame_worker_data cond");
return VPX_CODEC_MEM_ERROR;
}
#endif
// If decoding in serial mode, FrameWorker thread could create tile worker
// thread or loopfilter thread.
frame_worker_data->pbi->max_threads =
(ctx->frame_parallel_decode == 0) ? ctx->cfg.threads : 0;

frame_worker_data->pbi->inv_tile_order = ctx->invert_tile_order;
frame_worker_data->pbi->frame_parallel_decode = ctx->frame_parallel_decode;
frame_worker_data->pbi->common.frame_parallel_decode =
ctx->frame_parallel_decode;
worker->hook = (VP9WorkerHook)frame_worker_hook;
if (!winterface->reset(worker)) {
set_error_detail(ctx, "Frame Worker thread creation failed");
return VPX_CODEC_MEM_ERROR;
}
}
ctx->pbi->max_threads = ctx->cfg.threads;
ctx->pbi->inv_tile_order = ctx->invert_tile_order;
ctx->pbi->frame_parallel_decode = ctx->frame_parallel_decode;

// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
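The frame_worker_hook removed above is the per-thread entry point behind the VP9Worker abstraction: each worker runs a hook(data1, data2) that returns nonzero on success, launch() starts it, and sync() joins it and reports whether the hook succeeded. A standalone pthread sketch of that pattern (plain pthreads, not the real VP9WorkerInterface; all names below are illustrative):

#include <pthread.h>
#include <stdio.h>

typedef int (*worker_hook)(void *data1, void *data2);

typedef struct {
  pthread_t thread;
  worker_hook hook;
  void *data1, *data2;
  int had_error;
} worker_sketch;

static void *worker_thread(void *arg) {
  worker_sketch *const w = (worker_sketch *)arg;
  w->had_error = !w->hook(w->data1, w->data2);
  return NULL;
}

static void worker_launch(worker_sketch *w) {
  pthread_create(&w->thread, NULL, worker_thread, w);
}

static int worker_sync(worker_sketch *w) {
  pthread_join(w->thread, NULL);
  return !w->had_error;  /* nonzero on success, like winterface->sync() */
}

/* A trivial hook standing in for frame_worker_hook(). */
static int decode_hook(void *data1, void *data2) {
  (void)data2;
  printf("decoding buffer %p\n", data1);
  return 1;  /* success */
}

int main(void) {
  worker_sketch w;
  unsigned char bitstream[64] = {0};
  w.hook = decode_hook;
  w.data1 = bitstream;
  w.data2 = NULL;
  w.had_error = 0;
  worker_launch(&w);
  printf("sync result: %d\n", worker_sync(&w));
  return 0;
}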
@ -444,17 +272,20 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
set_default_ppflags(&ctx->postproc_cfg);

init_buffer_callbacks(ctx);

return VPX_CODEC_OK;
}

static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
const uint8_t **data, unsigned int data_sz,
void *user_priv, int64_t deadline) {
YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags = {0, 0, 0};
const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
VP9_COMMON *cm = NULL;

(void)deadline;

vp9_zero(sd);
ctx->img_avail = 0;

// Determine the stream parameters. Note that we rely on peek_si to
// validate that we have a buffer that does not wrap around the top
// of the heap.
@ -470,98 +301,34 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_ERROR;
}

if (!ctx->frame_parallel_decode) {
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->data = *data;
frame_worker_data->data_size = data_sz;
frame_worker_data->user_priv = user_priv;

// Set these even if already initialized. The caller may have changed the
// decrypt config between frames.
frame_worker_data->pbi->decrypt_cb = ctx->decrypt_cb;
frame_worker_data->pbi->decrypt_state = ctx->decrypt_state;

worker->had_error = 0;
winterface->execute(worker);

// Update data pointer after decode.
*data = frame_worker_data->data_end;

if (worker->had_error)
return update_error_state(ctx, &frame_worker_data->pbi->common.error);
} else {
const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
VP9Worker *const worker = &ctx->frame_workers[ctx->next_submit_worker_id];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
// Copy context from last worker thread to next worker thread.
if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
vp9_frameworker_copy_context(
&ctx->frame_workers[ctx->next_submit_worker_id],
&ctx->frame_workers[ctx->last_submit_worker_id]);

frame_worker_data->pbi->ready_for_new_data = 0;
// Copy the compressed data into worker's internal buffer.
// TODO(hkuang): Will all the workers allocate the same size
// as the size of the first intra frame be better? This will
// avoid too many deallocate and allocate.
if (frame_worker_data->scratch_buffer_size < data_sz) {
frame_worker_data->scratch_buffer =
(uint8_t *)vpx_realloc(frame_worker_data->scratch_buffer, data_sz);
if (frame_worker_data->scratch_buffer == NULL) {
set_error_detail(ctx, "Failed to reallocate scratch buffer");
return VPX_CODEC_MEM_ERROR;
}
frame_worker_data->scratch_buffer_size = data_sz;
}
frame_worker_data->data_size = data_sz;
vpx_memcpy(frame_worker_data->scratch_buffer, *data, data_sz);

frame_worker_data->frame_decoded = 0;
frame_worker_data->frame_context_ready = 0;
frame_worker_data->data = frame_worker_data->scratch_buffer;
frame_worker_data->user_priv = user_priv;

if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
ctx->last_submit_worker_id =
(ctx->last_submit_worker_id + 1) % ctx->num_frame_workers;

ctx->next_submit_worker_id =
(ctx->next_submit_worker_id + 1) % ctx->num_frame_workers;

--ctx->available_threads;
worker->had_error = 0;
winterface->launch(worker);
// Initialize the decoder instance on the first frame
if (ctx->pbi == NULL) {
init_decoder(ctx);
if (ctx->pbi == NULL)
return VPX_CODEC_ERROR;
}

// Set these even if already initialized. The caller may have changed the
// decrypt config between frames.
ctx->pbi->decrypt_cb = ctx->decrypt_cb;
ctx->pbi->decrypt_state = ctx->decrypt_state;

cm = &ctx->pbi->common;

if (vp9_receive_compressed_data(ctx->pbi, data_sz, data))
return update_error_state(ctx, &cm->error);

if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)
set_ppflags(ctx, &flags);

return VPX_CODEC_OK;
}
if (vp9_get_raw_frame(ctx->pbi, &sd, &flags))
return update_error_state(ctx, &cm->error);

static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags = {0, 0, 0};
const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
VP9Worker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
ctx->next_output_worker_id =
(ctx->next_output_worker_id + 1) % ctx->num_frame_workers;
winterface->sync(worker);
++ctx->available_threads;
if (vp9_get_raw_frame(frame_worker_data->pbi, &sd, &flags) == 0) {
VP9_COMMON *const cm = &frame_worker_data->pbi->common;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx;
yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd,
frame_worker_data->user_priv);
ctx->frame_cache[ctx->frame_cache_write].img.fb_priv =
frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
ctx->frame_cache_write =
(ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
++ctx->num_cache_frames;
}
yuvconfig2image(&ctx->img, &sd, user_priv);
ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
ctx->img_avail = 1;

return VPX_CODEC_OK;
}

static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
@ -581,13 +348,6 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
// Reset flushed when receiving a valid frame.
ctx->flushed = 0;

// Initialize the decoder workers on the first frame.
if (ctx->frame_workers == NULL) {
const vpx_codec_err_t res = init_decoder(ctx);
if (res != VPX_CODEC_OK)
return res;
}

res = vp9_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
ctx->decrypt_cb, ctx->decrypt_state);
if (res != VPX_CODEC_OK)
@ -604,46 +364,30 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
for (i = 0; i < frame_count; ++i) {
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
vpx_codec_err_t res;
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
ctx->base.err_detail = "Invalid frame size in index";
return VPX_CODEC_CORRUPT_FRAME;
}

if (ctx->available_threads == 0) {
// No more threads for decoding. Wait until the next output worker
// finishes decoding. Then copy the decoded frame into cache.
if (ctx->num_cache_frames < FRAME_CACHE_SIZE) {
wait_worker_and_cache_frame(ctx);
} else {
// TODO(hkuang): Add unit test to test this path.
set_error_detail(ctx, "Frame output cache is full.");
return VPX_CODEC_ERROR;
}
}

res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
deadline);
if (res != VPX_CODEC_OK)
return res;

data_start += frame_size;
}
} else {
if (ctx->available_threads == 0) {
// No more threads for decoding. Wait until the next output worker
// finishes decoding. Then copy the decoded frame into cache.
if (ctx->num_cache_frames < FRAME_CACHE_SIZE) {
wait_worker_and_cache_frame(ctx);
} else {
// TODO(hkuang): Add unit test to test this path.
set_error_detail(ctx, "Frame output cache is full.");
return VPX_CODEC_ERROR;
}
}

res = decode_one(ctx, &data, data_sz, user_priv, deadline);
res = decode_one(ctx, &data_start, data_sz, user_priv, deadline);
if (res != VPX_CODEC_OK)
return res;

// Extra data detected after the frame.
if (data_start < data_end - 1) {
ctx->base.err_detail = "Fail to decode frame in parallel mode";
return VPX_CODEC_INCAPABLE;
}
}
} else {
// Decode in serial mode.
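The frame_sizes[]/frame_count pair iterated over above comes from parsing the VP9 superframe index. A hedged sketch of that parsing, following the documented bitstream layout (a trailing marker byte of the form 0b110xxxyy repeated at both ends of the index); this is an illustration, not the actual vp9_parse_superframe_index():

#include <stdint.h>
#include <stddef.h>

static int parse_superframe_index_sketch(const uint8_t *data, size_t data_sz,
                                         uint32_t sizes[8], int *count) {
  uint8_t marker;
  *count = 0;
  if (data_sz == 0) return 0;
  marker = data[data_sz - 1];
  if ((marker & 0xe0) == 0xc0) {
    const int frames = (marker & 0x7) + 1;          /* up to 8 frames */
    const int mag = ((marker >> 3) & 0x3) + 1;      /* bytes per size */
    const size_t index_sz = 2 + (size_t)mag * frames;
    /* The same marker byte must also start the index. */
    if (data_sz >= index_sz && data[data_sz - index_sz] == marker) {
      const uint8_t *x = &data[data_sz - index_sz + 1];
      int i, j;
      for (i = 0; i < frames; ++i) {
        uint32_t this_sz = 0;
        for (j = 0; j < mag; ++j) this_sz |= (uint32_t)(*x++) << (j * 8);
        sizes[i] = this_sz;   /* little-endian per-frame sizes */
      }
      *count = frames;
    }
  }
  return *count;
}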
@ -656,7 +400,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
vpx_codec_err_t res;
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
ctx->base.err_detail = "Invalid frame size in index";
return VPX_CODEC_CORRUPT_FRAME;
}

@ -687,73 +431,23 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
}
}

return res;
}

static void release_last_output_frame(vpx_codec_alg_priv_t *ctx) {
RefCntBuffer *const frame_bufs = ctx->buffer_pool->frame_bufs;
// Decrease reference count of last output frame in frame parallel mode.
if (ctx->frame_parallel_decode && ctx->last_show_frame >= 0) {
BufferPool *const pool = ctx->buffer_pool;
lock_buffer_pool(pool);
decrease_ref_count(ctx->last_show_frame, frame_bufs, pool);
unlock_buffer_pool(pool);
}
return VPX_CODEC_OK;
}

static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
vpx_codec_iter_t *iter) {
vpx_image_t *img = NULL;

// Only return frame when all the cpu are busy or
// application fluhsed the decoder in frame parallel decode.
if (ctx->frame_parallel_decode && ctx->available_threads > 0 &&
!ctx->flushed) {
return img;
if (ctx->img_avail) {
// iter acts as a flip flop, so an image is only returned on the first
// call to get_frame.
if (!(*iter)) {
img = &ctx->img;
*iter = img;
}
}
ctx->img_avail = 0;

// Output the frames in the cache first.
if (ctx->num_cache_frames > 0) {
release_last_output_frame(ctx);
ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
img = &ctx->frame_cache[ctx->frame_cache_read].img;
ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE;
--ctx->num_cache_frames;
return img;
}

// iter acts as a flip flop, so an image is only returned on the first
// call to get_frame.
if (*iter == NULL && ctx->frame_workers != NULL) {
do {
YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags = {0, 0, 0};
const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
VP9Worker *const worker =
&ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
ctx->next_output_worker_id =
(ctx->next_output_worker_id + 1) % ctx->num_frame_workers;
// Wait for the frame from worker thread.
if (!winterface->sync(worker)) {
// Decoding failed. Release the worker thread.
++ctx->available_threads;
if (ctx->flushed != 1)
return img;
} else if (vp9_get_raw_frame(frame_worker_data->pbi, &sd, &flags) == 0) {
VP9_COMMON *const cm = &frame_worker_data->pbi->common;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
++ctx->available_threads;
release_last_output_frame(ctx);
ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv);
ctx->img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
img = &ctx->img;
return img;
}
} while (ctx->next_output_worker_id != ctx->next_submit_worker_id);
}
return img;
}
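The iter flip-flop implemented above is what the public get-frame loop relies on: vpx_codec_get_frame() is called repeatedly with a NULL-initialized iterator until it returns NULL, so each decoded image is handed out exactly once. A minimal caller-side sketch using the standard decoder API (error handling trimmed for brevity):

#include <stddef.h>
#include "vpx/vpx_decoder.h"

static void decode_and_drain(vpx_codec_ctx_t *codec,
                             const uint8_t *buf, size_t buf_sz) {
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;

  /* Feed one compressed frame (or superframe) to the decoder. */
  vpx_codec_decode(codec, buf, (unsigned int)buf_sz, NULL, 0);

  /* Drain every frame the decoder has ready. */
  while ((img = vpx_codec_get_frame(codec, &iter)) != NULL) {
    /* consume img->planes[] / img->stride[] here */
  }
}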
@ -763,7 +457,7 @@ static vpx_codec_err_t decoder_set_fb_fn(
vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
if (cb_get == NULL || cb_release == NULL) {
return VPX_CODEC_INVALID_PARAM;
} else if (ctx->frame_workers == NULL) {
} else if (ctx->pbi == NULL) {
// If the decoder has already been initialized, do not accept changes to
// the frame buffer functions.
ctx->get_ext_fb_cb = cb_get;
@ -779,19 +473,12 @@ static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
va_list args) {
vpx_ref_frame_t *const data = va_arg(args, vpx_ref_frame_t *);

// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
return VPX_CODEC_INCAPABLE;
}

if (data) {
vpx_ref_frame_t *const frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;

image2yuvconfig(&frame->img, &sd);
return vp9_set_reference_dec(&frame_worker_data->pbi->common,
return vp9_set_reference_dec(&ctx->pbi->common,
(VP9_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
@ -802,19 +489,13 @@ static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
va_list args) {
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);

// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
return VPX_CODEC_INCAPABLE;
}

if (data) {
vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data;
vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;

image2yuvconfig(&frame->img, &sd);
return vp9_copy_reference_dec(frame_worker_data->pbi,

return vp9_copy_reference_dec(ctx->pbi,
(VP9_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
@ -825,18 +506,10 @@ static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
va_list args) {
vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);

// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
return VPX_CODEC_INCAPABLE;
}

if (data) {
YV12_BUFFER_CONFIG* fb;
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
YV12_BUFFER_CONFIG* fb = get_ref_frame(&ctx->pbi->common, data->idx);
if (fb == NULL) return VPX_CODEC_ERROR;

yuvconfig2image(&data->img, fb, NULL);
return VPX_CODEC_OK;
} else {
@ -874,44 +547,26 @@ static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const update_info = va_arg(args, int *);

// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
return VPX_CODEC_INCAPABLE;
}

if (update_info) {
if (ctx->frame_workers) {
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
*update_info = frame_worker_data->pbi->refresh_frame_flags;
} else {
if (ctx->pbi)
*update_info = ctx->pbi->refresh_frame_flags;
else
return VPX_CODEC_ERROR;
}
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
}
}

static vpx_codec_err_t ctrl_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *corrupted = va_arg(args, int *);

if (corrupted) {
if (ctx->frame_workers) {
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
RefCntBuffer *const frame_bufs =
frame_worker_data->pbi->common.buffer_pool->frame_bufs;
if (frame_worker_data->pbi->common.frame_to_show == NULL)
return VPX_CODEC_ERROR;
*corrupted = frame_bufs[ctx->last_show_frame].buf.corrupted;
} else {
return VPX_CODEC_ERROR;
}
if (corrupted != NULL && ctx->pbi != NULL) {
const YV12_BUFFER_CONFIG *const frame = ctx->pbi->common.frame_to_show;
if (frame == NULL) return VPX_CODEC_ERROR;
*corrupted = frame->corrupted;
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
@ -922,18 +577,9 @@ static vpx_codec_err_t ctrl_get_display_size(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const display_size = va_arg(args, int *);

// Only support this function in serial decode.
if (ctx->frame_parallel_decode) {
set_error_detail(ctx, "Not supported in frame parallel decode");
return VPX_CODEC_INCAPABLE;
}

if (display_size) {
if (ctx->frame_workers) {
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
const VP9_COMMON *const cm = &frame_worker_data->pbi->common;
if (ctx->pbi) {
const VP9_COMMON *const cm = &ctx->pbi->common;
display_size[0] = cm->display_width;
display_size[1] = cm->display_height;
} else {
@ -948,13 +594,10 @@ static vpx_codec_err_t ctrl_get_display_size(vpx_codec_alg_priv_t *ctx,
static vpx_codec_err_t ctrl_get_bit_depth(vpx_codec_alg_priv_t *ctx,
va_list args) {
unsigned int *const bit_depth = va_arg(args, unsigned int *);
VP9Worker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];

if (bit_depth) {
if (worker) {
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
const VP9_COMMON *const cm = &frame_worker_data->pbi->common;
if (ctx->pbi) {
const VP9_COMMON *const cm = &ctx->pbi->common;
*bit_depth = cm->bit_depth;
return VPX_CODEC_OK;
} else {
@ -993,11 +636,9 @@ static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_INVALID_PARAM;

ctx->byte_alignment = byte_alignment;
if (ctx->frame_workers) {
VP9Worker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
frame_worker_data->pbi->common.byte_alignment = byte_alignment;
if (ctx->pbi != NULL) {
VP9_COMMON *const cm = &ctx->pbi->common;
cm->byte_alignment = byte_alignment;
}
return VPX_CODEC_OK;
}
@ -27,8 +27,6 @@ VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.c
VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.h
VP9_DX_SRCS-yes += decoder/vp9_decodemv.h
VP9_DX_SRCS-yes += decoder/vp9_detokenize.h
VP9_DX_SRCS-yes += decoder/vp9_dthread.c
VP9_DX_SRCS-yes += decoder/vp9_dthread.h
VP9_DX_SRCS-yes += decoder/vp9_decoder.c
VP9_DX_SRCS-yes += decoder/vp9_decoder.h
VP9_DX_SRCS-yes += decoder/vp9_dsubexp.c
@ -22,11 +22,8 @@ extern "C" {
#include "./vpx_integer.h"

/*!\brief The maximum number of work buffers used by libvpx.
* Support maximum 4 threads to decode video in parallel.
* Each thread will use one work buffer.
* TODO(hkuang): Add support to set number of worker threads dynamically.
*/
#define VPX_MAXIMUM_WORK_BUFFERS 8
#define VPX_MAXIMUM_WORK_BUFFERS 1

/*!\brief The maximum number of reference buffers that a VP9 encoder may use.
*/
@ -41,7 +41,6 @@ void reset(struct WebmInputContext *const webm_ctx) {
webm_ctx->block_frame_index = 0;
webm_ctx->video_track_index = 0;
webm_ctx->timestamp_ns = 0;
webm_ctx->is_key_frame = false;
}

void get_first_cluster(struct WebmInputContext *const webm_ctx) {
@ -183,7 +182,6 @@ int webm_read_frame(struct WebmInputContext *webm_ctx,
}
*bytes_in_buffer = frame.len;
webm_ctx->timestamp_ns = block->GetTime(cluster);
webm_ctx->is_key_frame = block->IsKey();

mkvparser::MkvReader *const reader =
reinterpret_cast<mkvparser::MkvReader*>(webm_ctx->reader);