Remove no longer used video codec test framework.

Moves one test, which might still be useful to keep, to the VP8 unit tests.
Also does a bit of clean up in vp8 unittests.

R=mflodman@webrtc.org, pbos@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/31139004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7835 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
stefan@webrtc.org 2014-12-09 00:02:45 +00:00
parent 8911bc52f1
commit 86b6d65ef1
20 changed files with 88 additions and 4520 deletions

View File

@ -47,7 +47,6 @@
'rtp_rtcp/test/testFec/test_fec.gypi', 'rtp_rtcp/test/testFec/test_fec.gypi',
'video_coding/main/source/video_coding_test.gypi', 'video_coding/main/source/video_coding_test.gypi',
'video_coding/codecs/test/video_codecs_test_framework.gypi', 'video_coding/codecs/test/video_codecs_test_framework.gypi',
'video_coding/codecs/test_framework/test_framework.gypi',
'video_coding/codecs/tools/video_codecs_tools.gypi', 'video_coding/codecs/tools/video_codecs_tools.gypi',
], # includes ], # includes
'variables': { 'variables': {
@ -83,7 +82,6 @@
'PCM16B', # Needed by NetEq tests. 'PCM16B', # Needed by NetEq tests.
'remote_bitrate_estimator', 'remote_bitrate_estimator',
'rtp_rtcp', 'rtp_rtcp',
'test_framework',
'video_codecs_test_framework', 'video_codecs_test_framework',
'video_processing', 'video_processing',
'webrtc_utility', 'webrtc_utility',
@ -324,7 +322,6 @@
'dependencies': [ 'dependencies': [
'audio_coding_module', 'audio_coding_module',
'rtp_rtcp', 'rtp_rtcp',
'test_framework',
'video_codecs_test_framework', 'video_codecs_test_framework',
'webrtc_utility', 'webrtc_utility',
'webrtc_video_coding', 'webrtc_video_coding',

View File

@ -1,5 +0,0 @@
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file *.gyp=*
per-file *.gypi=*

View File

@ -1,304 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test_framework/benchmark.h"
#include <assert.h>
#include <iostream>
#include <sstream>
#include <vector>
#if defined(_WIN32)
#include <windows.h>
#endif
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/test_framework/video_source.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/metrics/video_metrics.h"
#define SSIM_CALC 0 // by default, don't compute SSIM
using namespace webrtc;
// Default benchmark: test number 6, results written to
// <output_path>/benchmark.txt under the codec name "Default".
Benchmark::Benchmark()
:
NormalAsyncTest("Benchmark", "Codec benchmark over a range of test cases", 6),
_resultsFileName(webrtc::test::OutputPath() + "benchmark.txt"),
_codecName("Default")
{
}
// Same defaults as above, but the subclass supplies its own test name and
// description.
Benchmark::Benchmark(std::string name, std::string description)
:
NormalAsyncTest(name, description, 6),
_resultsFileName(webrtc::test::OutputPath() + "benchmark.txt"),
_codecName("Default")
{
}
// Fully parameterized: the subclass also chooses the results file and the
// codec name that Perform() writes into the results-file header.
Benchmark::Benchmark(std::string name, std::string description, std::string resultsFileName, std::string codecName)
:
NormalAsyncTest(name, description, 6),
_resultsFileName(resultsFileName),
_codecName(codecName)
{
}
// Runs the full benchmark matrix: for every source clip, frame size, frame
// rate and target bit rate, encodes and decodes the whole sequence (via
// PerformNormalTest()), then reports the actual bit rate, PSNR (and SSIM if
// SSIM_CALC is enabled), average encode/decode times and -- for frame rates
// flagged in speedTestMask -- an averaged speed figure. Results are written
// both to stdout and to _resultsFileName, headed by GetMagicStr() and the
// codec name.
//
// Fix vs. previous revision: the unused locals `psnr_result`/`ssim_result`
// (the metric calls already write directly into psnr[k]/ssim[k]) have been
// removed.
void
Benchmark::Perform()
{
    std::vector<const VideoSource*> sources;
    std::vector<const VideoSource*>::iterator it;

    // Configuration --------------------------
    sources.push_back(new const VideoSource(webrtc::test::ProjectRootPath() +
                                            "resources/foreman_cif.yuv", kCIF));
    // sources.push_back(new const VideoSource(webrtc::test::ProjectRootPath() +
    //                                         "resources/akiyo_cif.yuv", kCIF));
    const VideoSize size[] = {kQCIF, kCIF};
    const int frameRate[] = {10, 15, 30};
    // Specifies the framerates for which to perform a speed test.
    const bool speedTestMask[] = {false, false, false};
    const int bitRate[] = {50, 100, 200, 300, 400, 500, 600, 1000};
    // Determines the number of iterations to perform to arrive at the speed result.
    enum { kSpeedTestIterations = 10 };
    // ----------------------------------------

    const int nFrameRates = sizeof(frameRate)/sizeof(*frameRate);
    assert(sizeof(speedTestMask)/sizeof(*speedTestMask) == nFrameRates);
    const int nBitrates = sizeof(bitRate)/sizeof(*bitRate);
    int testIterations = 10;
    webrtc::test::QualityMetricsResult psnr[nBitrates];
    webrtc::test::QualityMetricsResult ssim[nBitrates];
    double fps[nBitrates];
    double totalEncodeTime[nBitrates];
    double totalDecodeTime[nBitrates];

    _results.open(_resultsFileName.c_str(), std::fstream::out);
    _results << GetMagicStr() << std::endl;
    _results << _codecName << std::endl;
    for (it = sources.begin() ; it < sources.end(); it++)
    {
        for (int i = 0; i < static_cast<int>(sizeof(size)/sizeof(*size)); i++)
        {
            for (int j = 0; j < nFrameRates; j++)
            {
                // Build (or reuse) a converted copy of the clip at the wanted
                // size/frame rate; fall back to the original file if the
                // conversion did not produce an output file.
                std::stringstream ss;
                std::string strFrameRate;
                std::string outFileName;
                ss << frameRate[j];
                ss >> strFrameRate;
                outFileName = (*it)->GetFilePath() + "/" + (*it)->GetName() + "_" +
                    VideoSource::GetSizeString(size[i]) + "_" + strFrameRate + ".yuv";
                _target = new const VideoSource(outFileName, size[i], frameRate[j]);
                (*it)->Convert(*_target);
                if (VideoSource::FileExists(outFileName.c_str()))
                {
                    _inname = outFileName;
                }
                else
                {
                    _inname = (*it)->GetFileName();
                }

                std::cout << (*it)->GetName() << ", " << VideoSource::GetSizeString(size[i])
                    << ", " << frameRate[j] << " fps" << std::endl << "Bitrate [kbps]:";
                _results << (*it)->GetName() << "," << VideoSource::GetSizeString(size[i])
                    << "," << frameRate[j] << " fps" << std::endl << "Bitrate [kbps]";

                // Speed-test frame rates are run several times so the fps
                // figure is an average; quality runs need only one pass.
                if (speedTestMask[j])
                {
                    testIterations = kSpeedTestIterations;
                }
                else
                {
                    testIterations = 1;
                }

                for (int k = 0; k < nBitrates; k++)
                {
                    _bitRate = (bitRate[k]);
                    double avgFps = 0.0;
                    totalEncodeTime[k] = 0;
                    totalDecodeTime[k] = 0;
                    for (int l = 0; l < testIterations; l++)
                    {
                        PerformNormalTest();
                        _appendNext = false;
                        avgFps += _framecnt / (_totalEncodeTime + _totalDecodeTime);
                        totalEncodeTime[k] += _totalEncodeTime;
                        totalDecodeTime[k] += _totalDecodeTime;
                    }
                    avgFps /= testIterations;
                    totalEncodeTime[k] /= testIterations;
                    totalDecodeTime[k] /= testIterations;
                    double actualBitRate = ActualBitRate(_framecnt) / 1000.0;
                    std::cout << " " << actualBitRate;
                    _results << "," << actualBitRate;
                    // The metric calls fill psnr[k]/ssim[k] directly.
                    I420PSNRFromFiles(_inname.c_str(), _outname.c_str(),
                                      _inst.width, _inst.height, &psnr[k]);
                    if (SSIM_CALC)
                    {
                        I420SSIMFromFiles(_inname.c_str(), _outname.c_str(),
                                          _inst.width, _inst.height, &ssim[k]);
                    }
                    fps[k] = avgFps;
                }

                std::cout << std::endl << "Y-PSNR [dB]:";
                _results << std::endl << "Y-PSNR [dB]";
                for (int k = 0; k < nBitrates; k++)
                {
                    std::cout << " " << psnr[k].average;
                    _results << "," << psnr[k].average;
                }
                if (SSIM_CALC)
                {
                    std::cout << std::endl << "SSIM: ";
                    _results << std::endl << "SSIM ";
                    for (int k = 0; k < nBitrates; k++)
                    {
                        std::cout << " " << ssim[k].average;
                        _results << "," << ssim[k].average;
                    }
                }

                std::cout << std::endl << "Encode Time[ms]:";
                _results << std::endl << "Encode Time[ms]";
                for (int k = 0; k < nBitrates; k++)
                {
                    std::cout << " " << totalEncodeTime[k];
                    _results << "," << totalEncodeTime[k];
                }

                std::cout << std::endl << "Decode Time[ms]:";
                _results << std::endl << "Decode Time[ms]";
                for (int k = 0; k < nBitrates; k++)
                {
                    std::cout << " " << totalDecodeTime[k];
                    _results << "," << totalDecodeTime[k];
                }

                if (speedTestMask[j])
                {
                    std::cout << std::endl << "Speed [fps]:";
                    _results << std::endl << "Speed [fps]";
                    for (int k = 0; k < nBitrates; k++)
                    {
                        std::cout << " " << static_cast<int>(fps[k] + 0.5);
                        _results << "," << static_cast<int>(fps[k] + 0.5);
                    }
                }
                std::cout << std::endl << std::endl;
                _results << std::endl << std::endl;

                delete _target;
            }
        }
        delete *it;
    }
    _results.close();
}
// Runs one complete encode/decode pass over the current _target sequence:
// creates fresh encoder/decoder instances via the subclass factories, pushes
// every source frame through Encode(), and drains encoded frames from the
// queue through DoPacketLoss()/Decode(). The timing counters accumulated
// here (_totalEncodeTime etc.) are read afterwards by Perform(). A negative
// decoder return aborts the whole process.
void
Benchmark::PerformNormalTest()
{
_encoder = GetNewEncoder();
_decoder = GetNewDecoder();
CodecSettings(_target->GetWidth(), _target->GetHeight(), _target->GetFrameRate(), _bitRate);
Setup();
EventWrapper* waitEvent = EventWrapper::Create();
// InitEncode(settings, cores, maxPayloadSize) -- 4 cores, 1440-byte
// payload; presumably matches the VideoEncoder interface -- TODO confirm.
_encoder->InitEncode(&_inst, 4, 1440);
CodecSpecific_InitBitrate();
_decoder->InitDecode(&_inst,1);
// The encode callback pushes finished frames into frameQueue; the decode
// loop below pops and decodes them.
FrameQueue frameQueue;
VideoEncodeCompleteCallback encCallback(_encodedFile, &frameQueue, *this);
VideoDecodeCompleteCallback decCallback(_decodedFile, *this);
_encoder->RegisterEncodeCompleteCallback(&encCallback);
_decoder->RegisterDecodeCompleteCallback(&decCallback);
SetCodecSpecificParameters();
// Reset all per-run counters.
_totalEncodeTime = _totalDecodeTime = 0;
_totalEncodePipeTime = _totalDecodePipeTime = 0;
bool complete = false;
_framecnt = 0;
_encFrameCnt = 0;
_sumEncBytes = 0;
_lengthEncFrame = 0;
while (!complete)
{
// Encode() returns true once the whole source file has been consumed.
complete = Encode();
if (!frameQueue.Empty() || complete)
{
while (!frameQueue.Empty())
{
_frameToDecode = static_cast<FrameQueueTuple *>(frameQueue.PopFrame());
DoPacketLoss();
int ret = Decode();
delete _frameToDecode;
_frameToDecode = NULL;
if (ret < 0)
{
fprintf(stderr,"\n\nError in decoder: %d\n\n", ret);
exit(EXIT_FAILURE);
}
else if (ret == 0)
{
_framecnt++;
}
else
{
fprintf(stderr, "\n\nPositive return value from decode!\n\n");
}
}
}
// Brief wait to let asynchronous codec callbacks run.
waitEvent->Wait(5);
}
_encodedVideoBuffer.Free();
_encoder->Release();
_decoder->Release();
delete waitEvent;
delete _encoder;
delete _decoder;
Teardown();
}
// Configures the encoder's rate before a run. A zero target bit rate (an
// unconfigured test) falls back to a default of 600 kbps; the frame rate
// always comes from the current codec settings.
void Benchmark::CodecSpecific_InitBitrate()
{
    _encoder->SetRates(_bitRate == 0 ? 600 : _bitRate, _inst.maxFramerate);
}

View File

@ -1,39 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAWEWORK_BENCHMARK_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAWEWORK_BENCHMARK_H_
#include "webrtc/modules/video_coding/codecs/test_framework/normal_async_test.h"
class VideoSource;
// Abstract codec benchmark. Perform() sweeps source clips, frame sizes,
// frame rates and bit rates, measuring rate accuracy, PSNR/SSIM and
// encode/decode speed. Concrete codec benchmarks implement the
// GetNewEncoder()/GetNewDecoder() factories.
class Benchmark : public NormalAsyncTest
{
public:
Benchmark();
virtual void Perform();
protected:
// Subclass constructors pick the test name/description and, optionally,
// the results file and the codec name written into the results header.
Benchmark(std::string name, std::string description);
Benchmark(std::string name, std::string description, std::string resultsFileName, std::string codecName);
// Factories for the codec instances under test; owned (and deleted) by
// PerformNormalTest().
virtual webrtc::VideoEncoder* GetNewEncoder() = 0;
virtual webrtc::VideoDecoder* GetNewDecoder() = 0;
virtual void PerformNormalTest();
virtual void CodecSpecific_InitBitrate();
// First line of the results file; identifies the file-format version.
static const char* GetMagicStr() { return "#!benchmark1.0"; }
const VideoSource* _target; // Clip/size/frame-rate combination under test.
std::string _resultsFileName;
std::ofstream _results;
std::string _codecName;
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAWEWORK_BENCHMARK_H_

View File

@ -1,500 +0,0 @@
function exportfig(varargin)
%EXPORTFIG Export a figure to Encapsulated Postscript.
% EXPORTFIG(H, FILENAME) writes the figure H to FILENAME. H is
% a figure handle and FILENAME is a string that specifies the
% name of the output file.
%
% EXPORTFIG(...,PARAM1,VAL1,PARAM2,VAL2,...) specifies
% parameters that control various characteristics of the output
% file.
%
% Format Parameter:
% 'Format' one of the strings 'eps','eps2','jpeg','png','preview'
% specifies the output format. Defaults to 'eps'.
% The output format 'preview' does not generate an output
% file but instead creates a new figure window with a
% preview of the exported figure. In this case the
% FILENAME parameter is ignored.
%
% 'Preview' one of the strings 'none', 'tiff'
% specifies a preview for EPS files. Defaults to 'none'.
%
% Size Parameters:
% 'Width' a positive scalar
% specifies the width in the figure's PaperUnits
% 'Height' a positive scalar
% specifies the height in the figure's PaperUnits
%
% Specifying only one dimension sets the other dimension
% so that the exported aspect ratio is the same as the
% figure's current aspect ratio.
% If neither dimension is specified the size defaults to
% the width and height from the figure's PaperPosition.
%
% Rendering Parameters:
% 'Color' one of the strings 'bw', 'gray', 'cmyk'
% 'bw' specifies that lines and text are exported in
% black and all other objects in grayscale
% 'gray' specifies that all objects are exported in grayscale
% 'cmyk' specifies that all objects are exported in color
% using the CMYK color space
% 'Renderer' one of the strings 'painters', 'zbuffer', 'opengl'
% specifies the renderer to use
% 'Resolution' a positive scalar
% specifies the resolution in dots-per-inch.
%
% The default color setting is 'bw'.
%
% Font Parameters:
% 'FontMode' one of the strings 'scaled', 'fixed'
% 'FontSize' a positive scalar
% in 'scaled' mode multiplies with the font size of each
% text object to obtain the exported font size
% in 'fixed' mode specifies the font size of all text
% objects in points
% 'FontEncoding' one of the strings 'latin1', 'adobe'
% specifies the character encoding of the font
%
% If FontMode is 'scaled' but FontSize is not specified then a
% scaling factor is computed from the ratio of the size of the
% exported figure to the size of the actual figure. The minimum
% font size allowed after scaling is 5 points.
% If FontMode is 'fixed' but FontSize is not specified then the
% exported font sizes of all text objects is 7 points.
%
% The default 'FontMode' setting is 'scaled'.
%
% Line Width Parameters:
% 'LineMode' one of the strings 'scaled', 'fixed'
% 'LineWidth' a positive scalar
% the semantics of LineMode and LineWidth are exactly the
% same as FontMode and FontSize, except that they apply
% to line widths instead of font sizes. The minimum line
% width allowed after scaling is 0.5 points.
% If LineMode is 'fixed' but LineWidth is not specified
% then the exported line width of all line objects is 1
% point.
%
% Examples:
% exportfig(gcf,'fig1.eps','height',3);
% Exports the current figure to the file named 'fig1.eps' with
% a height of 3 inches (assuming the figure's PaperUnits is
% inches) and an aspect ratio the same as the figure's aspect
% ratio on screen.
%
% exportfig(gcf, 'fig2.eps', 'FontMode', 'fixed',...
% 'FontSize', 10, 'color', 'cmyk' );
% Exports the current figure to 'fig2.eps' in color with all
% text in 10 point fonts. The size of the exported figure is
% the figure's PaperPostion width and height.

if (nargin < 2)
  error('Too few input arguments');
end

% exportfig(H, filename, ...)
H = varargin{1};
if ~ishandle(H) | ~strcmp(get(H,'type'), 'figure')
  error('First argument must be a handle to a figure.');
end
filename = varargin{2};
if ~ischar(filename)
  error('Second argument must be a string.');
end
paramPairs = varargin(3:end);

% Do some validity checking on param-value pairs
if (rem(length(paramPairs),2) ~= 0)
  error(['Invalid input syntax. Optional parameters and values' ...
         ' must be in pairs.']);
end

% Option defaults.
format = 'eps';
preview = 'none';
width = -1;
height = -1;
color = 'bw';
fontsize = -1;
fontmode='scaled';
linewidth = -1;
linemode=[];
fontencoding = 'latin1';
renderer = [];
resolution = [];

% Process param-value pairs
args = {};
for k = 1:2:length(paramPairs)
  param = lower(paramPairs{k});
  if (~ischar(param))
    error('Optional parameter names must be strings');
  end
  value = paramPairs{k+1};

  switch (param)
   case 'format'
    format = value;
    if (~strcmp(format,{'eps','eps2','jpeg','png','preview'}))
      error(['Format must be ''eps'', ''eps2'', ''jpeg'', ''png'' or' ...
             ' ''preview''.']);
    end
   case 'preview'
    preview = value;
    if (~strcmp(preview,{'none','tiff'}))
      error('Preview must be ''none'' or ''tiff''.');
    end
   case 'width'
    width = LocalToNum(value);
    if(~LocalIsPositiveScalar(width))
      error('Width must be a numeric scalar > 0');
    end
   case 'height'
    height = LocalToNum(value);
    if(~LocalIsPositiveScalar(height))
      error('Height must be a numeric scalar > 0');
    end
   case 'color'
    color = lower(value);
    if (~strcmp(color,{'bw','gray','cmyk'}))
      error('Color must be ''bw'', ''gray'' or ''cmyk''.');
    end
   case 'fontmode'
    fontmode = lower(value);
    if (~strcmp(fontmode,{'scaled','fixed'}))
      error('FontMode must be ''scaled'' or ''fixed''.');
    end
   case 'fontsize'
    fontsize = LocalToNum(value);
    if(~LocalIsPositiveScalar(fontsize))
      error('FontSize must be a numeric scalar > 0');
    end
   case 'fontencoding'
    fontencoding = lower(value);
    if (~strcmp(fontencoding,{'latin1','adobe'}))
      error('FontEncoding must be ''latin1'' or ''adobe''.');
    end
   case 'linemode'
    linemode = lower(value);
    if (~strcmp(linemode,{'scaled','fixed'}))
      error('LineMode must be ''scaled'' or ''fixed''.');
    end
   case 'linewidth'
    linewidth = LocalToNum(value);
    if(~LocalIsPositiveScalar(linewidth))
      error('LineWidth must be a numeric scalar > 0');
    end
   case 'renderer'
    renderer = lower(value);
    if (~strcmp(renderer,{'painters','zbuffer','opengl'}))
      error('Renderer must be ''painters'', ''zbuffer'' or ''opengl''.');
    end
   case 'resolution'
    resolution = LocalToNum(value);
    % Fix: validate the converted value rather than the raw input, so a
    % string such as '300' is accepted (and drop the stray ';' that
    % used to terminate the if-condition).
    if ~(isnumeric(resolution) & (prod(size(resolution)) == 1) & (resolution >= 0))
      error('Resolution must be a numeric scalar >= 0');
    end
   otherwise
    error(['Unrecognized option ' param '.']);
  end
end

% Collect the handles whose properties may need to be temporarily changed.
allLines = findall(H, 'type', 'line');
allText = findall(H, 'type', 'text');
allAxes = findall(H, 'type', 'axes');
allImages = findall(H, 'type', 'image');
allLights = findall(H, 'type', 'light');
allPatch = findall(H, 'type', 'patch');
allSurf = findall(H, 'type', 'surface');
allRect = findall(H, 'type', 'rectangle');
allFont = [allText; allAxes];
allColor = [allLines; allText; allAxes; allLights];
allMarker = [allLines; allPatch; allSurf];
allEdge = [allPatch; allSurf];
allCData = [allImages; allPatch; allSurf];

% Undo stack of (object, property, value) triples restored after printing.
old.objs = {};
old.prop = {};
old.values = {};

% Process format and preview parameter
showPreview = strcmp(format,'preview');
if showPreview
  format = 'png';
  % Fix: 'tempname' (all lower case) is the builtin that returns a unique
  % temporary file name; the previous spelling 'tempName' is undefined.
  filename = [tempname '.png'];
end
if strncmp(format,'eps',3) & ~strcmp(preview,'none')
  args = {args{:}, ['-' preview]};
end

hadError = 0;
try
  % Process size parameters
  paperPos = get(H, 'PaperPosition');
  old = LocalPushOldData(old, H, 'PaperPosition', paperPos);
  figureUnits = get(H, 'Units');
  set(H, 'Units', get(H,'PaperUnits'));
  figurePos = get(H, 'Position');
  aspectRatio = figurePos(3)/figurePos(4);
  set(H, 'Units', figureUnits);
  % Missing dimensions are derived from the on-screen aspect ratio.
  if (width == -1) & (height == -1)
    width = paperPos(3);
    height = paperPos(4);
  elseif (width == -1)
    width = height * aspectRatio;
  elseif (height == -1)
    height = width / aspectRatio;
  end
  set(H, 'PaperPosition', [0 0 width height]);
  paperPosMode = get(H, 'PaperPositionMode');
  old = LocalPushOldData(old, H, 'PaperPositionMode', paperPosMode);
  set(H, 'PaperPositionMode', 'manual');

  % Process rendering parameters
  switch (color)
   case {'bw', 'gray'}
    if ~strcmp(color,'bw') & strncmp(format,'eps',3)
      format = [format 'c'];
    end
    args = {args{:}, ['-d' format]};

    %compute and set gray colormap
    oldcmap = get(H,'Colormap');
    newgrays = 0.30*oldcmap(:,1) + 0.59*oldcmap(:,2) + 0.11*oldcmap(:,3);
    newcmap = [newgrays newgrays newgrays];
    old = LocalPushOldData(old, H, 'Colormap', oldcmap);
    set(H, 'Colormap', newcmap);

    %compute and set ColorSpec and CData properties
    old = LocalUpdateColors(allColor, 'color', old);
    old = LocalUpdateColors(allAxes, 'xcolor', old);
    old = LocalUpdateColors(allAxes, 'ycolor', old);
    old = LocalUpdateColors(allAxes, 'zcolor', old);
    old = LocalUpdateColors(allMarker, 'MarkerEdgeColor', old);
    old = LocalUpdateColors(allMarker, 'MarkerFaceColor', old);
    old = LocalUpdateColors(allEdge, 'EdgeColor', old);
    old = LocalUpdateColors(allEdge, 'FaceColor', old);
    old = LocalUpdateColors(allCData, 'CData', old);
   case 'cmyk'
    if strncmp(format,'eps',3)
      format = [format 'c'];
      args = {args{:}, ['-d' format], '-cmyk'};
    else
      args = {args{:}, ['-d' format]};
    end
   otherwise
    error('Invalid Color parameter');
  end
  if (~isempty(renderer))
    args = {args{:}, ['-' renderer]};
  end
  if (~isempty(resolution)) | ~strncmp(format,'eps',3)
    if isempty(resolution)
      resolution = 0;
    end
    args = {args{:}, ['-r' int2str(resolution)]};
  end

  % Process font parameters
  if (~isempty(fontmode))
    oldfonts = LocalGetAsCell(allFont,'FontSize');
    switch (fontmode)
     case 'fixed'
      oldfontunits = LocalGetAsCell(allFont,'FontUnits');
      old = LocalPushOldData(old, allFont, {'FontUnits'}, oldfontunits);
      set(allFont,'FontUnits','points');
      if (fontsize == -1)
        set(allFont,'FontSize',7);
      else
        set(allFont,'FontSize',fontsize);
      end
     case 'scaled'
      if (fontsize == -1)
        wscale = width/figurePos(3);
        hscale = height/figurePos(4);
        scale = min(wscale, hscale);
      else
        scale = fontsize;
      end
      newfonts = LocalScale(oldfonts,scale,5);
      set(allFont,{'FontSize'},newfonts);
     otherwise
      error('Invalid FontMode parameter');
    end
    % make sure we push the size after the units
    old = LocalPushOldData(old, allFont, {'FontSize'}, oldfonts);
  end
  if strcmp(fontencoding,'adobe') & strncmp(format,'eps',3)
    args = {args{:}, '-adobecset'};
  end

  % Process linewidth parameters
  if (~isempty(linemode))
    oldlines = LocalGetAsCell(allMarker,'LineWidth');
    old = LocalPushOldData(old, allMarker, {'LineWidth'}, oldlines);
    switch (linemode)
     case 'fixed'
      if (linewidth == -1)
        set(allMarker,'LineWidth',1);
      else
        set(allMarker,'LineWidth',linewidth);
      end
     case 'scaled'
      if (linewidth == -1)
        wscale = width/figurePos(3);
        hscale = height/figurePos(4);
        scale = min(wscale, hscale);
      else
        scale = linewidth;
      end
      newlines = LocalScale(oldlines, scale, 0.5);
      set(allMarker,{'LineWidth'},newlines);
     otherwise
      error('Invalid LineMode parameter');
    end
  end

  % Export
  print(H, filename, args{:});

catch
  hadError = 1;
end

% Restore figure settings
for n=1:length(old.objs)
  set(old.objs{n}, old.prop{n}, old.values{n});
end

if hadError
  error(deblank(lasterr));
end

% Show preview if requested
if showPreview
  X = imread(filename,'png');
  delete(filename);
  f = figure( 'Name', 'Preview', ...
              'Menubar', 'none', ...
              'NumberTitle', 'off', ...
              'Visible', 'off');
  image(X);
  axis image;
  ax = findobj(f, 'type', 'axes');
  set(ax, 'Units', get(H,'PaperUnits'), ...
          'Position', [0 0 width height], ...
          'Visible', 'off');
  set(ax, 'Units', 'pixels');
  axesPos = get(ax,'Position');
  figPos = get(f,'Position');
  rootSize = get(0,'ScreenSize');
  figPos(3:4) = axesPos(3:4);
  if figPos(1) + figPos(3) > rootSize(3)
    figPos(1) = rootSize(3) - figPos(3) - 50;
  end
  if figPos(2) + figPos(4) > rootSize(4)
    figPos(2) = rootSize(4) - figPos(4) - 50;
  end
  set(f, 'Position',figPos, ...
         'Visible', 'on');
end
%
% Local Functions
%
function outData = LocalPushOldData(inData, objs, prop, values)
% Append one (objects, property, values) triple to the restore stack so the
% figure can be put back exactly as it was after printing.
outData.objs = [inData.objs, {objs}];
outData.prop = [inData.prop, {prop}];
outData.values = [inData.values, {values}];
function cellArray = LocalGetAsCell(fig,prop)
% get() returns a bare value for a single handle but a cell array for a
% handle vector; normalize to a cell array so callers handle both alike.
cellArray = get(fig,prop);
if ~iscell(cellArray) & ~isempty(cellArray)
  cellArray = {cellArray};
end
function newArray = LocalScale(inArray, scale, minValue)
% Multiply each stored size by `scale`, clamping the result from below at
% `minValue` so text/lines never become unreadably small.
count = length(inArray);
newArray = cell(count,1);
for idx = 1:count
  scaled = scale * inArray{idx}(1);
  newArray{idx} = max(minValue, scaled);
end
% Map a cell array of ColorSpec values to grayscale.
% Char colors ('y','m','c','r','g','b','w','k') are first expanded to RGB
% triples; any other char spec (e.g. 'none') falls through the `otherwise`
% branch and is kept verbatim. Numeric RGB triples are collapsed to a
% luminance scalar and returned as a gray [g g g] triple.
function newArray = LocalMapToGray(inArray);
n = length(inArray);
newArray = cell(n,1);
for k=1:n
color = inArray{k};
if (~isempty(color))
if ischar(color)
switch color(1)
case 'y'
color = [1 1 0];
case 'm'
color = [1 0 1];
case 'c'
color = [0 1 1];
case 'r'
color = [1 0 0];
case 'g'
color = [0 1 0];
case 'b'
color = [0 0 1];
case 'w'
color = [1 1 1];
case 'k'
color = [0 0 0];
otherwise
% Unknown char spec: store as-is; the final ischar test below
% then keeps it unchanged.
newArray{k} = color;
end
end
if ~ischar(color)
% Rec.601 luma weights.
color = 0.30*color(1) + 0.59*color(2) + 0.11*color(3);
end
end
if isempty(color) | ischar(color)
newArray{k} = color;
else
newArray{k} = [color color color];
end
end
function newArray = LocalMapCData(inArray)
% Convert true-color (MxNx3 double) CData arrays to grayscale; indexed or
% non-double CData is passed through untouched.
count = length(inArray);
newArray = cell(count,1);
for idx = 1:count
  cdata = inArray{idx};
  if (ndims(cdata) == 3) & isa(cdata,'double')
    % Rec.601 luma weights, replicated into all three channels.
    lum = 0.30*cdata(:,:,1) + 0.59*cdata(:,:,2) + 0.11*cdata(:,:,3);
    cdata = cat(3, lum, lum, lum);
  end
  newArray{idx} = cdata;
end
% Push the current value of `prop` for every handle in inArray onto the
% restore stack, then overwrite the property with its grayscale mapping
% (CData via LocalMapCData, everything else via LocalMapToGray).
function outData = LocalUpdateColors(inArray, prop, inData)
value = LocalGetAsCell(inArray,prop);
outData.objs = {inData.objs{:}, inArray};
outData.prop = {inData.prop{:}, {prop}};
outData.values = {inData.values{:}, value};
if (~isempty(value))
if strcmp(prop,'CData')
value = LocalMapCData(value);
else
value = LocalMapToGray(value);
end
set(inArray,{prop},value);
end
function bool = LocalIsPositiveScalar(value)
% True when `value` is a numeric scalar strictly greater than zero.
bool = isnumeric(value) & (prod(size(value)) == 1) & (value > 0);
function value = LocalToNum(value)
% Accept either a numeric value or its string form; strings are converted
% with str2num so inputs such as '300' become numbers.
if ~ischar(value)
  return;
end
value = str2num(value);

View File

@ -1,591 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test_framework/normal_async_test.h"
#include <assert.h>
#include <queue>
#include <sstream>
#include <string.h>
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/typedefs.h"
using namespace webrtc;
// All NormalAsyncTest constructors clear the same set of counters and
// feedback flags. Fix: the first two constructors used to leave
// _lengthEncFrame uninitialized (the other three set it to 0); every
// constructor now initializes it, matching the others.
NormalAsyncTest::NormalAsyncTest()
:
NormalTest("Async Normal Test 1", "A test of normal execution of the codec", 1),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_encFrameCnt(0),
_decFrameCnt(0),
_requestKeyFrame(false),
_lengthEncFrame(0),
_appendNext(false),
_missingFrames(false),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedRPSI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
// Same defaults, but with an explicit target bit rate.
NormalAsyncTest::NormalAsyncTest(uint32_t bitRate)
:
NormalTest("Async Normal Test 1", "A test of normal execution of the codec",
           bitRate,
           1),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_encFrameCnt(0),
_decFrameCnt(0),
_requestKeyFrame(false),
_lengthEncFrame(0),
_appendNext(false),
_missingFrames(false),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedRPSI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
// Named test without a bit-rate override.
NormalAsyncTest::NormalAsyncTest(std::string name, std::string description,
                                 unsigned int testNo)
:
NormalTest(name, description, testNo),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_encFrameCnt(0),
_decFrameCnt(0),
_requestKeyFrame(false),
_lengthEncFrame(0),
_appendNext(false),
_missingFrames(false),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedRPSI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
// Named test with a target bit rate.
NormalAsyncTest::NormalAsyncTest(std::string name, std::string description,
                                 uint32_t bitRate, unsigned int testNo)
:
NormalTest(name, description, bitRate, testNo),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_encFrameCnt(0),
_decFrameCnt(0),
_requestKeyFrame(false),
_lengthEncFrame(0),
_appendNext(false),
_missingFrames(false),
_rttFrames(0),
_hasReceivedSLI(false),
_hasReceivedRPSI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
// Named test with a target bit rate and a simulated round-trip time
// expressed in frames (used to delay codec feedback).
NormalAsyncTest::NormalAsyncTest(std::string name, std::string description,
                                 uint32_t bitRate, unsigned int testNo,
                                 unsigned int rttFrames)
:
NormalTest(name, description, bitRate, testNo),
_decodeCompleteTime(0),
_encodeCompleteTime(0),
_encFrameCnt(0),
_decFrameCnt(0),
_requestKeyFrame(false),
_lengthEncFrame(0),
_appendNext(false),
_missingFrames(false),
_rttFrames(rttFrames),
_hasReceivedSLI(false),
_hasReceivedRPSI(false),
_hasReceivedPLI(false),
_waitForKey(false)
{
}
// Prepares one test run: derives default output/encoded file names from the
// test number when none were configured, then opens the source file for
// reading and the encoded/decoded output files for writing. Any open
// failure terminates the process. On runs after the first (_appendNext set
// by a previous Setup), the decoded file is opened in append mode so
// multi-iteration runs accumulate into one file.
void
NormalAsyncTest::Setup()
{
CodecTest::Setup();
std::stringstream ss;
std::string strTestNo;
ss << _testNo;
ss >> strTestNo;
// Check if settings exist. Otherwise use defaults.
if (_outname == "")
{
_outname = webrtc::test::OutputPath() + "out_normaltest" + strTestNo +
".yuv";
}
if (_encodedName == "")
{
_encodedName = webrtc::test::OutputPath() + "encoded_normaltest" +
strTestNo + ".yuv";
}
if ((_sourceFile = fopen(_inname.c_str(), "rb")) == NULL)
{
printf("Cannot read file %s.\n", _inname.c_str());
exit(1);
}
if ((_encodedFile = fopen(_encodedName.c_str(), "wb")) == NULL)
{
printf("Cannot write encoded file.\n");
exit(1);
}
char mode[3] = "wb";
if (_appendNext)
{
strncpy(mode, "ab", 3);
}
if ((_decodedFile = fopen(_outname.c_str(), mode)) == NULL)
{
printf("Cannot write file %s.\n", _outname.c_str());
exit(1);
}
_appendNext = true;
}
// Releases per-run resources: runs the base-class teardown, then closes the
// three files opened by Setup(). The streams are independent, so the close
// order is irrelevant.
void NormalAsyncTest::Teardown()
{
    CodecTest::Teardown();
    fclose(_decodedFile);
    fclose(_encodedFile);
    fclose(_sourceFile);
}
// The tuple owns both the encoded frame and its codec-specific info.
// `delete` on a null pointer is a no-op, so the explicit NULL guards that
// used to wrap these deletes were redundant and have been removed.
FrameQueueTuple::~FrameQueueTuple()
{
    delete _codecSpecificInfo;
    delete _frame;
}
// Appends an encoded frame and its codec-specific info to the queue; the
// queue takes ownership of both (via the FrameQueueTuple it creates).
// Thread safe: holds the queue's write lock for the duration of the push.
void FrameQueue::PushFrame(VideoFrame *frame,
                           webrtc::CodecSpecificInfo* codecSpecificInfo)
{
    WriteLockScoped lock(_queueRWLock);
    FrameQueueTuple* tuple = new FrameQueueTuple(frame, codecSpecificInfo);
    _frameBufferQueue.push(tuple);
}
// Removes and returns the oldest queued tuple, or NULL when the queue is
// empty. The caller takes ownership of the returned tuple.
FrameQueueTuple* FrameQueue::PopFrame()
{
    WriteLockScoped lock(_queueRWLock);
    FrameQueueTuple* front = NULL;
    if (!_frameBufferQueue.empty())
    {
        front = _frameBufferQueue.front();
        _frameBufferQueue.pop();
    }
    return front;
}
// True when no frames are queued. Takes the read lock so it can be polled
// concurrently with the producer/consumer.
bool FrameQueue::Empty()
{
    ReadLockScoped lock(_queueRWLock);
    return _frameBufferQueue.empty();
}
// Total number of encoded bytes produced so far, accumulated by Encoded().
size_t VideoEncodeCompleteCallback::EncodedBytes() { return _encodedBytes; }
// Encoder callback: notifies the test (timing/statistics), copies the
// encoded image plus its codec-specific info into a freshly allocated
// buffer, optionally appends the bytes to the encoded-output file, and
// hands ownership of the copy to the frame queue for later decoding.
// Returns -1 only when writing to the encoded file fails.
int32_t VideoEncodeCompleteCallback::Encoded(
const EncodedImage& encodedImage,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader* fragmentation) {
_test.Encoded(encodedImage);
VideoFrame *newBuffer = new VideoFrame();
newBuffer->VerifyAndAllocate(encodedImage._size);
_encodedBytes += encodedImage._length;
// If _frameQueue would have been a fixed sized buffer we could have asked
// it for an empty frame and then just do:
// emptyFrame->SwapBuffers(encodedBuffer);
// This is how it should be done in Video Engine to save in on memcpys
webrtc::CodecSpecificInfo* codecSpecificInfoCopy =
_test.CopyCodecSpecificInfo(codecSpecificInfo);
_test.CopyEncodedImage(*newBuffer, encodedImage, codecSpecificInfoCopy);
if (_encodedFile != NULL)
{
if (fwrite(newBuffer->Buffer(), 1, newBuffer->Length(),
_encodedFile) != newBuffer->Length()) {
// NOTE(review): on this early return newBuffer/codecSpecificInfoCopy
// are not freed or queued -- leak on write failure; confirm intent.
return -1;
}
}
_frameQueue->PushFrame(newBuffer, codecSpecificInfoCopy);
return 0;
}
// Total number of decoded bytes (I420-sized) accumulated by Decoded().
size_t VideoDecodeCompleteCallback::DecodedBytes() { return _decodedBytes; }
// Decoder callback: notifies the test (timing/statistics), accumulates the
// I420 byte count for the frame, and appends the raw picture to the
// decoded-output file when one is open.
int32_t VideoDecodeCompleteCallback::Decoded(I420VideoFrame& image)
{
    _test.Decoded(image);
    _decodedBytes += CalcBufferSize(kI420, image.width(), image.height());
    if (_decodedFile == NULL)
    {
        return 0;
    }
    return PrintI420VideoFrame(image, _decodedFile);
}
// Forwards the decoder's reference-frame acknowledgement to the test.
int32_t VideoDecodeCompleteCallback::ReceivedDecodedReferenceFrame(
    const uint64_t pictureId) {
  return _test.ReceivedDecodedReferenceFrame(pictureId);
}

// Forwards the decoder's per-frame acknowledgement to the test.
int32_t VideoDecodeCompleteCallback::ReceivedDecodedFrame(
    const uint64_t pictureId) {
  return _test.ReceivedDecodedFrame(pictureId);
}
// Encode-complete hook: records the completion time, counts the frame, and
// adds the frame's time-in-pipeline (completion time minus the submit time
// stored in _encodeTimes, keyed by the image's RTP timestamp) to the total.
void
NormalAsyncTest::Encoded(const EncodedImage& encodedImage)
{
_encodeCompleteTime = tGetTime();
_encFrameCnt++;
_totalEncodePipeTime += _encodeCompleteTime -
_encodeTimes[encodedImage._timeStamp];
}
// Decode-complete hook: records the completion time, counts the frame, adds
// the frame's time-in-pipeline (keyed by timestamp in _decodeTimes), and
// remembers the decoded resolution for later checks/reporting.
void
NormalAsyncTest::Decoded(const I420VideoFrame& decodedImage)
{
_decodeCompleteTime = tGetTime();
_decFrameCnt++;
_totalDecodePipeTime += _decodeCompleteTime -
_decodeTimes[decodedImage.timestamp()];
_decodedWidth = decodedImage.width();
_decodedHeight = decodedImage.height();
}
// Runs the whole test over resources/foreman_cif.yuv at CIF/30fps:
// initializes encoder and decoder, encodes every source frame, drains the
// encoded-frame queue through the (optional) packet-loss simulation and the
// decoder, and finally prints/logs bit-rate and timing statistics. A codec
// init failure or decoder error terminates the process.
void
NormalAsyncTest::Perform()
{
_inname = webrtc::test::ProjectRootPath() + "resources/foreman_cif.yuv";
CodecSettings(352, 288, 30, _bitRate);
Setup();
// InitEncode(settings, cores, maxPayloadSize) -- single core, 1440-byte
// payload; presumably matches the VideoEncoder interface -- TODO confirm.
if(_encoder->InitEncode(&_inst, 1, 1440) < 0)
{
exit(EXIT_FAILURE);
}
_decoder->InitDecode(&_inst, 1);
FrameQueue frameQueue;
VideoEncodeCompleteCallback encCallback(_encodedFile, &frameQueue, *this);
VideoDecodeCompleteCallback decCallback(_decodedFile, *this);
_encoder->RegisterEncodeCompleteCallback(&encCallback);
_decoder->RegisterDecodeCompleteCallback(&decCallback);
if (SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK)
{
exit(EXIT_FAILURE);
}
// Reset all per-run counters.
_totalEncodeTime = _totalDecodeTime = 0;
_totalEncodePipeTime = _totalDecodePipeTime = 0;
bool complete = false;
_framecnt = 0;
_encFrameCnt = 0;
_decFrameCnt = 0;
_sumEncBytes = 0;
_lengthEncFrame = 0;
double starttime = tGetTime();
while (!complete)
{
CodecSpecific_InitBitrate();
// Encode() returns true once the whole source file has been consumed.
complete = Encode();
if (!frameQueue.Empty() || complete)
{
while (!frameQueue.Empty())
{
_frameToDecode =
static_cast<FrameQueueTuple *>(frameQueue.PopFrame());
int lost = DoPacketLoss();
if (lost == 2)
{
// Lost the whole frame, continue
_missingFrames = true;
delete _frameToDecode;
_frameToDecode = NULL;
continue;
}
int ret = Decode(lost);
delete _frameToDecode;
_frameToDecode = NULL;
if (ret < 0)
{
fprintf(stderr,"\n\nError in decoder: %d\n\n", ret);
exit(EXIT_FAILURE);
}
else if (ret == 0)
{
_framecnt++;
}
else
{
fprintf(stderr,
"\n\nPositive return value from decode!\n\n");
}
}
}
}
double endtime = tGetTime();
double totalExecutionTime = endtime - starttime;
printf("Total execution time: %.1f s\n", totalExecutionTime);
_sumEncBytes = encCallback.EncodedBytes();
double actualBitRate = ActualBitRate(_encFrameCnt) / 1000.0;
// NOTE(review): if no frames were encoded/decoded these divisions are
// 0/0 -- statistics would be NaN; confirm the inputs guarantee frames.
double avgEncTime = _totalEncodeTime / _encFrameCnt;
double avgDecTime = _totalDecodeTime / _decFrameCnt;
printf("Actual bitrate: %f kbps\n", actualBitRate);
printf("Average encode time: %.1f ms\n", 1000 * avgEncTime);
printf("Average decode time: %.1f ms\n", 1000 * avgDecTime);
printf("Average encode pipeline time: %.1f ms\n",
1000 * _totalEncodePipeTime / _encFrameCnt);
printf("Average decode pipeline time: %.1f ms\n",
1000 * _totalDecodePipeTime / _decFrameCnt);
printf("Number of encoded frames: %u\n", _encFrameCnt);
printf("Number of decoded frames: %u\n", _decFrameCnt);
(*_log) << "Actual bitrate: " << actualBitRate << " kbps\tTarget: " <<
_bitRate << " kbps" << std::endl;
(*_log) << "Average encode time: " << avgEncTime << " s" << std::endl;
(*_log) << "Average decode time: " << avgDecTime << " s" << std::endl;
_encoder->Release();
_decoder->Release();
Teardown();
}
// Reads one raw frame from the source file and submits it to the encoder.
// Also services the simulated SLI/PLI feedback queues (a PLI forces the
// next frame to be a key frame). Returns true when the source file is
// exhausted, false otherwise.
bool
NormalAsyncTest::Encode()
{
    _lengthEncFrame = 0;
    if (feof(_sourceFile) != 0)
    {
        return true;
    }
    EXPECT_GT(fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile), 0u);
    EXPECT_EQ(0, _inputVideoBuffer.CreateFrame(_sizeY, _sourceBuffer,
                                               _sizeUv, _sourceBuffer + _sizeY,
                                               _sizeUv, _sourceBuffer + _sizeY + _sizeUv,
                                               _width, _height,
                                               _width, _halfWidth, _halfWidth));
    // 9e4 = 90 kHz RTP clock; timestamps advance one frame interval at a time.
    _inputVideoBuffer.set_timestamp((unsigned int)
        (_encFrameCnt * 9e4 / _inst.maxFramerate));
    _encodeCompleteTime = 0;
    _encodeTimes[_inputVideoBuffer.timestamp()] = tGetTime();
    std::vector<VideoFrameType> frame_types(1, kDeltaFrame);
    // check SLI queue
    _hasReceivedSLI = false;
    while (!_signalSLI.empty() && _signalSLI.front().delay == 0)
    {
        // SLI message has arrived at sender side
        _hasReceivedSLI = true;
        _pictureIdSLI = _signalSLI.front().id;
        _signalSLI.pop_front();
    }
    // decrement SLI queue times (each entry is one frame closer to arrival)
    for (std::list<fbSignal>::iterator it = _signalSLI.begin();
         it !=_signalSLI.end(); it++)
    {
        (*it).delay--;
    }
    // check PLI queue
    _hasReceivedPLI = false;
    while (!_signalPLI.empty() && _signalPLI.front().delay == 0)
    {
        // PLI message has arrived at sender side
        _hasReceivedPLI = true;
        _signalPLI.pop_front();
    }
    // decrement PLI queue times
    for (std::list<fbSignal>::iterator it = _signalPLI.begin();
         it != _signalPLI.end(); it++)
    {
        (*it).delay--;
    }
    if (_hasReceivedPLI)
    {
        // respond to PLI by encoding a key frame
        frame_types[0] = kKeyFrame;
        _hasReceivedPLI = false;
        _hasReceivedSLI = false; // don't trigger both at once
    }
    webrtc::CodecSpecificInfo* codecSpecificInfo = CreateEncoderSpecificInfo();
    int ret = _encoder->Encode(_inputVideoBuffer,
                               codecSpecificInfo, &frame_types);
    EXPECT_EQ(ret, WEBRTC_VIDEO_CODEC_OK);
    if (codecSpecificInfo != NULL)
    {
        delete codecSpecificInfo;
        codecSpecificInfo = NULL;
    }
    // If the encode callback already fired, use its completion time;
    // otherwise fall back to "now" for a synchronous-style measurement.
    if (_encodeCompleteTime > 0)
    {
        _totalEncodeTime += _encodeCompleteTime -
            _encodeTimes[_inputVideoBuffer.timestamp()];
    }
    else
    {
        _totalEncodeTime += tGetTime() -
            _encodeTimes[_inputVideoBuffer.timestamp()];
    }
    assert(ret >= 0);
    return false;
}
// Decodes the current frame (_frameToDecode), translating decoder error
// codes into simulated SLI/PLI feedback messages that are queued with a
// delay of _rttFrames frames. |lossValue| != 0 marks the frame incomplete.
// Returns a WEBRTC_VIDEO_CODEC_* status.
int
NormalAsyncTest::Decode(int lossValue)
{
    _sumEncBytes += _frameToDecode->_frame->Length();
    EncodedImage encodedImage;
    VideoEncodedBufferToEncodedImage(*(_frameToDecode->_frame), encodedImage);
    encodedImage._completeFrame = !lossValue;
    _decodeCompleteTime = 0;
    _decodeTimes[encodedImage._timeStamp] = tGetTime();
    int ret = WEBRTC_VIDEO_CODEC_OK;
    // TODO(mikhal): Update frame type.
    //if (!_waitForKey || encodedImage._frameType == kKeyFrame)
    {
        _waitForKey = false;
        ret = _decoder->Decode(encodedImage, _missingFrames, NULL,
                               _frameToDecode->_codecSpecificInfo);
        if (ret >= 0)
        {
            _missingFrames = false;
        }
    }
    // check for SLI
    if (ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
    {
        // add an SLI feedback to the feedback "queue"
        // to be delivered to encoder with _rttFrames delay
        _signalSLI.push_back(fbSignal(_rttFrames,
                             static_cast<uint8_t>((_lastDecPictureId) & 0x3f))); // 6 lsb
        ret = WEBRTC_VIDEO_CODEC_OK;
    }
    else if (ret == WEBRTC_VIDEO_CODEC_ERR_REQUEST_SLI)
    {
        // add an SLI feedback to the feedback "queue"
        // to be delivered to encoder with _rttFrames delay
        _signalSLI.push_back(fbSignal(_rttFrames,
                             static_cast<uint8_t>((_lastDecPictureId + 1) & 0x3f)));//6 lsb
        ret = WEBRTC_VIDEO_CODEC_OK;
    }
    else if (ret == WEBRTC_VIDEO_CODEC_ERROR)
    {
        // wait for new key frame
        // add an PLI feedback to the feedback "queue"
        // to be delivered to encoder with _rttFrames delay
        _signalPLI.push_back(fbSignal(_rttFrames, 0 /* picId not used*/));
        _waitForKey = true;
        ret = WEBRTC_VIDEO_CODEC_OK;
    }
    // Use the async completion time when the decode callback already fired.
    if (_decodeCompleteTime > 0)
    {
        _totalDecodeTime += _decodeCompleteTime -
            _decodeTimes[encodedImage._timeStamp];
    }
    else
    {
        _totalDecodeTime += tGetTime() - _decodeTimes[encodedImage._timeStamp];
    }
    return ret;
}
// Returns a heap-allocated member-wise copy of |codecSpecificInfo|.
// The caller owns the returned object.
webrtc::CodecSpecificInfo*
NormalAsyncTest::CopyCodecSpecificInfo(
    const webrtc::CodecSpecificInfo* codecSpecificInfo) const
{
    return new webrtc::CodecSpecificInfo(*codecSpecificInfo);
}
// Applies the configured target bitrate to the encoder; a zero bitrate
// falls back to a 600 kbps default.
void NormalAsyncTest::CodecSpecific_InitBitrate()
{
    const uint32_t rate = (_bitRate == 0) ? 600 : _bitRate;
    _encoder->SetRates(rate, _inst.maxFramerate);
}
// Copies the payload and metadata of |src| into the VideoFrame |dest|.
void NormalAsyncTest::CopyEncodedImage(VideoFrame& dest,
                                       const EncodedImage& src,
                                       void* /*codecSpecificInfo*/) const
{
    dest.CopyFrame(src._length, src._buffer);
    // Note: the frame type is intentionally not propagated; VideoFrame has
    // no equivalent field in this legacy API.
    dest.SetTimeStamp(src._timeStamp);
    dest.SetWidth((uint16_t)src._encodedWidth);
    dest.SetHeight((uint16_t)src._encodedHeight);
}
// Records the picture id of the most recently decoded reference frame.
int32_t NormalAsyncTest::ReceivedDecodedReferenceFrame(
    const uint64_t pictureId)
{
    _lastDecRefPictureId = pictureId;
    return 0;
}
// Records the picture id of the most recently decoded frame.
int32_t NormalAsyncTest::ReceivedDecodedFrame(const uint64_t pictureId)
{
    _lastDecPictureId = pictureId;
    return 0;
}
// Returns the current wall-clock time in seconds.
double NormalAsyncTest::tGetTime()
{
    const double ms = static_cast<double>(TickTime::MillisecondTimestamp());
    return ms / 1000;
}

View File

@ -1,185 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_ASYNC_TEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_ASYNC_TEST_H_
#include "webrtc/common_types.h"
#include <list>
#include <map>
#include <queue>
#include "webrtc/modules/video_coding/codecs/test_framework/normal_test.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
// Bundles an encoded frame buffer with its (optional) codec-specific info
// so both can travel through the encoder-to-decoder queue together.
// NOTE(review): the destructor is defined elsewhere; it presumably deletes
// both pointers, i.e. the tuple takes ownership — confirm before reuse.
class FrameQueueTuple
{
public:
    FrameQueueTuple(webrtc::VideoFrame *frame,
                    const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL)
    :
        _frame(frame),
        _codecSpecificInfo(codecSpecificInfo)
    {};
    ~FrameQueueTuple();
    webrtc::VideoFrame* _frame;
    const webrtc::CodecSpecificInfo* _codecSpecificInfo;
};
// FIFO of FrameQueueTuples guarded by a reader/writer lock, used to hand
// encoded frames from the encode callback to the decode loop.
class FrameQueue
{
public:
    FrameQueue()
    :
        _queueRWLock(*webrtc::RWLockWrapper::CreateRWLock())
    {
    }
    ~FrameQueue()
    {
        // The lock is held by reference, so it must be deleted explicitly.
        delete &_queueRWLock;
    }
    // Appends a frame (and optional codec info) to the back of the queue.
    void PushFrame(webrtc::VideoFrame *frame,
                   webrtc::CodecSpecificInfo* codecSpecificInfo = NULL);
    // Removes and returns the oldest tuple; caller takes ownership.
    FrameQueueTuple* PopFrame();
    bool Empty();
private:
    webrtc::RWLockWrapper& _queueRWLock;
    std::queue<FrameQueueTuple *> _frameBufferQueue;
};
// feedback signal to encoder
// Feedback signal (SLI/PLI) travelling back to the encoder; |delay| is the
// number of frames remaining until it "arrives" at the sender side.
struct fbSignal
{
    fbSignal(int d, uint8_t pid) : delay(d), id(pid) {};
    int delay;   // remaining frames until delivery
    uint8_t id;  // picture id the signal refers to (unused for PLI)
};
// Asynchronous (callback-driven) codec test: encodes a source clip, feeds
// the encoded frames back through the decoder, and measures per-frame and
// pipeline timing. Also models SLI/PLI feedback to the encoder with a
// round-trip delay expressed in frames (rttFrames).
class NormalAsyncTest : public NormalTest
{
public:
    NormalAsyncTest();
    NormalAsyncTest(uint32_t bitRate);
    NormalAsyncTest(std::string name, std::string description,
                    unsigned int testNo);
    NormalAsyncTest(std::string name, std::string description,
                    uint32_t bitRate, unsigned int testNo);
    NormalAsyncTest(std::string name, std::string description,
                    uint32_t bitRate, unsigned int testNo,
                    unsigned int rttFrames);
    virtual ~NormalAsyncTest() {};
    // Runs the whole encode/decode loop; see the .cc file for details.
    virtual void Perform();
    // Callbacks invoked when an encode/decode completes.
    virtual void Encoded(const webrtc::EncodedImage& encodedImage);
    virtual void Decoded(const webrtc::I420VideoFrame& decodedImage);
    // Returns a heap-allocated copy; caller owns the result.
    virtual webrtc::CodecSpecificInfo*
    CopyCodecSpecificInfo(
        const webrtc::CodecSpecificInfo* codecSpecificInfo) const;
    virtual void CopyEncodedImage(webrtc::VideoFrame& dest,
                                  const webrtc::EncodedImage& src,
                                  void* /*codecSpecificInfo*/) const;
    // Subclasses may supply encoder-specific info; default is none.
    virtual webrtc::CodecSpecificInfo* CreateEncoderSpecificInfo() const
    {
        return NULL;
    };
    virtual int32_t ReceivedDecodedReferenceFrame(
        const uint64_t pictureId);
    virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
protected:
    virtual void Setup();
    virtual void Teardown();
    // Returns true when the source file is exhausted.
    virtual bool Encode();
    // |lossValue| != 0 marks the frame as incomplete.
    virtual int Decode(int lossValue = 0);
    virtual void CodecSpecific_InitBitrate();
    virtual int SetCodecSpecificParameters() {return 0;};
    double tGetTime();// return time in sec
    FILE* _sourceFile;
    FILE* _decodedFile;
    uint32_t _decodedWidth;
    uint32_t _decodedHeight;
    double _totalEncodeTime;
    double _totalDecodeTime;
    double _decodeCompleteTime;
    double _encodeCompleteTime;
    double _totalEncodePipeTime;
    double _totalDecodePipeTime;
    int _framecnt;
    int _encFrameCnt;
    int _decFrameCnt;
    bool _requestKeyFrame;
    unsigned int _lengthEncFrame;
    FrameQueueTuple* _frameToDecode;
    bool _appendNext;
    // Maps frame timestamp -> submission time, for pipeline latency.
    std::map<uint32_t, double> _encodeTimes;
    std::map<uint32_t, double> _decodeTimes;
    bool _missingFrames;
    std::list<fbSignal> _signalSLI;   // pending SLI feedback, per-frame delays
    int _rttFrames;                   // feedback round-trip time, in frames
    mutable bool _hasReceivedSLI;
    mutable bool _hasReceivedRPSI;
    uint8_t _pictureIdSLI;
    uint16_t _pictureIdRPSI;
    uint64_t _lastDecRefPictureId;
    uint64_t _lastDecPictureId;
    std::list<fbSignal> _signalPLI;   // pending PLI feedback
    bool _hasReceivedPLI;
    bool _waitForKey;
};
// Encode-complete callback: counts encoded bytes, optionally writes the
// bitstream to |encodedFile|, and queues frames for the decode loop.
// Does not take ownership of the file or the queue.
class VideoEncodeCompleteCallback : public webrtc::EncodedImageCallback
{
public:
    VideoEncodeCompleteCallback(FILE* encodedFile, FrameQueue *frameQueue,
                                NormalAsyncTest& test)
    :
        _encodedFile(encodedFile),
        _frameQueue(frameQueue),
        _test(test),
        _encodedBytes(0)
    {}
    int32_t Encoded(const webrtc::EncodedImage& encodedImage,
                    const webrtc::CodecSpecificInfo* codecSpecificInfo,
                    const webrtc::RTPFragmentationHeader* fragmentation);
    // Total bytes produced by the encoder so far.
    size_t EncodedBytes();
private:
    FILE* _encodedFile;
    FrameQueue* _frameQueue;
    NormalAsyncTest& _test;
    size_t _encodedBytes;
};
// Decode-complete callback: forwards results and feedback to the test and
// optionally dumps decoded frames to |decodedFile| (not owned).
class VideoDecodeCompleteCallback : public webrtc::DecodedImageCallback
{
public:
    VideoDecodeCompleteCallback(FILE* decodedFile, NormalAsyncTest& test)
    :
        _decodedFile(decodedFile),
        _test(test),
        _decodedBytes(0)
    {}
    virtual int32_t Decoded(webrtc::I420VideoFrame& decodedImage);
    virtual int32_t
    ReceivedDecodedReferenceFrame(const uint64_t pictureId);
    virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
    // Total I420 bytes produced by the decoder so far.
    size_t DecodedBytes();
private:
    FILE* _decodedFile;
    NormalAsyncTest& _test;
    size_t _decodedBytes;
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_ASYNC_TEST_H_

View File

@ -1,264 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test_framework/normal_test.h"
#include <sstream>
#include <string.h>
#include <time.h>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/test/testsupport/fileutils.h"
// Default constructor: test number 1 with default name/description.
// Fix: _requestKeyFrame was left uninitialized here, unlike the other two
// constructors which set it to false; reading it before assignment was
// undefined behavior.
NormalTest::NormalTest()
:
CodecTest("Normal Test 1", "A test of normal execution of the codec"),
_requestKeyFrame(false),
_testNo(1),
_lengthEncFrame(0),
_appendNext(false)
{
}
// Constructs a test with a custom name/description and test number
// (the test number is used to derive default output file names).
NormalTest::NormalTest(std::string name, std::string description,
                       unsigned int testNo)
:
CodecTest(name, description),
_requestKeyFrame(false),
_testNo(testNo),
_lengthEncFrame(0),
_appendNext(false)
{
}
// Constructs a test with an explicit target bitrate (kbps) in addition to
// the custom name/description and test number.
NormalTest::NormalTest(std::string name, std::string description,
                       uint32_t bitRate, unsigned int testNo)
:
CodecTest(name, description, bitRate),
_requestKeyFrame(false),
_testNo(testNo),
_lengthEncFrame(0),
_appendNext(false)
{
}
// Opens the source, encoded-output and decoded-output files, deriving
// default output names from the test number when none were configured.
// Exits the process if any file cannot be opened. Subsequent calls append
// to (rather than truncate) the decoded output file.
void
NormalTest::Setup()
{
    CodecTest::Setup();
    std::stringstream ss;
    std::string strTestNo;
    ss << _testNo;
    ss >> strTestNo;
    // Check if settings exist. Otherwise use defaults.
    if (_outname == "")
    {
        _outname = webrtc::test::OutputPath() + "out_normaltest" + strTestNo +
            ".yuv";
    }
    if (_encodedName == "")
    {
        _encodedName = webrtc::test::OutputPath() + "encoded_normaltest" +
            strTestNo + ".yuv";
    }
    if ((_sourceFile = fopen(_inname.c_str(), "rb")) == NULL)
    {
        printf("Cannot read file %s.\n", _inname.c_str());
        exit(1);
    }
    if ((_encodedFile = fopen(_encodedName.c_str(), "wb")) == NULL)
    {
        printf("Cannot write encoded file.\n");
        exit(1);
    }
    // First Setup() truncates the decoded file; later ones append so that
    // multi-pass tests accumulate output.
    char mode[3] = "wb";
    if (_appendNext)
    {
        strncpy(mode, "ab", 3);
    }
    if ((_decodedFile = fopen(_outname.c_str(), mode)) == NULL)
    {
        printf("Cannot write file %s.\n", _outname.c_str());
        exit(1);
    }
    _appendNext = true;
}
void
NormalTest::Teardown()
{
CodecTest::Teardown();
fclose(_sourceFile);
fclose(_decodedFile);
}
// Synchronous encode/decode loop over foreman_cif.yuv: encodes each frame,
// writes the bitstream, decodes, writes the decoded YUV, then prints rate
// and timing statistics. Exits the process on decoder failure.
void
NormalTest::Perform()
{
    _width = 352;
    _halfWidth = (_width + 1) / 2;
    _height = 288;
    _halfHeight = (_height + 1) / 2;
    _sizeY = _width * _height;
    _sizeUv = _halfWidth * _halfHeight;
    _inname = webrtc::test::ProjectRootPath() + "resources/foreman_cif.yuv";
    CodecSettings(_width, _height, 30, _bitRate);
    Setup();
    _inputVideoBuffer.CreateEmptyFrame(_width, _height,
                                       _width, _halfWidth, _halfWidth);
    _decodedVideoBuffer.CreateEmptyFrame(_width, _height,
                                         _width, _halfWidth, _halfWidth);
    _encodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    // 1460 is the max payload size handed to the encoder.
    _encoder->InitEncode(&_inst, 1, 1460);
    CodecSpecific_InitBitrate();
    _decoder->InitDecode(&_inst,1);
    _totalEncodeTime = _totalDecodeTime = 0;
    _framecnt = 0;
    _sumEncBytes = 0;
    _lengthEncFrame = 0;
    int decodeLength = 0;
    while (!Encode())
    {
        DoPacketLoss();
        _encodedVideoBuffer.SetLength(_encodedVideoBuffer.Length());
        if (fwrite(_encodedVideoBuffer.Buffer(), 1,
                   _encodedVideoBuffer.Length(),
                   _encodedFile) != _encodedVideoBuffer.Length()) {
            return;
        }
        decodeLength = Decode();
        if (decodeLength < 0)
        {
            fprintf(stderr,"\n\nError in decoder: %d\n\n", decodeLength);
            exit(EXIT_FAILURE);
        }
        if (PrintI420VideoFrame(_decodedVideoBuffer, _decodedFile) < 0) {
            return;
        }
        CodecSpecific_InitBitrate();
        _framecnt++;
    }
    // Ensure we empty the decoding queue.
    while (decodeLength > 0)
    {
        decodeLength = Decode();
        if (decodeLength < 0)
        {
            fprintf(stderr,"\n\nError in decoder: %d\n\n", decodeLength);
            exit(EXIT_FAILURE);
        }
        if (PrintI420VideoFrame(_decodedVideoBuffer, _decodedFile) < 0) {
            return;
        }
    }
    double actualBitRate = ActualBitRate(_framecnt) / 1000.0;
    double avgEncTime = _totalEncodeTime / _framecnt;
    double avgDecTime = _totalDecodeTime / _framecnt;
    printf("Actual bitrate: %f kbps\n", actualBitRate);
    printf("Average encode time: %f s\n", avgEncTime);
    printf("Average decode time: %f s\n", avgDecTime);
    (*_log) << "Actual bitrate: " << actualBitRate << " kbps\tTarget: " << _bitRate << " kbps" << std::endl;
    (*_log) << "Average encode time: " << avgEncTime << " s" << std::endl;
    (*_log) << "Average decode time: " << avgDecTime << " s" << std::endl;
    _encoder->Release();
    _decoder->Release();
    Teardown();
}
// Reads one raw frame from the source and prepares it for encoding.
// Returns true when the source file is exhausted.
// NOTE(review): nothing inside the retry loop assigns _lengthEncFrame
// (the actual encoder invocation appears to have been removed), so the
// loop only terminates via the 50-attempt bail-out — confirm before
// resurrecting this code path.
bool
NormalTest::Encode()
{
    _lengthEncFrame = 0;
    EXPECT_GT(fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile), 0u);
    if (feof(_sourceFile) != 0)
    {
        return true;
    }
    _inputVideoBuffer.CreateFrame(_sizeY, _sourceBuffer,
                                  _sizeUv, _sourceBuffer + _sizeY,
                                  _sizeUv, _sourceBuffer + _sizeY +
                                  _sizeUv,
                                  _width, _height,
                                  _width, _halfWidth, _halfWidth);
    _inputVideoBuffer.set_timestamp(_framecnt);
    // This multiple attempt ridiculousness is to accomodate VP7:
    // 1. The wrapper can unilaterally reduce the framerate for low bitrates.
    // 2. The codec inexplicably likes to reject some frames. Perhaps there
    //    is a good reason for this...
    int encodingAttempts = 0;
    double starttime = 0;
    double endtime = 0;
    while (_lengthEncFrame == 0)
    {
        starttime = clock()/(double)CLOCKS_PER_SEC;
        _inputVideoBuffer.set_width(_inst.width);
        _inputVideoBuffer.set_height(_inst.height);
        endtime = clock()/(double)CLOCKS_PER_SEC;
        _encodedVideoBuffer.SetHeight(_inst.height);
        _encodedVideoBuffer.SetWidth(_inst.width);
        if (_lengthEncFrame < 0)
        {
            (*_log) << "Error in encoder: " << _lengthEncFrame << std::endl;
            fprintf(stderr,"\n\nError in encoder: %d\n\n", _lengthEncFrame);
            exit(EXIT_FAILURE);
        }
        _sumEncBytes += _lengthEncFrame;
        encodingAttempts++;
        if (encodingAttempts > 50)
        {
            (*_log) << "Unable to encode frame: " << _framecnt << std::endl;
            fprintf(stderr,"\n\nUnable to encode frame: %d\n\n", _framecnt);
            exit(EXIT_FAILURE);
        }
    }
    _totalEncodeTime += endtime - starttime;
    if (encodingAttempts > 1)
    {
        (*_log) << encodingAttempts << " attempts required to encode frame: " <<
            _framecnt + 1 << std::endl;
        fprintf(stderr,"\n%d attempts required to encode frame: %d\n", encodingAttempts,
                _framecnt + 1);
    }
    return false;
}
// Propagates the current frame dimensions to the encoded buffer and resets
// its length. The decoder invocation was removed from this vestigial path,
// so no decoding occurs and the function always reports zero pending data.
// Fix: removed the unreachable error branch — lengthDecFrame was a
// constant 0, so the `< 0` check could never fire.
int
NormalTest::Decode(int lossValue)
{
    _encodedVideoBuffer.SetWidth(_inst.width);
    _encodedVideoBuffer.SetHeight(_inst.height);
    _encodedVideoBuffer.SetLength(0);
    return 0;
}

View File

@ -1,45 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_TEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_TEST_H_
#include "webrtc/modules/video_coding/codecs/test_framework/test.h"
// Base class for synchronous codec tests: runs an encode/write/decode loop
// over a source clip and reports timing and bitrate statistics. Subclasses
// must supply the bitrate initialization and may inject packet loss.
class NormalTest : public CodecTest
{
public:
    NormalTest();
    NormalTest(std::string name, std::string description, unsigned int testNo);
    NormalTest(std::string name, std::string description, uint32_t bitRate, unsigned int testNo);
    virtual ~NormalTest() {};
    // Runs the whole test; see the .cc file for the loop structure.
    virtual void Perform();

protected:
    // Opens source/encoded/decoded files (exits on failure).
    virtual void Setup();
    virtual void Teardown();
    // Returns true once the source file is exhausted.
    virtual bool Encode();
    virtual int Decode(int lossValue = 0);
    virtual void CodecSpecific_InitBitrate()=0;
    // Hook for loss simulation; default is lossless.
    virtual int DoPacketLoss() {return 0;};

    FILE*                   _sourceFile;
    FILE*                   _decodedFile;
    FILE*                   _encodedFile;
    double                  _totalEncodeTime;
    double                  _totalDecodeTime;
    unsigned int            _framecnt;
    bool                    _requestKeyFrame;
    unsigned int            _testNo;          // used in default output names
    int                     _lengthEncFrame;
    bool                    _appendNext;      // append to decoded file on re-Setup
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_TEST_H_

View File

@ -1,258 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include <string.h>
#include <sstream>
#include "webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.h"
#include "webrtc/modules/video_coding/codecs/test_framework/video_source.h"
using namespace webrtc;
// Default-constructs a 10% packet loss test at 300 kbps, test number 5.
PacketLossTest::PacketLossTest()
:
NormalAsyncTest("PacketLossTest", "Encode, remove lost packets, decode", 300,
                5),
_lossRate(0.1),
_lossProbability(0.1),
_lastFrame(NULL),
_lastFrameLength(0)
{
}
// Named variant of the default 10% loss test (300 kbps, test number 5).
PacketLossTest::PacketLossTest(std::string name, std::string description)
:
NormalAsyncTest(name, description, 300, 5),
_lossRate(0.1),
_lossProbability(0.1),
_lastFrame(NULL),
_lastFrameLength(0)
{
}
// Constructs a loss test with an explicit channel loss rate in [0, 1].
// When |useNack| is set, lost packets are assumed to be retransmitted, so
// no drops are actually simulated (_lossProbability = 0) while _lossRate
// still describes the channel for bitrate scaling.
PacketLossTest::PacketLossTest(std::string name, std::string description, double lossRate, bool useNack, unsigned int rttFrames /* = 0*/)
:
NormalAsyncTest(name, description, 300, 5, rttFrames),
_lossRate(lossRate),
_lastFrame(NULL),
_lastFrameLength(0)
{
    assert(lossRate >= 0 && lossRate <= 1);
    _lossProbability = useNack ? 0 : lossRate;
}
// Remembers the timestamp of every encoded frame so Decoded() can detect
// frames that were dropped entirely in the simulated channel.
void PacketLossTest::Encoded(const EncodedImage& encodedImage)
{
    _frameQueue.push_back(encodedImage._timeStamp);
    NormalAsyncTest::Encoded(encodedImage);
}
// Decode-complete hook: detects frames that never reached the decoder by
// comparing timestamps against the queue filled in Encoded(). For each
// missing frame the previous decoded frame is rewritten to the output
// file (frame freeze), then the current frame is cached for future
// freezes.
void
PacketLossTest::Decoded(const I420VideoFrame& decodedImage)
{
    // check the frame queue if any frames have gone missing
    assert(!_frameQueue.empty()); // decoded frame is not in the queue
    while(_frameQueue.front() < decodedImage.timestamp())
    {
        // this frame is missing
        // write previous decoded frame again (frame freeze)
        if (_decodedFile && _lastFrame)
        {
            if (fwrite(_lastFrame, 1, _lastFrameLength,
                       _decodedFile) != _lastFrameLength) {
                return;
            }
        }
        // remove frame from queue
        _frameQueue.pop_front();
    }
    // Decoded frame is not in the queue.
    assert(_frameQueue.front() == decodedImage.timestamp());
    // pop the current frame
    _frameQueue.pop_front();
    // save image for future freeze-frame
    size_t length =
        CalcBufferSize(kI420, decodedImage.width(), decodedImage.height());
    // Grow the freeze buffer only when the frame got larger.
    if (_lastFrameLength < length)
    {
        if (_lastFrame) delete [] _lastFrame;
        _lastFrame = new uint8_t[length];
    }
    // TODO(mikhal): Can't the last frame be a I420VideoFrame?
    ExtractBuffer(decodedImage, length, _lastFrame);
    _lastFrameLength = length;
    NormalAsyncTest::Decoded(decodedImage);
}
// Prints target vs. actual loss statistics and the effective channel rate,
// then runs the base-class teardown.
void
PacketLossTest::Teardown()
{
    if (_totalKept + _totalThrown > 0)
    {
        printf("Target packet loss rate: %.4f\n", _lossProbability);
        printf("Actual packet loss rate: %.4f\n", (_totalThrown * 1.0f) / (_totalKept + _totalThrown));
        printf("Channel rate: %.2f kbps\n",
               0.001 * 8.0 * _sumChannelBytes / ((_framecnt * 1.0f) / _inst.maxFramerate));
    }
    else
    {
        printf("No packet losses inflicted\n");
    }
    NormalAsyncTest::Teardown();
}
// Derives output file names from the source clip and loss configuration
// (adding a "-nack" suffix when NACK is simulated), resets the loss
// counters, and delegates file handling to the base class.
void
PacketLossTest::Setup()
{
    const VideoSource source(_inname, _inst.width, _inst.height, _inst.maxFramerate);
    std::stringstream ss;
    std::string lossRateStr;
    ss << _lossRate;
    ss >> lossRateStr;
    _encodedName = source.GetName() + "-" + lossRateStr;
    _outname = "out-" + source.GetName() + "-" + lossRateStr;
    // _lossProbability differs from _lossRate only in NACK mode.
    if (_lossProbability != _lossRate)
    {
        _encodedName += "-nack";
        _outname += "-nack";
    }
    _encodedName += ".vp8";
    _outname += ".yuv";
    _totalKept = 0;
    _totalThrown = 0;
    _sumChannelBytes = 0;
    NormalAsyncTest::Setup();
}
// Configures the encoder rate and channel parameters. When NACK is being
// simulated, retransmissions consume part of the channel, so the media
// bitrate is scaled down by the loss rate.
void
PacketLossTest::CodecSpecific_InitBitrate()
{
    assert(_bitRate > 0);
    const bool simulatingNack = (_lossProbability != _lossRate);
    uint32_t simulatedBitRate =
        simulatingNack ? uint32_t(_bitRate / (1 + _lossRate)) : _bitRate;
    const int rtt =
        (_inst.maxFramerate > 0) ? _rttFrames * (1000 / _inst.maxFramerate) : 0;
    _encoder->SetChannelParameters((uint32_t)(_lossProbability * 255.0),
                                   rtt);
    _encoder->SetRates(simulatedBitRate, _inst.maxFramerate);
}
// Splits the current encoded frame into MTU-sized packets and drops each
// with probability _lossProbability, rebuilding the frame from the
// surviving packets. Returns 0 for no loss, 1 for partial loss and 2 when
// the entire frame was lost.
int PacketLossTest::DoPacketLoss()
{
    // Only packet loss for delta frames
    // TODO(mikhal): Identify delta frames
    // First frame so never a delta frame.
    if (_frameToDecode->_frame->Length() == 0 || _sumChannelBytes == 0)
    {
        _sumChannelBytes += _frameToDecode->_frame->Length();
        return 0;
    }
    unsigned char *packet = NULL;
    VideoFrame newEncBuf;
    newEncBuf.VerifyAndAllocate(_lengthSourceFrame);
    _inBufIdx = 0;
    _outBufIdx = 0;
    size_t size = 1;
    int kept = 0;
    int thrown = 0;
    // 1500 bytes approximates an Ethernet MTU.
    while ((size = NextPacket(1500, &packet)) > 0)
    {
        if (!PacketLoss(_lossProbability, thrown))
        {
            InsertPacket(&newEncBuf, packet, size);
            kept++;
        }
        else
        {
            // Use the ByteLoss function if you want to lose only
            // parts of a packet, and not the whole packet.
            //size_t size2 = ByteLoss(size, packet, 15);
            thrown++;
            //if (size2 != size)
            //{
            //    InsertPacket(&newEncBuf, packet, size2);
            //}
        }
    }
    int lossResult = (thrown!=0); // 0 = no loss  1 = loss(es)
    if (lossResult)
    {
        lossResult += (kept==0); // 2 = all lost = full frame
    }
    // Replace the frame payload with the surviving packets only.
    _frameToDecode->_frame->CopyFrame(newEncBuf.Length(), newEncBuf.Buffer());
    _sumChannelBytes += newEncBuf.Length();
    _totalKept += kept;
    _totalThrown += thrown;
    return lossResult;
    //printf("Threw away: %d out of %d packets\n", thrown, thrown + kept);
    //printf("Encoded left: %d bytes\n", _encodedVideoBuffer.Length());
}
// Hands out the next chunk (at most |mtu| bytes) of the current frame via
// |pkg| and returns its size; returns 0 once the frame is exhausted.
size_t PacketLossTest::NextPacket(size_t mtu, unsigned char **pkg)
{
    unsigned char* base = _frameToDecode->_frame->Buffer();
    const size_t start = _inBufIdx;
    *pkg = base + start;
    _inBufIdx = std::min(start + mtu, _frameToDecode->_frame->Length());
    return _inBufIdx - start;
}
// Placeholder for byte-level (partial packet) loss simulation. Currently a
// no-op that keeps the packet intact; |pkg| and |bytesToLose| are
// intentionally unused. See the commented-out call site in DoPacketLoss().
size_t PacketLossTest::ByteLoss(size_t size,
                                unsigned char *pkg,
                                size_t bytesToLose)
{
    return size;
}
// Appends |size| bytes from |pkg| to |buf| at the current output offset,
// growing the recorded length. Packets that would overflow the destination
// buffer are dropped with a diagnostic.
void PacketLossTest::InsertPacket(VideoFrame *buf,
                                  unsigned char *pkg,
                                  size_t size)
{
    if ((_outBufIdx + size) > buf->Size())
    {
        printf("InsertPacket error!\n");
        return;
    }
    memcpy(buf->Buffer() + _outBufIdx, pkg, size);
    buf->SetLength(buf->Length() + size);
    _outBufIdx += size;
}

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PACKET_LOSS_TEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PACKET_LOSS_TEST_H_
#include <list>
#include "webrtc/modules/video_coding/codecs/test_framework/normal_async_test.h"
// Async codec test that simulates packet loss (or NACK-based recovery) on
// the encoded bitstream before decoding, and freezes the last good frame
// in the output for every frame lost entirely.
class PacketLossTest : public NormalAsyncTest
{
public:
    PacketLossTest();
    virtual ~PacketLossTest() {if(_lastFrame) {delete [] _lastFrame; _lastFrame = NULL;}}
    virtual void Encoded(const webrtc::EncodedImage& encodedImage);
    virtual void Decoded(const webrtc::I420VideoFrame& decodedImage);
protected:
    PacketLossTest(std::string name, std::string description);
    PacketLossTest(std::string name,
                   std::string description,
                   double lossRate,
                   bool useNack,
                   unsigned int rttFrames = 0);
    virtual void Setup();
    virtual void Teardown();
    virtual void CodecSpecific_InitBitrate();
    // Packetizes the current frame and drops packets; see the .cc file.
    virtual int DoPacketLoss();
    // Returns the next chunk of at most |mtu| bytes of the current frame.
    virtual size_t NextPacket(size_t mtu, unsigned char **pkg);
    // Stub for partial-packet loss; currently keeps the packet intact.
    virtual size_t ByteLoss(size_t size,
                            unsigned char *pkg,
                            size_t bytesToLose);
    virtual void InsertPacket(webrtc::VideoFrame *buf,
                              unsigned char *pkg,
                              size_t size);
    size_t _inBufIdx;     // read offset into the frame being packetized
    size_t _outBufIdx;    // write offset into the rebuilt frame
    // When NACK is being simulated _lossProbabilty is zero,
    // otherwise it is set equal to _lossRate.
    // Desired channel loss rate.
    double _lossRate;
    // Probability used to simulate packet drops.
    double _lossProbability;
    int _totalKept;
    int _totalThrown;
    size_t _sumChannelBytes;
    // Timestamps of all encoded frames, used to detect fully lost frames.
    std::list<uint32_t> _frameQueue;
    uint8_t* _lastFrame;        // cached copy of the last decoded frame
    size_t _lastFrameLength;
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_PACKET_LOSS_TEST_H_

View File

@ -1,427 +0,0 @@
function plotBenchmark(fileNames, export)
%PLOTBENCHMARK Plots and exports video codec benchmarking results.
%   PLOTBENCHMARK(FILENAMES, EXPORT) parses the video codec benchmarking result
%   files given by the cell array of strings FILENAME. It plots the results and
%   optionally exports each plot to an appropriately named file.
%
%   EXPORT parameter:
%   'none'  No file exports.
%   'eps'   Exports to eps files (default).
%   'pdf'   Exports to eps files and uses the command-line utility
%           epstopdf to obtain pdf files.
%
%   Example:
%   plotBenchmark({'H264Benchmark.txt' 'LSVXBenchmark.txt'}, 'pdf')

% Validate arguments; normalize a single file name to a one-element cell.
if (nargin < 1)
    error('Too few input arguments');
elseif (nargin < 2)
    export = 'eps';
end

if ~iscell(fileNames)
    if ischar(fileNames)
        % one single file name as a string is ok
        if size(fileNames,1) > 1
            % this is a char matrix, not ok
            error('First argument must not be a char matrix');
        end
        % wrap in a cell array
        fileNames = {fileNames};
    else
        error('First argument must be a cell array of strings');
    end
end

if ~ischar(export)
    error('Second argument must be a string');
end

outpath = 'BenchmarkPlots';
[status, errMsg] = mkdir(outpath);
if status == 0
    error(errMsg);
end

nCases = 0;
testCases = [];

% Read each test result file
for fileIdx = 1:length(fileNames)
    if ~isstr(fileNames{fileIdx})
        error('First argument must be a cell array of strings');
    end

    fid = fopen(fileNames{fileIdx}, 'rt');
    if fid == -1
        error(['Unable to open ' fileNames{fileIdx}]);
    end

    % The file must start with a versioned magic line.
    version = '1.0';
    if ~strcmp(fgetl(fid), ['#!benchmark' version])
        fclose(fid);
        error(['Requires benchmark file format version ' version]);
    end

    % Parse results file into testCases struct
    codec = fgetl(fid);
    tline = fgetl(fid);
    while(tline ~= -1)
        nCases = nCases + 1;

        % First line per case: name, resolution, frame rate (comma separated).
        delim = strfind(tline, ',');
        name = tline(1:delim(1)-1);
        % Drop underscored suffix from name
        underscore = strfind(name, '_');
        if ~isempty(underscore)
            name = name(1:underscore(1)-1);
        end
        resolution = tline(delim(1)+1:delim(2)-1);
        frameRate = tline(delim(2)+1:end);

        tline = fgetl(fid);
        delim = strfind(tline, ',');
        bitrateLabel = tline(1:delim(1)-1);
        bitrate = sscanf(tline(delim(1):end),',%f');

        tline = fgetl(fid);
        delim = strfind(tline, ',');
        psnrLabel = tline(1:delim(1)-1);
        psnr = sscanf(tline(delim(1):end),',%f');

        % Default data for the optional lines
        speedLabel = 'Default';
        speed = 0;
        ssimLabel = 'Default';
        ssim = 0;

        % Optional lines (speed, encode/decode time, SSIM) follow until a
        % line without commas is reached.
        tline = fgetl(fid);
        delim = strfind(tline, ',');
        while ~isempty(delim)
            % More data
            % Check type of data
            if strncmp(lower(tline), 'speed', 5)
                % Speed data included
                speedLabel = tline(1:delim(1)-1);
                speed = sscanf(tline(delim(1):end), ',%f');
                tline = fgetl(fid);
            elseif strncmp(lower(tline), 'encode time', 11)
                % Encode and decode times included
                % TODO: take care of the data
                % pop two lines from file
                tline = fgetl(fid);
                tline = fgetl(fid);
            elseif strncmp(tline, 'SSIM', 4)
                % SSIM data included
                ssimLabel = tline(1:delim(1)-1);
                ssim = sscanf(tline(delim(1):end), ',%f');
                tline = fgetl(fid);
            end
            delim = strfind(tline, ',');
        end

        testCases = [testCases struct('codec', codec, 'name', name, 'resolution', ...
            resolution, 'frameRate', frameRate, 'bitrate', bitrate, 'psnr', psnr, ...
            'speed', speed, 'bitrateLabel', bitrateLabel, 'psnrLabel', psnrLabel, ...
            'speedLabel', speedLabel, ...
            'ssim', ssim, 'ssimLabel', ssimLabel)];

        tline = fgetl(fid);
    end

    fclose(fid);
end

% Each plotOne* helper consumes all cases sharing one resolution/frame
% rate per call and returns the remainder, so loop until empty.
i = 0;
casesPsnr = testCases;
while ~isempty(casesPsnr)
    i = i + 1;
    casesPsnr = plotOnePsnr(casesPsnr, i, export, outpath);
end

casesSSIM = testCases;
while ~isempty(casesSSIM)
    i = i + 1;
    casesSSIM = plotOneSSIM(casesSSIM, i, export, outpath);
end

casesSpeed = testCases;
while ~isempty(casesSpeed)
    % Skip cases without speed data (speed defaults to 0).
    if casesSpeed(1).speed == 0
        casesSpeed = casesSpeed(2:end);
    else
        i = i + 1;
        casesSpeed = plotOneSpeed(casesSpeed, i, export, outpath);
    end
end
%%%%%%%%%%%%%%%%%%
%% SUBFUNCTIONS %%
%%%%%%%%%%%%%%%%%%
% Plots PSNR vs. bitrate for all cases matching the first case's
% resolution and frame rate, optionally exporting the figure; returns the
% cases not plotted in this call.
function casesOut = plotOnePsnr(cases, num, export, outpath)
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).resolution, cases(i).resolution) & ...
            strcmp(cases(1).frameRate, cases(i).frameRate)
        plotIdx = [plotIdx i];
    end
end

% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);

% Prune similar results (data points within 10 kbps of the next one)
for i = 1:length(cases)
    simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    while ~isempty(simIndx)
        diffIndx = setdiff(1:length(cases(i).bitrate), simIndx);
        cases(i).psnr = cases(i).psnr(diffIndx);
        cases(i).bitrate = cases(i).bitrate(diffIndx);
        simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    end
end

% Prepare figure with axis labels and so on
hFig = figure(num);
clf;
hold on;
grid on;
axis([0 1100 20 50]);
set(gca, 'XTick', 0:200:1000);
set(gca, 'YTick', 20:10:60);
xlabel(cases(1).bitrateLabel);
ylabel(cases(1).psnrLabel);
res = cases(1).resolution;
frRate = cases(1).frameRate;
title([res ', ' frRate]);

hLines = [];
codecs = {};
sequences = {};
i = 0;
while ~isempty(cases)
    i = i + 1;
    [cases, hLine, codec, sequences] = plotOneCodec(cases, 'bitrate', 'psnr', i, sequences, 1);
    % Stored to generate the legend
    hLines = [hLines ; hLine];
    codecs = {codecs{:} codec};
end
legend(hLines, codecs, 4);
hold off;

if ~strcmp(export, 'none')
    % Export figure to an eps file
    res = stripws(res);
    frRate = stripws(frRate);
    exportName = [outpath '/psnr-' res '-' frRate];
    exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end

if strcmp(export, 'pdf')
    % Use the epstopdf utility to convert to pdf
    system(['epstopdf ' exportName '.eps']);
end
function casesOut = plotOneSSIM(cases, num, export, outpath)
% PLOTONESSIM Plots SSIM vs. bitrate curves for all cases that share the
% resolution and frame rate of cases(1), in figure window |num|.
% Returns the cases that did not match and were therefore not plotted.
% If |export| is not 'none', the figure is written as an EPS file to
% |outpath|; if |export| is 'pdf' it is additionally converted with epstopdf.
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).resolution, cases(i).resolution) & ...
        strcmp(cases(1).frameRate, cases(i).frameRate)
        plotIdx = [plotIdx i];
    end
end
% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);
% Prune similar results: repeatedly drop data points whose bitrate is
% within 10 kbps of the next point, keeping the curves readable.
for i = 1:length(cases)
    simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    while ~isempty(simIndx)
        diffIndx = setdiff(1:length(cases(i).bitrate), simIndx);
        cases(i).ssim = cases(i).ssim(diffIndx);
        cases(i).bitrate = cases(i).bitrate(diffIndx);
        simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
    end
end
% Prepare figure with axis labels and so on
hFig = figure(num);
clf;
hold on;
grid on;
axis([0 1100 0.5 1]); % y-limit are set to 'auto' below
set(gca, 'XTick', 0:200:1000);
%set(gca, 'YTick', 20:10:60);
xlabel(cases(1).bitrateLabel);
ylabel(cases(1).ssimLabel);
res = cases(1).resolution;
frRate = cases(1).frameRate;
title([res ', ' frRate]);
hLines = [];
codecs = {};
sequences = {};
i = 0;
% Plot one curve per codec; plotOneCodec consumes the cases it draws.
while ~isempty(cases)
    i = i + 1;
    [cases, hLine, codec, sequences] = plotOneCodec(cases, 'bitrate', 'ssim', i, sequences, 1);
    % Stored to generate the legend
    hLines = [hLines ; hLine];
    codecs = {codecs{:} codec};
end
%set(gca,'YLimMode','auto')
set(gca,'YLim',[0.5 1])
set(gca,'YScale','log')
legend(hLines, codecs, 4);
hold off;
if ~strcmp(export, 'none')
    % Export figure to an eps file.
    % Bug fix: previously exported with a 'psnr-' prefix (copy-paste from
    % plotOnePsnr), which overwrote the PSNR figure for the same case.
    res = stripws(res);
    frRate = stripws(frRate);
    exportName = [outpath '/ssim-' res '-' frRate];
    exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end
if strcmp(export, 'pdf')
    % Use the epstopdf utility to convert to pdf
    system(['epstopdf ' exportName '.eps']);
end
function casesOut = plotOneSpeed(cases, num, export, outpath)
% PLOTONESPEED Plots encode/decode speed vs. PSNR curves for all cases
% that share the resolution, frame rate AND sequence name of cases(1),
% in figure window |num|. Returns the cases that did not match.
% If |export| is not 'none', the figure is written as an EPS file to
% |outpath|; if |export| is 'pdf' it is additionally converted with epstopdf.
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).resolution, cases(i).resolution) & ...
        strcmp(cases(1).frameRate, cases(i).frameRate) & ...
        strcmp(cases(1).name, cases(i).name)
        plotIdx = [plotIdx i];
    end
end
% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);
% Prune similar results: repeatedly drop data points whose PSNR is
% within 0.25 dB of the next point, keeping the curves readable.
for i = 1:length(cases)
    simIndx = find(abs(cases(i).psnr - [cases(i).psnr(2:end) ; 0]) < 0.25);
    while ~isempty(simIndx)
        diffIndx = setdiff(1:length(cases(i).psnr), simIndx);
        cases(i).psnr = cases(i).psnr(diffIndx);
        cases(i).speed = cases(i).speed(diffIndx);
        simIndx = find(abs(cases(i).psnr - [cases(i).psnr(2:end) ; 0]) < 0.25);
    end
end
hFig = figure(num);
clf;
hold on;
%grid on;
xlabel(cases(1).psnrLabel);
ylabel(cases(1).speedLabel);
res = cases(1).resolution;
name = cases(1).name;
frRate = cases(1).frameRate;
title([name ', ' res ', ' frRate]);
hLines = [];
codecs = {};
sequences = {};
i = 0;
% Plot one curve per codec; plotOneCodec consumes the cases it draws.
% Last argument 0 suppresses the per-sequence text annotations.
while ~isempty(cases)
    i = i + 1;
    [cases, hLine, codec, sequences] = plotOneCodec(cases, 'psnr', 'speed', i, sequences, 0);
    % Stored to generate the legend
    hLines = [hLines ; hLine];
    codecs = {codecs{:} codec};
end
legend(hLines, codecs, 1);
hold off;
if ~strcmp(export, 'none')
    % Export figure to an eps file
    res = stripws(res);
    frRate = stripws(frRate);
    exportName = [outpath '/speed-' name '-' res '-' frRate];
    exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end
if strcmp(export, 'pdf')
    % Use the epstopdf utility to convert to pdf
    system(['epstopdf ' exportName '.eps']);
end
function [casesOut, hLine, codec, sequences] = plotOneCodec(cases, xfield, yfield, num, sequences, annotatePlot)
% PLOTONECODEC Plots |yfield| vs. |xfield| for every case whose codec
% matches cases(1).codec, into the current axes, using line style |num|.
% Returns the unplotted cases, the last line handle (for the legend),
% the codec name, and the updated list of annotated sequence names.
% If |annotatePlot| is nonzero, each sequence name is printed once next
% to its first data point.
plotStr = {'gx-', 'bo-', 'r^-', 'kd-', 'cx-', 'go--', 'b^--'};
% Find matching codecs
plotIdx = 1;
for i = 2:length(cases)
    if strcmp(cases(1).codec, cases(i).codec)
        plotIdx = [plotIdx i];
    end
end
% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);
for i = 1:length(cases)
    % Plot a single case
    hLine = plot(getfield(cases(i), xfield), getfield(cases(i), yfield), plotStr{num}, ...
        'LineWidth', 1.1, 'MarkerSize', 6);
end
% hLine handle and codec are returned to construct the legend afterwards
codec = cases(1).codec;
if annotatePlot == 0
    return;
end
for i = 1:length(cases)
    % Print the codec name as a text label
    % Ensure each codec is only printed once
    sequencePlotted = 0;
    for j = 1:length(sequences)
        if strcmp(cases(i).name, sequences{j})
            sequencePlotted = 1;
            break;
        end
    end
    if sequencePlotted == 0
        text(getfield(cases(i), xfield, {1}), getfield(cases(i), yfield, {1}), ...
            [' ' cases(i).name]);
        sequences = {sequences{:} cases(i).name};
    end
end
% Strip whitespace from string
function str = stripws(str)
% STRIPWS Removes all whitespace characters (spaces, tabs, etc.) from
% |str|. Errors if the input is not a character array.
%
% Uses ischar instead of the deprecated isstr, and logical indexing
% instead of setdiff(…, find(…)); behavior is unchanged.
if ~ischar(str)
    error('String required');
end
str = str(~isspace(str));

View File

@ -1,169 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test_framework/test.h"
#include <string.h>
#include <iostream>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/test/testsupport/metrics/video_metrics.h"
using namespace webrtc;
long filesize(const char *filename); // local function defined at end of file
// Constructs a test with an unset (0) bit rate; CodecSettings() falls
// back to a 600 kbps default when _bitRate is still 0.
CodecTest::CodecTest(std::string name, std::string description)
:
_bitRate(0),
_inname(""),
_outname(""),
_encodedName(""),
_name(name),
_description(description)
{
    // Zero the codec settings and seed rand() with a fixed seed so that
    // simulated packet-loss patterns are reproducible across runs.
    memset(&_inst, 0, sizeof(_inst));
    unsigned int seed = static_cast<unsigned int>(0);
    srand(seed);
}
// Constructs a test with an explicit target bit rate in kbps.
CodecTest::CodecTest(std::string name, std::string description,
                     uint32_t bitRate)
:
_bitRate(bitRate),
_inname(""),
_outname(""),
_encodedName(""),
_name(name),
_description(description)
{
    // Zero the codec settings and seed rand() with a fixed seed so that
    // simulated packet-loss patterns are reproducible across runs.
    memset(&_inst, 0, sizeof(_inst));
    unsigned int seed = static_cast<unsigned int>(0);
    srand(seed);
}
// Writes a run summary to stdout and to the log stream: test name,
// description, input/output file names, and the average PSNR and SSIM
// computed by comparing the input and output I420 files.
void
CodecTest::Print()
{
    std::cout << _name << " completed!" << std::endl;
    (*_log) << _name << std::endl;
    (*_log) << _description << std::endl;
    (*_log) << "Input file: " << _inname << std::endl;
    (*_log) << "Output file: " << _outname << std::endl;
    webrtc::test::QualityMetricsResult psnr;
    webrtc::test::QualityMetricsResult ssim;
    // Frame-by-frame comparison of the raw YUV input and decoded output.
    I420PSNRFromFiles(_inname.c_str(), _outname.c_str(), _inst.width,
                      _inst.height, &psnr);
    I420SSIMFromFiles(_inname.c_str(), _outname.c_str(), _inst.width,
                      _inst.height, &ssim);
    (*_log) << "PSNR: " << psnr.average << std::endl;
    std::cout << "PSNR: " << psnr.average << std::endl << std::endl;
    (*_log) << "SSIM: " << ssim.average << std::endl;
    std::cout << "SSIM: " << ssim.average << std::endl << std::endl;
    (*_log) << std::endl;
}
// Derives frame geometry from the codec settings (_inst) and allocates
// the raw source-frame buffer. CodecSettings() must have been called
// first so that _inst.width/_inst.height are valid.
void
CodecTest::Setup()
{
    _width = _inst.width;
    _halfWidth = (_width + 1) / 2;
    _height = _inst.height;
    _halfHeight = (_height + 1) / 2;  // Removed a stray double semicolon.
    _sizeY = _width * _height;
    _sizeUv = _halfWidth * _halfHeight;
    // Size in bytes of one I420 frame at the configured resolution.
    _lengthSourceFrame = webrtc::CalcBufferSize(webrtc::kI420,
                                                _inst.width,
                                                _inst.height);
    _sourceBuffer = new unsigned char[_lengthSourceFrame];
}
// Fills _inst with a VP8 configuration for the given resolution.
// |bitRate| (kbps) overrides the constructor's value when nonzero;
// if both are zero, a 600 kbps default is used.
void
CodecTest::CodecSettings(int width, int height,
                         uint32_t frameRate /*=30*/,
                         uint32_t bitRate /*=0*/)
{
    if (bitRate > 0)
    {
        _bitRate = bitRate;
    }
    else if (_bitRate == 0)
    {
        _bitRate = 600;  // Default target bit rate in kbps.
    }
    _inst.codecType = kVideoCodecVP8;
    _inst.codecSpecific.VP8.feedbackModeOn = true;
    _inst.maxFramerate = (unsigned char)frameRate;
    _inst.startBitrate = (int)_bitRate;
    _inst.maxBitrate = 8000;
    _inst.width = width;
    _inst.height = height;
}
// Releases the source-frame buffer allocated in Setup().
void
CodecTest::Teardown()
{
    delete [] _sourceBuffer;
}
// Injects the encoder under test. The test does not take ownership;
// the caller must keep |encoder| alive for the duration of the test.
void
CodecTest::SetEncoder(webrtc::VideoEncoder* encoder)
{
    _encoder = encoder;
}
// Injects the decoder under test. The test does not take ownership;
// the caller must keep |decoder| alive for the duration of the test.
void
CodecTest::SetDecoder(VideoDecoder* decoder)
{
    _decoder = decoder;
}
// Sets the stream that Print() writes its summary to. Not owned.
void
CodecTest::SetLog(std::fstream* log)
{
    _log = log;
}
// Returns the measured average bit rate in bits per second over
// |nFrames| encoded frames, based on the accumulated encoded byte count.
//
// Bug fix: the original computed the duration with integer division
// (nFrames / _inst.maxFramerate), which truncated and divided by zero
// whenever nFrames < maxFramerate. Rearranged to multiply first and
// guarded against nFrames == 0 / maxFramerate == 0.
double CodecTest::ActualBitRate(int nFrames)
{
    if (nFrames <= 0 || _inst.maxFramerate == 0)
    {
        return 0.0;
    }
    return 8.0 * _sumEncBytes * _inst.maxFramerate / nFrames;
}
// Simulates packet loss: returns true with probability |lossRate|
// (a value in [0, 1]). The |thrown| argument is unused.
bool CodecTest::PacketLoss(double lossRate, int /*thrown*/)
{
    const double draw = RandUniform();
    return draw < lossRate;
}
// Wraps the encoded bitstream held by |videoBuffer| in an EncodedImage
// header. No copy is made: |image| aliases the buffer's memory, so
// |videoBuffer| must outlive any use of |image|.
void
CodecTest::VideoEncodedBufferToEncodedImage(VideoFrame& videoBuffer,
                                            EncodedImage &image)
{
    image._buffer = videoBuffer.Buffer();
    image._length = videoBuffer.Length();
    image._size = videoBuffer.Size();
    // image._frameType = static_cast<VideoFrameType>
    //    (videoBuffer.GetFrameType());
    image._timeStamp = videoBuffer.TimeStamp();
    image._encodedWidth = videoBuffer.Width();
    image._encodedHeight = videoBuffer.Height();
    image._completeFrame = true;
}
// Returns the size of |filename| in bytes, or 0 if the file cannot be
// opened or its size cannot be determined.
//
// Bug fix: the original dereferenced the FILE* without checking the
// fopen() result, so a missing file caused undefined behavior in
// fseek()/fclose(). Also guards against ftell() returning -1.
long filesize(const char *filename)
{
    FILE *f = fopen(filename, "rb"); /* open the file in read only */
    if (f == NULL)
    {
        return 0;
    }
    long size = 0;
    if (fseek(f, 0, SEEK_END) == 0) /* seek was successful */
    {
        size = ftell(f);
        if (size < 0) /* ftell failed */
        {
            size = 0;
        }
    }
    fclose(f);
    return size;
}

View File

@ -1,77 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Fixed include-guard typo: FRAWEWORK -> FRAMEWORK (guard is file-local,
// so renaming both lines together is safe).
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_TEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_TEST_H_
#include <stdlib.h>
#include <fstream>
#include <string>
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
// Base class for video codec tests. Owns the per-test I/O buffers,
// codec settings and file names; borrows (does not own) the encoder,
// decoder and log stream injected through the setters. Subclasses
// implement Perform() to drive the actual test.
class CodecTest
{
public:
    CodecTest(std::string name, std::string description);
    CodecTest(std::string name, std::string description,
              uint32_t bitRate);
    virtual ~CodecTest() {};
    // Allocates buffers based on the codec settings; call after
    // CodecSettings().
    virtual void Setup();
    // Runs the test; must be implemented by each subclass.
    virtual void Perform()=0;
    // Prints a PSNR/SSIM summary to stdout and the log stream.
    virtual void Print();
    void SetEncoder(webrtc::VideoEncoder *encoder);
    void SetDecoder(webrtc::VideoDecoder *decoder);
    void SetLog(std::fstream* log);
protected:
    // Fills _inst with a VP8 configuration for the given geometry.
    virtual void CodecSettings(int width,
                               int height,
                               uint32_t frameRate=30,
                               uint32_t bitRate=0);
    virtual void Teardown();
    // Measured average bit rate (bits/s) over nFrames encoded frames.
    double ActualBitRate(int nFrames);
    // True with probability lossRate; used to simulate packet loss.
    virtual bool PacketLoss(double lossRate, int /*thrown*/);
    // Uniform draw in (0, 1]; deterministic given the srand() seed.
    static double RandUniform() { return (rand() + 1.0)/(RAND_MAX + 1.0); }
    // Wraps an encoded buffer in an EncodedImage header (aliasing, no copy).
    static void VideoEncodedBufferToEncodedImage(
        webrtc::VideoFrame& videoBuffer,
        webrtc::EncodedImage &image);
    webrtc::VideoEncoder* _encoder;   // Not owned.
    webrtc::VideoDecoder* _decoder;   // Not owned.
    uint32_t _bitRate;                // Target bit rate in kbps; 0 = unset.
    size_t _lengthSourceFrame;        // Bytes per raw I420 source frame.
    unsigned char* _sourceBuffer;     // Owned; allocated in Setup().
    webrtc::I420VideoFrame _inputVideoBuffer;
    // TODO(mikhal): For now using VideoFrame for encodedBuffer, should use a
    // designated class.
    webrtc::VideoFrame _encodedVideoBuffer;
    webrtc::I420VideoFrame _decodedVideoBuffer;
    webrtc::VideoCodec _inst;         // Codec settings under test.
    std::fstream* _log;               // Not owned.
    std::string _inname;
    std::string _outname;
    std::string _encodedName;
    size_t _sumEncBytes;              // Accumulated encoded byte count.
    int _width;
    int _halfWidth;
    int _height;
    int _halfHeight;
    int _sizeY;
    int _sizeUv;
private:
    std::string _name;
    std::string _description;
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_TEST_H_

View File

@ -1,43 +0,0 @@
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
  'targets': [
    {
      # Static library bundling the shared classes used by the legacy
      # video codec tests (unit, normal, packet-loss and benchmark).
      'target_name': 'test_framework',
      'type': 'static_library',
      'dependencies': [
        '<(DEPTH)/testing/gtest.gyp:gtest',
        '<(webrtc_root)/common_video/common_video.gyp:common_video',
        '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
        '<(webrtc_root)/test/metrics.gyp:metrics',
        '<(webrtc_root)/test/test.gyp:test_support',
      ],
      'sources': [
        # header files
        'benchmark.h',
        'normal_async_test.h',
        'normal_test.h',
        'packet_loss_test.h',
        'test.h',
        'unit_test.h',
        'video_source.h',
        # source files
        'benchmark.cc',
        'normal_async_test.cc',
        'normal_test.cc',
        'packet_loss_test.cc',
        'test.cc',
        'unit_test.cc',
        'video_source.cc',
      ],
    },
  ], # targets
}

View File

@ -1,815 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/video_coding/codecs/test_framework/unit_test.h"
#include "webrtc/modules/video_coding/codecs/test_framework/video_source.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
using namespace webrtc;
// Default unit test; all resources are allocated lazily in Setup().
UnitTest::UnitTest()
:
CodecTest("UnitTest", "Unit test"),
_tests(0),
_errors(0),
_source(NULL),
_refFrame(NULL),
_refEncFrame(NULL),
_refDecFrame(NULL),
_refEncFrameLength(0),
_sourceFile(NULL),
is_key_frame_(false),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL)
{
}
// Named unit test (used by subclasses); resources allocated in Setup().
UnitTest::UnitTest(std::string name, std::string description)
:
CodecTest(name, description),
_tests(0),
_errors(0),
_source(NULL),
_refFrame(NULL),
_refEncFrame(NULL),
_refDecFrame(NULL),
_refEncFrameLength(0),
_sourceFile(NULL),
is_key_frame_(false),
_encodeCompleteCallback(NULL),
_decodeCompleteCallback(NULL)
{
}
// Frees everything allocated in Setup(). delete / delete[] are no-ops
// on NULL, so explicit guards are only needed for fclose().
UnitTest::~UnitTest()
{
    delete _encodeCompleteCallback;
    delete _decodeCompleteCallback;
    delete _source;
    delete [] _refFrame;
    delete [] _refDecFrame;
    delete [] _sourceBuffer;
    if (_sourceFile) {
        fclose(_sourceFile);
    }
    delete [] _refEncFrame;
}
// Encode-complete callback: copies the encoded bitstream and its
// metadata into the buffer the test polls, then raises the flag that
// EncodeComplete() consumes. Always reports success.
int32_t
UnitTestEncodeCompleteCallback::Encoded(const EncodedImage& encodedImage,
                                        const webrtc::CodecSpecificInfo* codecSpecificInfo,
                                        const webrtc::RTPFragmentationHeader*
                                        fragmentation)
{
    _encodedVideoBuffer->VerifyAndAllocate(encodedImage._size);
    _encodedVideoBuffer->CopyFrame(encodedImage._size, encodedImage._buffer);
    _encodedVideoBuffer->SetLength(encodedImage._length);
    // TODO(mikhal): Update frame type API.
    // _encodedVideoBuffer->SetFrameType(encodedImage._frameType);
    _encodedVideoBuffer->SetWidth(
        (uint16_t)encodedImage._encodedWidth);
    _encodedVideoBuffer->SetHeight(
        (uint16_t)encodedImage._encodedHeight);
    _encodedVideoBuffer->SetTimeStamp(encodedImage._timeStamp);
    _encodeComplete = true;
    _encodedFrameType = encodedImage._frameType;
    return 0;
}
// Decode-complete callback: copies the decoded frame into the buffer the
// test polls and raises the flag consumed by DecodeComplete().
int32_t UnitTestDecodeCompleteCallback::Decoded(I420VideoFrame& image)
{
    _decodedVideoBuffer->CopyFrame(image);
    _decodeComplete = true;
    return 0;
}
// Returns true exactly once per completed encode, resetting the flag so
// the next poll returns false until another frame completes.
bool
UnitTestEncodeCompleteCallback::EncodeComplete()
{
    if (!_encodeComplete)
    {
        return false;
    }
    _encodeComplete = false;
    return true;
}
// Frame type (key/delta) of the most recently completed encode.
VideoFrameType
UnitTestEncodeCompleteCallback::EncodedFrameType() const
{
    return _encodedFrameType;
}
// Returns true exactly once per completed decode, resetting the flag so
// the next poll returns false until another frame completes.
bool
UnitTestDecodeCompleteCallback::DecodeComplete()
{
    if (!_decodeComplete)
    {
        return false;
    }
    _decodeComplete = false;
    return true;
}
// Busy-waits (no sleep) until the encode callback fires or
// kMaxWaitEncTimeMs elapses. Returns the encoded frame length in bytes,
// or 0 on timeout.
size_t
UnitTest::WaitForEncodedFrame() const
{
    int64_t startTime = TickTime::MillisecondTimestamp();
    while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitEncTimeMs)
    {
        if (_encodeCompleteCallback->EncodeComplete())
        {
            return _encodedVideoBuffer.Length();
        }
    }
    return 0;
}
// Busy-waits (no sleep) until the decode callback fires or
// kMaxWaitDecTimeMs elapses. Returns the decoded I420 frame size in
// bytes, or 0 on timeout.
size_t
UnitTest::WaitForDecodedFrame() const
{
    int64_t startTime = TickTime::MillisecondTimestamp();
    while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitDecTimeMs)
    {
        if (_decodeCompleteCallback->DecodeComplete())
        {
            return webrtc::CalcBufferSize(kI420, _decodedVideoBuffer.width(),
                                          _decodedVideoBuffer.height());
        }
    }
    return 0;
}
// Applies a new target bit rate to the encoder, keeping the configured
// frame rate. Returns the encoder's SetRates() result.
uint32_t
UnitTest::CodecSpecific_SetBitrate(uint32_t bitRate,
                                   uint32_t /* frameRate */)
{
    return _encoder->SetRates(bitRate, _inst.maxFramerate);
}
// One-time test setup: registers the callbacks, opens the foreman_cif
// source sequence, configures a VP8-style codec instance, and produces a
// reference encoded frame (_refEncFrame) plus a reference decoded frame
// (_refDecFrame) that later tests compare against bit-exactly.
void
UnitTest::Setup()
{
    // Use _sourceFile as a check to prevent multiple Setup() calls.
    if (_sourceFile != NULL)
    {
        return;
    }
    if (_encodeCompleteCallback == NULL)
    {
        _encodeCompleteCallback =
            new UnitTestEncodeCompleteCallback(&_encodedVideoBuffer);
    }
    if (_decodeCompleteCallback == NULL)
    {
        _decodeCompleteCallback =
            new UnitTestDecodeCompleteCallback(&_decodedVideoBuffer);
    }
    _encoder->RegisterEncodeCompleteCallback(_encodeCompleteCallback);
    _decoder->RegisterDecodeCompleteCallback(_decodeCompleteCallback);
    _source = new VideoSource(webrtc::test::ProjectRootPath() +
                              "resources/foreman_cif.yuv", kCIF);
    _lengthSourceFrame = _source->GetFrameLength();
    _refFrame = new unsigned char[_lengthSourceFrame];
    _refDecFrame = new unsigned char[_lengthSourceFrame];
    _sourceBuffer = new unsigned char [_lengthSourceFrame];
    _sourceFile = fopen(_source->GetFileName().c_str(), "rb");
    ASSERT_TRUE(_sourceFile != NULL);
    // Codec settings used throughout the unit test.
    _inst.maxFramerate = _source->GetFrameRate();
    _bitRate = 300;
    _inst.startBitrate = 300;
    _inst.maxBitrate = 4000;
    _inst.width = _source->GetWidth();
    _inst.height = _source->GetHeight();
    _inst.qpMax = 56;
    _inst.codecSpecific.VP8.denoisingOn = true;
    // Get input frame.
    ASSERT_EQ(_lengthSourceFrame,
              fread(_refFrame, 1, _lengthSourceFrame, _sourceFile));
    int size_y = _inst.width * _inst.height;
    int size_uv = ((_inst.width + 1) / 2) * ((_inst.height + 1) / 2);
    _inputVideoBuffer.CreateFrame(size_y, _refFrame,
                                  size_uv, _refFrame + size_y,
                                  size_uv, _refFrame + size_y + size_uv,
                                  _inst.width, _inst.height,
                                  _inst.width,
                                  (_inst.width + 1) / 2, (_inst.width + 1) / 2);
    rewind(_sourceFile);
    // Get a reference encoded frame.
    _encodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    // Ensures our initial parameters are valid.
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(_inputVideoBuffer, NULL, NULL);
    _refEncFrameLength = WaitForEncodedFrame();
    ASSERT_GT(_refEncFrameLength, 0u);
    _refEncFrame = new unsigned char[_refEncFrameLength];
    memcpy(_refEncFrame, _encodedVideoBuffer.Buffer(), _refEncFrameLength);
    // Get a reference decoded frame.
    _decodedVideoBuffer.CreateEmptyFrame(_inst.width, _inst.height, _inst.width,
                                         (_inst.width + 1) / 2,
                                         (_inst.width + 1) / 2);
    EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
    size_t frameLength = 0;
    int i = 0;
    _inputVideoBuffer.CreateEmptyFrame(_inst.width, _inst.height, _inst.width,
                                       (_inst.width + 1) / 2,
                                       (_inst.width + 1) / 2);
    // Feed frames until the decoder produces output (it may need more
    // than one input frame before the first decoded frame appears).
    while (frameLength == 0)
    {
        EncodedImage encodedImage;
        if (i > 0)
        {
            // Insert yet another frame.
            ASSERT_EQ(_lengthSourceFrame,
                      fread(_refFrame, 1, _lengthSourceFrame, _sourceFile));
            EXPECT_EQ(0, ConvertToI420(kI420, _refFrame, 0, 0, _width, _height,
                                       0, kRotateNone, &_inputVideoBuffer));
            _encoder->Encode(_inputVideoBuffer, NULL, NULL);
            ASSERT_GT(WaitForEncodedFrame(), 0u);
        } else {
            // The first frame is always a key frame.
            encodedImage._frameType = kKeyFrame;
        }
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        ASSERT_TRUE(_decoder->Decode(encodedImage, 0, NULL)
                    == WEBRTC_VIDEO_CODEC_OK);
        frameLength = WaitForDecodedFrame();
        _encodedVideoBuffer.SetLength(0);
        i++;
    }
    rewind(_sourceFile);
    EXPECT_EQ(_lengthSourceFrame, frameLength);
    ExtractBuffer(_decodedVideoBuffer, _lengthSourceFrame, _refDecFrame);
}
// Releases the codecs and all buffers allocated in Setup(). Pointers are
// nulled so the destructor's cleanup remains safe afterwards.
void
UnitTest::Teardown()
{
    // Use _sourceFile as a check to prevent multiple Teardown() calls.
    if (_sourceFile == NULL)
    {
        return;
    }
    _encoder->Release();
    _decoder->Release();
    fclose(_sourceFile);
    _sourceFile = NULL;
    delete [] _refFrame;
    _refFrame = NULL;
    delete [] _refEncFrame;
    _refEncFrame = NULL;
    delete [] _refDecFrame;
    _refDecFrame = NULL;
    delete [] _sourceBuffer;
    _sourceBuffer = NULL;
}
// Intentionally empty: the unit test reports failures through gtest
// expectations instead of the base class's PSNR/SSIM summary.
void
UnitTest::Print()
{
}
// Decodes the currently buffered encoded frame without gtest checks.
// Returns the decoded frame length in bytes on success (0 if no frame
// was produced in time), or the decoder's negative error code.
int
UnitTest::DecodeWithoutAssert()
{
    EncodedImage encodedImage;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    int ret = _decoder->Decode(encodedImage, 0, NULL);
    size_t frameLength = WaitForDecodedFrame();
    _encodedVideoBuffer.SetLength(0);  // Consume the buffered frame.
    return ret == WEBRTC_VIDEO_CODEC_OK ? static_cast<int>(frameLength) : ret;
}
// Decodes the currently buffered encoded frame with gtest checks.
// An empty buffer is treated as success (nothing to decode). When
// is_key_frame_ is set, the frame is marked as a key frame before
// decoding. Returns the decoded length in bytes, or the error code.
int
UnitTest::Decode()
{
    EncodedImage encodedImage;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    if (encodedImage._length == 0)
    {
        return WEBRTC_VIDEO_CODEC_OK;
    }
    if (is_key_frame_) {
        encodedImage._frameType = kKeyFrame;
    }
    int ret = _decoder->Decode(encodedImage, 0, NULL);
    size_t frameLength = WaitForDecodedFrame();
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, ret);
    // Either no output yet (decoder lag) or a full-size frame.
    EXPECT_TRUE(frameLength == 0 || frameLength == _lengthSourceFrame);
    _encodedVideoBuffer.SetLength(0);  // Consume the buffered frame.
    return ret == WEBRTC_VIDEO_CODEC_OK ? static_cast<int>(frameLength) : ret;
}
// Test pure virtual VideoEncoder and VideoDecoder APIs.
// Exercises, in order: encoder parameter validation, encoder stress
// (re-init/reset/release between encodes must stay bit-exact with the
// reference frame), decoder parameter validation, decoder stress with
// corrupt/zero bitstreams, decode bit-exactness across init/reset/
// release and across a resolution change, and timestamp propagation
// through an encode/decode loop over the whole sequence. Finishes with
// RateControlTests() and Teardown(). The exact call order matters:
// encoder/decoder state is deliberately torn down and rebuilt between
// sections.
void
UnitTest::Perform()
{
    UnitTest::Setup();
    size_t frameLength;
    I420VideoFrame inputImage;
    EncodedImage encodedImage;
    //----- Encoder parameter tests -----
    //-- Calls before InitEncode() --
    // We want to revert the initialization done in Setup().
    EXPECT_TRUE(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, NULL)
                == WEBRTC_VIDEO_CODEC_UNINITIALIZED);
    //-- InitEncode() errors --
    // Null pointer.
    EXPECT_TRUE(_encoder->InitEncode(NULL, 1, 1440) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    // bit rate exceeds max bit rate
    int32_t tmpBitRate = _inst.startBitrate;
    int32_t tmpMaxBitRate = _inst.maxBitrate;
    _inst.startBitrate = 4000;
    _inst.maxBitrate = 3000;
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.startBitrate = tmpBitRate;
    _inst.maxBitrate = tmpMaxBitRate; //unspecified value
    // Bad framerate.
    _inst.maxFramerate = 0;
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    // Seems like we should allow any framerate in range [0, 255].
    //_inst.frameRate = 100;
    //EXPECT_TRUE(_encoder->InitEncode(&_inst, 1) == -1); // FAILS
    _inst.maxFramerate = 30;
    // Bad bitrate.
    _inst.startBitrate = static_cast<unsigned int>(-1);
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.maxBitrate = _inst.startBitrate - 1;
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.maxBitrate = 0;
    _inst.startBitrate = 300;
    // Bad maxBitRate.
    _inst.maxBitrate = 200;
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _inst.maxBitrate = 4000;
    // Bad width.
    _inst.width = 0;
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) < 0);
    _inst.width = _source->GetWidth();
    // Bad height.
    _inst.height = 0;
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) < 0);
    _inst.height = _source->GetHeight();
    // Bad number of cores.
    EXPECT_TRUE(_encoder->InitEncode(&_inst, -1, 1440) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    //-- Encode() errors --
    inputImage.ResetSize();
    EXPECT_TRUE(_encoder->Encode(inputImage, NULL, NULL) ==
                WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    int width = _source->GetWidth();
    int half_width = (width + 1) / 2;
    int height = _source->GetHeight();
    int half_height = (height + 1) / 2;
    int size_y = width * height;
    int size_uv = half_width * half_height;
    _inputVideoBuffer.CreateFrame(size_y, _refFrame,
                                  size_uv, _refFrame + size_y,
                                  size_uv, _refFrame + size_y + size_uv,
                                  width, height,
                                  width, half_width, half_width);
    //----- Encoder stress tests -----
    // Vary frame rate and I-frame request.
    for (int i = 1; i <= 60; i++)
    {
        VideoFrameType frame_type = !(i % 2) ? kKeyFrame : kDeltaFrame;
        std::vector<VideoFrameType> frame_types(1, frame_type);
        EXPECT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, &frame_types) ==
                    WEBRTC_VIDEO_CODEC_OK);
        EXPECT_GT(WaitForEncodedFrame(), 0u);
    }
    // Init then encode.
    _encodedVideoBuffer.SetLength(0);
    EXPECT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, NULL) ==
                WEBRTC_VIDEO_CODEC_OK);
    EXPECT_GT(WaitForEncodedFrame(), 0u);
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(_inputVideoBuffer, NULL, NULL);
    frameLength = WaitForEncodedFrame();
    EXPECT_GT(frameLength, 0u);
    // Re-initializing must reproduce the reference encoding bit-exactly.
    EXPECT_TRUE(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
                                _encodedVideoBuffer.Buffer(), frameLength));
    // Reset then encode.
    _encodedVideoBuffer.SetLength(0);
    EXPECT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, NULL) ==
                WEBRTC_VIDEO_CODEC_OK);
    WaitForEncodedFrame();
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(_inputVideoBuffer, NULL, NULL);
    frameLength = WaitForEncodedFrame();
    EXPECT_GT(frameLength, 0u);
    EXPECT_TRUE(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
                                _encodedVideoBuffer.Buffer(), frameLength));
    // Release then encode.
    _encodedVideoBuffer.SetLength(0);
    EXPECT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, NULL) ==
                WEBRTC_VIDEO_CODEC_OK);
    WaitForEncodedFrame();
    EXPECT_TRUE(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    _encoder->Encode(_inputVideoBuffer, NULL, NULL);
    frameLength = WaitForEncodedFrame();
    EXPECT_GT(frameLength, 0u);
    EXPECT_TRUE(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
                                _encodedVideoBuffer.Buffer(), frameLength));
    //----- Decoder parameter tests -----
    //-- Calls before InitDecode() --
    // We want to revert the initialization done in Setup().
    EXPECT_TRUE(_decoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    EXPECT_TRUE(_decoder->Decode(encodedImage, false, NULL) ==
                WEBRTC_VIDEO_CODEC_UNINITIALIZED);
    WaitForDecodedFrame();
    EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_UNINITIALIZED);
    EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
    //-- Decode() errors --
    // Unallocated encodedVideoBuffer.
    _encodedVideoBuffer.Free();
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    encodedImage._length = 10; // Buffer NULL but length > 0
    EXPECT_EQ(_decoder->Decode(encodedImage, false, NULL),
              WEBRTC_VIDEO_CODEC_ERR_PARAMETER);
    _encodedVideoBuffer.VerifyAndAllocate(_lengthSourceFrame);
    //----- Decoder stress tests -----
    unsigned char* tmpBuf = new unsigned char[_lengthSourceFrame];
    // "Random" and zero data.
    // We either expect an error, or at the least, no output.
    // This relies on the codec's ability to detect an erroneous bitstream.
    EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
    for (int i = 0; i < 100; i++)
    {
        // Raw YUV data fed as "encoded" input: an invalid bitstream.
        ASSERT_EQ(_refEncFrameLength,
                  fread(tmpBuf, 1, _refEncFrameLength, _sourceFile));
        _encodedVideoBuffer.CopyFrame(_refEncFrameLength, tmpBuf);
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        int ret = _decoder->Decode(encodedImage, false, NULL);
        EXPECT_TRUE(ret <= 0);
        if (ret == 0)
        {
            EXPECT_TRUE(WaitForDecodedFrame() == 0);
        }
        // All-zero input must likewise produce no decoded output.
        memset(tmpBuf, 0, _refEncFrameLength);
        _encodedVideoBuffer.CopyFrame(_refEncFrameLength, tmpBuf);
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        if (i == 0) {
            // First frame is a key frame.
            is_key_frame_ = true;
        }
        ret = _decoder->Decode(encodedImage, false, NULL);
        EXPECT_TRUE(ret <= 0);
        if (ret == 0)
        {
            EXPECT_TRUE(WaitForDecodedFrame() == 0);
        }
    }
    rewind(_sourceFile);
    _encodedVideoBuffer.SetLength(_refEncFrameLength);
    _encodedVideoBuffer.CopyFrame(_refEncFrameLength, _refEncFrame);
    // Init then decode.
    EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
    frameLength = 0;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    // first frame is a key frame.
    encodedImage._frameType = kKeyFrame;
    while (frameLength == 0)
    {
        _decoder->Decode(encodedImage, false, NULL);
        frameLength = WaitForDecodedFrame();
    }
    size_t length = CalcBufferSize(kI420, width, height);
    scoped_ptr<uint8_t[]> decoded_buffer(new uint8_t[length]);
    ExtractBuffer(_decodedVideoBuffer, _lengthSourceFrame,
                  decoded_buffer.get());
    // Decoding the reference bitstream must reproduce the reference
    // decoded frame bit-exactly.
    EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), frameLength, _refDecFrame,
                                _lengthSourceFrame));
    // Reset then decode.
    EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    frameLength = 0;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    while (frameLength == 0)
    {
        _decoder->Decode(encodedImage, false, NULL);
        frameLength = WaitForDecodedFrame();
    }
    ExtractBuffer(_decodedVideoBuffer, _lengthSourceFrame,
                  decoded_buffer.get());
    EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), frameLength,
                                _refDecFrame, _lengthSourceFrame));
    // Decode with other size, reset, then decode with original size again
    // to verify that decoder is reset to a "fresh" state upon Reset().
    {
        // Assert that input frame size is a factor of two, so that we can use
        // quarter size below.
        EXPECT_TRUE((_inst.width % 2 == 0) && (_inst.height % 2 == 0));
        VideoCodec tempInst;
        memcpy(&tempInst, &_inst, sizeof(VideoCodec));
        tempInst.width /= 2;
        tempInst.height /= 2;
        int tmpHalfWidth = (tempInst.width + 1) / 2;
        int tmpHalfHeight = (tempInst.height + 1) / 2;
        int tmpSizeY = tempInst.width * tempInst.height;
        int tmpSizeUv = tmpHalfWidth * tmpHalfHeight;
        // Encode reduced (quarter) frame size.
        EXPECT_TRUE(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
        EXPECT_TRUE(_encoder->InitEncode(&tempInst, 1, 1440) ==
                    WEBRTC_VIDEO_CODEC_OK);
        webrtc::I420VideoFrame tempInput;
        tempInput.CreateFrame(tmpSizeY, _inputVideoBuffer.buffer(kYPlane),
                              tmpSizeUv, _inputVideoBuffer.buffer(kUPlane),
                              tmpSizeUv, _inputVideoBuffer.buffer(kVPlane),
                              tempInst.width, tempInst.height,
                              tempInst.width, tmpHalfWidth, tmpHalfWidth);
        _encoder->Encode(tempInput, NULL, NULL);
        frameLength = WaitForEncodedFrame();
        EXPECT_GT(frameLength, 0u);
        // Reset then decode.
        EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
        frameLength = 0;
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        while (frameLength == 0)
        {
            _decoder->Decode(encodedImage, false, NULL);
            frameLength = WaitForDecodedFrame();
        }
        // Encode original frame again
        EXPECT_TRUE(_encoder->Release() == WEBRTC_VIDEO_CODEC_OK);
        EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) ==
                    WEBRTC_VIDEO_CODEC_OK);
        _encoder->Encode(_inputVideoBuffer, NULL, NULL);
        frameLength = WaitForEncodedFrame();
        EXPECT_GT(frameLength, 0u);
        // Reset then decode original frame again.
        EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
        frameLength = 0;
        VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
        while (frameLength == 0)
        {
            _decoder->Decode(encodedImage, false, NULL);
            frameLength = WaitForDecodedFrame();
        }
        // check that decoded frame matches with reference
        size_t length = CalcBufferSize(kI420, width, height);
        scoped_ptr<uint8_t[]> decoded_buffer(new uint8_t[length]);
        ExtractBuffer(_decodedVideoBuffer, length, decoded_buffer.get());
        EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), length,
                                    _refDecFrame, _lengthSourceFrame));
    }
    // Release then decode.
    EXPECT_TRUE(_decoder->Release() == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
    frameLength = 0;
    VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
    while (frameLength == 0)
    {
        _decoder->Decode(encodedImage, false, NULL);
        frameLength = WaitForDecodedFrame();
    }
    ExtractBuffer(_decodedVideoBuffer, length, decoded_buffer.get());
    EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), frameLength,
                                _refDecFrame, _lengthSourceFrame));
    _encodedVideoBuffer.SetLength(0);
    delete [] tmpBuf;
    //----- Function tests -----
    int frames = 0;
    // Do not specify maxBitRate (as in ViE).
    _inst.maxBitrate = 0;
    //-- Timestamp propagation --
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
    frames = 0;
    int frameDelay = 0;
    int encTimeStamp;
    _decodedVideoBuffer.set_timestamp(0);
    // Encode/decode the whole sequence, checking that timestamps survive
    // the round trip (allowing for decoder output delay).
    while (fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile) ==
           _lengthSourceFrame)
    {
        _inputVideoBuffer.CreateFrame(size_y, _sourceBuffer,
                                      size_uv, _sourceBuffer + size_y,
                                      size_uv, _sourceBuffer + size_y + size_uv,
                                      width, height,
                                      width, half_width, half_width);
        _inputVideoBuffer.set_timestamp(frames);
        ASSERT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, NULL) ==
                    WEBRTC_VIDEO_CODEC_OK);
        frameLength = WaitForEncodedFrame();
        EXPECT_GT(frameLength, 0u);
        encTimeStamp = _encodedVideoBuffer.TimeStamp();
        EXPECT_TRUE(_inputVideoBuffer.timestamp() ==
                    static_cast<unsigned>(encTimeStamp));
        if (frames == 0) {
            // First frame is always a key frame.
            is_key_frame_ = true;
        }
        if (Decode() == 0)
        {
            frameDelay++;
        }
        encTimeStamp -= frameDelay;
        if (encTimeStamp < 0)
        {
            encTimeStamp = 0;
        }
        EXPECT_TRUE(_decodedVideoBuffer.timestamp() ==
                    static_cast<unsigned>(encTimeStamp));
        frames++;
    }
    ASSERT_TRUE(feof(_sourceFile) != 0);
    rewind(_sourceFile);
    RateControlTests();
    Teardown();
}
// Verifies the encoder's rate control: for each target bitrate the whole
// source clip is encoded and the produced bitrate must land within 12% of
// the target. Requires _encoder/_decoder/_source to be set up by Setup().
void
UnitTest::RateControlTests()
{
    int frames = 0;
    size_t frameLength;
    // Do not specify maxBitRate (as in ViE).
    _inst.maxBitrate = 0;
    // Verify rate control. For this test turn on codec frame dropper.
    // At least one other test (BasicUnitTest) assumes frame dropper off, so
    // for now we only set frame dropper on for this (rate control) test.
    _inst.codecSpecific.VP8.frameDroppingOn = true;
    EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
    EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
    // TODO: should also test target bitrates 0 and 1.
    const int bitRate[] = {100, 500};
    const int nBitrates = sizeof(bitRate) / sizeof(*bitRate);
    printf("\nRate control test\n");
    for (int i = 0; i < nBitrates; i++)
    {
        _bitRate = bitRate[i];
        size_t totalBytes = 0;
        _inst.startBitrate = _bitRate;
        _encoder->InitEncode(&_inst, 4, 1440);
        _decoder->Reset();
        _decoder->InitDecode(&_inst, 1);
        frames = 0;
        // Both arms of the previous if/else on _inst.maxBitrate issued the
        // identical call, so the branch was redundant; always set the target.
        CodecSpecific_SetBitrate(_bitRate, _inst.maxFramerate);
        int width = _source->GetWidth();
        int half_width = (width + 1) / 2;
        int height = _source->GetHeight();
        int half_height = (height + 1) / 2;
        int size_y = width * height;
        int size_uv = half_width * half_height;
        // Encode the entire clip, accumulating the encoded output size.
        while (fread(_sourceBuffer, 1, _lengthSourceFrame, _sourceFile) ==
            _lengthSourceFrame)
        {
            _inputVideoBuffer.CreateFrame(size_y, _sourceBuffer,
                                          size_uv, _sourceBuffer + size_y,
                                          size_uv, _sourceBuffer + size_y +
                                          size_uv,
                                          width, height,
                                          width, half_width, half_width);
            _inputVideoBuffer.set_timestamp(static_cast<uint32_t>(9e4 /
                static_cast<float>(_inst.maxFramerate)));
            ASSERT_EQ(_encoder->Encode(_inputVideoBuffer, NULL, NULL),
                      WEBRTC_VIDEO_CODEC_OK);
            frameLength = WaitForEncodedFrame();
            totalBytes += frameLength;
            frames++;
            _encodedVideoBuffer.SetLength(0);
        }
        // Average output bitrate in kbps over the whole clip.
        uint32_t actualBitrate = static_cast<uint32_t>(
            (totalBytes / frames * _inst.maxFramerate * 8) / 1000);
        printf("Target bitrate: %u kbps, actual bitrate: %u kbps\n", _bitRate,
               actualBitrate);
        // Test for close match over reasonable range.
        EXPECT_LT(abs(static_cast<int32_t>(actualBitrate - _bitRate)),
                  0.12 * _bitRate);
        ASSERT_TRUE(feof(_sourceFile) != 0);
        rewind(_sourceFile);
    }
}
// Two buffers are bit exact iff they have the same length and identical
// byte content.
bool
UnitTest::CheckIfBitExact(const void* ptrA, size_t aLengthBytes,
                          const void* ptrB, size_t bLengthBytes)
{
    return (aLengthBytes == bLengthBytes) &&
           (memcmp(ptrA, ptrB, aLengthBytes) == 0);
}

View File

@ -1,106 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_UNIT_TEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_UNIT_TEST_H_
#include "webrtc/modules/video_coding/codecs/test_framework/test.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
// Disable "conditional expression is constant" warnings on the perfectly
// acceptable
// do { ... } while (0) constructions below.
// Refer to http://stackoverflow.com/questions/1946445/
// is-there-better-way-to-write-do-while0-construct-to-avoid-compiler-warnings
// for some discussion of the issue.
#ifdef _WIN32
#pragma warning(disable : 4127)
#endif
class VideoSource;
class UnitTestEncodeCompleteCallback;
class UnitTestDecodeCompleteCallback;
// Generic codec unit test: drives an encoder/decoder pair through
// initialization, encode/decode round trips and rate control checks.
class UnitTest : public CodecTest
{
public:
    UnitTest();
    virtual ~UnitTest();
    virtual void Setup();
    virtual void Perform();
    virtual void Print();

protected:
    UnitTest(std::string name, std::string description);
    // Sets the codec target bitrate; the frame rate argument is unused.
    virtual uint32_t CodecSpecific_SetBitrate(
        uint32_t bitRate,
        uint32_t /* frameRate */);
    virtual void Teardown();
    // Encodes the source clip at several target bitrates and verifies the
    // actual output rate stays close to each target.
    virtual void RateControlTests();
    virtual int Decode();
    virtual int DecodeWithoutAssert();
    // Hook for codec-specific decoder setup; the default is a no-op.
    virtual int SetCodecSpecificParameters() {return 0;};
    // True iff the two buffers have equal length and identical bytes.
    virtual bool CheckIfBitExact(const void *ptrA, size_t aLengthBytes,
                                 const void *ptrB, size_t bLengthBytes);
    // Poll the completion callbacks, bounded by kMaxWaitEncTimeMs /
    // kMaxWaitDecTimeMs; presumably return the frame length in bytes and 0
    // on timeout — confirm against the .cc implementation.
    size_t WaitForEncodedFrame() const;
    size_t WaitForDecodedFrame() const;

    int _tests;
    int _errors;
    VideoSource* _source;           // Description of the raw input clip.
    unsigned char* _refFrame;       // Reference raw frame.
    unsigned char* _refEncFrame;    // Reference encoded frame.
    unsigned char* _refDecFrame;    // Reference decoded frame.
    size_t _refEncFrameLength;
    FILE* _sourceFile;              // Raw YUV input file.
    bool is_key_frame_;
    UnitTestEncodeCompleteCallback* _encodeCompleteCallback;
    UnitTestDecodeCompleteCallback* _decodeCompleteCallback;
    // Upper bounds (ms) for waiting on encode/decode completion.
    enum { kMaxWaitEncTimeMs = 100 };
    enum { kMaxWaitDecTimeMs = 25 };
};
// Encode-complete callback that delivers each encoded frame into a
// caller-owned VideoFrame and records the encoded frame type.
class UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback
{
public:
    // Does not take ownership of |buffer|. The trailing two parameters are
    // unused by this implementation.
    UnitTestEncodeCompleteCallback(webrtc::VideoFrame* buffer,
                                   uint32_t decoderSpecificSize = 0,
                                   void* decoderSpecificInfo = NULL) :
        _encodedVideoBuffer(buffer),
        _encodeComplete(false) {}
    int32_t Encoded(const webrtc::EncodedImage& encodedImage,
                    const webrtc::CodecSpecificInfo* codecSpecificInfo,
                    const webrtc::RTPFragmentationHeader* fragmentation);
    // True once Encoded() has been invoked.
    bool EncodeComplete();
    // Note that this only makes sense if an encode has been completed
    webrtc::VideoFrameType EncodedFrameType() const;
private:
    webrtc::VideoFrame* _encodedVideoBuffer;  // Not owned.
    bool _encodeComplete;
    webrtc::VideoFrameType _encodedFrameType;
};
// Decode-complete callback that delivers decoded frames into a caller-owned
// I420VideoFrame.
class UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback
{
public:
    // Does not take ownership of |buffer|.
    UnitTestDecodeCompleteCallback(webrtc::I420VideoFrame* buffer) :
        _decodedVideoBuffer(buffer), _decodeComplete(false) {}
    int32_t Decoded(webrtc::I420VideoFrame& image);
    // True once Decoded() has been invoked.
    bool DecodeComplete();
private:
    webrtc::I420VideoFrame* _decodedVideoBuffer;  // Not owned.
    bool _decodeComplete;
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_UNIT_TEST_H_

View File

@ -1,425 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/codecs/test_framework/video_source.h"
#include <stdio.h>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/test/testsupport/fileutils.h"
// Default source: the foreman CIF (352x288, I420, 30 fps) clip from the
// bundled test resources.
VideoSource::VideoSource()
    : _fileName(webrtc::test::ProjectRootPath() + "resources/foreman_cif.yuv"),
      _width(352),
      _height(288),
      _type(webrtc::kI420),
      _frameRate(30)
{
}
// Source described by a symbolic VideoSize; pixel dimensions are derived
// from the size enumerator via GetWidthHeight().
VideoSource::VideoSource(std::string fileName, VideoSize size,
                         int frameRate /*= 30*/,
                         webrtc::VideoType type /*= webrtc::kI420*/)
    : _fileName(fileName),
      _type(type),
      _frameRate(frameRate)
{
    assert(size != kUndefined && size != kNumberOfVideoSizes);
    assert(type != webrtc::kUnknown);
    assert(frameRate > 0);
    // Every valid enumerator must map to concrete dimensions.
    const int lookupFailed = GetWidthHeight(size, _width, _height);
    assert(lookupFailed == 0);
    (void) lookupFailed;
}
// Source described by explicit pixel dimensions rather than a VideoSize
// label.
VideoSource::VideoSource(std::string fileName, int width, int height,
                         int frameRate /*= 30*/,
                         webrtc::VideoType type /*= webrtc::kI420*/)
    : _fileName(fileName),
      _width(width),
      _height(height),
      _type(type),
      _frameRate(frameRate)
{
    // Dimensions, format and rate must all be valid.
    assert(width > 0);
    assert(height > 0);
    assert(type != webrtc::kUnknown);
    assert(frameRate > 0);
}
// Symbolic size of this source, derived from its own pixel dimensions.
VideoSize VideoSource::GetSize() const
{
    return GetSize(_width, _height);
}
// Maps pixel dimensions back to the matching VideoSize enumerator, or
// kUndefined when no enumerator matches.
//
// The previous hand-written if/else chain only covered a subset of the
// enumerators (kCGA, kSIF, kW288p, k448p, k432p, kW432p, k4SIF, kW448p,
// kFW448p, kWVGA, kW544p and kW576p were missing). Deriving the answer from
// GetWidthHeight() keeps the forward and reverse mappings consistent by
// construction.
VideoSize
VideoSource::GetSize(uint16_t width, uint16_t height)
{
    for (int i = kUndefined + 1; i < kNumberOfVideoSizes; i++)
    {
        VideoSize size = static_cast<VideoSize>(i);
        int w = 0;
        int h = 0;
        if (GetWidthHeight(size, w, h) == 0 && w == width && h == height)
        {
            return size;
        }
    }
    return kUndefined;
}
// Byte size of a single raw frame of this source.
size_t VideoSource::GetFrameLength() const
{
    return webrtc::CalcBufferSize(_type, _width, _height);
}
const char*
VideoSource::GetMySizeString() const
{
return VideoSource::GetSizeString(GetSize());
}
// Human-readable name for a VideoSize enumerator; "Undefined" for
// kUndefined/kNumberOfVideoSizes.
//
// The previous switch lacked cases for twelve enumerators (kCGA, kSIF,
// kW288p, k448p, k432p, kW432p, k4SIF, kW448p, kFW448p, kWVGA, kW544p,
// kW576p), which all fell through to "Undefined"; they now report their
// proper names, consistent with the VideoSize enum.
const char*
VideoSource::GetSizeString(VideoSize size)
{
    switch (size)
    {
        case kSQCIF:   return "SQCIF";
        case kQQVGA:   return "QQVGA";
        case kQCIF:    return "QCIF";
        case kCGA:     return "CGA";
        case kQVGA:    return "QVGA";
        case kSIF:     return "SIF";
        case kWQVGA:   return "WQVGA";
        case kCIF:     return "CIF";
        case kW288p:   return "W288p";
        case k448p:    return "448p";
        case kVGA:     return "VGA";
        case k432p:    return "432p";
        case kW432p:   return "W432p";
        case k4SIF:    return "4SIF";
        case kW448p:   return "W448p";
        case kNTSC:    return "NTSC";
        case kFW448p:  return "FW448p";
        case kWVGA:    return "WVGA";
        case k4CIF:    return "4CIF";
        case kSVGA:    return "SVGA";
        case kW544p:   return "W544p";
        case kW576p:   return "W576p";
        case kHD:      return "HD";
        case kXGA:     return "XGA";
        case kWHD:     return "WHD";
        case kFullHD:  return "Full_HD";
        case kWFullHD: return "WFull_HD";
        default:       return "Undefined";
    }
}
// Directory part of the file name, without a trailing slash; "." when the
// name contains no path separator.
std::string
VideoSource::GetFilePath() const
{
    const size_t separator = _fileName.find_last_of("/\\");
    return (separator == std::string::npos)
        ? "."
        : _fileName.substr(0, separator);
}
// Base name of the source file: directory prefix stripped, and everything
// from the first of the last '_' or last '.' onwards removed.
std::string
VideoSource::GetName() const
{
    size_t start = _fileName.find_last_of("/\\");
    start = (start == std::string::npos) ? 0 : start + 1;
    const size_t cut = std::min(_fileName.find_last_of("_"),
                                _fileName.find_last_of("."));
    return _fileName.substr(start, cut - start);
}
// Writes a (possibly temporally downsampled) copy of this source to the
// target's file. Only same-resolution I420 -> I420 with frame dropping is
// supported. Unless |force| is set, the target file is reused when it
// already exists or when the target is identical to this source.
void
VideoSource::Convert(const VideoSource &target, bool force /* = false */) const
{
    // Ensure target rate is less than or equal to source
    // (i.e. we are only temporally downsampling).
    ASSERT_TRUE(target.GetFrameRate() <= _frameRate);
    // Only supports YUV420 currently.
    ASSERT_TRUE(_type == webrtc::kI420 && target.GetType() == webrtc::kI420);
    if (!force && (FileExists(target.GetFileName().c_str()) ||
        (target.GetWidth() == _width && target.GetHeight() == _height &&
         target.GetFrameRate() == _frameRate)))
    {
        // Assume that the filename uniquely defines the content.
        // If the file already exists, it is the correct file.
        return;
    }
    // Scaling is not implemented, so input and output dimensions must match.
    // (Checked once here instead of once per frame.)
    ASSERT_TRUE(target.GetWidth() == _width && target.GetHeight() == _height);
    FILE* inFile = fopen(_fileName.c_str(), "rb");
    ASSERT_TRUE(inFile != NULL);
    FILE* outFile = fopen(target.GetFileName().c_str(), "wb");
    ASSERT_TRUE(outFile != NULL);
    FrameDropper fd;
    fd.SetFrameRate(target.GetFrameRate(), _frameRate);
    const size_t lengthInFrame = webrtc::CalcBufferSize(_type, _width, _height);
    ASSERT_TRUE(lengthInFrame > 0);
    unsigned char* inFrame = new unsigned char[lengthInFrame];
    bool writeFailed = false;
    while (!writeFailed &&
           fread(inFrame, 1, lengthInFrame, inFile) == lengthInFrame)
    {
        if (!fd.DropFrame())
        {
            // The previous implementation wrote an uninitialized output
            // buffer; since dimensions and type match, pass the input frame
            // through unmodified.
            if (fwrite(inFrame, 1, lengthInFrame, outFile) != lengthInFrame)
            {
                // Stop on a short write, but fall through to the cleanup
                // below (the old early return leaked the buffer and left
                // both files open).
                writeFailed = true;
            }
        }
    }
    // new[] must be paired with delete[]; plain delete here was undefined
    // behavior in the previous implementation.
    delete [] inFrame;
    fclose(inFile);
    fclose(outFile);
}
// True when |fileName| can be opened for reading.
bool VideoSource::FileExists(const char* fileName)
{
    FILE* file = fopen(fileName, "rb");
    if (file == NULL)
    {
        return false;
    }
    fclose(file);
    return true;
}
// Translates a VideoSize enumerator into pixel dimensions.
// Returns 0 on success and -1 for values with no defined dimensions
// (kUndefined, kNumberOfVideoSizes).
int
VideoSource::GetWidthHeight( VideoSize size, int & width, int& height)
{
    switch (size)
    {
        case kSQCIF:   width = 128;  height = 96;   return 0;
        case kQQVGA:   width = 160;  height = 120;  return 0;
        case kQCIF:    width = 176;  height = 144;  return 0;
        case kCGA:     width = 320;  height = 200;  return 0;
        case kQVGA:    width = 320;  height = 240;  return 0;
        case kSIF:     width = 352;  height = 240;  return 0;
        case kWQVGA:   width = 400;  height = 240;  return 0;
        case kCIF:     width = 352;  height = 288;  return 0;
        case kW288p:   width = 512;  height = 288;  return 0;
        case k448p:    width = 576;  height = 448;  return 0;
        case kVGA:     width = 640;  height = 480;  return 0;
        case k432p:    width = 720;  height = 432;  return 0;
        case kW432p:   width = 768;  height = 432;  return 0;
        case k4SIF:    width = 704;  height = 480;  return 0;
        case kW448p:   width = 768;  height = 448;  return 0;
        case kNTSC:    width = 720;  height = 480;  return 0;
        case kFW448p:  width = 800;  height = 448;  return 0;
        case kWVGA:    width = 800;  height = 480;  return 0;
        case k4CIF:    width = 704;  height = 576;  return 0;
        case kSVGA:    width = 800;  height = 600;  return 0;
        case kW544p:   width = 960;  height = 544;  return 0;
        case kW576p:   width = 1024; height = 576;  return 0;
        case kHD:      width = 960;  height = 720;  return 0;
        case kXGA:     width = 1024; height = 768;  return 0;
        case kFullHD:  width = 1440; height = 1080; return 0;
        case kWHD:     width = 1280; height = 720;  return 0;
        case kWFullHD: width = 1920; height = 1080; return 0;
        default:       return -1;
    }
}
// Starts with dropping disabled: every frame is kept until SetFrameRate()
// configures a drop interval.
FrameDropper::FrameDropper()
    : _dropsBetweenRenders(0),
      _frameCounter(0)
{
}
// Returns false for each frame that should be kept and true for the
// _dropsBetweenRenders frames that follow it.
bool
FrameDropper::DropFrame()
{
    if (++_frameCounter > _dropsBetweenRenders)
    {
        // Keep this frame and restart the drop cycle.
        _frameCounter = 0;
        return false;
    }
    return true;
}
// Number of frames dropped between two kept frames.
unsigned int FrameDropper::DropsBetweenRenders()
{
    return _dropsBetweenRenders;
}
// Configures dropping so that roughly frameRate of maxFrameRate frames are
// kept. Rates below 1 fps disable dropping entirely.
void
FrameDropper::SetFrameRate(double frameRate, double maxFrameRate)
{
    _dropsBetweenRenders = (frameRate >= 1.0)
        ? static_cast<unsigned int>(maxFrameRate / frameRate + 0.5) - 1
        : 0;
}

View File

@ -1,108 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_SOURCE_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_SOURCE_H_
#include <string>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
// Symbolic names for common video resolutions. Each comment gives
// width*height and the resulting pixel count.
enum VideoSize
{
    kUndefined,
    kSQCIF,     // 128*96 = 12 288
    kQQVGA,     // 160*120 = 19 200
    kQCIF,      // 176*144 = 25 344
    kCGA,       // 320*200 = 64 000
    kQVGA,      // 320*240 = 76 800
    kSIF,       // 352*240 = 84 480
    kWQVGA,     // 400*240 = 96 000
    kCIF,       // 352*288 = 101 376
    kW288p,     // 512*288 = 147 456 (WCIF)
    k448p,      // 576*448 = 281 088
    kVGA,       // 640*480 = 307 200
    k432p,      // 720*432 = 311 040
    kW432p,     // 768*432 = 331 776
    k4SIF,      // 704*480 = 337 920
    kW448p,     // 768*448 = 344 064
    kNTSC,      // 720*480 = 345 600
    kFW448p,    // 800*448 = 358 400
    kWVGA,      // 800*480 = 384 000
    k4CIF,      // 704*576 = 405 504
    kSVGA,      // 800*600 = 480 000
    kW544p,     // 960*544 = 522 240
    kW576p,     // 1024*576 = 589 824 (W4CIF)
    kHD,        // 960*720 = 691 200
    kXGA,       // 1024*768 = 786 432
    kWHD,       // 1280*720 = 921 600
    kFullHD,    // 1440*1080 = 1 555 200
    kWFullHD,   // 1920*1080 = 2 073 600

    kNumberOfVideoSizes
};
// Describes a raw video clip on disk: file name, pixel dimensions, pixel
// format and frame rate. Also provides conversion (temporal downsampling)
// to another VideoSource description.
class VideoSource
{
public:
    // Defaults to the bundled foreman CIF clip (see the .cc file).
    VideoSource();
    VideoSource(std::string fileName, VideoSize size, int frameRate = 30,
                webrtc::VideoType type = webrtc::kI420);
    VideoSource(std::string fileName, int width, int height, int frameRate = 30,
                webrtc::VideoType type = webrtc::kI420);

    // Simple accessors for the clip description.
    std::string GetFileName() const { return _fileName; }
    int GetWidth() const { return _width; }
    int GetHeight() const { return _height; }
    webrtc::VideoType GetType() const { return _type; }
    int GetFrameRate() const { return _frameRate; }

    // Returns the file path without a trailing slash.
    std::string GetFilePath() const;

    // Returns the filename with the path (including the leading slash) removed.
    std::string GetName() const;

    // Symbolic size of this clip / of arbitrary dimensions.
    VideoSize GetSize() const;
    static VideoSize GetSize(uint16_t width, uint16_t height);
    // Byte size of one raw frame.
    size_t GetFrameLength() const;

    // Returns a human-readable size string.
    static const char* GetSizeString(VideoSize size);
    const char* GetMySizeString() const;

    // Opens the video source, converting and writing to the specified target.
    // If force is true, the conversion will be done even if the target file
    // already exists.
    void Convert(const VideoSource& target, bool force = false) const;
    // True when fileName can be opened for reading.
    static bool FileExists(const char* fileName);

private:
    // Maps a VideoSize enumerator to pixel dimensions; 0 on success.
    static int GetWidthHeight( VideoSize size, int& width, int& height);

    std::string _fileName;
    int _width;
    int _height;
    webrtc::VideoType _type;
    int _frameRate;
};
// Decides which frames to skip when temporally downsampling a stream from
// maxFrameRate to a lower frameRate.
class FrameDropper
{
public:
    FrameDropper();
    // True when the current frame should be skipped.
    bool DropFrame();
    // Number of frames dropped between two kept frames.
    unsigned int DropsBetweenRenders();
    // Configures the drop interval; frameRate below 1.0 disables dropping
    // (see the .cc implementation).
    void SetFrameRate(double frameRate, double maxFrameRate);

private:
    unsigned int _dropsBetweenRenders;  // Frames to drop per kept frame.
    unsigned int _frameCounter;         // Frames seen since the last keep.
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_VIDEO_SOURCE_H_

View File

@ -8,9 +8,10 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
#include <stdio.h>
#include "testing/gtest/include/gtest/gtest.h" #include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/video_coding/codecs/test_framework/unit_test.h" #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/test_framework/video_source.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h" #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h" #include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/tick_util.h" #include "webrtc/system_wrappers/interface/tick_util.h"
@ -28,37 +29,40 @@ static const int64_t kTestNtpTimeMs = 456;
// TODO(mikhal): Replace these with mocks. // TODO(mikhal): Replace these with mocks.
class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback { class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
public: public:
Vp8UnitTestEncodeCompleteCallback(VideoFrame* frame, Vp8UnitTestEncodeCompleteCallback(EncodedImage* frame,
unsigned int decoderSpecificSize, unsigned int decoderSpecificSize,
void* decoderSpecificInfo) void* decoderSpecificInfo)
: encoded_video_frame_(frame), : encoded_frame_(frame), encode_complete_(false) {}
encode_complete_(false) {}
int Encoded(const EncodedImage& encodedImage, virtual int Encoded(const EncodedImage& encoded_frame_,
const CodecSpecificInfo* codecSpecificInfo, const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader*); const RTPFragmentationHeader*);
bool EncodeComplete(); bool EncodeComplete();
// Note that this only makes sense if an encode has been completed
VideoFrameType EncodedFrameType() const {return encoded_frame_type_;}
private: private:
VideoFrame* encoded_video_frame_; EncodedImage* const encoded_frame_;
scoped_ptr<uint8_t[]> frame_buffer_;
bool encode_complete_; bool encode_complete_;
VideoFrameType encoded_frame_type_;
}; };
int Vp8UnitTestEncodeCompleteCallback::Encoded(const EncodedImage& encodedImage, int Vp8UnitTestEncodeCompleteCallback::Encoded(
const EncodedImage& encoded_frame,
const CodecSpecificInfo* codecSpecificInfo, const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation) { const RTPFragmentationHeader* fragmentation) {
encoded_video_frame_->VerifyAndAllocate(encodedImage._size); if (encoded_frame_->_size < encoded_frame._length) {
encoded_video_frame_->CopyFrame(encodedImage._size, encodedImage._buffer); delete[] encoded_frame_->_buffer;
encoded_video_frame_->SetLength(encodedImage._length); frame_buffer_.reset(new uint8_t[encoded_frame._length]);
// TODO(mikhal): Update frame type API. encoded_frame_->_buffer = frame_buffer_.get();
// encoded_video_frame_->SetFrameType(encodedImage._frameType); encoded_frame_->_size = encoded_frame._length;
encoded_video_frame_->SetWidth(encodedImage._encodedWidth); }
encoded_video_frame_->SetHeight(encodedImage._encodedHeight); memcpy(encoded_frame_->_buffer, encoded_frame._buffer, encoded_frame._length);
encoded_video_frame_->SetTimeStamp(encodedImage._timeStamp); encoded_frame_->_length = encoded_frame._length;
encoded_frame_->_encodedWidth = encoded_frame._encodedWidth;
encoded_frame_->_encodedHeight = encoded_frame._encodedHeight;
encoded_frame_->_timeStamp = encoded_frame._timeStamp;
encoded_frame_->_frameType = encoded_frame._frameType;
encoded_frame_->_completeFrame = encoded_frame._completeFrame;
encode_complete_ = true; encode_complete_ = true;
encoded_frame_type_ = encodedImage._frameType;
return 0; return 0;
} }
@ -73,12 +77,12 @@ bool Vp8UnitTestEncodeCompleteCallback::EncodeComplete() {
class Vp8UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback { class Vp8UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback {
public: public:
explicit Vp8UnitTestDecodeCompleteCallback(I420VideoFrame* frame) explicit Vp8UnitTestDecodeCompleteCallback(I420VideoFrame* frame)
: decoded_video_frame_(frame), : decoded_frame_(frame), decode_complete(false) {}
decode_complete(false) {}
int Decoded(webrtc::I420VideoFrame& frame); int Decoded(webrtc::I420VideoFrame& frame);
bool DecodeComplete(); bool DecodeComplete();
private: private:
I420VideoFrame* decoded_video_frame_; I420VideoFrame* decoded_frame_;
bool decode_complete; bool decode_complete;
}; };
@ -91,7 +95,7 @@ bool Vp8UnitTestDecodeCompleteCallback::DecodeComplete() {
} }
int Vp8UnitTestDecodeCompleteCallback::Decoded(I420VideoFrame& image) { int Vp8UnitTestDecodeCompleteCallback::Decoded(I420VideoFrame& image) {
decoded_video_frame_->CopyFrame(image); decoded_frame_->CopyFrame(image);
decode_complete = true; decode_complete = true;
return 0; return 0;
} }
@ -102,25 +106,26 @@ class TestVp8Impl : public ::testing::Test {
encoder_.reset(VP8Encoder::Create()); encoder_.reset(VP8Encoder::Create());
decoder_.reset(VP8Decoder::Create()); decoder_.reset(VP8Decoder::Create());
memset(&codec_inst_, 0, sizeof(codec_inst_)); memset(&codec_inst_, 0, sizeof(codec_inst_));
encode_complete_callback_.reset(new encode_complete_callback_.reset(
Vp8UnitTestEncodeCompleteCallback(&encoded_video_frame_, 0, NULL)); new Vp8UnitTestEncodeCompleteCallback(&encoded_frame_, 0, NULL));
decode_complete_callback_.reset(new decode_complete_callback_.reset(
Vp8UnitTestDecodeCompleteCallback(&decoded_video_frame_)); new Vp8UnitTestDecodeCompleteCallback(&decoded_frame_));
encoder_->RegisterEncodeCompleteCallback(encode_complete_callback_.get()); encoder_->RegisterEncodeCompleteCallback(encode_complete_callback_.get());
decoder_->RegisterDecodeCompleteCallback(decode_complete_callback_.get()); decoder_->RegisterDecodeCompleteCallback(decode_complete_callback_.get());
// Using a QCIF image (aligned stride (u,v planes) > width). // Using a QCIF image (aligned stride (u,v planes) > width).
// Processing only one frame. // Processing only one frame.
const VideoSource source(test::ResourcePath("paris_qcif", "yuv"), kQCIF); length_source_frame_ = CalcBufferSize(kI420, kWidth, kHeight);
length_source_frame_ = source.GetFrameLength();
source_buffer_.reset(new uint8_t[length_source_frame_]); source_buffer_.reset(new uint8_t[length_source_frame_]);
source_file_ = fopen(source.GetFileName().c_str(), "rb"); source_file_ = fopen(test::ResourcePath("paris_qcif", "yuv").c_str(), "rb");
ASSERT_TRUE(source_file_ != NULL); ASSERT_TRUE(source_file_ != NULL);
// Set input frame. // Set input frame.
ASSERT_EQ(fread(source_buffer_.get(), 1, length_source_frame_, ASSERT_EQ(
source_file_), length_source_frame_); fread(source_buffer_.get(), 1, length_source_frame_, source_file_),
codec_inst_.width = source.GetWidth(); length_source_frame_);
codec_inst_.height = source.GetHeight(); codec_inst_.width = kWidth;
codec_inst_.maxFramerate = source.GetFrameRate(); codec_inst_.height = kHeight;
const int kFramerate = 30;
codec_inst_.maxFramerate = kFramerate;
// Setting aligned stride values. // Setting aligned stride values.
int stride_uv = 0; int stride_uv = 0;
int stride_y = 0; int stride_y = 0;
@ -132,9 +137,9 @@ class TestVp8Impl : public ::testing::Test {
stride_y, stride_uv, stride_uv); stride_y, stride_uv, stride_uv);
input_frame_.set_timestamp(kTestTimestamp); input_frame_.set_timestamp(kTestTimestamp);
// Using ConvertToI420 to add stride to the image. // Using ConvertToI420 to add stride to the image.
EXPECT_EQ(0, ConvertToI420(kI420, source_buffer_.get(), 0, 0, EXPECT_EQ(
codec_inst_.width, codec_inst_.height, 0, ConvertToI420(kI420, source_buffer_.get(), 0, 0, codec_inst_.width,
0, kRotateNone, &input_frame_)); codec_inst_.height, 0, kRotateNone, &input_frame_));
} }
void SetUpEncodeDecode() { void SetUpEncodeDecode() {
@ -152,7 +157,7 @@ class TestVp8Impl : public ::testing::Test {
int64_t startTime = TickTime::MillisecondTimestamp(); int64_t startTime = TickTime::MillisecondTimestamp();
while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitEncTimeMs) { while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitEncTimeMs) {
if (encode_complete_callback_->EncodeComplete()) { if (encode_complete_callback_->EncodeComplete()) {
return encoded_video_frame_.Length(); return encoded_frame_._length;
} }
} }
return 0; return 0;
@ -162,22 +167,15 @@ class TestVp8Impl : public ::testing::Test {
int64_t startTime = TickTime::MillisecondTimestamp(); int64_t startTime = TickTime::MillisecondTimestamp();
while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitDecTimeMs) { while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitDecTimeMs) {
if (decode_complete_callback_->DecodeComplete()) { if (decode_complete_callback_->DecodeComplete()) {
return CalcBufferSize(kI420, decoded_video_frame_.width(), return CalcBufferSize(kI420, decoded_frame_.width(),
decoded_video_frame_.height()); decoded_frame_.height());
} }
} }
return 0; return 0;
} }
void VideoFrameToEncodedImage(VideoFrame& frame, EncodedImage &image) { const int kWidth = 172;
image._buffer = frame.Buffer(); const int kHeight = 144;
image._length = frame.Length();
image._size = frame.Size();
image._timeStamp = frame.TimeStamp();
image._encodedWidth = frame.Width();
image._encodedHeight = frame.Height();
image._completeFrame = true;
}
scoped_ptr<Vp8UnitTestEncodeCompleteCallback> encode_complete_callback_; scoped_ptr<Vp8UnitTestEncodeCompleteCallback> encode_complete_callback_;
scoped_ptr<Vp8UnitTestDecodeCompleteCallback> decode_complete_callback_; scoped_ptr<Vp8UnitTestDecodeCompleteCallback> decode_complete_callback_;
@ -186,29 +184,12 @@ class TestVp8Impl : public ::testing::Test {
I420VideoFrame input_frame_; I420VideoFrame input_frame_;
scoped_ptr<VideoEncoder> encoder_; scoped_ptr<VideoEncoder> encoder_;
scoped_ptr<VideoDecoder> decoder_; scoped_ptr<VideoDecoder> decoder_;
VideoFrame encoded_video_frame_; EncodedImage encoded_frame_;
I420VideoFrame decoded_video_frame_; I420VideoFrame decoded_frame_;
size_t length_source_frame_; size_t length_source_frame_;
VideoCodec codec_inst_; VideoCodec codec_inst_;
}; };
// Disabled on MemorySanitizer as it's breaking on generic libvpx.
// https://code.google.com/p/webrtc/issues/detail?id=3904
#if defined(MEMORY_SANITIZER)
TEST_F(TestVp8Impl, DISABLED_BaseUnitTest) {
#else
TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(BaseUnitTest)) {
#endif
// TODO(mikhal): Remove dependency. Move all test code here.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
UnitTest unittest;
unittest.SetEncoder(encoder_.get());
unittest.SetDecoder(decoder_.get());
unittest.Setup();
unittest.Perform();
unittest.Print();
}
TEST_F(TestVp8Impl, EncoderParameterTest) { TEST_F(TestVp8Impl, EncoderParameterTest) {
strncpy(codec_inst_.plName, "VP8", 31); strncpy(codec_inst_.plName, "VP8", 31);
codec_inst_.plType = 126; codec_inst_.plType = 126;
@ -227,8 +208,7 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED, EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
encoder_->SetRates(bit_rate, codec_inst_.maxFramerate)); encoder_->SetRates(bit_rate, codec_inst_.maxFramerate));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->InitEncode(&codec_inst_, 1, 1440));
encoder_->InitEncode(&codec_inst_, 1, 1440));
// Decoder parameter tests. // Decoder parameter tests.
// Calls before InitDecode(). // Calls before InitDecode().
@ -240,39 +220,54 @@ TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
SetUpEncodeDecode(); SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL); encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u); EXPECT_GT(WaitForEncodedFrame(), 0u);
EncodedImage encodedImage;
VideoFrameToEncodedImage(encoded_video_frame_, encodedImage);
// First frame should be a key frame. // First frame should be a key frame.
encodedImage._frameType = kKeyFrame; encoded_frame_._frameType = kKeyFrame;
encodedImage.ntp_time_ms_ = kTestNtpTimeMs; encoded_frame_.ntp_time_ms_ = kTestNtpTimeMs;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encodedImage, false, NULL)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame_, false, NULL));
EXPECT_GT(WaitForDecodedFrame(), 0u); EXPECT_GT(WaitForDecodedFrame(), 0u);
// Compute PSNR on all planes (faster than SSIM). // Compute PSNR on all planes (faster than SSIM).
EXPECT_GT(I420PSNR(&input_frame_, &decoded_video_frame_), 36); EXPECT_GT(I420PSNR(&input_frame_, &decoded_frame_), 36);
EXPECT_EQ(kTestTimestamp, decoded_video_frame_.timestamp()); EXPECT_EQ(kTestTimestamp, decoded_frame_.timestamp());
EXPECT_EQ(kTestNtpTimeMs, decoded_video_frame_.ntp_time_ms()); EXPECT_EQ(kTestNtpTimeMs, decoded_frame_.ntp_time_ms());
} }
TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) { TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) {
SetUpEncodeDecode(); SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL); encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u); EXPECT_GT(WaitForEncodedFrame(), 0u);
EncodedImage encodedImage;
VideoFrameToEncodedImage(encoded_video_frame_, encodedImage);
// Setting complete to false -> should return an error. // Setting complete to false -> should return an error.
encodedImage._completeFrame = false; encoded_frame_._completeFrame = false;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR, EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encodedImage, false, NULL)); decoder_->Decode(encoded_frame_, false, NULL));
// Setting complete back to true. Forcing a delta frame. // Setting complete back to true. Forcing a delta frame.
encodedImage._frameType = kDeltaFrame; encoded_frame_._frameType = kDeltaFrame;
encodedImage._completeFrame = true; encoded_frame_._completeFrame = true;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR, EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
decoder_->Decode(encodedImage, false, NULL)); decoder_->Decode(encoded_frame_, false, NULL));
// Now setting a key frame. // Now setting a key frame.
encodedImage._frameType = kKeyFrame; encoded_frame_._frameType = kKeyFrame;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encodedImage, false, NULL)); decoder_->Decode(encoded_frame_, false, NULL));
EXPECT_GT(I420PSNR(&input_frame_, &decoded_video_frame_), 36); EXPECT_GT(I420PSNR(&input_frame_, &decoded_frame_), 36);
}
TEST_F(TestVp8Impl, TestReset) {
SetUpEncodeDecode();
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
EXPECT_EQ(0, decoder_->Decode(encoded_frame_, false, NULL));
size_t length = CalcBufferSize(kI420, kWidth, kHeight);
scoped_ptr<uint8_t[]> first_frame_buffer(new uint8_t[length]);
ExtractBuffer(decoded_frame_, length, first_frame_buffer.get());
EXPECT_EQ(0, decoder_->Reset());
EXPECT_EQ(0, decoder_->Decode(encoded_frame_, false, NULL));
scoped_ptr<uint8_t[]> second_frame_buffer(new uint8_t[length]);
ExtractBuffer(decoded_frame_, length, second_frame_buffer.get());
EXPECT_EQ(
0, memcmp(second_frame_buffer.get(), first_frame_buffer.get(), length));
} }
} // namespace webrtc } // namespace webrtc