Remove avi recorder and corresponding enable_video flags.

R=mflodman@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/42099004

Cr-Commit-Position: refs/heads/master@{#8554}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8554 4adac7df-926f-26a2-2b94-8c16560cd09d
andresp@webrtc.org 2015-03-02 13:07:02 +00:00
parent f56c162310
commit e8f50df6b9
28 changed files with 61 additions and 4212 deletions


@@ -83,10 +83,6 @@ config("common_config") {
all_dependent_configs = [ "dbus-glib" ]
}
if (rtc_enable_video) {
defines += [ "WEBRTC_MODULE_UTILITY_VIDEO" ]
}
if (build_with_chromium) {
defines += [ "LOGGING_INSIDE_WEBRTC" ]
} else {


@@ -73,11 +73,6 @@
# Remote bitrate estimator logging/plotting.
'enable_bwe_test_logging%': 0,
# Adds video support to dependencies shared by voice and video engine.
# This should normally be enabled; the intended use is to disable only
# when building voice engine exclusively.
'enable_video%': 1,
# Selects fixed-point code where possible.
'prefer_fixed_point%': 0,
@@ -188,9 +183,6 @@
['rtc_relative_path==1', {
'defines': ['EXPAT_RELATIVE_PATH',],
}],
['enable_video==1', {
'defines': ['WEBRTC_MODULE_UTILITY_VIDEO',],
}],
['build_with_chromium==1', {
'defines': [
# Changes settings for Chromium build.


@@ -26,11 +26,6 @@ declare_args() {
# library that comes with WebRTC (i.e. rtc_build_ssl == 0).
rtc_ssl_root = ""
# Adds video support to dependencies shared by voice and video engine.
# This should normally be enabled; the intended use is to disable only
# when building voice engine exclusively.
rtc_enable_video = true
# Selects fixed-point code where possible.
rtc_prefer_fixed_point = false


@@ -137,7 +137,6 @@ enum FileFormats
{
kFileFormatWavFile = 1,
kFileFormatCompressedFile = 2,
kFileFormatAviFile = 3,
kFileFormatPreencodedFile = 4,
kFileFormatPcm16kHzFile = 7,
kFileFormatPcm8kHzFile = 8,


@@ -22,7 +22,6 @@ const uint32_t kPulsePeriodMs = 1000;
const uint32_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
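// (96000 samples/s * 0.01 s * 2 channels * 2 bytes per sample = 3840.)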
class AudioDeviceObserver;
class MediaFile;
class AudioDeviceBuffer
{


@@ -16,8 +16,6 @@ source_set("media_file") {
sources = [
"interface/media_file.h",
"interface/media_file_defines.h",
"source/avi_file.cc",
"source/avi_file.h",
"source/media_file_impl.cc",
"source/media_file_impl.h",
"source/media_file_utility.cc",


@@ -38,14 +38,6 @@ public:
int8_t* audioBuffer,
size_t& dataLengthInBytes) = 0;
// Put one video frame into videoBuffer. dataLengthInBytes is both an input
// and output parameter. As input parameter it indicates the size of
// videoBuffer. As output parameter it indicates the number of bytes written
// to videoBuffer.
virtual int32_t PlayoutAVIVideoData(
int8_t* videoBuffer,
size_t& dataLengthInBytes) = 0;
// Put 10-60ms, depending on codec frame size, of audio data from file into
// audioBufferLeft and audioBufferRight. The buffers contain the left and
// right channel of played out stereo audio.
@@ -82,16 +74,6 @@ public:
const uint32_t startPointMs = 0,
const uint32_t stopPointMs = 0) = 0;
// Open the file specified by fileName for reading (relative path is
// allowed). If loop is true the file will be played until StopPlaying() is
// called. When end of file is reached the file is read from the start.
// format specifies the type of file fileName refers to. Only video will be
// read if videoOnly is true.
virtual int32_t StartPlayingVideoFile(const char* fileName,
const bool loop,
bool videoOnly,
const FileFormats format) = 0;
// Prepare for playing audio from stream.
// FileCallback::PlayNotification(..) will be called after
// notificationTimeMs of the file has been played if notificationTimeMs is
@@ -130,16 +112,6 @@ public:
const int8_t* audioBuffer,
const size_t bufferLength) = 0;
// Write one video frame, i.e. the bufferLength first bytes of videoBuffer,
// to file.
// Note: videoBuffer can contain encoded data. The codec used must be the
// same as what was specified by videoCodecInst for the last successful
// StartRecordingVideoFile(..) call. The videoBuffer must contain exactly
// one video frame.
virtual int32_t IncomingAVIVideoData(
const int8_t* videoBuffer,
const size_t bufferLength) = 0;
// Open/creates file specified by fileName for writing (relative path is
// allowed). FileCallback::RecordNotification(..) will be called after
// notificationTimeMs of audio data has been recorded if
@@ -157,18 +129,6 @@ public:
const uint32_t notificationTimeMs = 0,
const uint32_t maxSizeBytes = 0) = 0;
// Open/create the file specified by fileName for writing audio/video data
// (relative path is allowed). format specifies the type of file fileName
// should be. codecInst specifies the encoding of the audio data.
// videoCodecInst specifies the encoding of the video data. Only video data
// will be recorded if videoOnly is true.
virtual int32_t StartRecordingVideoFile(
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
bool videoOnly = false) = 0;
// Prepare for recording audio to stream.
// FileCallback::RecordNotification(..) will be called after
// notificationTimeMs of audio data has been recorded if
@@ -212,10 +172,6 @@ public:
// reading or writing.
virtual int32_t codec_info(CodecInst& codecInst) const = 0;
// Update videoCodecInst according to the current video codec being used for
// reading or writing.
virtual int32_t VideoCodecInst(VideoCodec& videoCodecInst) const = 0;
protected:
MediaFile() {}
virtual ~MediaFile() {}


@@ -19,8 +19,6 @@
'sources': [
'interface/media_file.h',
'interface/media_file_defines.h',
'source/avi_file.cc',
'source/avi_file.h',
'source/media_file_impl.cc',
'source/media_file_impl.h',
'source/media_file_utility.cc',

File diff suppressed because it is too large.


@@ -1,277 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Class for reading (x)or writing to an AVI file.
// Note: the class cannot be used for reading and writing at the same time.
#ifndef WEBRTC_MODULES_MEDIA_FILE_SOURCE_AVI_FILE_H_
#define WEBRTC_MODULES_MEDIA_FILE_SOURCE_AVI_FILE_H_
#include <stdio.h>
#include <list>
#include "webrtc/typedefs.h"
namespace webrtc {
class CriticalSectionWrapper;
struct AVISTREAMHEADER
{
AVISTREAMHEADER();
uint32_t fcc;
uint32_t cb;
uint32_t fccType;
uint32_t fccHandler;
uint32_t dwFlags;
uint16_t wPriority;
uint16_t wLanguage;
uint32_t dwInitialFrames;
uint32_t dwScale;
uint32_t dwRate;
uint32_t dwStart;
uint32_t dwLength;
uint32_t dwSuggestedBufferSize;
uint32_t dwQuality;
uint32_t dwSampleSize;
struct
{
int16_t left;
int16_t top;
int16_t right;
int16_t bottom;
} rcFrame;
};
struct BITMAPINFOHEADER
{
BITMAPINFOHEADER();
uint32_t biSize;
uint32_t biWidth;
uint32_t biHeight;
uint16_t biPlanes;
uint16_t biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
uint32_t biXPelsPerMeter;
uint32_t biYPelsPerMeter;
uint32_t biClrUsed;
uint32_t biClrImportant;
};
struct WAVEFORMATEX
{
WAVEFORMATEX();
uint16_t wFormatTag;
uint16_t nChannels;
uint32_t nSamplesPerSec;
uint32_t nAvgBytesPerSec;
uint16_t nBlockAlign;
uint16_t wBitsPerSample;
uint16_t cbSize;
};
class AviFile
{
public:
enum AVIStreamType
{
AVI_AUDIO = 0,
AVI_VIDEO = 1
};
// Unsigned, for comparison with must-be-unsigned types.
static const unsigned int CODEC_CONFIG_LENGTH = 64;
static const unsigned int STREAM_NAME_LENGTH = 32;
AviFile();
~AviFile();
int32_t Open(AVIStreamType streamType, const char* fileName,
bool loop = false);
int32_t CreateVideoStream(const AVISTREAMHEADER& videoStreamHeader,
const BITMAPINFOHEADER& bitMapInfoHeader,
const uint8_t* codecConfigParams,
int32_t codecConfigParamsLength);
int32_t CreateAudioStream(const AVISTREAMHEADER& audioStreamHeader,
const WAVEFORMATEX& waveFormatHeader);
int32_t Create(const char* fileName);
int32_t WriteAudio(const uint8_t* data, size_t length);
int32_t WriteVideo(const uint8_t* data, size_t length);
int32_t GetVideoStreamInfo(AVISTREAMHEADER& videoStreamHeader,
BITMAPINFOHEADER& bitmapInfo,
char* codecConfigParameters,
int32_t& configLength);
int32_t GetDuration(int32_t& durationMs);
int32_t GetAudioStreamInfo(WAVEFORMATEX& waveHeader);
int32_t ReadAudio(uint8_t* data, size_t& length);
int32_t ReadVideo(uint8_t* data, size_t& length);
int32_t Close();
static uint32_t MakeFourCc(uint8_t ch0, uint8_t ch1, uint8_t ch2,
uint8_t ch3);
private:
enum AVIFileMode
{
NotSet,
Read,
Write
};
struct AVIINDEXENTRY
{
AVIINDEXENTRY(uint32_t inckid, uint32_t indwFlags,
uint32_t indwChunkOffset,
uint32_t indwChunkLength);
uint32_t ckid;
uint32_t dwFlags;
uint32_t dwChunkOffset;
uint32_t dwChunkLength;
};
int32_t PrepareDataChunkHeaders();
int32_t ReadMoviSubChunk(uint8_t* data, size_t& length, uint32_t tag1,
uint32_t tag2 = 0);
int32_t WriteRIFF();
int32_t WriteHeaders();
int32_t WriteAVIMainHeader();
int32_t WriteAVIStreamHeaders();
int32_t WriteAVIVideoStreamHeaders();
int32_t WriteAVIVideoStreamHeaderChunks();
int32_t WriteAVIAudioStreamHeaders();
int32_t WriteAVIAudioStreamHeaderChunks();
int32_t WriteMoviStart();
size_t PutByte(uint8_t byte);
size_t PutLE16(uint16_t word);
size_t PutLE32(uint32_t word);
size_t PutBuffer(const uint8_t* str, size_t size);
size_t PutBufferZ(const char* str);
long PutLE32LengthFromCurrent(long startPos);
void PutLE32AtPos(long pos, uint32_t word);
size_t GetByte(uint8_t& word);
size_t GetLE16(uint16_t& word);
size_t GetLE32(uint32_t& word);
size_t GetBuffer(uint8_t* str, size_t size);
void CloseRead();
void CloseWrite();
void ResetMembers();
void ResetComplexMembers();
int32_t ReadRIFF();
int32_t ReadHeaders();
int32_t ReadAVIMainHeader();
int32_t ReadAVIVideoStreamHeader(int32_t endpos);
int32_t ReadAVIAudioStreamHeader(int32_t endpos);
uint32_t StreamAndTwoCharCodeToTag(int32_t streamNum,
const char* twoCharCode);
void ClearIndexList();
void AddChunkToIndexList(uint32_t inChunkId, uint32_t inFlags,
uint32_t inOffset, uint32_t inSize);
void WriteIndex();
private:
typedef std::list<AVIINDEXENTRY*> IndexList;
struct AVIMAINHEADER
{
AVIMAINHEADER();
uint32_t fcc;
uint32_t cb;
uint32_t dwMicroSecPerFrame;
uint32_t dwMaxBytesPerSec;
uint32_t dwPaddingGranularity;
uint32_t dwFlags;
uint32_t dwTotalFrames;
uint32_t dwInitialFrames;
uint32_t dwStreams;
uint32_t dwSuggestedBufferSize;
uint32_t dwWidth;
uint32_t dwHeight;
uint32_t dwReserved[4];
};
struct AVIStream
{
AVIStreamType streamType;
int streamNumber;
};
CriticalSectionWrapper* _crit;
FILE* _aviFile;
AVIMAINHEADER _aviHeader;
AVISTREAMHEADER _videoStreamHeader;
AVISTREAMHEADER _audioStreamHeader;
BITMAPINFOHEADER _videoFormatHeader;
WAVEFORMATEX _audioFormatHeader;
int8_t _videoConfigParameters[CODEC_CONFIG_LENGTH];
int32_t _videoConfigLength;
int8_t _videoStreamName[STREAM_NAME_LENGTH];
int8_t _audioConfigParameters[CODEC_CONFIG_LENGTH];
int8_t _audioStreamName[STREAM_NAME_LENGTH];
AVIStream _videoStream;
AVIStream _audioStream;
int32_t _nrStreams;
int32_t _aviLength;
int32_t _dataLength;
size_t _bytesRead;
size_t _dataStartByte;
int32_t _framesRead;
int32_t _videoFrames;
int32_t _audioFrames;
bool _reading;
AVIStreamType _openedAs;
bool _loop;
bool _writing;
size_t _bytesWritten;
size_t _riffSizeMark;
size_t _moviSizeMark;
size_t _totNumFramesMark;
size_t _videoStreamLengthMark;
size_t _audioStreamLengthMark;
int32_t _moviListOffset;
bool _writeAudioStream;
bool _writeVideoStream;
AVIFileMode _aviMode;
uint8_t* _videoCodecConfigParams;
int32_t _videoCodecConfigParamsLength;
uint32_t _videoStreamDataChunkPrefix;
uint32_t _audioStreamDataChunkPrefix;
bool _created;
IndexList _indexList;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_MEDIA_FILE_SOURCE_AVI_FILE_H_
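
For reference, AviFile::MakeFourCc above follows the conventional little-endian
FOURCC packing. A minimal sketch of that convention (an assumption; the deleted
implementation is not shown in this diff):

// 'V','P','8','0' is stored byte-for-byte in the file, i.e. the value 0x30385056.
static uint32_t MakeFourCc(uint8_t ch0, uint8_t ch1, uint8_t ch2, uint8_t ch3)
{
    return static_cast<uint32_t>(ch0) |
           (static_cast<uint32_t>(ch1) << 8) |
           (static_cast<uint32_t>(ch2) << 16) |
           (static_cast<uint32_t>(ch3) << 24);
}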


@@ -102,21 +102,8 @@ int32_t MediaFileImpl::Process()
return -1;
}
int32_t MediaFileImpl::PlayoutAVIVideoData(
int8_t* buffer,
size_t& dataLengthInBytes)
{
return PlayoutData( buffer, dataLengthInBytes, true);
}
int32_t MediaFileImpl::PlayoutAudioData(int8_t* buffer,
size_t& dataLengthInBytes)
{
return PlayoutData( buffer, dataLengthInBytes, false);
}
int32_t MediaFileImpl::PlayoutData(int8_t* buffer, size_t& dataLengthInBytes,
bool video)
{
WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
"MediaFileImpl::PlayoutData(buffer= 0x%x, bufLen= %" PRIuS ")",
@@ -184,28 +171,12 @@ int32_t MediaFileImpl::PlayoutData(int8_t* buffer, size_t& dataLengthInBytes,
return 0;
}
break;
case kFileFormatAviFile:
default:
{
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
if(video)
{
bytesRead = _ptrFileUtilityObj->ReadAviVideoData(
buffer,
bufferLengthInBytes);
}
else
{
bytesRead = _ptrFileUtilityObj->ReadAviAudioData(
buffer,
bufferLengthInBytes);
}
break;
#else
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Invalid file format: %d", kFileFormatAviFile);
"Invalid file format: %d", _fileFormat);
assert(false);
break;
#endif
}
}
@@ -368,36 +339,6 @@ int32_t MediaFileImpl::StartPlayingAudioFile(
const uint32_t startPointMs,
const uint32_t stopPointMs)
{
const bool videoOnly = false;
return StartPlayingFile(fileName, notificationTimeMs, loop, videoOnly,
format, codecInst, startPointMs, stopPointMs);
}
int32_t MediaFileImpl::StartPlayingVideoFile(const char* fileName,
const bool loop,
bool videoOnly,
const FileFormats format)
{
const uint32_t notificationTimeMs = 0;
const uint32_t startPointMs = 0;
const uint32_t stopPointMs = 0;
return StartPlayingFile(fileName, notificationTimeMs, loop, videoOnly,
format, 0, startPointMs, stopPointMs);
}
int32_t MediaFileImpl::StartPlayingFile(
const char* fileName,
const uint32_t notificationTimeMs,
const bool loop,
bool videoOnly,
const FileFormats format,
const CodecInst* codecInst,
const uint32_t startPointMs,
const uint32_t stopPointMs)
{
if(!ValidFileName(fileName))
{
return -1;
@@ -432,27 +373,18 @@ int32_t MediaFileImpl::StartPlayingFile(
return -1;
}
// TODO (hellner): make all formats support reading from stream.
bool useStream = (format != kFileFormatAviFile);
if( useStream)
if(inputStream->OpenFile(fileName, true, loop) != 0)
{
if(inputStream->OpenFile(fileName, true, loop) != 0)
{
delete inputStream;
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Could not open input file %s", fileName);
return -1;
}
delete inputStream;
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Could not open input file %s", fileName);
return -1;
}
if(StartPlayingStream(*inputStream, fileName, loop, notificationTimeMs,
format, codecInst, startPointMs, stopPointMs,
videoOnly) == -1)
if(StartPlayingStream(*inputStream, loop, notificationTimeMs,
format, codecInst, startPointMs, stopPointMs) == -1)
{
if( useStream)
{
inputStream->CloseFile();
}
inputStream->CloseFile();
delete inputStream;
return -1;
}
@@ -472,20 +404,18 @@ int32_t MediaFileImpl::StartPlayingAudioStream(
const uint32_t startPointMs,
const uint32_t stopPointMs)
{
return StartPlayingStream(stream, 0, false, notificationTimeMs, format,
return StartPlayingStream(stream, false, notificationTimeMs, format,
codecInst, startPointMs, stopPointMs);
}
int32_t MediaFileImpl::StartPlayingStream(
InStream& stream,
const char* filename,
bool loop,
const uint32_t notificationTimeMs,
const FileFormats format,
const CodecInst* codecInst,
const uint32_t startPointMs,
const uint32_t stopPointMs,
bool videoOnly)
const uint32_t stopPointMs)
{
if(!ValidFileFormat(format,codecInst))
{
@@ -593,28 +523,12 @@ int32_t MediaFileImpl::StartPlayingStream(
_fileFormat = kFileFormatPreencodedFile;
break;
}
case kFileFormatAviFile:
default:
{
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
if(_ptrFileUtilityObj->InitAviReading( filename, videoOnly, loop))
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Not a valid AVI file!");
StopPlaying();
return -1;
}
_ptrFileUtilityObj->codec_info(codec_info_);
_fileFormat = kFileFormatAviFile;
break;
#else
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Invalid file format: %d", kFileFormatAviFile);
"Invalid file format: %d", format);
assert(false);
break;
#endif
}
}
if(_ptrFileUtilityObj->codec_info(codec_info_) == -1)
@@ -686,21 +600,6 @@ bool MediaFileImpl::IsPlaying()
int32_t MediaFileImpl::IncomingAudioData(
const int8_t* buffer,
const size_t bufferLengthInBytes)
{
return IncomingAudioVideoData( buffer, bufferLengthInBytes, false);
}
int32_t MediaFileImpl::IncomingAVIVideoData(
const int8_t* buffer,
const size_t bufferLengthInBytes)
{
return IncomingAudioVideoData( buffer, bufferLengthInBytes, true);
}
int32_t MediaFileImpl::IncomingAudioVideoData(
const int8_t* buffer,
const size_t bufferLengthInBytes,
const bool video)
{
WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
"MediaFile::IncomingData(buffer= 0x%x, bufLen= %" PRIuS,
@@ -772,24 +671,11 @@ int32_t MediaFileImpl::IncomingAudioVideoData(
bytesWritten = _ptrFileUtilityObj->WritePreEncodedData(
*_ptrOutStream, buffer, bufferLengthInBytes);
break;
case kFileFormatAviFile:
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
if(video)
{
bytesWritten = _ptrFileUtilityObj->WriteAviVideoData(
buffer, bufferLengthInBytes);
}else
{
bytesWritten = _ptrFileUtilityObj->WriteAviAudioData(
buffer, bufferLengthInBytes);
}
break;
#else
default:
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Invalid file format: %d", kFileFormatAviFile);
"Invalid file format: %d", _fileFormat);
assert(false);
break;
#endif
}
} else {
// TODO (hellner): quick look at the code makes me think that this
@@ -803,10 +689,7 @@ int32_t MediaFileImpl::IncomingAudioVideoData(
}
}
if(!video)
{
_recordDurationMs += samplesWritten / (codec_info_.plfreq / 1000);
}
_recordDurationMs += samplesWritten / (codec_info_.plfreq / 1000);
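// Units: samplesWritten [samples] / (plfreq / 1000 [samples per ms]) = ms;
// e.g. 160 samples at plfreq 8000 advance the duration by 20 ms.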
// Check if it's time for RecordNotification(..).
if(_notificationMs)
@@ -850,36 +733,6 @@ int32_t MediaFileImpl::StartRecordingAudioFile(
const uint32_t notificationTimeMs,
const uint32_t maxSizeBytes)
{
VideoCodec dummyCodecInst;
return StartRecordingFile(fileName, format, codecInst, dummyCodecInst,
notificationTimeMs, maxSizeBytes);
}
int32_t MediaFileImpl::StartRecordingVideoFile(
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
bool videoOnly)
{
const uint32_t notificationTimeMs = 0;
const uint32_t maxSizeBytes = 0;
return StartRecordingFile(fileName, format, codecInst, videoCodecInst,
notificationTimeMs, maxSizeBytes, videoOnly);
}
int32_t MediaFileImpl::StartRecordingFile(
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
const uint32_t notificationTimeMs,
const uint32_t maxSizeBytes,
bool videoOnly)
{
if(!ValidFileName(fileName))
{
return -1;
@@ -897,32 +750,24 @@ int32_t MediaFileImpl::StartRecordingFile(
return -1;
}
// TODO (hellner): make all formats support writing to stream.
const bool useStream = ( format != kFileFormatAviFile);
if( useStream)
if(outputStream->OpenFile(fileName, false) != 0)
{
if(outputStream->OpenFile(fileName, false) != 0)
{
delete outputStream;
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Could not open output file '%s' for writing!",
fileName);
return -1;
}
delete outputStream;
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Could not open output file '%s' for writing!",
fileName);
return -1;
}
if(maxSizeBytes)
{
outputStream->SetMaxFileSize(maxSizeBytes);
}
if(StartRecordingStream(*outputStream, fileName, format, codecInst,
videoCodecInst, notificationTimeMs,
videoOnly) == -1)
if(StartRecordingAudioStream(*outputStream, format, codecInst,
notificationTimeMs) == -1)
{
if( useStream)
{
outputStream->CloseFile();
}
outputStream->CloseFile();
delete outputStream;
return -1;
}
@@ -940,21 +785,6 @@ int32_t MediaFileImpl::StartRecordingAudioStream(
const CodecInst& codecInst,
const uint32_t notificationTimeMs)
{
VideoCodec dummyCodecInst;
return StartRecordingStream(stream, 0, format, codecInst, dummyCodecInst,
notificationTimeMs);
}
int32_t MediaFileImpl::StartRecordingStream(
OutStream& stream,
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
const uint32_t notificationTimeMs,
bool videoOnly)
{
// Check codec info
if(!ValidFileFormat(format,&codecInst))
{
@@ -1055,25 +885,6 @@ int32_t MediaFileImpl::StartRecordingStream(
_fileFormat = kFileFormatPreencodedFile;
break;
}
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
case kFileFormatAviFile:
{
if( (_ptrFileUtilityObj->InitAviWriting(
fileName,
codecInst,
videoCodecInst,videoOnly) == -1) ||
(_ptrFileUtilityObj->codec_info(tmpAudioCodec) != 0))
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Failed to initialize AVI file!");
delete _ptrFileUtilityObj;
_ptrFileUtilityObj = NULL;
return -1;
}
_fileFormat = kFileFormatAviFile;
break;
}
#endif
default:
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
@@ -1136,12 +947,6 @@ int32_t MediaFileImpl::StopRecording()
{
_ptrFileUtilityObj->UpdateWavHeader(*_ptrOutStream);
}
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
else if( _fileFormat == kFileFormatAviFile)
{
_ptrFileUtilityObj->CloseAviFile( );
}
#endif
delete _ptrFileUtilityObj;
_ptrFileUtilityObj = NULL;
}
@@ -1268,32 +1073,6 @@ int32_t MediaFileImpl::codec_info(CodecInst& codecInst) const
return 0;
}
int32_t MediaFileImpl::VideoCodecInst(VideoCodec& codecInst) const
{
CriticalSectionScoped lock(_crit);
if(!_playingActive && !_recordingActive)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Neither playout nor recording has been initialized!");
return -1;
}
if( _ptrFileUtilityObj == NULL)
{
return -1;
}
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
VideoCodec videoCodec;
if( _ptrFileUtilityObj->VideoCodecInst( videoCodec) != 0)
{
return -1;
}
memcpy(&codecInst,&videoCodec,sizeof(VideoCodec));
return 0;
#else
return -1;
#endif
}
bool MediaFileImpl::ValidFileFormat(const FileFormats format,
const CodecInst* codecInst)
{


@@ -32,11 +32,11 @@ public:
// MediaFile functions
virtual int32_t PlayoutAudioData(int8_t* audioBuffer,
size_t& dataLengthInBytes) OVERRIDE;
virtual int32_t PlayoutAVIVideoData(int8_t* videoBuffer,
size_t& dataLengthInBytes) OVERRIDE;
virtual int32_t PlayoutStereoData(int8_t* audioBufferLeft,
int8_t* audioBufferRight,
size_t& dataLengthInBytes) OVERRIDE;
virtual int32_t StartPlayingAudioFile(
const char* fileName,
const uint32_t notificationTimeMs = 0,
@@ -45,51 +45,53 @@ public:
const CodecInst* codecInst = NULL,
const uint32_t startPointMs = 0,
const uint32_t stopPointMs = 0) OVERRIDE;
virtual int32_t StartPlayingVideoFile(const char* fileName, const bool loop,
bool videoOnly,
const FileFormats format) OVERRIDE;
virtual int32_t StartPlayingAudioStream(InStream& stream,
const uint32_t notificationTimeMs = 0,
const FileFormats format = kFileFormatPcm16kHzFile,
const CodecInst* codecInst = NULL,
const uint32_t startPointMs = 0,
const uint32_t stopPointMs = 0) OVERRIDE;
virtual int32_t StopPlaying() OVERRIDE;
virtual bool IsPlaying() OVERRIDE;
virtual int32_t PlayoutPositionMs(uint32_t& positionMs) const OVERRIDE;
virtual int32_t IncomingAudioData(const int8_t* audioBuffer,
const size_t bufferLength) OVERRIDE;
virtual int32_t IncomingAVIVideoData(const int8_t* audioBuffer,
const size_t bufferLength) OVERRIDE;
virtual int32_t StartRecordingAudioFile(
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const uint32_t notificationTimeMs = 0,
const uint32_t maxSizeBytes = 0) OVERRIDE;
virtual int32_t StartRecordingVideoFile(
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
bool videoOnly = false) OVERRIDE;
virtual int32_t StartRecordingAudioStream(
OutStream& stream,
const FileFormats format,
const CodecInst& codecInst,
const uint32_t notificationTimeMs = 0) OVERRIDE;
virtual int32_t StopRecording() OVERRIDE;
virtual bool IsRecording() OVERRIDE;
virtual int32_t RecordDurationMs(uint32_t& durationMs) OVERRIDE;
virtual bool IsStereo() OVERRIDE;
virtual int32_t SetModuleFileCallback(FileCallback* callback) OVERRIDE;
virtual int32_t FileDurationMs(
const char* fileName,
uint32_t& durationMs,
const FileFormats format,
const uint32_t freqInHz = 16000) OVERRIDE;
virtual int32_t codec_info(CodecInst& codecInst) const OVERRIDE;
virtual int32_t VideoCodecInst(VideoCodec& codecInst) const OVERRIDE;
private:
// Returns true if the combination of format and codecInst is valid.
@@ -100,121 +102,24 @@ private:
// Returns true if the filename is valid
static bool ValidFileName(const char* fileName);
// Returns true if the combination of startPointMs and stopPointMs is valid.
static bool ValidFilePositions(const uint32_t startPointMs,
const uint32_t stopPointMs);
// Open the file specified by fileName for reading (relative path is
// allowed). FileCallback::PlayNotification(..) will be called after
// notificationTimeMs of the file has been played if notificationTimeMs is
// greater than zero. If loop is true the file will be played until
// StopPlaying() is called. When end of file is reached the file is read
// from the start. format specifies the type of file fileName refers to.
// codecInst specifies the encoding of the audio data. Note that
// file formats that contain this information (like WAV files) don't need to
// provide a non-NULL codecInst. Only video will be read if videoOnly is
// true. startPointMs and stopPointMs, unless zero,
// specify what part of the file should be read. From startPointMs ms to
// stopPointMs ms.
int32_t StartPlayingFile(
const char* fileName,
const uint32_t notificationTimeMs = 0,
const bool loop = false,
bool videoOnly = false,
const FileFormats format = kFileFormatPcm16kHzFile,
const CodecInst* codecInst = NULL,
const uint32_t startPointMs = 0,
const uint32_t stopPointMs = 0);
// Opens the file specified by fileName for reading (relative path is
// allowed) if format is kFileFormatAviFile otherwise use stream for
// reading. FileCallback::PlayNotification(..) will be called after
// notificationTimeMs of the file has been played if notificationTimeMs is
// greater than zero. If loop is true the file will be played until
// StopPlaying() is called. When end of file is reached the file is read
// from the start. format specifies the type of file fileName refers to.
// codecInst specifies the encoding of the audio data. Note that
// file formats that contain this information (like WAV files) don't need to
// provide a non-NULL codecInst. Only video will be read if videoOnly is
// true. startPointMs and stopPointMs, unless zero,
// specify what part of the file should be read. From startPointMs ms to
// stopPointMs ms.
// TODO (hellner): there is no reason why fileName should be needed here.
int32_t StartPlayingStream(
InStream& stream,
const char* fileName,
bool loop,
const uint32_t notificationTimeMs = 0,
const FileFormats format = kFileFormatPcm16kHzFile,
const CodecInst* codecInst = NULL,
const uint32_t startPointMs = 0,
const uint32_t stopPointMs = 0,
bool videoOnly = true);
// Writes one frame into dataBuffer. dataLengthInBytes is both an input and
// output parameter. As input parameter it indicates the size of
// dataBuffer. As output parameter it indicates the number of bytes
// written to dataBuffer. If video is true the data written is a video
// frame otherwise it is an audio frame.
int32_t PlayoutData(int8_t* dataBuffer, size_t& dataLengthInBytes,
bool video);
// Write one frame, i.e. the bufferLength first bytes of audioBuffer,
// to file. The frame is a video frame if video is true; otherwise it is an
// audio frame.
int32_t IncomingAudioVideoData(const int8_t* buffer,
const size_t bufferLength,
const bool video);
// Open/creates file specified by fileName for writing (relative path is
// allowed) if format is kFileFormatAviFile otherwise use stream for
// writing. FileCallback::RecordNotification(..) will be called after
// notificationTimeMs of audio data has been recorded if
// notificationTimeMs is greater than zero.
// format specifies the type of file that should be created/opened.
// codecInst specifies the encoding of the audio data. videoCodecInst
// specifies the encoding of the video data. maxSizeBytes specifies the
// number of bytes allowed to be written to file if it is greater than zero.
// If format is kFileFormatAviFile and videoOnly is true the AVI file will
// only contain video frames.
// Note: codecInst.channels should be set to 2 for stereo (and 1 for
// mono). Stereo is only supported for WAV files.
int32_t StartRecordingFile(
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
const uint32_t notificationTimeMs = 0,
const uint32_t maxSizeBytes = 0,
bool videoOnly = false);
// Open/creates file specified by fileName for writing (relative path is
// allowed). FileCallback::RecordNotification(..) will be called after
// notificationTimeMs of audio data has been recorded if
// notificationTimeMs is greater than zero.
// format specifies the type of file that should be created/opened.
// codecInst specifies the encoding of the audio data. videoCodecInst
// specifies the encoding of the video data. maxSizeBytes specifies the
// number of bytes allowed to be written to file if it is greater than zero.
// If format is kFileFormatAviFile and videoOnly is true the AVI file will
// only contain video frames.
// Note: codecInst.channels should be set to 2 for stereo (and 1 for
// mono). Stereo is only supported for WAV files.
// TODO (hellner): there is no reason why fileName should be needed here.
int32_t StartRecordingStream(
OutStream& stream,
const char* fileName,
const FileFormats format,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
const uint32_t notificationTimeMs = 0,
const bool videoOnly = false);
// Returns true if frequencyInHz is a supported frequency.
static bool ValidFrequency(const uint32_t frequencyInHz);
void HandlePlayCallbacks(int32_t bytesRead);
int32_t StartPlayingStream(
InStream& stream,
bool loop,
const uint32_t notificationTimeMs,
const FileFormats format,
const CodecInst* codecInst,
const uint32_t startPointMs,
const uint32_t stopPointMs);
int32_t _id;
CriticalSectionWrapper* _crit;
CriticalSectionWrapper* _callbackCrit;


@@ -23,10 +23,6 @@
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "avi_file.h"
#endif
namespace {
// First 16 bytes the WAVE header. ckID should be "RIFF", wave_ckID should be
@@ -63,369 +59,19 @@ ModuleFileUtility::ModuleFileUtility(const int32_t id)
_readPos(0),
_reading(false),
_writing(false),
_tempData()
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
,
_aviAudioInFile(0),
_aviVideoInFile(0),
_aviOutFile(0)
#endif
{
_tempData() {
WEBRTC_TRACE(kTraceMemory, kTraceFile, _id,
"ModuleFileUtility::ModuleFileUtility()");
memset(&codec_info_,0,sizeof(CodecInst));
codec_info_.pltype = -1;
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
memset(&_videoCodec,0,sizeof(_videoCodec));
#endif
}
ModuleFileUtility::~ModuleFileUtility()
{
WEBRTC_TRACE(kTraceMemory, kTraceFile, _id,
"ModuleFileUtility::~ModuleFileUtility()");
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
delete _aviAudioInFile;
delete _aviVideoInFile;
#endif
}
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
int32_t ModuleFileUtility::InitAviWriting(
const char* filename,
const CodecInst& audioCodecInst,
const VideoCodec& videoCodecInst,
const bool videoOnly /*= false*/)
{
_writing = false;
delete _aviOutFile;
_aviOutFile = new AviFile( );
AVISTREAMHEADER videoStreamHeader;
videoStreamHeader.fccType = AviFile::MakeFourCc('v', 'i', 'd', 's');
#ifdef VIDEOCODEC_I420
if (strncmp(videoCodecInst.plName, "I420", 7) == 0)
{
videoStreamHeader.fccHandler = AviFile::MakeFourCc('I','4','2','0');
}
#endif
#ifdef VIDEOCODEC_VP8
if (strncmp(videoCodecInst.plName, "VP8", 7) == 0)
{
videoStreamHeader.fccHandler = AviFile::MakeFourCc('V','P','8','0');
}
#endif
if (videoStreamHeader.fccHandler == 0)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"InitAviWriting() Codec not supported");
return -1;
}
videoStreamHeader.dwScale = 1;
videoStreamHeader.dwRate = videoCodecInst.maxFramerate;
videoStreamHeader.dwSuggestedBufferSize = videoCodecInst.height *
(videoCodecInst.width >> 1) * 3;
videoStreamHeader.dwQuality = (uint32_t)-1;
videoStreamHeader.dwSampleSize = 0;
videoStreamHeader.rcFrame.top = 0;
videoStreamHeader.rcFrame.bottom = videoCodecInst.height;
videoStreamHeader.rcFrame.left = 0;
videoStreamHeader.rcFrame.right = videoCodecInst.width;
BITMAPINFOHEADER bitMapInfoHeader;
bitMapInfoHeader.biSize = sizeof(BITMAPINFOHEADER);
bitMapInfoHeader.biHeight = videoCodecInst.height;
bitMapInfoHeader.biWidth = videoCodecInst.width;
bitMapInfoHeader.biPlanes = 1;
bitMapInfoHeader.biBitCount = 12;
bitMapInfoHeader.biClrImportant = 0;
bitMapInfoHeader.biClrUsed = 0;
bitMapInfoHeader.biCompression = videoStreamHeader.fccHandler;
bitMapInfoHeader.biSizeImage = bitMapInfoHeader.biWidth *
bitMapInfoHeader.biHeight * bitMapInfoHeader.biBitCount / 8;
if (_aviOutFile->CreateVideoStream(
videoStreamHeader,
bitMapInfoHeader,
NULL,
0) != 0)
{
return -1;
}
if(!videoOnly)
{
AVISTREAMHEADER audioStreamHeader;
audioStreamHeader.fccType = AviFile::MakeFourCc('a', 'u', 'd', 's');
// fccHandler is the FOURCC of the codec for decoding the stream.
// It's an optional parameter that is not used by audio streams.
audioStreamHeader.fccHandler = 0;
audioStreamHeader.dwScale = 1;
WAVEFORMATEX waveFormatHeader;
waveFormatHeader.cbSize = 0;
waveFormatHeader.nChannels = 1;
if (strncmp(audioCodecInst.plname, "PCMU", 4) == 0)
{
audioStreamHeader.dwSampleSize = 1;
audioStreamHeader.dwRate = 8000;
audioStreamHeader.dwQuality = (uint32_t)-1;
audioStreamHeader.dwSuggestedBufferSize = 80;
waveFormatHeader.nAvgBytesPerSec = 8000;
waveFormatHeader.nSamplesPerSec = 8000;
waveFormatHeader.wBitsPerSample = 8;
waveFormatHeader.nBlockAlign = 1;
waveFormatHeader.wFormatTag = kWavFormatMuLaw;
} else if (strncmp(audioCodecInst.plname, "PCMA", 4) == 0)
{
audioStreamHeader.dwSampleSize = 1;
audioStreamHeader.dwRate = 8000;
audioStreamHeader.dwQuality = (uint32_t)-1;
audioStreamHeader.dwSuggestedBufferSize = 80;
waveFormatHeader.nAvgBytesPerSec = 8000;
waveFormatHeader.nSamplesPerSec = 8000;
waveFormatHeader.wBitsPerSample = 8;
waveFormatHeader.nBlockAlign = 1;
waveFormatHeader.wFormatTag = kWavFormatALaw;
} else if (strncmp(audioCodecInst.plname, "L16", 3) == 0)
{
audioStreamHeader.dwSampleSize = 2;
audioStreamHeader.dwRate = audioCodecInst.plfreq;
audioStreamHeader.dwQuality = (uint32_t)-1;
audioStreamHeader.dwSuggestedBufferSize =
(audioCodecInst.plfreq/100) * 2;
waveFormatHeader.nAvgBytesPerSec = audioCodecInst.plfreq * 2;
waveFormatHeader.nSamplesPerSec = audioCodecInst.plfreq;
waveFormatHeader.wBitsPerSample = 16;
waveFormatHeader.nBlockAlign = 2;
waveFormatHeader.wFormatTag = kWavFormatPcm;
} else
{
return -1;
}
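// In each branch above nAvgBytesPerSec = nSamplesPerSec * nBlockAlign, and
// dwSuggestedBufferSize covers one 10 ms frame (e.g. (plfreq / 100) * 2 bytes
// for L16).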
if(_aviOutFile->CreateAudioStream(
audioStreamHeader,
waveFormatHeader) != 0)
{
return -1;
}
if( InitWavCodec(waveFormatHeader.nSamplesPerSec,
waveFormatHeader.nChannels,
waveFormatHeader.wBitsPerSample,
waveFormatHeader.wFormatTag) != 0)
{
return -1;
}
}
_aviOutFile->Create(filename);
_writing = true;
return 0;
}
int32_t ModuleFileUtility::WriteAviAudioData(
const int8_t* buffer,
size_t bufferLengthInBytes)
{
if( _aviOutFile != 0)
{
return _aviOutFile->WriteAudio(
reinterpret_cast<const uint8_t*>(buffer),
bufferLengthInBytes);
}
else
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id, "AVI file not initialized");
return -1;
}
}
int32_t ModuleFileUtility::WriteAviVideoData(
const int8_t* buffer,
size_t bufferLengthInBytes)
{
if( _aviOutFile != 0)
{
return _aviOutFile->WriteVideo(
reinterpret_cast<const uint8_t*>(buffer),
bufferLengthInBytes);
}
else
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id, "AVI file not initialized");
return -1;
}
}
int32_t ModuleFileUtility::CloseAviFile( )
{
if( _reading && _aviAudioInFile)
{
delete _aviAudioInFile;
_aviAudioInFile = 0;
}
if( _reading && _aviVideoInFile)
{
delete _aviVideoInFile;
_aviVideoInFile = 0;
}
if( _writing && _aviOutFile)
{
delete _aviOutFile;
_aviOutFile = 0;
}
return 0;
}
int32_t ModuleFileUtility::InitAviReading(const char* filename, bool videoOnly,
bool loop)
{
_reading = false;
delete _aviVideoInFile;
_aviVideoInFile = new AviFile( );
if ((_aviVideoInFile != 0) && _aviVideoInFile->Open(AviFile::AVI_VIDEO,
filename, loop) == -1)
{
WEBRTC_TRACE(kTraceError, kTraceVideo, -1,
"Unable to open AVI file (video)");
return -1;
}
AVISTREAMHEADER videoInStreamHeader;
BITMAPINFOHEADER bitmapInfo;
char codecConfigParameters[AviFile::CODEC_CONFIG_LENGTH] = {};
int32_t configLength = 0;
if( _aviVideoInFile->GetVideoStreamInfo(videoInStreamHeader, bitmapInfo,
codecConfigParameters,
configLength) != 0)
{
return -1;
}
_videoCodec.width = static_cast<uint16_t>(
videoInStreamHeader.rcFrame.right);
_videoCodec.height = static_cast<uint16_t>(
videoInStreamHeader.rcFrame.bottom);
_videoCodec.maxFramerate = static_cast<uint8_t>(
videoInStreamHeader.dwRate);
const size_t plnameLen = sizeof(_videoCodec.plName) / sizeof(char);
if (bitmapInfo.biCompression == AviFile::MakeFourCc('I','4','2','0'))
{
strncpy(_videoCodec.plName, "I420", plnameLen);
_videoCodec.codecType = kVideoCodecI420;
}
else if (bitmapInfo.biCompression ==
AviFile::MakeFourCc('V', 'P', '8', '0'))
{
strncpy(_videoCodec.plName, "VP8", plnameLen);
_videoCodec.codecType = kVideoCodecVP8;
}
else
{
return -1;
}
if(!videoOnly)
{
delete _aviAudioInFile;
_aviAudioInFile = new AviFile();
if ( (_aviAudioInFile != 0) &&
_aviAudioInFile->Open(AviFile::AVI_AUDIO, filename, loop) == -1)
{
WEBRTC_TRACE(kTraceError, kTraceVideo, -1,
"Unable to open AVI file (audio)");
return -1;
}
WAVEFORMATEX waveHeader;
if(_aviAudioInFile->GetAudioStreamInfo(waveHeader) != 0)
{
return -1;
}
if(InitWavCodec(waveHeader.nSamplesPerSec, waveHeader.nChannels,
waveHeader.wBitsPerSample, waveHeader.wFormatTag) != 0)
{
return -1;
}
}
_reading = true;
return 0;
}
int32_t ModuleFileUtility::ReadAviAudioData(
int8_t* outBuffer,
size_t bufferLengthInBytes)
{
if(_aviAudioInFile == 0)
{
WEBRTC_TRACE(kTraceError, kTraceVideo, -1, "AVI file not opened.");
return -1;
}
if(_aviAudioInFile->ReadAudio(reinterpret_cast<uint8_t*>(outBuffer),
bufferLengthInBytes) != 0)
{
return -1;
}
else
{
return static_cast<int32_t>(bufferLengthInBytes);
}
}
int32_t ModuleFileUtility::ReadAviVideoData(
int8_t* outBuffer,
size_t bufferLengthInBytes)
{
if(_aviVideoInFile == 0)
{
WEBRTC_TRACE(kTraceError, kTraceVideo, -1, "AVI file not opened.");
return -1;
}
if(_aviVideoInFile->ReadVideo(reinterpret_cast<uint8_t*>(outBuffer),
bufferLengthInBytes) != 0)
{
return -1;
} else {
return static_cast<int32_t>(bufferLengthInBytes);
}
}
int32_t ModuleFileUtility::VideoCodecInst(VideoCodec& codecInst)
{
WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
"ModuleFileUtility::CodecInst(codecInst= 0x%x)", &codecInst);
if(!_reading)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"CodecInst: not currently reading audio file!");
return -1;
}
memcpy(&codecInst,&_videoCodec,sizeof(VideoCodec));
return 0;
}
#endif
int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
{
WAVE_RIFF_header RIFFheaderObj;


@@ -18,7 +18,6 @@
#include "webrtc/modules/media_file/interface/media_file_defines.h"
namespace webrtc {
class AviFile;
class InStream;
class OutStream;
@@ -29,61 +28,6 @@ public:
ModuleFileUtility(const int32_t id);
~ModuleFileUtility();
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
// Open the file specified by fileName for reading (relative path is
// allowed). If loop is true the file will be played until StopPlaying() is
// called. When end of file is reached the file is read from the start.
// Only video will be read if videoOnly is true.
int32_t InitAviReading(const char* fileName, bool videoOnly, bool loop);
// Put 10-60ms of audio data from file into the outBuffer depending on
// codec frame size. bufferLengthInBytes indicates the size of outBuffer.
// The return value is the number of bytes written to audioBuffer.
// Note: This API only plays mono audio but can be used on files containing
// audio with more channels (in which case the audio will be converted to
// mono).
int32_t ReadAviAudioData(int8_t* outBuffer,
size_t bufferLengthInBytes);
// Put one video frame into outBuffer. bufferLengthInBytes indicates the
// size of outBuffer.
// The return value is the number of bytes written to videoBuffer.
int32_t ReadAviVideoData(int8_t* videoBuffer,
size_t bufferLengthInBytes);
// Open/create the file specified by fileName for writing audio/video data
// (relative path is allowed). codecInst specifies the encoding of the audio
// data. videoCodecInst specifies the encoding of the video data. Only video
// data will be recorded if videoOnly is true.
int32_t InitAviWriting(const char* filename,
const CodecInst& codecInst,
const VideoCodec& videoCodecInst,
const bool videoOnly);
// Write one audio frame, i.e. the bufferLengthInBytes first bytes of
// audioBuffer, to file. The audio frame size is determined by the
// codecInst.pacsize parameter of the last successful
// InitAviWriting(..) call.
// Note: bufferLength must be exactly one frame.
int32_t WriteAviAudioData(const int8_t* audioBuffer,
size_t bufferLengthInBytes);
// Write one video frame, i.e. the bufferLength first bytes of videoBuffer,
// to file.
// Note: videoBuffer can contain encoded data. The codec used must be the
// same as what was specified by videoCodecInst for the last successful
// InitAviWriting(..) call. The videoBuffer must contain exactly
// one video frame.
int32_t WriteAviVideoData(const int8_t* videoBuffer,
size_t bufferLengthInBytes);
// Stop recording to file or stream.
int32_t CloseAviFile();
int32_t VideoCodecInst(VideoCodec& codecInst);
#endif // #ifdef WEBRTC_MODULE_UTILITY_VIDEO
// Prepare for playing audio from stream.
// startPointMs and stopPointMs, unless zero, specify what part of the file
// should be read. From startPointMs ms to stopPointMs ms.
@@ -335,13 +279,6 @@ private:
// Scratch buffer used for turning stereo audio to mono.
uint8_t _tempData[WAV_MAX_BUFFER_SIZE];
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
AviFile* _aviAudioInFile;
AviFile* _aviVideoInFile;
AviFile* _aviOutFile;
VideoCodec _videoCodec;
#endif
};
} // namespace webrtc
#endif // WEBRTC_MODULES_MEDIA_FILE_SOURCE_MEDIA_FILE_UTILITY_H_


@@ -46,13 +46,4 @@ source_set("utility") {
"../audio_coding",
"../media_file",
]
if (rtc_enable_video) {
sources += [
"source/frame_scaler.cc",
"source/video_coder.cc",
"source/video_frames_queue.cc",
]
deps += [ "../video_coding" ]
}
}


@@ -27,8 +27,7 @@ public:
enum {MAX_AUDIO_BUFFER_IN_SAMPLES = 60*32};
enum {MAX_AUDIO_BUFFER_IN_BYTES = MAX_AUDIO_BUFFER_IN_SAMPLES*2};
// Note: will return NULL for video file formats (e.g. AVI) if the flag
// WEBRTC_MODULE_UTILITY_VIDEO is not defined.
// Note: will return NULL for unsupported formats.
static FilePlayer* CreateFilePlayer(const uint32_t instanceID,
const FileFormats fileFormat);
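
A caller-side sketch of that contract (hypothetical usage, not part of this
change); after this commit only the audio formats are valid inputs:

FilePlayer* player =
    FilePlayer::CreateFilePlayer(0 /* instanceID */, kFileFormatWavFile);
if (player == NULL)
{
    // Unsupported file format.
}
// ... use the player, then release it with FilePlayer::DestroyFilePlayer(player).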


@@ -26,8 +26,7 @@ class FileRecorder
{
public:
// Note: will return NULL for video file formats (e.g. AVI) if the flag
// WEBRTC_MODULE_UTILITY_VIDEO is not defined.
// Note: will return NULL for unsupported formats.
static FileRecorder* CreateFileRecorder(const uint32_t instanceID,
const FileFormats fileFormat);


@@ -11,12 +11,6 @@
#include "webrtc/modules/utility/source/file_player_impl.h"
#include "webrtc/system_wrappers/interface/logging.h"
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "webrtc/modules/utility/source/frame_scaler.h"
#include "webrtc/modules/utility/source/video_coder.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#endif
namespace webrtc {
FilePlayer* FilePlayer::CreateFilePlayer(uint32_t instanceID,
FileFormats fileFormat)
@@ -31,16 +25,10 @@ FilePlayer* FilePlayer::CreateFilePlayer(uint32_t instanceID,
case kFileFormatPcm32kHzFile:
// audio formats
return new FilePlayerImpl(instanceID, fileFormat);
case kFileFormatAviFile:
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
return new VideoFilePlayerImpl(instanceID, fileFormat);
#else
default:
assert(false);
return NULL;
#endif
}
assert(false);
return NULL;
}
void FilePlayer::DestroyFilePlayer(FilePlayer* player)
@@ -412,258 +400,4 @@ int32_t FilePlayerImpl::SetUpAudioDecoder()
_numberOf10MsInDecoder = 0;
return 0;
}
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
VideoFilePlayerImpl::VideoFilePlayerImpl(uint32_t instanceID,
FileFormats fileFormat)
: FilePlayerImpl(instanceID, fileFormat),
video_decoder_(new VideoCoder()),
video_codec_info_(),
_decodedVideoFrames(0),
_encodedData(*new EncodedVideoData()),
_frameScaler(*new FrameScaler()),
_critSec(CriticalSectionWrapper::CreateCriticalSection()),
_startTime(),
_accumulatedRenderTimeMs(0),
_frameLengthMS(0),
_numberOfFramesRead(0),
_videoOnly(false) {
memset(&video_codec_info_, 0, sizeof(video_codec_info_));
}
VideoFilePlayerImpl::~VideoFilePlayerImpl()
{
delete _critSec;
delete &_frameScaler;
video_decoder_.reset();
delete &_encodedData;
}
int32_t VideoFilePlayerImpl::StartPlayingVideoFile(
const char* fileName,
bool loop,
bool videoOnly)
{
CriticalSectionScoped lock( _critSec);
if(_fileModule.StartPlayingVideoFile(fileName, loop, videoOnly,
_fileFormat) != 0)
{
return -1;
}
_decodedVideoFrames = 0;
_accumulatedRenderTimeMs = 0;
_frameLengthMS = 0;
_numberOfFramesRead = 0;
_videoOnly = videoOnly;
// Set up video_codec_info_ according to the file.
if(SetUpVideoDecoder() != 0)
{
StopPlayingFile();
return -1;
}
if(!videoOnly)
{
// Set up _codec according to the file.
if(SetUpAudioDecoder() != 0)
{
StopPlayingFile();
return -1;
}
}
return 0;
}
int32_t VideoFilePlayerImpl::StopPlayingFile()
{
CriticalSectionScoped lock( _critSec);
_decodedVideoFrames = 0;
video_decoder_.reset(new VideoCoder());
return FilePlayerImpl::StopPlayingFile();
}
int32_t VideoFilePlayerImpl::GetVideoFromFile(I420VideoFrame& videoFrame,
uint32_t outWidth,
uint32_t outHeight)
{
CriticalSectionScoped lock( _critSec);
int32_t retVal = GetVideoFromFile(videoFrame);
if(retVal != 0)
{
return retVal;
}
if (!videoFrame.IsZeroSize())
{
retVal = _frameScaler.ResizeFrameIfNeeded(&videoFrame, outWidth,
outHeight);
}
return retVal;
}
int32_t VideoFilePlayerImpl::GetVideoFromFile(I420VideoFrame& videoFrame)
{
CriticalSectionScoped lock( _critSec);
// No new video data read from file.
if(_encodedData.payloadSize == 0)
{
videoFrame.ResetSize();
return -1;
}
int32_t retVal = 0;
if(strncmp(video_codec_info_.plName, "I420", 5) == 0)
{
int size_y = video_codec_info_.width * video_codec_info_.height;
int half_width = (video_codec_info_.width + 1) / 2;
int half_height = (video_codec_info_.height + 1) / 2;
int size_uv = half_width * half_height;
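// E.g. 352x288: size_y = 101376 and size_uv = 176 * 144 = 25344, so the
// three planes total 3 * width * height / 2 bytes.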
// TODO(mikhal): Do we need to align the stride here?
const uint8_t* buffer_y = _encodedData.payloadData;
const uint8_t* buffer_u = buffer_y + size_y;
const uint8_t* buffer_v = buffer_u + size_uv;
videoFrame.CreateFrame(size_y, buffer_y,
size_uv, buffer_u,
size_uv, buffer_v,
video_codec_info_.width, video_codec_info_.height,
video_codec_info_.height, half_width, half_width);
}else
{
// Set the timestamp manually since there is no timestamp in the file.
// Update timestamp according to the 90 kHz stream.
_encodedData.timeStamp += (90000 / video_codec_info_.maxFramerate);
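// E.g. at maxFramerate 30 this advances the timestamp 90000 / 30 = 3000
// ticks per frame.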
retVal = video_decoder_->Decode(videoFrame, _encodedData);
}
int64_t renderTimeMs = TickTime::MillisecondTimestamp();
videoFrame.set_render_time_ms(renderTimeMs);
// Indicate that the current frame in the encoded buffer is old/has
// already been read.
_encodedData.payloadSize = 0;
if( retVal == 0)
{
_decodedVideoFrames++;
}
return retVal;
}
int32_t VideoFilePlayerImpl::video_codec_info(
VideoCodec& videoCodec) const
{
if(video_codec_info_.plName[0] == 0)
{
return -1;
}
memcpy(&videoCodec, &video_codec_info_, sizeof(VideoCodec));
return 0;
}
int32_t VideoFilePlayerImpl::TimeUntilNextVideoFrame()
{
if(_fileFormat != kFileFormatAviFile)
{
return -1;
}
if(!_fileModule.IsPlaying())
{
return -1;
}
if(_encodedData.payloadSize <= 0)
{
// Read next frame from file.
CriticalSectionScoped lock( _critSec);
if(_fileFormat == kFileFormatAviFile)
{
// Get next video frame
size_t encodedBufferLengthInBytes = _encodedData.bufferSize;
if(_fileModule.PlayoutAVIVideoData(
reinterpret_cast< int8_t*>(_encodedData.payloadData),
encodedBufferLengthInBytes) != 0)
{
LOG(LS_WARNING) << "Error reading video data.";
return -1;
}
_encodedData.payloadSize = encodedBufferLengthInBytes;
_encodedData.codec = video_codec_info_.codecType;
_numberOfFramesRead++;
if(_accumulatedRenderTimeMs == 0)
{
_startTime = TickTime::Now();
// This if-statement should only trigger once.
_accumulatedRenderTimeMs = 1;
} else {
// A full second's worth of frames has been read.
if(_numberOfFramesRead % video_codec_info_.maxFramerate == 0)
{
// Frame rate is in frames per second. Frame length is
// calculated as an integer division which means it may
// be rounded down. Compensate for this every second.
uint32_t rest = 1000%_frameLengthMS;
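// E.g. at 30 fps _frameLengthMS is 33 and rest is 1000 % 33 = 10, so each
// second's 30 frames account for 30 * 33 + 10 = 1000 ms.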
_accumulatedRenderTimeMs += rest;
}
_accumulatedRenderTimeMs += _frameLengthMS;
}
}
}
int64_t timeToNextFrame;
if(_videoOnly)
{
timeToNextFrame = _accumulatedRenderTimeMs -
(TickTime::Now() - _startTime).Milliseconds();
} else {
// Synchronize with the audio stream instead of system clock.
timeToNextFrame = _accumulatedRenderTimeMs - _decodedLengthInMS;
}
if(timeToNextFrame < 0)
{
return 0;
} else if(timeToNextFrame > 0x0fffffff)
{
// Wraparound or audio stream has gone too far ahead of the video stream.
return -1;
}
return static_cast<int32_t>(timeToNextFrame);
}
int32_t VideoFilePlayerImpl::SetUpVideoDecoder()
{
if (_fileModule.VideoCodecInst(video_codec_info_) != 0)
{
LOG(LS_WARNING) << "SetVideoDecoder() failed to retrieve codec info of "
<< "file data.";
return -1;
}
int32_t useNumberOfCores = 1;
if (video_decoder_->SetDecodeCodec(video_codec_info_, useNumberOfCores) !=
0) {
LOG(LS_WARNING) << "SetUpVideoDecoder() codec "
<< video_codec_info_.plName << " not supported.";
return -1;
}
_frameLengthMS = 1000/video_codec_info_.maxFramerate;
// Size of unencoded data (I420) should be the largest possible frame size
// in a file.
const size_t KReadBufferSize = 3 * video_codec_info_.width *
video_codec_info_.height / 2;
_encodedData.VerifyAndAllocate(KReadBufferSize);
_encodedData.encodedHeight = video_codec_info_.height;
_encodedData.encodedWidth = video_codec_info_.width;
_encodedData.payloadType = video_codec_info_.plType;
_encodedData.timeStamp = 0;
return 0;
}
#endif // WEBRTC_MODULE_UTILITY_VIDEO
} // namespace webrtc


@@ -23,9 +23,6 @@
#include "webrtc/typedefs.h"
namespace webrtc {
class VideoCoder;
class FrameScaler;
class FilePlayerImpl : public FilePlayer
{
public:
@@ -78,45 +75,5 @@ private:
Resampler _resampler;
float _scaling;
};
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
class VideoFilePlayerImpl: public FilePlayerImpl
{
public:
VideoFilePlayerImpl(uint32_t instanceID, FileFormats fileFormat);
~VideoFilePlayerImpl();
// FilePlayer functions.
virtual int32_t TimeUntilNextVideoFrame();
virtual int32_t StartPlayingVideoFile(const char* fileName,
bool loop,
bool videoOnly);
virtual int32_t StopPlayingFile();
virtual int32_t video_codec_info(VideoCodec& videoCodec) const;
virtual int32_t GetVideoFromFile(I420VideoFrame& videoFrame);
virtual int32_t GetVideoFromFile(I420VideoFrame& videoFrame,
const uint32_t outWidth,
const uint32_t outHeight);
private:
int32_t SetUpVideoDecoder();
rtc::scoped_ptr<VideoCoder> video_decoder_;
VideoCodec video_codec_info_;
int32_t _decodedVideoFrames;
EncodedVideoData& _encodedData;
FrameScaler& _frameScaler;
CriticalSectionWrapper* _critSec;
TickTime _startTime;
int64_t _accumulatedRenderTimeMs;
uint32_t _frameLengthMS;
int32_t _numberOfFramesRead;
bool _videoOnly;
};
#endif //WEBRTC_MODULE_UTILITY_VIDEO
} // namespace webrtc
#endif // WEBRTC_MODULES_UTILITY_SOURCE_FILE_PLAYER_IMPL_H_


@@ -14,36 +14,11 @@
#include "webrtc/modules/utility/source/file_recorder_impl.h"
#include "webrtc/system_wrappers/interface/logging.h"
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "webrtc/modules/utility/source/frame_scaler.h"
#include "webrtc/modules/utility/source/video_coder.h"
#include "webrtc/modules/utility/source/video_frames_queue.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#endif
namespace webrtc {
FileRecorder* FileRecorder::CreateFileRecorder(uint32_t instanceID,
FileFormats fileFormat)
{
switch(fileFormat)
{
case kFileFormatWavFile:
case kFileFormatCompressedFile:
case kFileFormatPreencodedFile:
case kFileFormatPcm16kHzFile:
case kFileFormatPcm8kHzFile:
case kFileFormatPcm32kHzFile:
return new FileRecorderImpl(instanceID, fileFormat);
case kFileFormatAviFile:
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
return new AviRecorder(instanceID, fileFormat);
#else
assert(false);
return NULL;
#endif
}
assert(false);
return NULL;
return new FileRecorderImpl(instanceID, fileFormat);
}
void FileRecorder::DestroyFileRecorder(FileRecorder* recorder)
@@ -98,14 +73,9 @@ int32_t FileRecorderImpl::StartRecordingAudioFile(
_amrFormat = amrFormat;
int32_t retVal = 0;
if(_fileFormat != kFileFormatAviFile)
{
// AVI files should be started using StartRecordingVideoFile(..) all
// other formats should use this API.
retVal =_moduleFile->StartRecordingAudioFile(fileName, _fileFormat,
codecInst,
notificationTimeMs);
}
retVal =_moduleFile->StartRecordingAudioFile(fileName, _fileFormat,
codecInst,
notificationTimeMs);
if( retVal == 0)
{
@@ -314,410 +284,4 @@ int32_t FileRecorderImpl::WriteEncodedAudioData(
{
return _moduleFile->IncomingAudioData(audioBuffer, bufferLength);
}
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
AviRecorder::AviRecorder(uint32_t instanceID, FileFormats fileFormat)
: FileRecorderImpl(instanceID, fileFormat),
_videoOnly(false),
_thread( 0),
_timeEvent(*EventWrapper::Create()),
_critSec(CriticalSectionWrapper::CreateCriticalSection()),
_writtenVideoFramesCounter(0),
_writtenAudioMS(0),
_writtenVideoMS(0)
{
_videoEncoder = new VideoCoder();
_frameScaler = new FrameScaler();
_videoFramesQueue = new VideoFramesQueue();
_thread = ThreadWrapper::CreateThread(Run, this, kNormalPriority,
"AviRecorder()");
}
AviRecorder::~AviRecorder( )
{
StopRecording( );
delete _videoEncoder;
delete _frameScaler;
delete _videoFramesQueue;
delete _thread;
delete &_timeEvent;
delete _critSec;
}
int32_t AviRecorder::StartRecordingVideoFile(
const char* fileName,
const CodecInst& audioCodecInst,
const VideoCodec& videoCodecInst,
ACMAMRPackingFormat amrFormat,
bool videoOnly)
{
_firstAudioFrameReceived = false;
_videoCodecInst = videoCodecInst;
_videoOnly = videoOnly;
if(_moduleFile->StartRecordingVideoFile(fileName, _fileFormat,
audioCodecInst, videoCodecInst,
videoOnly) != 0)
{
return -1;
}
if(!videoOnly)
{
if(FileRecorderImpl::StartRecordingAudioFile(fileName,audioCodecInst, 0,
amrFormat) !=0)
{
StopRecording();
return -1;
}
}
if( SetUpVideoEncoder() != 0)
{
StopRecording();
return -1;
}
if(_videoOnly)
{
// Writing to AVI file is non-blocking.
// Start non-blocking timer if video only. If recording both video and
// audio let the pushing of audio frames be the timer.
_timeEvent.StartTimer(true, 1000 / _videoCodecInst.maxFramerate);
}
StartThread();
return 0;
}
int32_t AviRecorder::StopRecording()
{
_timeEvent.StopTimer();
StopThread();
return FileRecorderImpl::StopRecording();
}
size_t AviRecorder::CalcI420FrameSize( ) const
{
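// I420 layout: a Y plane of width * height bytes plus U and V planes of
// width * height / 4 bytes each, e.g. 640x480 -> 460800 bytes.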
return 3 * _videoCodecInst.width * _videoCodecInst.height / 2;
}
int32_t AviRecorder::SetUpVideoEncoder()
{
// Size of unencoded data (I420) should be the largest possible frame size
// in a file.
_videoMaxPayloadSize = CalcI420FrameSize();
_videoEncodedData.VerifyAndAllocate(_videoMaxPayloadSize);
_videoCodecInst.plType = _videoEncoder->DefaultPayloadType(
_videoCodecInst.plName);
int32_t useNumberOfCores = 1;
// Set the max payload size to 16000. This means that the codec will try to
// create slices that will fit in 16000-byte packets. However, the
// Encode() call will still generate one full frame.
if(_videoEncoder->SetEncodeCodec(_videoCodecInst, useNumberOfCores,
16000))
{
return -1;
}
return 0;
}
int32_t AviRecorder::RecordVideoToFile(const I420VideoFrame& videoFrame)
{
CriticalSectionScoped lock(_critSec);
if(!IsRecording() || videoFrame.IsZeroSize())
{
return -1;
}
// The frame is written to file in AviRecorder::Process().
int32_t retVal = _videoFramesQueue->AddFrame(videoFrame);
if(retVal != 0)
{
StopRecording();
}
return retVal;
}
bool AviRecorder::StartThread()
{
unsigned int id;
if( _thread == 0)
{
return false;
}
return _thread->Start(id);
}
bool AviRecorder::StopThread()
{
_critSec->Enter();
if(_thread)
{
ThreadWrapper* thread = _thread;
_thread = NULL;
_timeEvent.Set();
_critSec->Leave();
if(thread->Stop())
{
delete thread;
} else {
return false;
}
} else {
_critSec->Leave();
}
return true;
}
bool AviRecorder::Run( ThreadObj threadObj)
{
return static_cast<AviRecorder*>( threadObj)->Process();
}
int32_t AviRecorder::ProcessAudio()
{
if (_writtenVideoFramesCounter == 0)
{
// Get the most recent frame that is due for writing to file. Since
// frames are unencoded it's safe to throw away frames if necessary
// for synchronizing audio and video.
I420VideoFrame* frameToProcess = _videoFramesQueue->FrameToRecord();
if(frameToProcess)
{
// Synchronize audio to the current frame by throwing away audio
// samples with an older timestamp than the video frame's.
size_t numberOfAudioElements = _audioFramesToWrite.size();
for (size_t i = 0; i < numberOfAudioElements; ++i)
{
AudioFrameFileInfo* frameInfo = _audioFramesToWrite.front();
if(TickTime::TicksToMilliseconds(
frameInfo->_playoutTS.Ticks()) <
frameToProcess->render_time_ms())
{
delete frameInfo;
_audioFramesToWrite.pop_front();
} else
{
break;
}
}
}
}
// Write all audio up to current timestamp.
int32_t error = 0;
size_t numberOfAudioElements = _audioFramesToWrite.size();
for (size_t i = 0; i < numberOfAudioElements; ++i)
{
AudioFrameFileInfo* frameInfo = _audioFramesToWrite.front();
if((TickTime::Now() - frameInfo->_playoutTS).Milliseconds() > 0)
{
_moduleFile->IncomingAudioData(frameInfo->_audioData,
frameInfo->_audioSize);
_writtenAudioMS += frameInfo->_audioMS;
delete frameInfo;
_audioFramesToWrite.pop_front();
} else {
break;
}
}
return error;
}
bool AviRecorder::Process()
{
switch(_timeEvent.Wait(500))
{
case kEventSignaled:
if(_thread == NULL)
{
return false;
}
break;
case kEventError:
return false;
case kEventTimeout:
// No events triggered. No work to do.
return true;
}
CriticalSectionScoped lock( _critSec);
// Get the most recent frame to write to file (if any) and synchronize it
// with the audio stream (if any). The video is synchronized based on its
// render timestamp (i.e. VideoFrame::RenderTimeMS()).
I420VideoFrame* frameToProcess = _videoFramesQueue->FrameToRecord();
if( frameToProcess == NULL)
{
return true;
}
int32_t error = 0;
if(!_videoOnly)
{
if(!_firstAudioFrameReceived)
{
// Video and audio can only be synchronized if both have been
// received.
return true;
}
error = ProcessAudio();
while (_writtenAudioMS > _writtenVideoMS)
{
error = EncodeAndWriteVideoToFile( *frameToProcess);
if( error != 0)
{
LOG(LS_ERROR) << "AviRecorder::Process() error writing to "
<< "file.";
break;
} else {
uint32_t frameLengthMS = 1000 /
_videoCodecInst.maxFramerate;
_writtenVideoFramesCounter++;
_writtenVideoMS += frameLengthMS;
// A full second's worth of frames has been written.
if(_writtenVideoFramesCounter % _videoCodecInst.maxFramerate == 0)
{
// Frame rate is in frames per second. Frame length is
// calculated as an integer division, which means it may
// be rounded down. Compensate for this every second.
uint32_t rest = 1000 % frameLengthMS;
_writtenVideoMS += rest;
}
}
}
} else {
// Frame length is calculated as an integer division, which means it
// may be rounded down. This introduces drift. Once a full frame's
// worth of drift has accumulated, skip writing one frame so that the
// drift is compensated for.
uint32_t frameLengthMS = 1000 / _videoCodecInst.maxFramerate;
uint32_t restMS = 1000 % frameLengthMS;
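// Worked example at 30 fps: frameLengthMS = 1000 / 30 = 33 and
// restMS = 1000 % 33 = 10, i.e. 10 ms of drift accumulates per second
// of video. A full frame (33 ms) of drift therefore builds up over
// roughly 3.3 seconds, or 99 written frames, matching
// frameSkip = (30 * 33) / 10 = 99 below.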
_writtenVideoFramesCounter++;
// restMS == 0 means the frame rate divides 1000 ms evenly, so there is
// no rounding loss, no drift to compensate for, and the frameSkip
// division below would otherwise divide by zero.
if(restMS > 0)
{
uint32_t frameSkip = (_videoCodecInst.maxFramerate *
frameLengthMS) / restMS;
if(_writtenVideoFramesCounter % frameSkip == 0)
{
_writtenVideoMS += frameLengthMS;
return true;
}
}
error = EncodeAndWriteVideoToFile( *frameToProcess);
if(error != 0)
{
LOG(LS_ERROR) << "AviRecorder::Process() error writing to file.";
} else {
_writtenVideoMS += frameLengthMS;
}
}
return error == 0;
}
int32_t AviRecorder::EncodeAndWriteVideoToFile(I420VideoFrame& videoFrame)
{
if (!IsRecording() || videoFrame.IsZeroSize())
{
return -1;
}
if(_frameScaler->ResizeFrameIfNeeded(&videoFrame, _videoCodecInst.width,
_videoCodecInst.height) != 0)
{
return -1;
}
_videoEncodedData.payloadSize = 0;
if( STR_CASE_CMP(_videoCodecInst.plName, "I420") == 0)
{
size_t length =
CalcBufferSize(kI420, videoFrame.width(), videoFrame.height());
_videoEncodedData.VerifyAndAllocate(length);
// I420 is raw data. No encoding needed (each sample is represented by
// 1 byte so there is no difference depending on endianness).
int ret_length = ExtractBuffer(videoFrame, length,
_videoEncodedData.payloadData);
if (ret_length < 0)
return -1;
_videoEncodedData.payloadSize = ret_length;
_videoEncodedData.frameType = kVideoFrameKey;
} else {
if(_videoEncoder->Encode(videoFrame, _videoEncodedData) != 0)
{
return -1;
}
}
if(_videoEncodedData.payloadSize > 0)
{
if(_moduleFile->IncomingAVIVideoData(
(int8_t*)(_videoEncodedData.payloadData),
_videoEncodedData.payloadSize))
{
LOG(LS_ERROR) << "Error writing AVI file.";
return -1;
}
} else {
LOG(LS_ERROR) << "AviRecorder::EncodeAndWriteVideoToFile() frame "
<< "dropped by encoder; bitrate likely too low.";
}
return 0;
}
// Store audio frame in the _audioFramesToWrite buffer. The writing to file
// happens in AviRecorder::Process().
int32_t AviRecorder::WriteEncodedAudioData(
const int8_t* audioBuffer,
size_t bufferLength,
uint16_t millisecondsOfData,
const TickTime* playoutTS)
{
CriticalSectionScoped lock(_critSec);
if (!IsRecording())
{
return -1;
}
if (bufferLength > MAX_AUDIO_BUFFER_IN_BYTES)
{
return -1;
}
if (_videoOnly)
{
return -1;
}
if (_audioFramesToWrite.size() > kMaxAudioBufferQueueLength)
{
StopRecording();
return -1;
}
_firstAudioFrameReceived = true;
if(playoutTS)
{
_audioFramesToWrite.push_back(new AudioFrameFileInfo(audioBuffer,
bufferLength,
millisecondsOfData,
*playoutTS));
} else {
_audioFramesToWrite.push_back(new AudioFrameFileInfo(audioBuffer,
bufferLength,
millisecondsOfData,
TickTime::Now()));
}
_timeEvent.Set();
return 0;
}
#endif // WEBRTC_MODULE_UTILITY_VIDEO
} // namespace webrtc
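For reference, the pacing that the removed Process() implemented reduces to a catch-up rule: audio is flushed up to the current time, after which video frames are written until the written video duration has caught up with the written audio duration. A minimal sketch of that rule (names here are illustrative, not the original API):

    #include <stdint.h>

    // Illustrative stand-in for EncodeAndWriteVideoToFile(); returns 0 on
    // success.
    int WriteOneVideoFrame();

    // Write video frames until the written video duration has caught up
    // with the written audio duration, as the removed Process() loop did.
    void CatchUpVideoToAudio(int64_t writtenAudioMs,
                             int64_t* writtenVideoMs,
                             uint32_t maxFramerate) {
      const uint32_t frameLengthMs = 1000 / maxFramerate;
      while (writtenAudioMs > *writtenVideoMs) {
        if (WriteOneVideoFrame() != 0)
          break;  // Stop on a write error, as the recorder did.
        *writtenVideoMs += frameLengthMs;
      }
    }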

View File

@ -30,12 +30,6 @@
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/typedefs.h"
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "webrtc/modules/utility/source/frame_scaler.h"
#include "webrtc/modules/utility/source/video_coder.h"
#include "webrtc/modules/utility/source/video_frames_queue.h"
#endif
namespace webrtc {
// The largest decoded frame size in samples (60ms with 32kHz sample rate).
enum { MAX_AUDIO_BUFFER_IN_SAMPLES = 60*32};
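// 60 ms at 32 kHz is 60 * 32 = 1920 samples, i.e. 3840 bytes with 16-bit
// samples.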
@ -104,90 +98,5 @@ private:
AudioCoder _audioEncoder;
Resampler _audioResampler;
};
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
class AudioFrameFileInfo
{
public:
AudioFrameFileInfo(const int8_t* audioData,
const size_t audioSize,
const uint16_t audioMS,
const TickTime& playoutTS)
: _audioData(), _audioSize(audioSize), _audioMS(audioMS),
_playoutTS(playoutTS)
{
if(audioSize > MAX_AUDIO_BUFFER_IN_BYTES)
{
assert(false);
_audioSize = 0;
return;
}
memcpy(_audioData, audioData, audioSize);
}
// TODO (hellner): either turn into a struct or provide get/set functions.
int8_t _audioData[MAX_AUDIO_BUFFER_IN_BYTES];
size_t _audioSize;
uint16_t _audioMS;
TickTime _playoutTS;
};
class AviRecorder : public FileRecorderImpl
{
public:
AviRecorder(uint32_t instanceID, FileFormats fileFormat);
virtual ~AviRecorder();
// FileRecorder functions.
virtual int32_t StartRecordingVideoFile(
const char* fileName,
const CodecInst& audioCodecInst,
const VideoCodec& videoCodecInst,
ACMAMRPackingFormat amrFormat = AMRFileStorage,
bool videoOnly = false);
virtual int32_t StopRecording();
virtual int32_t RecordVideoToFile(const I420VideoFrame& videoFrame);
protected:
virtual int32_t WriteEncodedAudioData(
const int8_t* audioBuffer,
size_t bufferLength,
uint16_t millisecondsOfData,
const TickTime* playoutTS);
private:
typedef std::list<AudioFrameFileInfo*> AudioInfoList;
static bool Run(ThreadObj threadObj);
bool Process();
bool StartThread();
bool StopThread();
int32_t EncodeAndWriteVideoToFile(I420VideoFrame& videoFrame);
int32_t ProcessAudio();
size_t CalcI420FrameSize() const;
int32_t SetUpVideoEncoder();
VideoCodec _videoCodecInst;
bool _videoOnly;
AudioInfoList _audioFramesToWrite;
bool _firstAudioFrameReceived;
VideoFramesQueue* _videoFramesQueue;
FrameScaler* _frameScaler;
VideoCoder* _videoEncoder;
size_t _videoMaxPayloadSize;
EncodedVideoData _videoEncodedData;
ThreadWrapper* _thread;
EventWrapper& _timeEvent;
CriticalSectionWrapper* _critSec;
int64_t _writtenVideoFramesCounter;
int64_t _writtenAudioMS;
int64_t _writtenVideoMS;
};
#endif // WEBRTC_MODULE_UTILITY_VIDEO
} // namespace webrtc
#endif // WEBRTC_MODULES_UTILITY_SOURCE_FILE_RECORDER_IMPL_H_

View File

@ -1,51 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/utility/source/frame_scaler.h"
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "webrtc/common_video/libyuv/include/scaler.h"
namespace webrtc {
FrameScaler::FrameScaler()
: scaler_(new Scaler()),
scaled_frame_() {}
FrameScaler::~FrameScaler() {}
int FrameScaler::ResizeFrameIfNeeded(I420VideoFrame* video_frame,
int out_width,
int out_height) {
if (video_frame->IsZeroSize()) {
return -1;
}
if ((video_frame->width() != out_width) ||
(video_frame->height() != out_height)) {
// Set correct scale settings and scale |video_frame| into |scaled_frame_|.
scaler_->Set(video_frame->width(), video_frame->height(), out_width,
out_height, kI420, kI420, kScaleBox);
int ret = scaler_->Scale(*video_frame, &scaled_frame_);
if (ret < 0) {
return ret;
}
scaled_frame_.set_render_time_ms(video_frame->render_time_ms());
scaled_frame_.set_timestamp(video_frame->timestamp());
video_frame->SwapFrame(&scaled_frame_);
}
return 0;
}
} // namespace webrtc
#endif // WEBRTC_MODULE_UTILITY_VIDEO
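The removed scaler's contract was narrow: it rescaled in place only when the frame size differed from the requested output, preserving the render time and timestamp. A hypothetical caller might have looked like this (sketch only; ScaleToQvga is illustrative, not part of the original code):

    #include "webrtc/modules/utility/source/frame_scaler.h"

    // Scale |frame| to QVGA unless it already has that size. Returns 0 on
    // success and a negative value if the frame is empty or scaling fails.
    int ScaleToQvga(webrtc::I420VideoFrame* frame) {
      webrtc::FrameScaler scaler;
      return scaler.ResizeFrameIfNeeded(frame, 320, 240);
    }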

View File

@ -1,48 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file implements a class that can be used for scaling frames.
#ifndef WEBRTC_MODULES_UTILITY_SOURCE_FRAME_SCALER_H_
#define WEBRTC_MODULES_UTILITY_SOURCE_FRAME_SCALER_H_
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_video/interface/i420_video_frame.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/interface/module_common_types.h"
namespace webrtc {
class Scaler;
class VideoFrame;
class FrameScaler {
public:
FrameScaler();
~FrameScaler();
// Re-sizes |video_frame| so that it has the width |out_width| and height
// |out_height|.
int ResizeFrameIfNeeded(I420VideoFrame* video_frame,
int out_width,
int out_height);
private:
rtc::scoped_ptr<Scaler> scaler_;
I420VideoFrame scaled_frame_;
};
} // namespace webrtc
#endif // WEBRTC_MODULE_UTILITY_VIDEO
#endif // WEBRTC_MODULES_UTILITY_SOURCE_FRAME_SCALER_H_

View File

@ -1,132 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "webrtc/modules/utility/source/video_coder.h"
#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
namespace webrtc {
VideoCoder::VideoCoder()
: _vcm(VideoCodingModule::Create(nullptr)), _decodedVideo(0) {
_vcm->InitializeSender();
_vcm->InitializeReceiver();
_vcm->RegisterTransportCallback(this);
_vcm->RegisterReceiveCallback(this);
}
VideoCoder::~VideoCoder()
{
VideoCodingModule::Destroy(_vcm);
}
int32_t VideoCoder::SetEncodeCodec(VideoCodec& videoCodecInst,
uint32_t numberOfCores,
uint32_t maxPayloadSize)
{
if(_vcm->RegisterSendCodec(&videoCodecInst, numberOfCores,
maxPayloadSize) != VCM_OK)
{
return -1;
}
return 0;
}
int32_t VideoCoder::SetDecodeCodec(VideoCodec& videoCodecInst,
int32_t numberOfCores)
{
if (videoCodecInst.plType == 0)
{
int8_t plType = DefaultPayloadType(videoCodecInst.plName);
if (plType == -1)
{
return -1;
}
videoCodecInst.plType = plType;
}
if(_vcm->RegisterReceiveCodec(&videoCodecInst, numberOfCores) != VCM_OK)
{
return -1;
}
return 0;
}
int32_t VideoCoder::Decode(I420VideoFrame& decodedVideo,
const EncodedVideoData& encodedData)
{
decodedVideo.ResetSize();
if(encodedData.payloadSize <= 0)
{
return -1;
}
_decodedVideo = &decodedVideo;
return 0;
}
int32_t VideoCoder::Encode(const I420VideoFrame& videoFrame,
EncodedVideoData& videoEncodedData)
{
// The AddVideoFrame(..) call will (indirectly) call SendData(). Store a
// pointer to videoEncodedData so that SendData() can fill it in.
_videoEncodedData = &videoEncodedData;
videoEncodedData.payloadSize = 0;
if(_vcm->AddVideoFrame(videoFrame) != VCM_OK)
{
return -1;
}
return 0;
}
int8_t VideoCoder::DefaultPayloadType(const char* plName)
{
VideoCodec tmpCodec;
int32_t numberOfCodecs = _vcm->NumberOfCodecs();
for (uint8_t i = 0; i < numberOfCodecs; i++)
{
_vcm->Codec(i, &tmpCodec);
if(strncmp(tmpCodec.plName, plName, kPayloadNameSize) == 0)
{
return tmpCodec.plType;
}
}
return -1;
}
int32_t VideoCoder::FrameToRender(I420VideoFrame& videoFrame)
{
return _decodedVideo->CopyFrame(videoFrame);
}
int32_t VideoCoder::SendData(
const uint8_t payloadType,
const EncodedImage& encoded_image,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* /*rtpVideoHdr*/)
{
// Store the data in _videoEncodedData, which points to the
// videoEncodedData argument passed to Encode(..).
_videoEncodedData->VerifyAndAllocate(encoded_image._length);
_videoEncodedData->frameType =
VCMEncodedFrame::ConvertFrameType(encoded_image._frameType);
_videoEncodedData->payloadType = payloadType;
_videoEncodedData->timeStamp = encoded_image._timeStamp;
_videoEncodedData->fragmentationHeader.CopyFrom(fragmentationHeader);
memcpy(_videoEncodedData->payloadData, encoded_image._buffer,
sizeof(uint8_t) * encoded_image._length);
_videoEncodedData->payloadSize = encoded_image._length;
return 0;
}
} // namespace webrtc
#endif // WEBRTC_MODULE_UTILITY_VIDEO
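Encoding through this wrapper was effectively synchronous: Encode() hands the frame to the VCM, which calls back into SendData() so that the caller's EncodedVideoData is filled in by the time Encode() returns. A sketch of the call sequence (illustrative; assumes |codec| and |frame| are already configured and filled, and omits error logging):

    #include "webrtc/modules/utility/source/video_coder.h"

    int EncodeOneFrame(webrtc::VideoCodec& codec,
                       const webrtc::I420VideoFrame& frame,
                       webrtc::EncodedVideoData* out) {
      webrtc::VideoCoder coder;
      int8_t plType = coder.DefaultPayloadType(codec.plName);
      if (plType == -1)
        return -1;
      codec.plType = plType;
      // One core; 16000-byte max payload, as the AVI recorder used.
      if (coder.SetEncodeCodec(codec, 1, 16000) != 0)
        return -1;
      return coder.Encode(frame, *out);  // Fills |*out| via SendData().
    }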

View File

@ -1,62 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_UTILITY_SOURCE_VIDEO_CODER_H_
#define WEBRTC_MODULES_UTILITY_SOURCE_VIDEO_CODER_H_
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/video_coding/main/interface/video_coding.h"
namespace webrtc {
class VideoCoder : public VCMPacketizationCallback, public VCMReceiveCallback
{
public:
VideoCoder();
~VideoCoder();
int32_t SetEncodeCodec(VideoCodec& videoCodecInst,
uint32_t numberOfCores,
uint32_t maxPayloadSize);
// Select the codec that should be used for decoding. videoCodecInst.plType
// will be set to the codec's default payload type.
int32_t SetDecodeCodec(VideoCodec& videoCodecInst, int32_t numberOfCores);
int32_t Decode(I420VideoFrame& decodedVideo,
const EncodedVideoData& encodedData);
int32_t Encode(const I420VideoFrame& videoFrame,
EncodedVideoData& videoEncodedData);
int8_t DefaultPayloadType(const char* plName);
private:
// VCMReceiveCallback function.
// Note: called by VideoCodingModule when decoding finished.
virtual int32_t FrameToRender(I420VideoFrame& videoFrame) OVERRIDE;
// VCMPacketizationCallback function.
// Note: called by VideoCodingModule when encoding finished.
virtual int32_t SendData(
uint8_t /*payloadType*/,
const EncodedImage& encoded_image,
const RTPFragmentationHeader& /* fragmentationHeader*/,
const RTPVideoHeader* rtpTypeHdr) OVERRIDE;
VideoCodingModule* _vcm;
I420VideoFrame* _decodedVideo;
EncodedVideoData* _videoEncodedData;
};
} // namespace webrtc
#endif // WEBRTC_MODULE_UTILITY_VIDEO
#endif // WEBRTC_MODULES_UTILITY_SOURCE_VIDEO_CODER_H_

View File

@ -1,112 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/utility/source/video_frames_queue.h"
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include <assert.h>
#include "webrtc/common_video/interface/texture_video_frame.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/logging.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
namespace webrtc {
VideoFramesQueue::VideoFramesQueue()
: _renderDelayMs(10)
{
}
VideoFramesQueue::~VideoFramesQueue() {
for (FrameList::iterator iter = _incomingFrames.begin();
iter != _incomingFrames.end(); ++iter) {
delete *iter;
}
for (FrameList::iterator iter = _emptyFrames.begin();
iter != _emptyFrames.end(); ++iter) {
delete *iter;
}
}
int32_t VideoFramesQueue::AddFrame(const I420VideoFrame& newFrame) {
if (newFrame.native_handle() != NULL) {
_incomingFrames.push_back(newFrame.CloneFrame());
return 0;
}
I420VideoFrame* ptrFrameToAdd = NULL;
// Try to re-use a VideoFrame. Only allocate new memory if it is necessary.
if (!_emptyFrames.empty()) {
ptrFrameToAdd = _emptyFrames.front();
_emptyFrames.pop_front();
}
if (!ptrFrameToAdd) {
if (_emptyFrames.size() + _incomingFrames.size() >
KMaxNumberOfFrames) {
LOG(LS_WARNING) << "Too many frames, limit: " << KMaxNumberOfFrames;
return -1;
}
ptrFrameToAdd = new I420VideoFrame();
}
ptrFrameToAdd->CopyFrame(newFrame);
_incomingFrames.push_back(ptrFrameToAdd);
return 0;
}
// Find the most recent frame whose VideoFrame::RenderTimeMs() is not later
// than the current time in ms (TickTime::MillisecondTimestamp()) plus the
// render delay. Note that _incomingFrames is sorted so that the oldest
// frame is first. Recycle all frames that are older than the returned one.
I420VideoFrame* VideoFramesQueue::FrameToRecord() {
I420VideoFrame* ptrRenderFrame = NULL;
for (FrameList::iterator iter = _incomingFrames.begin();
iter != _incomingFrames.end(); ++iter) {
I420VideoFrame* ptrOldestFrameInList = *iter;
if (ptrOldestFrameInList->render_time_ms() <=
TickTime::MillisecondTimestamp() + _renderDelayMs) {
// List is traversed beginning to end. If ptrRenderFrame is not
// NULL it must be the first, and thus oldest, VideoFrame in the
// queue. It can be recycled.
if (ptrRenderFrame) {
ReturnFrame(ptrRenderFrame);
_incomingFrames.pop_front();
}
ptrRenderFrame = ptrOldestFrameInList;
} else {
// All VideoFrames following this one will be even newer. No match
// will be found.
break;
}
}
return ptrRenderFrame;
}
int32_t VideoFramesQueue::ReturnFrame(I420VideoFrame* ptrOldFrame) {
// No need to reuse texture frames because they do not allocate memory.
if (ptrOldFrame->native_handle() == NULL) {
ptrOldFrame->set_timestamp(0);
ptrOldFrame->set_width(0);
ptrOldFrame->set_height(0);
ptrOldFrame->set_render_time_ms(0);
ptrOldFrame->ResetSize();
_emptyFrames.push_back(ptrOldFrame);
} else {
delete ptrOldFrame;
}
return 0;
}
int32_t VideoFramesQueue::SetRenderDelay(uint32_t renderDelay) {
_renderDelayMs = renderDelay;
return 0;
}
} // namespace webrtc
#endif // WEBRTC_MODULE_UTILITY_VIDEO
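Taken together, the queue implemented a recycling buffer: the capture side adds copies of incoming frames, and the recording side asks for the newest frame that is due, with older due frames recycled internally. A usage sketch (illustrative; assumes |captured| is a filled frame and that both calls happen on the same thread):

    #include "webrtc/modules/utility/source/video_frames_queue.h"

    void RecordDueFrame(webrtc::VideoFramesQueue* queue,
                        const webrtc::I420VideoFrame& captured) {
      queue->AddFrame(captured);  // The queue copies (or clones) the frame.
      // Newest frame whose render time is due, or NULL if none is due yet.
      webrtc::I420VideoFrame* due = queue->FrameToRecord();
      if (due != NULL) {
        // Write |due| to file here; the queue retains ownership of it.
      }
    }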

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_UTILITY_SOURCE_VIDEO_FRAMES_QUEUE_H_
#define WEBRTC_MODULES_UTILITY_SOURCE_VIDEO_FRAMES_QUEUE_H_
#ifdef WEBRTC_MODULE_UTILITY_VIDEO
#include <list>
#include "webrtc/common_video/interface/i420_video_frame.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class VideoFramesQueue {
public:
VideoFramesQueue();
~VideoFramesQueue();
// Put newFrame (last) in the queue.
int32_t AddFrame(const I420VideoFrame& newFrame);
// Return the most current frame, i.e. the frame with the highest
// VideoFrame::RenderTimeMs() that is not later than
// TickTime::MillisecondTimestamp() plus the render delay.
I420VideoFrame* FrameToRecord();
// Set the render delay estimate to renderDelay ms.
int32_t SetRenderDelay(uint32_t renderDelay);
protected:
// Make ptrOldFrame available for re-use. I.e. put it in the empty frames
// queue.
int32_t ReturnFrame(I420VideoFrame* ptrOldFrame);
private:
typedef std::list<I420VideoFrame*> FrameList;
// Don't allow the buffer to expand beyond KMaxNumberOfFrames VideoFrames.
// 300 frames correspond to 10 seconds worth of frames at 30 fps.
enum {KMaxNumberOfFrames = 300};
// List of VideoFrame pointers. The list is sorted in the order of when the
// VideoFrame was inserted into the list. The first VideoFrame in the list
// was inserted first.
FrameList _incomingFrames;
// A list of frames that are free to be re-used.
FrameList _emptyFrames;
// Estimated render delay.
uint32_t _renderDelayMs;
};
} // namespace webrtc
#endif // WEBRTC_MODULE_UTILITY_VIDEO
#endif // WEBRTC_MODULES_UTILITY_SOURCE_VIDEO_FRAMES_QUEUE_H_

View File

@ -37,18 +37,6 @@
'source/rtp_dump_impl.cc',
'source/rtp_dump_impl.h',
],
'conditions': [
['enable_video==1', {
'dependencies': [
'webrtc_video_coding',
],
'sources': [
'source/frame_scaler.cc',
'source/video_coder.cc',
'source/video_frames_queue.cc',
],
}],
],
},
], # targets
}