Reland: Making WebRTC able to play and record audio to files for tests.

By specifying the define WEBRTC_DUMMY_FILE_DEVICES (which is similar to
WEBRTC_DUMMY_AUDIO_BUILD), an application can tell WebRTC to play out
audio to a file and to feed audio in from a file. We want this so that
we can better test WebRTC-using applications, by recording what the
audio stack outputs and by feeding in known audio for quality tests.
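To illustrate the intended use, here is a minimal sketch (not part of this
change; the helper name and file paths are placeholders, and both files are
48 kHz stereo 16-bit raw PCM). A test binary built with
WEBRTC_DUMMY_FILE_DEVICES would point WebRTC at its files before the audio
device module is created:

  #include "webrtc/modules/audio_device/dummy/file_audio_device_factory.h"

  void SetUpFileAudioForTest() {
    // Must run before WebRTC creates its audio device module, since the
    // factory reads these static filenames when constructing the device.
    webrtc::FileAudioDeviceFactory::SetFilenamesToUse(
        "/tmp/reference_input_48k_stereo.pcm",   // fed to WebRTC as mic input
        "/tmp/recorded_output_48k_stereo.pcm");  // playout is written here
  }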

R=henrika@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/19729004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6403 4adac7df-926f-26a2-2b94-8c16560cd09d
phoglund@webrtc.org 2014-06-11 14:12:04 +00:00
parent ab85187e63
commit 8454ad1b3e
7 changed files with 898 additions and 2 deletions


@@ -25,7 +25,8 @@ LOCAL_SRC_FILES := \
android/audio_device_android_opensles.cc \
android/audio_device_utility_android.cc \
dummy/audio_device_utility_dummy.cc \
dummy/audio_device_dummy.cc
dummy/audio_device_dummy.cc \
dummy/file_audio_device.cc
# Flags passed to both C and C++ files.
LOCAL_CFLAGS := \


@@ -20,7 +20,7 @@
'.',
'../interface',
'include',
'dummy', # dummy audio device
'dummy', # Contains dummy audio device implementations.
],
'direct_dependent_settings': {
'include_dirs': [
@@ -45,6 +45,8 @@
'dummy/audio_device_dummy.h',
'dummy/audio_device_utility_dummy.cc',
'dummy/audio_device_utility_dummy.h',
'dummy/file_audio_device.cc',
'dummy/file_audio_device.h',
],
'conditions': [
['OS=="linux"', {
@@ -77,6 +79,13 @@
'WEBRTC_DUMMY_AUDIO_BUILD',
],
}],
['build_with_chromium==0', {
'sources': [
# Don't link these into Chrome since they contain static data.
'dummy/file_audio_device_factory.cc',
'dummy/file_audio_device_factory.h',
],
}],
['include_internal_audio_device==1', {
'sources': [
'linux/alsasymboltable_linux.cc',


@@ -45,8 +45,14 @@
#include "audio_device_utility_mac.h"
#include "audio_device_mac.h"
#endif
#if defined(WEBRTC_DUMMY_FILE_DEVICES)
#include "webrtc/modules/audio_device/dummy/file_audio_device_factory.h"
#endif
#include "webrtc/modules/audio_device/dummy/audio_device_dummy.h"
#include "webrtc/modules/audio_device/dummy/audio_device_utility_dummy.h"
#include "webrtc/modules/audio_device/dummy/file_audio_device.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
@@ -203,6 +209,14 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
{
ptrAudioDeviceUtility = new AudioDeviceUtilityDummy(Id());
}
#elif defined(WEBRTC_DUMMY_FILE_DEVICES)
ptrAudioDevice = FileAudioDeviceFactory::CreateFileAudioDevice(Id());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"Will use file-playing dummy device.");
if (ptrAudioDevice != NULL)
{
ptrAudioDeviceUtility = new AudioDeviceUtilityDummy(Id());
}
#else
const AudioLayer audioLayer(PlatformAudioLayer());


@@ -0,0 +1,586 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <iostream>
#include "webrtc/modules/audio_device/dummy/file_audio_device.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
namespace webrtc {
int kRecordingFixedSampleRate = 48000;
int kRecordingNumChannels = 2;
int kPlayoutFixedSampleRate = 48000;
int kPlayoutNumChannels = 2;
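// Each buffer below holds 10 ms of audio:
// (sample rate / 100) frames * channels * 2 bytes per 16-bit sample.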
int kPlayoutBufferSize = kPlayoutFixedSampleRate / 100
* kPlayoutNumChannels * 2;
int kRecordingBufferSize = kRecordingFixedSampleRate / 100
* kRecordingNumChannels * 2;
FileAudioDevice::FileAudioDevice(const int32_t id,
const char* inputFilename,
const char* outputFile):
_ptrAudioBuffer(NULL),
_recordingBuffer(NULL),
_playoutBuffer(NULL),
_recordingFramesLeft(0),
_playoutFramesLeft(0),
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_recordingBufferSizeIn10MS(0),
_recordingFramesIn10MS(0),
_playoutFramesIn10MS(0),
_ptrThreadRec(NULL),
_ptrThreadPlay(NULL),
_recThreadID(0),
_playThreadID(0),
_playing(false),
_recording(false),
_lastCallPlayoutMillis(0),
_lastCallRecordMillis(0),
_outputFile(*FileWrapper::Create()),
_inputFile(*FileWrapper::Create()),
_outputFilename(outputFile),
_inputFilename(inputFilename),
_clock(Clock::GetRealTimeClock()) {
}
FileAudioDevice::~FileAudioDevice() {
_outputFile.Flush();
_outputFile.CloseFile();
delete &_outputFile;
_inputFile.Flush();
_inputFile.CloseFile();
delete &_inputFile;
}
int32_t FileAudioDevice::ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const {
return -1;
}
int32_t FileAudioDevice::Init() { return 0; }
int32_t FileAudioDevice::Terminate() { return 0; }
bool FileAudioDevice::Initialized() const { return true; }
int16_t FileAudioDevice::PlayoutDevices() {
return 1;
}
int16_t FileAudioDevice::RecordingDevices() {
return 1;
}
int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
const char* kName = "dummy_device";
const char* kGuid = "dummy_device_unique_id";
if (index < 1) {
memset(name, 0, kAdmMaxDeviceNameSize);
memset(guid, 0, kAdmMaxGuidSize);
memcpy(name, kName, strlen(kName));
memcpy(guid, kGuid, strlen(kGuid));
return 0;
}
return -1;
}
int32_t FileAudioDevice::RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
const char* kName = "dummy_device";
const char* kGuid = "dummy_device_unique_id";
if (index < 1) {
memset(name, 0, kAdmMaxDeviceNameSize);
memset(guid, 0, kAdmMaxGuidSize);
memcpy(name, kName, strlen(kName));
memcpy(guid, kGuid, strlen(kGuid));
return 0;
}
return -1;
}
int32_t FileAudioDevice::SetPlayoutDevice(uint16_t index) {
if (index == 0) {
_playout_index = index;
return 0;
}
return -1;
}
int32_t FileAudioDevice::SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) {
return -1;
}
int32_t FileAudioDevice::SetRecordingDevice(uint16_t index) {
if (index == 0) {
_record_index = index;
return _record_index;
}
return -1;
}
int32_t FileAudioDevice::SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) {
return -1;
}
int32_t FileAudioDevice::PlayoutIsAvailable(bool& available) {
if (_playout_index == 0) {
available = true;
return _playout_index;
}
available = false;
return -1;
}
int32_t FileAudioDevice::InitPlayout() {
if (_ptrAudioBuffer)
{
// Update webrtc audio buffer with the selected parameters
_ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
_ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
}
return 0;
}
bool FileAudioDevice::PlayoutIsInitialized() const {
return true;
}
int32_t FileAudioDevice::RecordingIsAvailable(bool& available) {
if (_record_index == 0) {
available = true;
return _record_index;
}
available = false;
return -1;
}
int32_t FileAudioDevice::InitRecording() {
CriticalSectionScoped lock(&_critSect);
if (_recording) {
return -1;
}
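// One 10 ms chunk at the fixed recording rate (480 frames at 48 kHz).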
_recordingFramesIn10MS = kRecordingFixedSampleRate/100;
if (_ptrAudioBuffer) {
_ptrAudioBuffer->SetRecordingSampleRate(kRecordingFixedSampleRate);
_ptrAudioBuffer->SetRecordingChannels(kRecordingNumChannels);
}
return 0;
}
bool FileAudioDevice::RecordingIsInitialized() const {
return true;
}
int32_t FileAudioDevice::StartPlayout() {
if (_playing)
{
return 0;
}
_playoutFramesIn10MS = kPlayoutFixedSampleRate / 100;
_playing = true;
_playoutFramesLeft = 0;
if (!_playoutBuffer)
_playoutBuffer = new int8_t[2 *
kPlayoutNumChannels *
kPlayoutFixedSampleRate/100];
if (!_playoutBuffer)
{
_playing = false;
return -1;
}
// PLAYOUT
const char* threadName = "webrtc_audio_module_play_thread";
_ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc,
this,
kRealtimePriority,
threadName);
if (_ptrThreadPlay == NULL)
{
_playing = false;
delete [] _playoutBuffer;
_playoutBuffer = NULL;
return -1;
}
if (_outputFile.OpenFile(_outputFilename.c_str(),
false, false, false) == -1) {
printf("Failed to open playout file %s!", _outputFilename.c_str());
_playing = false;
delete [] _playoutBuffer;
_playoutBuffer = NULL;
return -1;
}
unsigned int threadID(0);
if (!_ptrThreadPlay->Start(threadID))
{
_playing = false;
delete _ptrThreadPlay;
_ptrThreadPlay = NULL;
delete [] _playoutBuffer;
_playoutBuffer = NULL;
return -1;
}
_playThreadID = threadID;
return 0;
}
int32_t FileAudioDevice::StopPlayout() {
{
CriticalSectionScoped lock(&_critSect);
_playing = false;
}
// stop playout thread first
if (_ptrThreadPlay && !_ptrThreadPlay->Stop())
{
return -1;
}
else {
delete _ptrThreadPlay;
_ptrThreadPlay = NULL;
}
CriticalSectionScoped lock(&_critSect);
_playoutFramesLeft = 0;
delete [] _playoutBuffer;
_playoutBuffer = NULL;
_outputFile.Flush();
_outputFile.CloseFile();
return 0;
}
bool FileAudioDevice::Playing() const {
return _playing;
}
int32_t FileAudioDevice::StartRecording() {
_recording = true;
// Make sure we only create the buffer once.
_recordingBufferSizeIn10MS = _recordingFramesIn10MS *
kRecordingNumChannels *
2;
if (!_recordingBuffer) {
_recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
}
if (_inputFile.OpenFile(_inputFilename.c_str(), true,
true, false) == -1) {
printf("Failed to open audio input file %s!\n",
_inputFilename.c_str());
_recording = false;
delete[] _recordingBuffer;
_recordingBuffer = NULL;
return -1;
}
const char* threadName = "webrtc_audio_module_capture_thread";
_ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc,
this,
kRealtimePriority,
threadName);
if (_ptrThreadRec == NULL)
{
_recording = false;
delete [] _recordingBuffer;
_recordingBuffer = NULL;
return -1;
}
unsigned int threadID(0);
if (!_ptrThreadRec->Start(threadID))
{
_recording = false;
delete _ptrThreadRec;
_ptrThreadRec = NULL;
delete [] _recordingBuffer;
_recordingBuffer = NULL;
return -1;
}
_recThreadID = threadID;
return 0;
}
int32_t FileAudioDevice::StopRecording() {
{
CriticalSectionScoped lock(&_critSect);
_recording = false;
}
if (_ptrThreadRec && !_ptrThreadRec->Stop())
{
return -1;
}
else {
delete _ptrThreadRec;
_ptrThreadRec = NULL;
}
CriticalSectionScoped lock(&_critSect);
_recordingFramesLeft = 0;
if (_recordingBuffer)
{
delete [] _recordingBuffer;
_recordingBuffer = NULL;
}
return 0;
}
bool FileAudioDevice::Recording() const {
return _recording;
}
int32_t FileAudioDevice::SetAGC(bool enable) { return -1; }
bool FileAudioDevice::AGC() const { return false; }
int32_t FileAudioDevice::SetWaveOutVolume(uint16_t volumeLeft,
uint16_t volumeRight) {
return -1;
}
int32_t FileAudioDevice::WaveOutVolume(uint16_t& volumeLeft,
uint16_t& volumeRight) const {
return -1;
}
int32_t FileAudioDevice::InitSpeaker() { return -1; }
bool FileAudioDevice::SpeakerIsInitialized() const { return false; }
int32_t FileAudioDevice::InitMicrophone() { return 0; }
bool FileAudioDevice::MicrophoneIsInitialized() const { return true; }
int32_t FileAudioDevice::SpeakerVolumeIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) { return -1; }
int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const { return -1; }
int32_t FileAudioDevice::MaxSpeakerVolume(uint32_t& maxVolume) const {
return -1;
}
int32_t FileAudioDevice::MinSpeakerVolume(uint32_t& minVolume) const {
return -1;
}
int32_t FileAudioDevice::SpeakerVolumeStepSize(uint16_t& stepSize) const {
return -1;
}
int32_t FileAudioDevice::MicrophoneVolumeIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) { return -1; }
int32_t FileAudioDevice::MicrophoneVolume(uint32_t& volume) const {
return -1;
}
int32_t FileAudioDevice::MaxMicrophoneVolume(uint32_t& maxVolume) const {
return -1;
}
int32_t FileAudioDevice::MinMicrophoneVolume(uint32_t& minVolume) const {
return -1;
}
int32_t FileAudioDevice::MicrophoneVolumeStepSize(uint16_t& stepSize) const {
return -1;
}
int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) { return -1; }
int32_t FileAudioDevice::SetSpeakerMute(bool enable) { return -1; }
int32_t FileAudioDevice::SpeakerMute(bool& enabled) const { return -1; }
int32_t FileAudioDevice::MicrophoneMuteIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetMicrophoneMute(bool enable) { return -1; }
int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const { return -1; }
int32_t FileAudioDevice::MicrophoneBoostIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetMicrophoneBoost(bool enable) { return -1; }
int32_t FileAudioDevice::MicrophoneBoost(bool& enabled) const { return -1; }
int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) {
available = true;
return 0;
}
int32_t FileAudioDevice::SetStereoPlayout(bool enable) {
return 0;
}
int32_t FileAudioDevice::StereoPlayout(bool& enabled) const {
enabled = true;
return 0;
}
int32_t FileAudioDevice::StereoRecordingIsAvailable(bool& available) {
available = true;
return 0;
}
int32_t FileAudioDevice::SetStereoRecording(bool enable) {
return 0;
}
int32_t FileAudioDevice::StereoRecording(bool& enabled) const {
enabled = true;
return 0;
}
int32_t FileAudioDevice::SetPlayoutBuffer(
const AudioDeviceModule::BufferType type,
uint16_t sizeMS) {
return 0;
}
int32_t FileAudioDevice::PlayoutBuffer(AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const {
type = _playBufType;
return 0;
}
int32_t FileAudioDevice::PlayoutDelay(uint16_t& delayMS) const {
return 0;
}
int32_t FileAudioDevice::RecordingDelay(uint16_t& delayMS) const { return -1; }
int32_t FileAudioDevice::CPULoad(uint16_t& load) const { return -1; }
bool FileAudioDevice::PlayoutWarning() const { return false; }
bool FileAudioDevice::PlayoutError() const { return false; }
bool FileAudioDevice::RecordingWarning() const { return false; }
bool FileAudioDevice::RecordingError() const { return false; }
void FileAudioDevice::ClearPlayoutWarning() {}
void FileAudioDevice::ClearPlayoutError() {}
void FileAudioDevice::ClearRecordingWarning() {}
void FileAudioDevice::ClearRecordingError() {}
void FileAudioDevice::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
CriticalSectionScoped lock(&_critSect);
_ptrAudioBuffer = audioBuffer;
// Inform the AudioBuffer about default settings for this implementation.
// Set all values to zero here since the actual settings will be done by
// InitPlayout and InitRecording later.
_ptrAudioBuffer->SetRecordingSampleRate(0);
_ptrAudioBuffer->SetPlayoutSampleRate(0);
_ptrAudioBuffer->SetRecordingChannels(0);
_ptrAudioBuffer->SetPlayoutChannels(0);
}
bool FileAudioDevice::PlayThreadFunc(void* pThis)
{
return (static_cast<FileAudioDevice*>(pThis)->PlayThreadProcess());
}
bool FileAudioDevice::RecThreadFunc(void* pThis)
{
return (static_cast<FileAudioDevice*>(pThis)->RecThreadProcess());
}
bool FileAudioDevice::PlayThreadProcess()
{
if(!_playing)
return false;
uint64_t currentTime = _clock->CurrentNtpInMilliseconds();
_critSect.Enter();
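// At most once per 10 ms, pull a fresh 10 ms chunk of playout data and
// append it to the output file.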
if (_lastCallPlayoutMillis == 0 ||
currentTime - _lastCallPlayoutMillis >= 10)
{
_critSect.Leave();
_ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
_critSect.Enter();
_playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
assert(_playoutFramesLeft == _playoutFramesIn10MS);
if (_outputFile.Open()) {
_outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
_outputFile.Flush();
}
_lastCallPlayoutMillis = currentTime;
}
_playoutFramesLeft = 0;
_critSect.Leave();
SleepMs(10 - (_clock->CurrentNtpInMilliseconds() - currentTime));
return true;
}
bool FileAudioDevice::RecThreadProcess()
{
if (!_recording)
return false;
uint64_t currentTime = _clock->CurrentNtpInMilliseconds();
_critSect.Enter();
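// At most once per 10 ms, read a 10 ms chunk from the input file
// (rewinding at end-of-file so the input loops) and deliver it as
// recorded data.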
if (_lastCallRecordMillis == 0 ||
currentTime - _lastCallRecordMillis >= 10) {
if (_inputFile.Open()) {
if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
_ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
_recordingFramesIn10MS);
} else {
_inputFile.Rewind();
}
_lastCallRecordMillis = currentTime;
_critSect.Leave();
_ptrAudioBuffer->DeliverRecordedData();
_critSect.Enter();
}
}
_critSect.Leave();
SleepMs(10 - (_clock->CurrentNtpInMilliseconds() - currentTime));
return true;
}
} // namespace webrtc


@@ -0,0 +1,202 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_AUDIO_DEVICE_FILE_AUDIO_DEVICE_H
#define WEBRTC_AUDIO_DEVICE_FILE_AUDIO_DEVICE_H
#include <stdio.h>
#include <string>
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/clock.h"
namespace webrtc {
class EventWrapper;
class ThreadWrapper;
// This is a fake audio device which plays audio from a file as its microphone
// and plays out into a file.
class FileAudioDevice : public AudioDeviceGeneric {
public:
// Constructs a file audio device with |id|. It will read audio from
// |inputFilename| and record output audio to |outputFilename|.
//
// The input file should be a readable 48k stereo raw file, and the output
// file should point to a writable location. The output format will also be
// 48k stereo raw audio.
FileAudioDevice(const int32_t id,
const char* inputFilename,
const char* outputFilename);
virtual ~FileAudioDevice();
// Retrieve the currently utilized audio layer
virtual int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const OVERRIDE;
// Main initializaton and termination
virtual int32_t Init() OVERRIDE;
virtual int32_t Terminate() OVERRIDE;
virtual bool Initialized() const OVERRIDE;
// Device enumeration
virtual int16_t PlayoutDevices() OVERRIDE;
virtual int16_t RecordingDevices() OVERRIDE;
virtual int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) OVERRIDE;
virtual int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) OVERRIDE;
// Device selection
virtual int32_t SetPlayoutDevice(uint16_t index) OVERRIDE;
virtual int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) OVERRIDE;
virtual int32_t SetRecordingDevice(uint16_t index) OVERRIDE;
virtual int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) OVERRIDE;
// Audio transport initialization
virtual int32_t PlayoutIsAvailable(bool& available) OVERRIDE;
virtual int32_t InitPlayout() OVERRIDE;
virtual bool PlayoutIsInitialized() const OVERRIDE;
virtual int32_t RecordingIsAvailable(bool& available) OVERRIDE;
virtual int32_t InitRecording() OVERRIDE;
virtual bool RecordingIsInitialized() const OVERRIDE;
// Audio transport control
virtual int32_t StartPlayout() OVERRIDE;
virtual int32_t StopPlayout() OVERRIDE;
virtual bool Playing() const OVERRIDE;
virtual int32_t StartRecording() OVERRIDE;
virtual int32_t StopRecording() OVERRIDE;
virtual bool Recording() const OVERRIDE;
// Microphone Automatic Gain Control (AGC)
virtual int32_t SetAGC(bool enable) OVERRIDE;
virtual bool AGC() const OVERRIDE;
// Volume control based on the Windows Wave API (Windows only)
virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
uint16_t volumeRight) OVERRIDE;
virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
uint16_t& volumeRight) const OVERRIDE;
// Audio mixer initialization
virtual int32_t InitSpeaker() OVERRIDE;
virtual bool SpeakerIsInitialized() const OVERRIDE;
virtual int32_t InitMicrophone() OVERRIDE;
virtual bool MicrophoneIsInitialized() const OVERRIDE;
// Speaker volume controls
virtual int32_t SpeakerVolumeIsAvailable(bool& available) OVERRIDE;
virtual int32_t SetSpeakerVolume(uint32_t volume) OVERRIDE;
virtual int32_t SpeakerVolume(uint32_t& volume) const OVERRIDE;
virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const OVERRIDE;
virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const OVERRIDE;
virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const OVERRIDE;
// Microphone volume controls
virtual int32_t MicrophoneVolumeIsAvailable(bool& available) OVERRIDE;
virtual int32_t SetMicrophoneVolume(uint32_t volume) OVERRIDE;
virtual int32_t MicrophoneVolume(uint32_t& volume) const OVERRIDE;
virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const OVERRIDE;
virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const OVERRIDE;
virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const OVERRIDE;
// Speaker mute control
virtual int32_t SpeakerMuteIsAvailable(bool& available) OVERRIDE;
virtual int32_t SetSpeakerMute(bool enable) OVERRIDE;
virtual int32_t SpeakerMute(bool& enabled) const OVERRIDE;
// Microphone mute control
virtual int32_t MicrophoneMuteIsAvailable(bool& available) OVERRIDE;
virtual int32_t SetMicrophoneMute(bool enable) OVERRIDE;
virtual int32_t MicrophoneMute(bool& enabled) const OVERRIDE;
// Microphone boost control
virtual int32_t MicrophoneBoostIsAvailable(bool& available) OVERRIDE;
virtual int32_t SetMicrophoneBoost(bool enable) OVERRIDE;
virtual int32_t MicrophoneBoost(bool& enabled) const OVERRIDE;
// Stereo support
virtual int32_t StereoPlayoutIsAvailable(bool& available) OVERRIDE;
virtual int32_t SetStereoPlayout(bool enable) OVERRIDE;
virtual int32_t StereoPlayout(bool& enabled) const OVERRIDE;
virtual int32_t StereoRecordingIsAvailable(bool& available) OVERRIDE;
virtual int32_t SetStereoRecording(bool enable) OVERRIDE;
virtual int32_t StereoRecording(bool& enabled) const OVERRIDE;
// Delay information and control
virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
uint16_t sizeMS) OVERRIDE;
virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const OVERRIDE;
virtual int32_t PlayoutDelay(uint16_t& delayMS) const OVERRIDE;
virtual int32_t RecordingDelay(uint16_t& delayMS) const OVERRIDE;
// CPU load
virtual int32_t CPULoad(uint16_t& load) const OVERRIDE;
virtual bool PlayoutWarning() const OVERRIDE;
virtual bool PlayoutError() const OVERRIDE;
virtual bool RecordingWarning() const OVERRIDE;
virtual bool RecordingError() const OVERRIDE;
virtual void ClearPlayoutWarning() OVERRIDE;
virtual void ClearPlayoutError() OVERRIDE;
virtual void ClearRecordingWarning() OVERRIDE;
virtual void ClearRecordingError() OVERRIDE;
virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) OVERRIDE;
private:
static bool RecThreadFunc(void*);
static bool PlayThreadFunc(void*);
bool RecThreadProcess();
bool PlayThreadProcess();
int32_t _playout_index;
int32_t _record_index;
AudioDeviceModule::BufferType _playBufType;
AudioDeviceBuffer* _ptrAudioBuffer;
int8_t* _recordingBuffer; // In bytes.
int8_t* _playoutBuffer; // In bytes.
uint32_t _recordingFramesLeft;
uint32_t _playoutFramesLeft;
CriticalSectionWrapper& _critSect;
uint32_t _recordingBufferSizeIn10MS;
uint32_t _recordingFramesIn10MS;
uint32_t _playoutFramesIn10MS;
ThreadWrapper* _ptrThreadRec;
ThreadWrapper* _ptrThreadPlay;
uint32_t _recThreadID;
uint32_t _playThreadID;
bool _playing;
bool _recording;
uint64_t _lastCallPlayoutMillis;
uint64_t _lastCallRecordMillis;
FileWrapper& _outputFile;
FileWrapper& _inputFile;
std::string _outputFilename;
std::string _inputFilename;
Clock* _clock;
};
} // namespace webrtc
#endif // WEBRTC_AUDIO_DEVICE_FILE_AUDIO_DEVICE_H


@@ -0,0 +1,43 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_device/dummy/file_audio_device_factory.h"
#include <cstring>
#include "webrtc/modules/audio_device/dummy/file_audio_device.h"
namespace webrtc {
char FileAudioDeviceFactory::_inputAudioFilename[MAX_FILENAME_LEN] = "";
char FileAudioDeviceFactory::_outputAudioFilename[MAX_FILENAME_LEN] = "";
FileAudioDevice* FileAudioDeviceFactory::CreateFileAudioDevice(
const int32_t id) {
// Bail out here if the files aren't set.
if (strlen(_inputAudioFilename) == 0 || strlen(_outputAudioFilename) == 0) {
printf("Was compiled with WEBRTC_DUMMY_FILE_DEVICES "
"but did not set input/output files to use. Bailing out.\n");
exit(1);
}
return new FileAudioDevice(id, _inputAudioFilename, _outputAudioFilename);
}
void FileAudioDeviceFactory::SetFilenamesToUse(
const char* inputAudioFilename, const char* outputAudioFilename) {
assert(strlen(inputAudioFilename) < MAX_FILENAME_LEN &&
strlen(outputAudioFilename) < MAX_FILENAME_LEN);
// Copy the strings since we don't know the lifetime of the input pointers.
strncpy(_inputAudioFilename, inputAudioFilename, MAX_FILENAME_LEN);
strncpy(_outputAudioFilename, outputAudioFilename, MAX_FILENAME_LEN);
}
} // namespace webrtc


@@ -0,0 +1,41 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H
#define WEBRTC_AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H
#include "webrtc/common_types.h"
namespace webrtc {
class FileAudioDevice;
// This class is used by audio_device_impl.cc when WebRTC is compiled with
// WEBRTC_DUMMY_FILE_DEVICES. The application must include this file and set the
// filenames to use before the audio device module is initialized. This is
// intended for test tools which use the audio device module.
class FileAudioDeviceFactory {
public:
static FileAudioDevice* CreateFileAudioDevice(const int32_t id);
// The input file must be a readable 48k stereo raw file. The output
// file must be writable. The strings will be copied.
static void SetFilenamesToUse(const char* inputAudioFilename,
const char* outputAudioFilename);
private:
static const uint32_t MAX_FILENAME_LEN = 256;
static char _inputAudioFilename[MAX_FILENAME_LEN];
static char _outputAudioFilename[MAX_FILENAME_LEN];
};
} // namespace webrtc
#endif // WEBRTC_AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H