git-svn-id: http://webrtc.googlecode.com/svn/trunk@162 4adac7df-926f-26a2-2b94-8c16560cd09d

This commit is contained in:
niklase@google.com 2011-07-07 08:27:17 +00:00
parent aa107a635f
commit 91081baf8a
116 changed files with 0 additions and 51165 deletions

View File

@ -1,4 +0,0 @@
grunell@google.com
henrika@google.com
niklase@google.com
xians@google.com

View File

@ -1,192 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Noise Suppression (NS).
// - Automatic Gain Control (AGC).
// - Echo Control (EC).
// - Receiving side VAD, NS and AGC.
// - Measurements of instantaneous speech, noise and echo levels.
// - Generation of AP debug recordings.
// - Detection of keyboard typing which can disrupt a voice conversation.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoEAudioProcessing* ap = VoEAudioProcessing::GetInterface(voe);
// base->Init();
// ap->SetEcStatus(true, kEcAec);
// ...
// base->Terminate();
// base->Release();
// ap->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
// VoERxVadCallback
class WEBRTC_DLLEXPORT VoERxVadCallback
{
public:
virtual void OnRxVad(int channel, int vadDecision) = 0;
protected:
virtual ~VoERxVadCallback() {}
};
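// A minimal sketch of a VoERxVadCallback implementation and its
// registration; the class name and channel |ch| are illustrative:
//
// class MyRxVadObserver : public VoERxVadCallback
// {
// public:
//     virtual void OnRxVad(int channel, int vadDecision)
//     {
//         // vadDecision is assumed to be 1 for active speech on
//         // |channel| and 0 for silence
//     }
// };
//
// MyRxVadObserver observer;
// ap->RegisterRxVadObserver(ch, observer);
// ...
// ap->DeRegisterRxVadObserver(ch);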
// VoEAudioProcessing
class WEBRTC_DLLEXPORT VoEAudioProcessing
{
public:
// Factory for the VoEAudioProcessing sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEAudioProcessing sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Sets Noise Suppression (NS) status and mode.
// The NS reduces noise in the microphone signal.
virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0;
// Gets the NS status and mode.
virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0;
// Sets the Automatic Gain Control (AGC) status and mode.
// The AGC adjusts the microphone signal to an appropriate level.
virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0;
// Gets the AGC status and mode.
virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0;
// Sets the AGC configuration.
// Should only be used in situations where the working environment
// is well known.
virtual int SetAgcConfig(const AgcConfig config) = 0;
// Gets the AGC configuration.
virtual int GetAgcConfig(AgcConfig& config) = 0;
// Sets the Echo Control (EC) status and mode.
// The EC mitigates acoustic echo where a user can hear their own
// speech repeated back due to an acoustic coupling between the
// speaker and the microphone at the remote end.
virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0;
// Gets the EC status and mode.
virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;
// Modifies settings for the AEC designed for mobile devices (AECM).
virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
bool enableCNG = true) = 0;
// Gets settings for the AECM.
virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0;
// Sets status and mode of the receiving-side (Rx) NS.
// The Rx NS reduces noise in the received signal for the specified
// |channel|. Intended for advanced usage only.
virtual int SetRxNsStatus(int channel,
bool enable,
NsModes mode = kNsUnchanged) = 0;
// Gets status and mode of the receiving-side NS.
virtual int GetRxNsStatus(int channel,
bool& enabled,
NsModes& mode) = 0;
// Sets status and mode of the receiving-side (Rx) AGC.
// The Rx AGC adjusts the received signal to an appropriate level
// for the specified |channel|. Intended for advanced usage only.
virtual int SetRxAgcStatus(int channel,
bool enable,
AgcModes mode = kAgcUnchanged) = 0;
// Gets status and mode of the receiving-side AGC.
virtual int GetRxAgcStatus(int channel,
bool& enabled,
AgcModes& mode) = 0;
// Modifies the AGC configuration on the receiving side for the
// specified |channel|.
virtual int SetRxAgcConfig(int channel, const AgcConfig config) = 0;
// Gets the AGC configuration on the receiving side.
virtual int GetRxAgcConfig(int channel, AgcConfig& config) = 0;
// Registers a VoERxVadCallback |observer| instance and enables Rx VAD
// notifications for the specified |channel|.
virtual int RegisterRxVadObserver(int channel,
VoERxVadCallback &observer) = 0;
// Deregisters the VoERxVadCallback |observer| and disables Rx VAD
// notifications for the specified |channel|.
virtual int DeRegisterRxVadObserver(int channel) = 0;
// Gets the VAD/DTX activity for the specified |channel|.
// The returned value is 1 if the audio frames contain speech
// and 0 if they contain silence. The output is always 1 if VAD is disabled.
virtual int VoiceActivityIndicator(int channel) = 0;
// Enables or disables the possibility to retrieve instantaneous
// speech, noise and echo metrics during an active call.
virtual int SetMetricsStatus(bool enable) = 0;
// Gets the current speech, noise and echo metric status.
virtual int GetMetricsStatus(bool& enabled) = 0;
// Gets the instantaneous speech level metrics for the transmitted
// and received signals.
virtual int GetSpeechMetrics(int& levelTx, int& levelRx) = 0;
// Gets the instantaneous noise level metrics for the transmitted
// and received signals.
virtual int GetNoiseMetrics(int& levelTx, int& levelRx) = 0;
// Gets the instantaneous echo level metrics for the near-end and
// far-end signals.
virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0;
// Enables recording of Audio Processing (AP) debugging information.
// The file can later be used for off-line analysis of the AP performance.
virtual int StartDebugRecording(const char* fileNameUTF8) = 0;
// Disables recording of AP debugging information.
virtual int StopDebugRecording() = 0;
// Enables or disables detection of disturbing keyboard typing.
// An error notification will be given as a callback upon detection.
virtual int SetTypingDetectionStatus(bool enable) = 0;
// Gets the current typing detection status.
virtual int GetTypingDetectionStatus(bool& enabled) = 0;
protected:
VoEAudioProcessing() {}
virtual ~VoEAudioProcessing() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H

View File

@ -1,217 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Enables full duplex VoIP sessions via RTP using G.711 (mu-Law or A-Law).
// - Initialization and termination.
// - Trace information on text files or via callbacks.
// - Multi-channel support (mixing, sending to multiple destinations etc.).
// - Call setup (port and address) for receiving and sending sides.
//
// To support codecs other than G.711, the VoECodec sub-API must be used.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// base->Init();
// int ch = base->CreateChannel();
// base->StartPlayout(ch);
// ...
// base->DeleteChannel(ch);
// base->Terminate();
// base->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_BASE_H
#define WEBRTC_VOICE_ENGINE_VOE_BASE_H
#include "common_types.h"
namespace webrtc {
class AudioDeviceModule;
const int kVoEDefault = -1;
// VoiceEngineObserver
class WEBRTC_DLLEXPORT VoiceEngineObserver
{
public:
// This method will be called after the occurrence of any runtime error
// code, or warning notification, when the observer interface has been
// installed using VoEBase::RegisterVoiceEngineObserver().
virtual void CallbackOnError(const int channel, const int errCode) = 0;
protected:
virtual ~VoiceEngineObserver() {}
};
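// A minimal sketch of a VoiceEngineObserver implementation; the class name
// is illustrative and the error code is one of the VE_ codes defined in
// voe_errors.h:
//
// class MyObserver : public VoiceEngineObserver
// {
// public:
//     virtual void CallbackOnError(const int channel, const int errCode)
//     {
//         if (errCode == VE_TYPING_NOISE_WARNING)
//         {
//             // e.g. warn the local user or mute the microphone
//         }
//     }
// };
//
// MyObserver observer;
// base->RegisterVoiceEngineObserver(observer);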
// VoiceEngine
class WEBRTC_DLLEXPORT VoiceEngine
{
public:
// Creates a VoiceEngine object, which can then be used to acquire
// sub-APIs. Returns NULL on failure.
static VoiceEngine* Create();
// Deletes a created VoiceEngine object and releases the utilized resources.
// If |ignoreRefCounters| is set to false, all reference counters must be
// zero to enable a valid release of the allocated resources. When set to
// true, a release of all resources allocated by the VoE is performed
// without checking the reference counter state.
static bool Delete(VoiceEngine*& voiceEngine,
bool ignoreRefCounters = false);
// Specifies the amount and type of trace information which will be
// created by the VoiceEngine.
static int SetTraceFilter(const unsigned int filter);
// Sets the name of the trace file and enables non-encrypted trace messages.
static int SetTraceFile(const char* fileNameUTF8,
const bool addFileCounter = false);
// Installs the TraceCallback implementation to ensure that the user
// receives callbacks for generated trace messages.
static int SetTraceCallback(TraceCallback* callback);
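// Android only: supplies the Java VM, JNI environment and application
// context used by the platform audio device (semantics assumed from the
// parameter names).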
static int SetAndroidObjects(void* javaVM, void* env, void* context);
protected:
VoiceEngine() {}
virtual ~VoiceEngine() {}
};
// VoEBase
class WEBRTC_DLLEXPORT VoEBase
{
public:
// Factory for the VoEBase sub-API. Increases an internal reference
// counter if successful. Returns NULL if the API is not supported or if
// construction fails.
static VoEBase* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEBase sub-API and decreases an internal reference
// counter. Returns the new reference count. This value should be zero
// for all sub-APIs before the VoiceEngine object can be safely deleted.
virtual int Release() = 0;
// Installs the observer class to enable runtime error control and
// warning notifications.
virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) = 0;
// Removes and disables the observer class for runtime error control
// and warning notifications.
virtual int DeRegisterVoiceEngineObserver() = 0;
// Installs and enables a user-defined external audio device module
// which implements all the audio layer functionality.
virtual int RegisterAudioDeviceModule(AudioDeviceModule& adm) = 0;
// Removes and disables the external audio device module.
virtual int DeRegisterAudioDeviceModule() = 0;
// Initiates all common parts of the VoiceEngine; e.g. all
// encoders/decoders, the sound card and core receiving components.
virtual int Init() = 0;
// Terminates all VoiceEngine functions and releases allocated resources.
virtual int Terminate() = 0;
// Retrieves the maximum number of channels that can be created.
virtual int MaxNumOfChannels() = 0;
// Creates a new channel and allocates the required resources for it.
virtual int CreateChannel() = 0;
// Deletes an existing channel and releases the utilized resources.
virtual int DeleteChannel(int channel) = 0;
// Sets the local receiver port and address for a specified
// |channel| number.
virtual int SetLocalReceiver(int channel, int port,
int RTCPport = kVoEDefault,
const char ipAddr[64] = NULL,
const char multiCastAddr[64] = NULL) = 0;
// Gets the local receiver port and address for a specified
// |channel| number.
virtual int GetLocalReceiver(int channel, int& port, int& RTCPport,
char ipAddr[64]) = 0;
// Sets the destination port and address for a specified |channel| number.
virtual int SetSendDestination(int channel, int port,
const char ipAddr[64],
int sourcePort = kVoEDefault,
int RTCPport = kVoEDefault) = 0;
// Gets the destination port and address for a specified |channel| number.
virtual int GetSendDestination(int channel, int& port, char ipAddr[64],
int& sourcePort, int& RTCPport) = 0;
// Prepares and initiates the VoiceEngine for reception of
// incoming RTP/RTCP packets on the specified |channel|.
virtual int StartReceive(int channel) = 0;
// Stops receiving incoming RTP/RTCP packets on the specified |channel|.
virtual int StopReceive(int channel) = 0;
// Starts forwarding the packets to the mixer/soundcard for a
// specified |channel|.
virtual int StartPlayout(int channel) = 0;
// Stops forwarding the packets to the mixer/soundcard for a
// specified |channel|.
virtual int StopPlayout(int channel) = 0;
// Starts sending packets to an already specified IP address and
// port number for a specified |channel|.
virtual int StartSend(int channel) = 0;
// Stops sending packets from a specified |channel|.
virtual int StopSend(int channel) = 0;
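// A sketch of a typical full-duplex setup on one channel; the port numbers
// and IP address are illustrative:
//
// base->SetLocalReceiver(ch, 8000);
// base->SetSendDestination(ch, 8000, "192.168.1.10");
// base->StartReceive(ch);
// base->StartPlayout(ch);
// base->StartSend(ch);
// ...
// base->StopSend(ch);
// base->StopPlayout(ch);
// base->StopReceive(ch);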
// Gets the version information for VoiceEngine and its components.
virtual int GetVersion(char version[1024]) = 0;
// Gets the last VoiceEngine error code.
virtual int LastError() = 0;
// Stops or resumes playout and transmission on a temporary basis.
virtual int SetOnHoldStatus(int channel, bool enable,
OnHoldModes mode = kHoldSendAndPlay) = 0;
// Gets the current playout and transmission status.
virtual int GetOnHoldStatus(int channel, bool& enabled,
OnHoldModes& mode) = 0;
// Sets the NetEQ playout mode for a specified |channel| number.
virtual int SetNetEQPlayoutMode(int channel, NetEqModes mode) = 0;
// Gets the NetEQ playout mode for a specified |channel| number.
virtual int GetNetEQPlayoutMode(int channel, NetEqModes& mode) = 0;
// Sets the NetEQ background noise mode for a specified |channel| number.
virtual int SetNetEQBGNMode(int channel, NetEqBgnModes mode) = 0;
// Gets the NetEQ background noise mode for a specified |channel| number.
virtual int GetNetEQBGNMode(int channel, NetEqBgnModes& mode) = 0;
protected:
VoEBase() {}
virtual ~VoEBase() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_BASE_H

View File

@ -1,90 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Long-term speech and noise level metrics.
// - Long-term echo metric statistics.
// - Round Trip Time (RTT) statistics.
// - Dead-or-Alive connection summary.
// - Generation of call reports to text files.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoECallReport* report = VoECallReport::GetInterface(voe);
// base->Init();
// LevelStatistics stats;
// report->GetSpeechAndNoiseSummary(stats);
// ...
// base->Terminate();
// base->Release();
// report->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
#define WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
// VoECallReport
class WEBRTC_DLLEXPORT VoECallReport
{
public:
// Factory for the VoECallReport sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoECallReport* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoECallReport sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Performs a combined reset of all components involved in generating
// the call report for a specified |channel|.
virtual int ResetCallReportStatistics(int channel) = 0;
// Gets minimum, maximum and average levels for long-term speech and
// noise metrics.
virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats) = 0;
// Gets minimum, maximum and average levels for long-term echo metrics.
virtual int GetEchoMetricSummary(EchoStatistics& stats) = 0;
// Gets minimum, maximum and average levels for Round Trip Time (RTT)
// measurements.
virtual int GetRoundTripTimeSummary(int channel,
StatVal& delaysMs) = 0;
// Gets the total number of dead and alive connection detections
// during a VoIP session.
virtual int GetDeadOrAliveSummary(int channel, int& numOfDeadDetections,
int& numOfAliveDetections) = 0;
// Creates a text file in ASCII format, which contains a summary
// of all the statistics that can be obtained by the call report sub-API.
virtual int WriteReportToFile(const char* fileNameUTF8) = 0;
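// A sketch of producing an end-of-call summary; |ch| is assumed to be an
// existing channel and the file name is illustrative:
//
// StatVal delays;
// report->GetRoundTripTimeSummary(ch, delays);
// report->WriteReportToFile("call_report.txt");
// report->ResetCallReportStatistics(ch);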
protected:
VoECallReport() { }
virtual ~VoECallReport() { }
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H

View File

@ -1,134 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Support of non-default codecs (e.g. iLBC and iSAC).
// - Voice Activity Detection (VAD) on a per channel basis.
// - Possibility to specify how to map received payload types to codecs.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoECodec* codec = VoECodec::GetInterface(voe);
// base->Init();
// int num_of_codecs = codec->NumOfCodecs();
// ...
// base->Terminate();
// base->Release();
// codec->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_CODEC_H
#define WEBRTC_VOICE_ENGINE_VOE_CODEC_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
class WEBRTC_DLLEXPORT VoECodec
{
public:
// Factory for the VoECodec sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoECodec* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoECodec sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Gets the number of supported codecs.
virtual int NumOfCodecs() = 0;
// Gets the |codec| information for a specified list |index|.
virtual int GetCodec(int index, CodecInst& codec) = 0;
// Sets the |codec| for the |channel| to be used for sending.
virtual int SetSendCodec(int channel, const CodecInst& codec) = 0;
// Gets the |codec| parameters for the sending codec on a specified
// |channel|.
virtual int GetSendCodec(int channel, CodecInst& codec) = 0;
// Gets the currently received |codec| for a specific |channel|.
virtual int GetRecCodec(int channel, CodecInst& codec) = 0;
// Sets the initial values of target rate and frame size for iSAC
// for a specified |channel|. This API is only valid if iSAC is set up
// to run in channel-adaptive mode.
virtual int SetISACInitTargetRate(int channel, int rateBps,
bool useFixedFrameSize = false) = 0;
// Sets the maximum allowed iSAC rate which the codec may not exceed
// for a single packet for the specified |channel|. The maximum rate is
// defined as payload size per frame size in bits per second.
virtual int SetISACMaxRate(int channel, int rateBps) = 0;
// Sets the maximum allowed iSAC payload size for a specified |channel|.
// The maximum value is set independently of the frame size, i.e.
// 30 ms and 60 ms packets have the same limit.
virtual int SetISACMaxPayloadSize(int channel, int sizeBytes) = 0;
// Sets the dynamic payload type number for a particular |codec| or
// disables (ignores) a codec for receiving. For instance, when receiving
// an INVITE from a SIP-based client, this function can be used to change
// the dynamic payload type number to match that in the INVITE's SDP
// message. The utilized parameters in the |codec| structure are:
// plname, plfreq, pltype and channels.
virtual int SetRecPayloadType(int channel, const CodecInst& codec) = 0;
// Gets the actual payload type that is set for receiving a |codec| on a
// |channel|. The value it retrieves will either be the default payload
// type, or a value earlier set with SetRecPayloadType().
virtual int GetRecPayloadType(int channel, CodecInst& codec) = 0;
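// A sketch of remapping a received payload type to match an SDP offer; the
// field values are illustrative and CodecInst is assumed to follow the
// plname/plfreq/pltype/channels layout described above:
//
// CodecInst inst;
// strcpy(inst.plname, "ISAC");
// inst.plfreq = 16000;
// inst.channels = 1;
// inst.pltype = 103;  // dynamic payload type taken from the SDP
// codec->SetRecPayloadType(ch, inst);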
// Sets the payload |type| for the sending of SID-frames with background
// noise estimation during silence periods detected by the VAD.
virtual int SetSendCNPayloadType(
int channel, int type, PayloadFrequencies frequency = kFreq16000Hz) = 0;
// Sets the VAD/DTX (silence suppression) status and |mode| for a
// specified |channel|.
virtual int SetVADStatus(int channel, bool enable,
VadModes mode = kVadConventional,
bool disableDTX = false) = 0;
// Gets the VAD/DTX status and |mode| for a specified |channel|.
virtual int GetVADStatus(int channel, bool& enabled, VadModes& mode,
bool& disabledDTX) = 0;
// Not supported
virtual int SetAMREncFormat(int channel, AmrMode mode) = 0;
// Not supported
virtual int SetAMRDecFormat(int channel, AmrMode mode) = 0;
// Not supported
virtual int SetAMRWbEncFormat(int channel, AmrMode mode) = 0;
// Not supported
virtual int SetAMRWbDecFormat(int channel, AmrMode mode) = 0;
protected:
VoECodec() {}
virtual ~VoECodec() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_CODEC_H

View File

@ -1,148 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Telephone event transmission.
// - DTMF tone generation.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoEDtmf* dtmf = VoEDtmf::GetInterface(voe);
// base->Init();
// int ch = base->CreateChannel();
// ...
// dtmf->SendTelephoneEvent(ch, 7);
// ...
// base->DeleteChannel(ch);
// base->Terminate();
// base->Release();
// dtmf->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_DTMF_H
#define WEBRTC_VOICE_ENGINE_VOE_DTMF_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
// VoETelephoneEventObserver
class WEBRTC_DLLEXPORT VoETelephoneEventObserver
{
public:
// This method will be called after the detection of an inband
// telephone event. The event code is given as output in the
// |eventCode| parameter.
virtual void OnReceivedTelephoneEventInband(const int channel,
const unsigned char eventCode,
const bool endOfEvent) = 0;
// This method will be called after the detection of an out-of-band
// telephone event. The event code is given as output in the
// |eventCode| parameter.
virtual void OnReceivedTelephoneEventOutOfBand(
const int channel,
const unsigned char eventCode,
const bool endOfEvent) = 0;
protected:
virtual ~VoETelephoneEventObserver() {}
};
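// A minimal sketch of a VoETelephoneEventObserver implementation; the
// class name is illustrative:
//
// class MyTelephoneEventObserver : public VoETelephoneEventObserver
// {
// public:
//     virtual void OnReceivedTelephoneEventInband(const int channel,
//         const unsigned char eventCode, const bool endOfEvent)
//     {
//         // react to an in-band event, e.g. log or map to a key press
//     }
//     virtual void OnReceivedTelephoneEventOutOfBand(const int channel,
//         const unsigned char eventCode, const bool endOfEvent)
//     {
//         // react to an out-of-band (RFC 2833) event
//     }
// };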
// VoEDtmf
class WEBRTC_DLLEXPORT VoEDtmf
{
public:
// Factory for the VoEDtmf sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEDtmf* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEDtmf sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Sends telephone events either in-band or out-of-band.
virtual int SendTelephoneEvent(int channel, unsigned char eventCode,
bool outOfBand = true, int lengthMs = 160,
int attenuationDb = 10) = 0;
// Sets the dynamic payload |type| that should be used for telephone
// events.
virtual int SetSendTelephoneEventPayloadType(int channel,
unsigned char type) = 0;
// Gets the currently set dynamic payload |type| for telephone events.
virtual int GetSendTelephoneEventPayloadType(int channel,
unsigned char& type) = 0;
// Enables or disables local tone playout for received DTMF events
// out-of-band.
virtual int SetDtmfPlayoutStatus(int channel, bool enable) = 0;
// Gets the DTMF playout status.
virtual int GetDtmfPlayoutStatus(int channel, bool& enabled) = 0;
// Toggles the DTMF feedback state: when a DTMF tone is sent, the same tone
// is played out on the speaker.
virtual int SetDtmfFeedbackStatus(bool enable,
bool directFeedback = false) = 0;
// Gets the DTMF feedback status.
virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) = 0;
// Plays a DTMF feedback tone (only locally).
virtual int PlayDtmfTone(unsigned char eventCode, int lengthMs = 200,
int attenuationDb = 10) = 0;
// Starts playing out a DTMF feedback tone locally.
// The tone will be played out until the corresponding stop function
// is called.
virtual int StartPlayingDtmfTone(unsigned char eventCode,
int attenuationDb = 10) = 0;
// Stops playing out a DTMF feedback tone locally.
virtual int StopPlayingDtmfTone() = 0;
// Installs an instance of a VoETelephoneEventObserver derived class and
// activates detection of telephone events for the specified |channel|.
virtual int RegisterTelephoneEventDetection(
int channel, TelephoneEventDetectionMethods detectionMethod,
VoETelephoneEventObserver& observer) = 0;
// Removes an instance of a VoETelephoneEventObserver derived class and
// disables detection of telephone events for the specified |channel|.
virtual int DeRegisterTelephoneEventDetection(int channel) = 0;
// Gets the current telephone-event detection status for a specified
// |channel|.
virtual int GetTelephoneEventDetectionStatus(
int channel, bool& enabled,
TelephoneEventDetectionMethods& detectionMethod) = 0;
protected:
VoEDtmf() {}
virtual ~VoEDtmf() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_DTMF_H

View File

@ -1,81 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - External encryption and decryption.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEEncryption* encrypt = VoEEncryption::GetInterface(voe);
// ...
// encrypt->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
#define WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
class WEBRTC_DLLEXPORT VoEEncryption
{
public:
// Factory for the VoEEncryption sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEEncryption* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEEncryption sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Installs an Encryption instance and enables external encryption
// for the selected |channel|.
virtual int RegisterExternalEncryption(
int channel, Encryption& encryption) = 0;
// Removes an Encryption instance and disables external encryption
// for the selected |channel|.
virtual int DeRegisterExternalEncryption(int channel) = 0;
// Not supported
virtual int EnableSRTPSend(int channel, CipherTypes cipherType,
int cipherKeyLength, AuthenticationTypes authType, int authKeyLength,
int authTagLength, SecurityLevels level, const unsigned char key[30],
bool useForRTCP = false) = 0;
// Not supported
virtual int DisableSRTPSend(int channel) = 0;
// Not supported
virtual int EnableSRTPReceive(int channel, CipherTypes cipherType,
int cipherKeyLength, AuthenticationTypes authType, int authKeyLength,
int authTagLength, SecurityLevels level, const unsigned char key[30],
bool useForRTCP = false) = 0;
// Not supported
virtual int DisableSRTPReceive(int channel) = 0;
protected:
VoEEncryption() {}
virtual ~VoEEncryption() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H

View File

@ -1,162 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
#define WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
// Warnings
#define VE_PORT_NOT_DEFINED 8001
#define VE_CHANNEL_NOT_VALID 8002
#define VE_FUNC_NOT_SUPPORTED 8003
#define VE_INVALID_LISTNR 8004
#define VE_INVALID_ARGUMENT 8005
#define VE_INVALID_PORT_NMBR 8006
#define VE_INVALID_PLNAME 8007
#define VE_INVALID_PLFREQ 8008
#define VE_INVALID_PLTYPE 8009
#define VE_INVALID_PACSIZE 8010
#define VE_NOT_SUPPORTED 8011
#define VE_ALREADY_LISTENING 8012
#define VE_CHANNEL_NOT_CREATED 8013
#define VE_MAX_ACTIVE_CHANNELS_REACHED 8014
#define VE_REC_CANNOT_PREPARE_HEADER 8015
#define VE_REC_CANNOT_ADD_BUFFER 8016
#define VE_PLAY_CANNOT_PREPARE_HEADER 8017
#define VE_ALREADY_SENDING 8018
#define VE_INVALID_IP_ADDRESS 8019
#define VE_ALREADY_PLAYING 8020
#define VE_NOT_ALL_VERSION_INFO 8021
#define VE_DTMF_OUTOF_RANGE 8022
#define VE_INVALID_CHANNELS 8023
#define VE_SET_PLTYPE_FAILED 8024
#define VE_ENCRYPT_NOT_INITED 8025
#define VE_NOT_INITED 8026
#define VE_NOT_SENDING 8027
#define VE_EXT_TRANSPORT_NOT_SUPPORTED 8028
#define VE_EXTERNAL_TRANSPORT_ENABLED 8029
#define VE_STOP_RECORDING_FAILED 8030
#define VE_INVALID_RATE 8031
#define VE_INVALID_PACKET 8032
#define VE_NO_GQOS 8033
#define VE_INVALID_TIMESTAMP 8034
#define VE_RECEIVE_PACKET_TIMEOUT 8035
#define VE_STILL_PLAYING_PREV_DTMF 8036
#define VE_INIT_FAILED_WRONG_EXPIRY 8037
#define VE_SENDING 8038
#define VE_ENABLE_IPV6_FAILED 8039
#define VE_FUNC_NO_STEREO 8040
// Range 8041-8060 is not used
#define VE_FW_TRAVERSAL_ALREADY_INITIALIZED 8081
#define VE_PACKET_RECEIPT_RESTARTED 8082
#define VE_NOT_ALL_INFO 8083
#define VE_CANNOT_SET_SEND_CODEC 8084
#define VE_CODEC_ERROR 8085
#define VE_NETEQ_ERROR 8086
#define VE_RTCP_ERROR 8087
#define VE_INVALID_OPERATION 8088
#define VE_CPU_INFO_ERROR 8089
#define VE_SOUNDCARD_ERROR 8090
#define VE_SPEECH_LEVEL_ERROR 8091
#define VE_SEND_ERROR 8092
#define VE_CANNOT_REMOVE_CONF_CHANNEL 8093
#define VE_PLTYPE_ERROR 8094
#define VE_SET_FEC_FAILED 8095
#define VE_CANNOT_GET_PLAY_DATA 8096
#define VE_APM_ERROR 8097
#define VE_RUNTIME_PLAY_WARNING 8098
#define VE_RUNTIME_REC_WARNING 8099
#define VE_NOT_PLAYING 8100
#define VE_SOCKETS_NOT_INITED 8101
#define VE_CANNOT_GET_SOCKET_INFO 8102
#define VE_INVALID_MULTICAST_ADDRESS 8103
#define VE_DESTINATION_NOT_INITED 8104
#define VE_RECEIVE_SOCKETS_CONFLICT 8105
#define VE_SEND_SOCKETS_CONFLICT 8106
#define VE_TYPING_NOISE_WARNING 8107
#define VE_SATURATION_WARNING 8108
#define VE_NOISE_WARNING 8109
#define VE_CANNOT_GET_SEND_CODEC 8110
#define VE_CANNOT_GET_REC_CODEC 8111
#define VE_ALREADY_INITED 8112
// Errors causing limited functionality
#define VE_RTCP_SOCKET_ERROR 9001
#define VE_MIC_VOL_ERROR 9002
#define VE_SPEAKER_VOL_ERROR 9003
#define VE_CANNOT_ACCESS_MIC_VOL 9004
#define VE_CANNOT_ACCESS_SPEAKER_VOL 9005
#define VE_GET_MIC_VOL_ERROR 9006
#define VE_GET_SPEAKER_VOL_ERROR 9007
#define VE_THREAD_RTCP_ERROR 9008
#define VE_CANNOT_INIT_APM 9009
#define VE_SEND_SOCKET_TOS_ERROR 9010
#define VE_CANNOT_RETRIEVE_DEVICE_NAME 9013
#define VE_SRTP_ERROR 9014
// 9015 is not used
#define VE_INTERFACE_NOT_FOUND 9016
#define VE_TOS_GQOS_CONFLICT 9017
#define VE_CANNOT_ADD_CONF_CHANNEL 9018
#define VE_BUFFER_TOO_SMALL 9019
#define VE_CANNOT_EXECUTE_SETTING 9020
#define VE_CANNOT_RETRIEVE_SETTING 9021
// 9022 is not used
#define VE_RTP_KEEPALIVE_FAILED 9023
#define VE_SEND_DTMF_FAILED 9024
#define VE_CANNOT_RETRIEVE_CNAME 9025
#define VE_DECRYPTION_FAILED 9026
#define VE_ENCRYPTION_FAILED 9027
#define VE_CANNOT_RETRIEVE_RTP_STAT 9028
#define VE_GQOS_ERROR 9029
#define VE_BINDING_SOCKET_TO_LOCAL_ADDRESS_FAILED 9030
#define VE_TOS_INVALID 9031
#define VE_TOS_ERROR 9032
#define VE_CANNOT_RETRIEVE_VALUE 9033
// Critical errors that stop voice functionality
#define VE_PLAY_UNDEFINED_SC_ERR 10001
#define VE_REC_CANNOT_OPEN_SC 10002
#define VE_SOCKET_ERROR 10003
#define VE_MMSYSERR_INVALHANDLE 10004
#define VE_MMSYSERR_NODRIVER 10005
#define VE_MMSYSERR_NOMEM 10006
#define VE_WAVERR_UNPREPARED 10007
#define VE_WAVERR_STILLPLAYING 10008
#define VE_UNDEFINED_SC_ERR 10009
#define VE_UNDEFINED_SC_REC_ERR 10010
#define VE_THREAD_ERROR 10011
#define VE_CANNOT_START_RECORDING 10012
#define VE_PLAY_CANNOT_OPEN_SC 10013
#define VE_NO_WINSOCK_2 10014
#define VE_SEND_SOCKET_ERROR 10015
#define VE_BAD_FILE 10016
#define VE_EXPIRED_COPY 10017
#define VE_NOT_AUTHORISED 10018
#define VE_RUNTIME_PLAY_ERROR 10019
#define VE_RUNTIME_REC_ERROR 10020
#define VE_BAD_ARGUMENT 10021
#define VE_LINUX_API_ONLY 10022
#define VE_REC_DEVICE_REMOVED 10023
#define VE_NO_MEMORY 10024
#define VE_BAD_HANDLE 10025
#define VE_RTP_RTCP_MODULE_ERROR 10026
#define VE_AUDIO_CODING_MODULE_ERROR 10027
#define VE_AUDIO_DEVICE_MODULE_ERROR 10028
#define VE_CANNOT_START_PLAYOUT 10029
#define VE_CANNOT_STOP_RECORDING 10030
#define VE_CANNOT_STOP_PLAYOUT 10031
#define VE_CANNOT_INIT_CHANNEL 10032
#define VE_RECV_SOCKET_ERROR 10033
#define VE_SOCKET_TRANSPORT_MODULE_ERROR 10034
#define VE_AUDIO_CONF_MIX_MODULE_ERROR 10035
// Warnings for other platforms (reserved range 8061-8080)
#define VE_IGNORED_FUNCTION 8061
#endif // WEBRTC_VOICE_ENGINE_VOE_ERRORS_H

View File

@ -1,114 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// In some cases it is desirable to use an audio source or sink which may
// not be available to the VoiceEngine, such as a DV camera. This sub-API
// contains functions that allow for the use of such external recording
// sources and playout sinks. It also describes how recorded data, or data
// to be played out, can be modified outside the VoiceEngine.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoEExternalMedia* media = VoEExternalMedia::GetInterface(voe);
// base->Init();
// ...
// media->SetExternalRecordingStatus(true);
// ...
// base->Terminate();
// base->Release();
// media->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
#define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
class WEBRTC_DLLEXPORT VoEMediaProcess
{
public:
// The VoiceEngine user should override the Process() method in a
// derived class. Process() will be called when audio is ready to
// be processed. The audio can be accessed in several different modes
// given by the |type| parameter. The function should modify the
// original data and ensure that it is copied back to the |audio10ms|
// array. The number of samples in the frame cannot be changed.
// The sampling frequency will depend upon the codec used.
// If |isStereo| is true, audio10ms will contain 16-bit PCM data
// samples in interleaved stereo format (L0,R0,L1,R1,…).
virtual void Process(const int channel, const ProcessingTypes type,
WebRtc_Word16 audio10ms[], const int length,
const int samplingFreq, const bool isStereo) = 0;
protected:
virtual ~VoEMediaProcess() {}
};
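// A minimal sketch of a VoEMediaProcess implementation that attenuates the
// signal in place; the class name is illustrative, and it assumes |length|
// counts samples per channel:
//
// class MyMediaProcess : public VoEMediaProcess
// {
// public:
//     virtual void Process(const int channel, const ProcessingTypes type,
//                          WebRtc_Word16 audio10ms[], const int length,
//                          const int samplingFreq, const bool isStereo)
//     {
//         const int n = isStereo ? 2 * length : length;
//         for (int i = 0; i < n; i++)
//             audio10ms[i] /= 2;  // attenuate by roughly 6 dB
//     }
// };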
class WEBRTC_DLLEXPORT VoEExternalMedia
{
public:
// Factory for the VoEExternalMedia sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEExternalMedia* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEExternalMedia sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Installs a VoEMediaProcess derived instance and activates external
// media for the specified |channel| and |type|.
virtual int RegisterExternalMediaProcessing(
int channel, ProcessingTypes type, VoEMediaProcess& processObject) = 0;
// Removes the VoEMediaProcess derived instance and deactivates external
// media for the specified |channel| and |type|.
virtual int DeRegisterExternalMediaProcessing(
int channel, ProcessingTypes type) = 0;
// Toggles the state of external recording.
virtual int SetExternalRecordingStatus(bool enable) = 0;
// Toggles the state of external playout.
virtual int SetExternalPlayoutStatus(bool enable) = 0;
// This function accepts externally recorded audio. During transmission,
// this method should be called at as regular an interval as possible
// with frames of corresponding size.
virtual int ExternalRecordingInsertData(
const WebRtc_Word16 speechData10ms[], int lengthSamples,
int samplingFreqHz, int current_delay_ms) = 0;
// This function gets audio for an external playout sink.
// During transmission, this function should be called every ~10 ms
// to obtain a new 10 ms frame of audio. The length of the block will
// be 160, 320, 440 or 480 samples (for 16, 32, 44 or 48 kHz sampling
// rates respectively).
virtual int ExternalPlayoutGetData(
WebRtc_Word16 speechData10ms[], int samplingFreqHz,
int current_delay_ms, int& lengthSamples) = 0;
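// A sketch of an external playout loop pulling 10 ms frames at 16 kHz;
// |keepPlaying| is an illustrative loop condition, and external playout is
// assumed to have been enabled before playout was started:
//
// WebRtc_Word16 frame[160];
// int lengthSamples = 0;
// while (keepPlaying)
// {
//     media->ExternalPlayoutGetData(frame, 16000, 0, lengthSamples);
//     // hand the |lengthSamples| samples in |frame| to the external sink,
//     // then wait ~10 ms before pulling the next frame
// }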
protected:
VoEExternalMedia() {}
virtual ~VoEExternalMedia() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H

View File

@ -1,184 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - File playback.
// - File recording.
// - File conversion.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoEFile* file = VoEFile::GetInterface(voe);
// base->Init();
// int ch = base->CreateChannel();
// ...
// base->StartPlayout(ch);
// file->StartPlayingFileAsMicrophone(ch, "data_file_16kHz.pcm", true);
// ...
// file->StopPlayingFileAsMicrophone(ch);
// base->StopPlayout(ch);
// ...
// base->DeleteChannel(ch);
// base->Terminate();
// base->Release();
// file->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_FILE_H
#define WEBRTC_VOICE_ENGINE_VOE_FILE_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
class WEBRTC_DLLEXPORT VoEFile
{
public:
// Factory for the VoEFile sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEFile* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEFile sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Starts playing and mixing files with the local speaker signal for
// playout.
virtual int StartPlayingFileLocally(
int channel,
const char fileNameUTF8[1024],
bool loop = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0,
int startPointMs = 0,
int stopPointMs = 0) = 0;
// Starts playing and mixing streams with the local speaker signal for
// playout.
virtual int StartPlayingFileLocally(
int channel,
InStream* stream,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0,
int startPointMs = 0, int stopPointMs = 0) = 0;
// Stops playback of a file on a specific |channel|.
virtual int StopPlayingFileLocally(int channel) = 0;
// Returns the current file playing state for a specific |channel|.
virtual int IsPlayingFileLocally(int channel) = 0;
// Sets the volume scaling for a speaker file that is already playing.
virtual int ScaleLocalFilePlayout(int channel, float scale) = 0;
// Starts reading data from a file and transmits the data either
// mixed with or instead of the microphone signal.
virtual int StartPlayingFileAsMicrophone(
int channel,
const char fileNameUTF8[1024],
bool loop = false,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) = 0;
// Starts reading data from a stream and transmits the data either
// mixed with or instead of the microphone signal.
virtual int StartPlayingFileAsMicrophone(
int channel,
InStream* stream,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) = 0;
// Stops playing a file as the microphone signal for a specific |channel|.
virtual int StopPlayingFileAsMicrophone(int channel) = 0;
// Returns whether the |channel| is currently playing a file as microphone.
virtual int IsPlayingFileAsMicrophone(int channel) = 0;
// Sets the volume scaling for a microphone file that is already playing.
virtual int ScaleFileAsMicrophonePlayout(int channel, float scale) = 0;
// Starts recording the mixed playout audio.
virtual int StartRecordingPlayout(int channel,
const char* fileNameUTF8,
CodecInst* compression = NULL,
int maxSizeBytes = -1) = 0;
// Stops recording the mixed playout audio.
virtual int StopRecordingPlayout(int channel) = 0;
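// Starts recording the mixed playout audio to a stream.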
virtual int StartRecordingPlayout(int channel,
OutStream* stream,
CodecInst* compression = NULL) = 0;
// Starts recording the microphone signal to a file.
virtual int StartRecordingMicrophone(const char* fileNameUTF8,
CodecInst* compression = NULL,
int maxSizeBytes = -1) = 0;
// Starts recording the microphone signal to a stream.
virtual int StartRecordingMicrophone(OutStream* stream,
CodecInst* compression = NULL) = 0;
// Stops recording the microphone signal.
virtual int StopRecordingMicrophone() = 0;
// Gets the duration of a file.
virtual int GetFileDuration(const char* fileNameUTF8, int& durationMs,
FileFormats format = kFileFormatPcm16kHzFile) = 0;
// Gets the current played position of a file on a specific |channel|.
virtual int GetPlaybackPosition(int channel, int& positionMs) = 0;
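// Converts recorded files between the PCM, WAV and compressed formats.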
virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
const char* fileNameOutUTF8) = 0;
virtual int ConvertPCMToWAV(InStream* streamIn,
OutStream* streamOut) = 0;
virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
const char* fileNameOutUTF8) = 0;
virtual int ConvertWAVToPCM(InStream* streamIn,
OutStream* streamOut) = 0;
virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
const char* fileNameOutUTF8,
CodecInst* compression) = 0;
virtual int ConvertPCMToCompressed(InStream* streamIn,
OutStream* streamOut,
CodecInst* compression) = 0;
virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
const char* fileNameOutUTF8) = 0;
virtual int ConvertCompressedToPCM(InStream* streamIn,
OutStream* streamOut) = 0;
protected:
VoEFile() {}
virtual ~VoEFile() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_FILE_H

View File

@ -1,120 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Audio device handling.
// - Device information.
// - CPU load monitoring.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoEHardware* hardware = VoEHardware::GetInterface(voe);
// base->Init();
// ...
// int n_devices = hardware->GetNumOfPlayoutDevices();
// ...
// base->Terminate();
// base->Release();
// hardware->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
#define WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
class WEBRTC_DLLEXPORT VoEHardware
{
public:
// Factory for the VoEHardware sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEHardware* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEHardware sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Gets the number of audio devices available for recording.
virtual int GetNumOfRecordingDevices(int& devices) = 0;
// Gets the number of audio devices available for playout.
virtual int GetNumOfPlayoutDevices(int& devices) = 0;
// Gets the name of a specific recording device given by an |index|.
// On Windows Vista/7, it also retrieves an additional unique ID
// (GUID) for the recording device.
virtual int GetRecordingDeviceName(int index, char strNameUTF8[128],
char strGuidUTF8[128]) = 0;
// Gets the name of a specific playout device given by an |index|.
// On Windows Vista/7, it also retrieves an additional unique ID
// (GUID) for the playout device.
virtual int GetPlayoutDeviceName(int index, char strNameUTF8[128],
char strGuidUTF8[128]) = 0;
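// A sketch of enumerating the playout devices; error handling is omitted
// and selecting device 0 is illustrative:
//
// int devices = 0;
// hardware->GetNumOfPlayoutDevices(devices);
// char name[128];
// char guid[128];
// for (int i = 0; i < devices; i++)
// {
//     hardware->GetPlayoutDeviceName(i, name, guid);
// }
// hardware->SetPlayoutDevice(0);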
// Checks if the sound card is available to be opened for recording.
virtual int GetRecordingDeviceStatus(bool& isAvailable) = 0;
// Checks if the sound card is available to be opened for playout.
virtual int GetPlayoutDeviceStatus(bool& isAvailable) = 0;
// Sets the audio device used for recording.
virtual int SetRecordingDevice(
int index, StereoChannel recordingChannel = kStereoBoth) = 0;
// Sets the audio device used for playout.
virtual int SetPlayoutDevice(int index) = 0;
// Sets the type of audio device layer to use.
virtual int SetAudioDeviceLayer(AudioLayers audioLayer) = 0;
// Gets the currently used (active) audio device layer.
virtual int GetAudioDeviceLayer(AudioLayers& audioLayer) = 0;
// Gets the VoiceEngine's current CPU consumption in terms of the percent
// of total CPU availability. [Windows only]
virtual int GetCPULoad(int& loadPercent) = 0;
// Gets the computer's current CPU consumption in terms of the percent
// of the total CPU availability.
virtual int GetSystemCPULoad(int& loadPercent) = 0;
// Not supported
virtual int ResetAudioDevice() = 0;
// Not supported
virtual int AudioDeviceControl(
unsigned int par1, unsigned int par2, unsigned int par3) = 0;
// Not supported
virtual int SetLoudspeakerStatus(bool enable) = 0;
// Not supported
virtual int GetLoudspeakerStatus(bool& enabled) = 0;
protected:
VoEHardware() {}
virtual ~VoEHardware() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H

View File

@ -1,58 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
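// Usage example, omitting error checking (a sketch in the style of the
// other sub-APIs; |ch| is assumed to be an existing channel):
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoENetEqStats* neteq = VoENetEqStats::GetInterface(voe);
// base->Init();
// NetworkStatistics stats;
// neteq->GetNetworkStatistics(ch, stats);
// ...
// base->Terminate();
// base->Release();
// neteq->Release();
// VoiceEngine::Delete(voe);
//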
#ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
#define WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
class WEBRTC_DLLEXPORT VoENetEqStats
{
public:
// Factory for the VoENetEqStats sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoENetEqStats* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoENetEqStats sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Get the "in-call" statistics from NetEQ.
// The statistics are reset after the query.
virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0;
// Get the "post-call" jitter statistics from NetEQ.
// The statistics are not reset by the query. Use the function
// ResetJitterStatistics() to reset.
virtual int GetJitterStatistics(int channel, JitterStatistics& stats) = 0;
// Gets the optimal buffer size calculated for the current network
// conditions.
virtual int GetPreferredBufferSize(
int channel, unsigned short& preferredBufferSize) = 0;
// Reset "post-call" jitter statistics.
virtual int ResetJitterStatistics(int channel) = 0;
protected:
VoENetEqStats() {}
virtual ~VoENetEqStats() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H

View File

@ -1,177 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - External protocol support.
// - Extended port and address APIs.
// - Port and address filters.
// - Windows GQoS functions.
// - Packet timeout notification.
// - Dead-or-Alive connection observations.
// - Transmission of raw RTP/RTCP packets into existing channels.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoENetwork* netw = VoENetwork::GetInterface(voe);
// base->Init();
// int ch = base->CreateChannel();
// ...
// netw->SetPeriodicDeadOrAliveStatus(ch, true);
// ...
// base->DeleteChannel(ch);
// base->Terminate();
// base->Release();
// netw->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
#define WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
// VoEConnectionObserver
class WEBRTC_DLLEXPORT VoEConnectionObserver
{
public:
// This method will be called periodically and deliver dead-or-alive
// notifications for a specified |channel| when the observer interface
// has been installed and activated.
virtual void OnPeriodicDeadOrAlive(const int channel, const bool alive) = 0;
protected:
virtual ~VoEConnectionObserver() {}
};
// VoENetwork
class WEBRTC_DLLEXPORT VoENetwork
{
public:
// Factory for the VoENetwork sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoENetwork* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoENetwork sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Installs and enables a user-defined external transport protocol for a
// specified |channel|.
virtual int RegisterExternalTransport(
int channel, Transport& transport) = 0;
// Removes and disables a user-defined external transport protocol for a
// specified |channel|.
virtual int DeRegisterExternalTransport(int channel) = 0;
// The packets received from the network should be passed to this
// function when external transport is enabled. Note that the data
// including the RTP header must also be given to the VoiceEngine.
virtual int ReceivedRTPPacket(
int channel, const void* data, unsigned int length) = 0;
// The packets received from the network should be passed to this
// function when external transport is enabled. Note that the data
// including the RTCP header must also be given to the VoiceEngine.
virtual int ReceivedRTCPPacket(
int channel, const void* data, unsigned int length) = 0;
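// A sketch of external transport, assuming the Transport interface
// declared in common_types.h (SendPacket()/SendRTCPPacket()); the socket
// hand-off is only indicated:
//
// class MyTransport : public Transport
// {
// public:
//     virtual int SendPacket(int channel, const void* data, int len)
//     {
//         // write |len| bytes to the application-owned RTP socket
//         return len;
//     }
//     virtual int SendRTCPPacket(int channel, const void* data, int len)
//     {
//         // write |len| bytes to the application-owned RTCP socket
//         return len;
//     }
// };
//
// MyTransport transport;
// netw->RegisterExternalTransport(ch, transport);
// // for each packet read from the application-owned sockets:
// netw->ReceivedRTPPacket(ch, data, length);
// netw->ReceivedRTCPPacket(ch, data, length);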
// Gets the source ports and IP address of incoming packets on a
// specific |channel|.
virtual int GetSourceInfo(
int channel, int& rtpPort, int& rtcpPort, char ipAddr[64]) = 0;
// Gets the local (host) IP address.
virtual int GetLocalIP(char ipAddr[64], bool ipv6 = false) = 0;
// Enables IPv6 for a specified |channel|.
virtual int EnableIPv6(int channel) = 0;
// Gets the current IPv6 status for a specified |channel|.
virtual bool IPv6IsEnabled(int channel) = 0;
// Enables a port and IP address filter for incoming packets on a
// specific |channel|.
virtual int SetSourceFilter(int channel,
int rtpPort, int rtcpPort = 0, const char ipAddr[64] = 0) = 0;
// Gets the current port and IP-address filter for a specified |channel|.
virtual int GetSourceFilter(
int channel, int& rtpPort, int& rtcpPort, char ipAddr[64]) = 0;
// Sets the six-bit Differentiated Services Code Point (DSCP) in the
// IP header of the outgoing stream for a specific |channel|.
virtual int SetSendTOS(int channel,
int DSCP, int priority = -1, bool useSetSockopt = false) = 0;
// Gets the six-bit DSCP in the IP header of the outgoing stream for
// a specific channel.
virtual int GetSendTOS(
int channel, int& DSCP, int& priority, bool& useSetSockopt) = 0;
// Sets the Generic Quality of Service (GQoS) service level.
// The Windows operating system then maps to a Differentiated Services
// Code Point (DSCP) and to an 802.1p setting. [Windows only]
virtual int SetSendGQoS(
int channel, bool enable, int serviceType, int overrideDSCP = 0) = 0;
// Gets the Generic Quality of Service (GQoS) service level.
virtual int GetSendGQoS(
int channel, bool& enabled, int& serviceType, int& overrideDSCP) = 0;
// Enables or disables warnings that report if packets have not been
// received in |timeoutSeconds| seconds for a specific |channel|.
virtual int SetPacketTimeoutNotification(
int channel, bool enable, int timeoutSeconds = 2) = 0;
// Gets the current time-out notification status.
virtual int GetPacketTimeoutNotification(
int channel, bool& enabled, int& timeoutSeconds) = 0;
// Installs the observer class implementation for a specified |channel|.
virtual int RegisterDeadOrAliveObserver(
int channel, VoEConnectionObserver& observer) = 0;
// Removes the observer class implementation for a specified |channel|.
virtual int DeRegisterDeadOrAliveObserver(int channel) = 0;
// Enables or disables the periodic dead-or-alive callback functionality
// for a specified |channel|.
virtual int SetPeriodicDeadOrAliveStatus(
int channel, bool enable, int sampleTimeSeconds = 2) = 0;
// Gets the current dead-or-alive notification status.
virtual int GetPeriodicDeadOrAliveStatus(
int channel, bool& enabled, int& sampleTimeSeconds) = 0;
// Handles sending a raw UDP data packet over an existing RTP or RTCP
// socket.
virtual int SendUDPPacket(
int channel, const void* data, unsigned int length,
int& transmittedBytes, bool useRtcpSocket = false) = 0;
protected:
VoENetwork() {}
virtual ~VoENetwork() {}
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_NETWORK_H


@ -1,234 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Callbacks for RTP and RTCP events such as modified SSRC or CSRC.
// - SSRC handling.
// - Transmission of RTCP sender reports.
// - Obtaining RTCP data from incoming RTCP sender reports.
// - RTP and RTCP statistics (jitter, packet loss, RTT etc.).
// - Forward Error Correction (FEC).
// - RTP Keepalive for maintaining the NAT mappings associated with RTP flows.
// - Writing RTP and RTCP packets to binary files for off-line analysis of
// the call quality.
// - Inserting extra RTP packets into an active audio stream.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoERTP_RTCP* rtp_rtcp = VoERTP_RTCP::GetInterface(voe);
// base->Init();
// int ch = base->CreateChannel();
// ...
// rtp_rtcp->SetLocalSSRC(ch, 12345);
// ...
// base->DeleteChannel(ch);
// base->Terminate();
// base->Release();
// rtp_rtcp->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
#define WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
// VoERTPObserver
class WEBRTC_DLLEXPORT VoERTPObserver
{
public:
virtual void OnIncomingCSRCChanged(
const int channel, const unsigned int CSRC, const bool added) = 0;
virtual void OnIncomingSSRCChanged(
const int channel, const unsigned int SSRC) = 0;
protected:
virtual ~VoERTPObserver() {}
};
// VoERTCPObserver
class WEBRTC_DLLEXPORT VoERTCPObserver
{
public:
virtual void OnApplicationDataReceived(
const int channel, const unsigned char subType,
const unsigned int name, const unsigned char* data,
const unsigned short dataLengthInBytes) = 0;
protected:
virtual ~VoERTCPObserver() {}
};
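// A minimal receiver sketch; |MyRtcpObserver| is a hypothetical
// application class, registered per channel via RegisterRTCPObserver()
// below:
//
// class MyRtcpObserver : public VoERTCPObserver {
// public:
//     virtual void OnApplicationDataReceived(
//         const int channel, const unsigned char subType,
//         const unsigned int name, const unsigned char* data,
//         const unsigned short dataLengthInBytes)
//     {
//         // Inspect the received RTCP APP payload here.
//     }
// };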
// CallStatistics
struct CallStatistics
{
unsigned short fractionLost;
unsigned int cumulativeLost;
unsigned int extendedMax;
unsigned int jitterSamples;
int rttMs;
int bytesSent;
int packetsSent;
int bytesReceived;
int packetsReceived;
};
// VoERTP_RTCP
class WEBRTC_DLLEXPORT VoERTP_RTCP
{
public:
// Factory for the VoERTP_RTCP sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoERTP_RTCP sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Registers an instance of a VoERTPObserver derived class for a specified
// |channel|. It will allow the user to observe callbacks related to the
// RTP protocol such as changes in the incoming SSRC.
virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer) = 0;
// Deregisters an instance of a VoERTPObserver derived class for a
// specified |channel|.
virtual int DeRegisterRTPObserver(int channel) = 0;
// Registers an instance of a VoERTCPObserver derived class for a specified
// |channel|.
virtual int RegisterRTCPObserver(
int channel, VoERTCPObserver& observer) = 0;
// Deregisters an instance of a VoERTCPObserver derived class for a
// specified |channel|.
virtual int DeRegisterRTCPObserver(int channel) = 0;
// Sets the local RTP synchronization source identifier (SSRC) explicitly.
virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0;
// Gets the local RTP SSRC of a specified |channel|.
virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0;
// Gets the SSRC of the incoming RTP packets.
virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0;
// Sets the status of rtp-audio-level-indication on a specific |channel|.
virtual int SetRTPAudioLevelIndicationStatus(
int channel, bool enable, unsigned char ID = 1) = 0;
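// For example (a sketch, assuming |rtp_rtcp| and |ch| as in the usage
// example above): enable the extension with header-extension ID 1:
//
// rtp_rtcp->SetRTPAudioLevelIndicationStatus(ch, true, 1);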
// Gets the status of rtp-audio-level-indication on a specific |channel|.
virtual int GetRTPAudioLevelIndicationStatus(
int channel, bool& enabled, unsigned char& ID) = 0;
// Gets the CSRCs of the incoming RTP packets.
virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]) = 0;
// Sets the RTCP status on a specific |channel|.
virtual int SetRTCPStatus(int channel, bool enable) = 0;
// Gets the RTCP status on a specific |channel|.
virtual int GetRTCPStatus(int channel, bool& enabled) = 0;
// Sets the canonical name (CNAME) parameter for RTCP reports on a
// specific |channel|.
virtual int SetRTCP_CNAME(int channel, const char cName[256]) = 0;
// Gets the canonical name (CNAME) parameter for RTCP reports on a
// specific |channel|.
virtual int GetRTCP_CNAME(int channel, char cName[256]) = 0;
// Gets the canonical name (CNAME) parameter for incoming RTCP reports
// on a specific channel.
virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]) = 0;
// Gets RTCP data from incoming RTCP Sender Reports.
virtual int GetRemoteRTCPData(
int channel, unsigned int& NTPHigh, unsigned int& NTPLow,
unsigned int& timestamp, unsigned int& playoutTimestamp,
unsigned int* jitter = NULL, unsigned short* fractionLost = NULL) = 0;
// Gets RTP statistics for a specific |channel|.
virtual int GetRTPStatistics(
int channel, unsigned int& averageJitterMs, unsigned int& maxJitterMs,
unsigned int& discardedPackets) = 0;
// Gets RTCP statistics for a specific |channel|.
virtual int GetRTCPStatistics(int channel, CallStatistics& stats) = 0;
// Sends an RTCP APP packet on a specific |channel|.
virtual int SendApplicationDefinedRTCPPacket(
int channel, const unsigned char subType, unsigned int name,
const char* data, unsigned short dataLengthInBytes) = 0;
// Sets the Forward Error Correction (FEC) status on a specific |channel|.
virtual int SetFECStatus(
int channel, bool enable, int redPayloadtype = -1) = 0;
// Gets the FEC status on a specific |channel|.
virtual int GetFECStatus(
int channel, bool& enabled, int& redPayloadtype) = 0;
// Sets the RTP keepalive mechanism status.
// This functionality can maintain an existing Network Address Translator
// (NAT) mapping while regular RTP is no longer transmitted.
virtual int SetRTPKeepaliveStatus(
int channel, bool enable, unsigned char unknownPayloadType,
int deltaTransmitTimeSeconds = 15) = 0;
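// For example (a sketch, assuming |rtp_rtcp| and |ch| as in the usage
// example above, and that payload type 127 is not used by any codec):
//
// rtp_rtcp->SetRTPKeepaliveStatus(ch, true, 127);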
// Gets the RTP keepalive mechanism status.
virtual int GetRTPKeepaliveStatus(
int channel, bool& enabled, unsigned char& unknownPayloadType,
int& deltaTransmitTimeSeconds) = 0;
// Enables capturing of RTP packets to a binary file on a specific
// |channel| and for a given |direction|. The file can later be replayed
// using e.g. RTP Tools rtpplay since the binary file format is
// compatible with the rtpdump format.
virtual int StartRTPDump(
int channel, const char fileNameUTF8[1024],
RTPDirections direction = kRtpIncoming) = 0;
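// A sketch of a typical capture session, assuming |rtp_rtcp| and |ch| as
// in the usage example above; the file path is hypothetical:
//
// rtp_rtcp->StartRTPDump(ch, "/tmp/incoming.rtp", kRtpIncoming);
// // ... call in progress ...
// rtp_rtcp->StopRTPDump(ch, kRtpIncoming);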
// Disables capturing of RTP packets to a binary file on a specific
// |channel| and for a given |direction|.
virtual int StopRTPDump(
int channel, RTPDirections direction = kRtpIncoming) = 0;
// Gets the current RTP capturing state for the specified
// |channel| and |direction|.
virtual int RTPDumpIsActive(
int channel, RTPDirections direction = kRtpIncoming) = 0;
// Sends an extra RTP packet using an existing/active RTP session.
// It is possible to set the payload type, marker bit and payload
// of the extra RTP packet.
virtual int InsertExtraRTPPacket(
int channel, unsigned char payloadType, bool markerBit,
const char* payloadData, unsigned short payloadSize) = 0;
protected:
VoERTP_RTCP() {}
virtual ~VoERTP_RTCP() {}
};
} // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H


@ -1,85 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - RTP header modification (time stamp and sequence number fields).
// - Playout delay tuning to synchronize the voice with video.
// - Playout delay monitoring.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoEVideoSync* vsync = VoEVideoSync::GetInterface(voe);
// base->Init();
// ...
// int buffer_ms(0);
// vsync->GetPlayoutBufferSize(buffer_ms);
// ...
// base->Terminate();
// base->Release();
// vsync->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
#define WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
#include "common_types.h"
namespace webrtc {
class RtpRtcp;
class VoiceEngine;
class WEBRTC_DLLEXPORT VoEVideoSync
{
public:
// Factory for the VoEVideoSync sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEVideoSync* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEVideoSync sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Gets the current sound card buffer size (playout delay).
virtual int GetPlayoutBufferSize(int& bufferMs) = 0;
// Sets an additional delay for the playout jitter buffer.
virtual int SetMinimumPlayoutDelay(int channel, int delayMs) = 0;
// Gets the sum of the algorithmic delay, jitter buffer delay, and the
// playout buffer delay for a specified |channel|.
virtual int GetDelayEstimate(int channel, int& delayMs) = 0;
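// A sketch of lip-sync tuning, assuming |vsync| and |ch| as in the usage
// example above and |video_delay_ms| reported by the video side:
//
// int audio_delay_ms(0);
// vsync->GetDelayEstimate(ch, audio_delay_ms);
// if (audio_delay_ms < video_delay_ms)
//     vsync->SetMinimumPlayoutDelay(ch, video_delay_ms - audio_delay_ms);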
// Manual initialization of the RTP timestamp.
virtual int SetInitTimestamp(int channel, unsigned int timestamp) = 0;
// Manual initialization of the RTP sequence number.
virtual int SetInitSequenceNumber(int channel, short sequenceNumber) = 0;
// Gets the RTP timestamp of the most recently played out audio frame.
virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp) = 0;
virtual int GetRtpRtcp (int channel, RtpRtcp* &rtpRtcpModule) = 0;
protected:
VoEVideoSync() { }
virtual ~VoEVideoSync() { }
};
} // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H


@ -1,127 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This sub-API supports the following functionalities:
//
// - Speaker volume controls.
// - Microphone volume control.
// - Non-linear speech level control.
// - Mute functions.
// - Additional stereo scaling methods.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
// VoEVolumeControl* volume = VoEVolumeControl::GetInterface(voe);
// base->Init();
// int ch = base->CreateChannel();
// ...
// volume->SetInputMute(ch, true);
// ...
// base->DeleteChannel(ch);
// base->Terminate();
// base->Release();
// volume->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
#define WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
#include "common_types.h"
namespace webrtc {
class VoiceEngine;
class WEBRTC_DLLEXPORT VoEVolumeControl
{
public:
// Factory for the VoEVolumeControl sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEVolumeControl* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEVolumeControl sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-APIs before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Sets the speaker |volume| level. Valid range is [0,255].
virtual int SetSpeakerVolume(unsigned int volume) = 0;
// Gets the speaker |volume| level.
virtual int GetSpeakerVolume(unsigned int& volume) = 0;
// Mutes the speaker device completely in the operating system.
virtual int SetSystemOutputMute(bool enable) = 0;
// Gets the output device mute state in the operating system.
virtual int GetSystemOutputMute(bool &enabled) = 0;
// Sets the microphone volume level. Valid range is [0,255].
virtual int SetMicVolume(unsigned int volume) = 0;
// Gets the microphone volume level.
virtual int GetMicVolume(unsigned int& volume) = 0;
// Mutes the microphone input signal completely without affecting
// the audio device volume.
virtual int SetInputMute(int channel, bool enable) = 0;
// Gets the current microphone input mute state.
virtual int GetInputMute(int channel, bool& enabled) = 0;
// Mutes the microphone device completely in the operating system.
virtual int SetSystemInputMute(bool enable) = 0;
// Gets the mute state of the input device in the operating system.
virtual int GetSystemInputMute(bool& enabled) = 0;
// Gets the microphone speech |level|, mapped non-linearly to the range
// [0,9].
virtual int GetSpeechInputLevel(unsigned int& level) = 0;
// Gets the speaker speech |level|, mapped non-linearly to the range
// [0,9].
virtual int GetSpeechOutputLevel(int channel, unsigned int& level) = 0;
// Gets the microphone speech |level|, mapped linearly to the range
// [0,32768].
virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0;
// Gets the speaker speech |level|, mapped linearly to the range [0,32768].
virtual int GetSpeechOutputLevelFullRange(
int channel, unsigned int& level) = 0;
// Sets a volume |scaling| applied to the outgoing signal of a specific
// channel. Valid scale range is [0.0, 10.0].
virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0;
// Gets the current volume scaling for a specified |channel|.
virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0;
// Scales volume of the |left| and |right| channels independently.
// Valid scale range is [0.0, 1.0].
virtual int SetOutputVolumePan(int channel, float left, float right) = 0;
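// For example (a sketch, assuming |volume| and |ch| as in the usage
// example above): scaling the right channel down moves the stereo image
// toward the left:
//
// volume->SetOutputVolumePan(ch, 1.0f, 0.3f);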
// Gets the current left and right scaling factors.
virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0;
protected:
VoEVolumeControl() {};
virtual ~VoEVolumeControl() {};
};
} // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H


@ -1,98 +0,0 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_ARM_MODE := arm
LOCAL_MODULE := libwebrtc_voe_core
LOCAL_MODULE_TAGS := optional
LOCAL_CPP_EXTENSION := .cc
LOCAL_GENERATED_SOURCES :=
LOCAL_SRC_FILES := audio_frame_operations.cc \
channel.cc \
channel_manager.cc \
channel_manager_base.cc \
dtmf_inband.cc \
dtmf_inband_queue.cc \
level_indicator.cc \
monitor_module.cc \
output_mixer.cc \
ref_count.cc \
shared_data.cc \
statistics.cc \
transmit_mixer.cc \
utility.cc \
voe_audio_processing_impl.cc \
voe_base_impl.cc \
voe_call_report_impl.cc \
voe_codec_impl.cc \
voe_dtmf_impl.cc \
voe_encryption_impl.cc \
voe_external_media_impl.cc \
voe_file_impl.cc \
voe_hardware_impl.cc \
voe_neteq_stats_impl.cc \
voe_network_impl.cc \
voe_rtp_rtcp_impl.cc \
voe_video_sync_impl.cc \
voe_volume_control_impl.cc \
voice_engine_impl.cc
# Flags passed to both C and C++ files.
MY_CFLAGS :=
MY_CFLAGS_C :=
MY_DEFS := '-DNO_TCMALLOC' \
'-DNO_HEAPCHECKER' \
'-DWEBRTC_TARGET_PC' \
'-DWEBRTC_LINUX' \
'-DWEBRTC_THREAD_RR' \
'-DWEBRTC_ANDROID' \
'-DANDROID'
LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
# Include paths placed before CFLAGS/CPPFLAGS
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../.. \
$(LOCAL_PATH)/../interface \
$(LOCAL_PATH)/../../../common_audio/resampler/main/interface \
$(LOCAL_PATH)/../../../common_audio/signal_processing_library/main/interface \
$(LOCAL_PATH)/../../../modules/audio_coding/main/interface \
$(LOCAL_PATH)/../../../modules/interface \
$(LOCAL_PATH)/../../../modules/audio_conference_mixer/interface \
$(LOCAL_PATH)/../../../modules/audio_device/main/interface \
$(LOCAL_PATH)/../../../modules/audio_processing/main/interface \
$(LOCAL_PATH)/../../../modules/media_file/interface \
$(LOCAL_PATH)/../../../modules/rtp_rtcp/interface \
$(LOCAL_PATH)/../../../modules/udp_transport/interface \
$(LOCAL_PATH)/../../../modules/utility/interface \
$(LOCAL_PATH)/../../../system_wrappers/interface
# Flags passed to only C++ (and not C) files.
LOCAL_CPPFLAGS :=
LOCAL_LDFLAGS :=
LOCAL_STATIC_LIBRARIES :=
LOCAL_SHARED_LIBRARIES := libcutils \
libdl \
libstlport
LOCAL_ADDITIONAL_DEPENDENCIES :=
ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
LOCAL_LDLIBS += -ldl -lpthread
endif
ifneq ($(TARGET_SIMULATOR),true)
LOCAL_SHARED_LIBRARIES += libdl
endif
include external/stlport/libstlport.mk
include $(BUILD_STATIC_LIBRARY)
#include $(BUILD_SHARED_LIBRARY)


@ -1,129 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "audio_frame_operations.h"
#include "module_common_types.h"
namespace webrtc {
namespace voe {
WebRtc_Word32
AudioFrameOperations::MonoToStereo(AudioFrame& audioFrame)
{
if (audioFrame._audioChannel != 1)
{
return -1;
}
if ((audioFrame._payloadDataLengthInSamples << 1) >=
AudioFrame::kMaxAudioFrameSizeSamples)
{
// not enough memory to expand from mono to stereo
return -1;
}
WebRtc_Word16* payloadCopy =
new WebRtc_Word16[audioFrame._payloadDataLengthInSamples];
memcpy(payloadCopy, audioFrame._payloadData,
sizeof(WebRtc_Word16)*audioFrame._payloadDataLengthInSamples);
for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
{
audioFrame._payloadData[2*i] = payloadCopy[i];
audioFrame._payloadData[2*i+1] = payloadCopy[i];
}
audioFrame._audioChannel = 2;
delete [] payloadCopy;
return 0;
}
WebRtc_Word32
AudioFrameOperations::StereoToMono(AudioFrame& audioFrame)
{
if (audioFrame._audioChannel != 2)
{
return -1;
}
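// Average the two channels; pre-shifting halves each sample so the sum
// cannot overflow WebRtc_Word16 (at the cost of one bit of precision).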
for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
{
audioFrame._payloadData[i] = (audioFrame._payloadData[2*i] >> 1) +
(audioFrame._payloadData[2*i+1] >> 1);
}
audioFrame._audioChannel = 1;
return 0;
}
WebRtc_Word32
AudioFrameOperations::Mute(AudioFrame& audioFrame)
{
const int sizeInBytes = sizeof(WebRtc_Word16) *
audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel;
memset(audioFrame._payloadData, 0, sizeInBytes);
audioFrame._energy = 0;
return 0;
}
WebRtc_Word32
AudioFrameOperations::Scale(const float left,
const float right,
AudioFrame& audioFrame)
{
if (audioFrame._audioChannel == 1)
{
assert(false);
return -1;
}
for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
{
audioFrame._payloadData[2*i] =
(WebRtc_Word16)(left*audioFrame._payloadData[2*i]);
audioFrame._payloadData[2*i+1] =
(WebRtc_Word16)(right*audioFrame._payloadData[2*i+1]);
}
return 0;
}
WebRtc_Word32
AudioFrameOperations::ScaleWithSat(const float scale, AudioFrame& audioFrame)
{
WebRtc_Word32 tmp(0);
// Ensure that the output result is saturated to [-32768, +32767].
for (int i = 0;
i < audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel;
i++)
{
tmp = static_cast<WebRtc_Word32> (scale * audioFrame._payloadData[i]);
if (tmp < -32768)
{
audioFrame._payloadData[i] = -32768;
}
else if (tmp > 32767)
{
audioFrame._payloadData[i] = 32767;
}
else
{
audioFrame._payloadData[i] = static_cast<WebRtc_Word16> (tmp);
}
}
return 0;
}
} // namespace voe
} // namespace webrtc


@ -1,43 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
#define WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
#include "typedefs.h"
namespace webrtc {
class AudioFrame;
namespace voe {
class AudioFrameOperations
{
public:
static WebRtc_Word32 MonoToStereo(AudioFrame& audioFrame);
static WebRtc_Word32 StereoToMono(AudioFrame& audioFrame);
static WebRtc_Word32 Mute(AudioFrame& audioFrame);
static WebRtc_Word32 Scale(const float left,
const float right,
AudioFrame& audioFrame);
static WebRtc_Word32 ScaleWithSat(const float scale,
AudioFrame& audioFrame);
};
} // namespace voe
} // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H

File diff suppressed because it is too large


@ -1,676 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_H
#define WEBRTC_VOICE_ENGINE_CHANNEL_H
#include "voe_network.h"
#include "audio_coding_module.h"
#include "common_types.h"
#include "shared_data.h"
#include "rtp_rtcp.h"
#include "voe_audio_processing.h"
#include "voice_engine_defines.h"
#ifndef WEBRTC_EXTERNAL_TRANSPORT
#include "udp_transport.h"
#endif
#include "audio_conference_mixer_defines.h"
#include "file_player.h"
#include "file_recorder.h"
#ifdef WEBRTC_SRTP
#include "SrtpModule.h"
#endif
#include "dtmf_inband.h"
#include "dtmf_inband_queue.h"
#include "level_indicator.h"
#include "resampler.h"
#ifdef WEBRTC_DTMF_DETECTION
#include "voe_dtmf.h" // TelephoneEventDetectionMethods, TelephoneEventObserver
#endif
namespace webrtc
{
class CriticalSectionWrapper;
class ProcessThread;
class AudioDeviceModule;
class RtpRtcp;
class FileWrapper;
class RtpDump;
class VoiceEngineObserver;
class VoEMediaProcess;
class VoERTPObserver;
class VoERTCPObserver;
struct CallStatistics;
namespace voe
{
class Statistics;
class TransmitMixer;
class OutputMixer;
class Channel:
public RtpData,
public RtpFeedback,
public RtcpFeedback,
#ifndef WEBRTC_EXTERNAL_TRANSPORT
public UdpTransportData, // receiving packets from sockets
#endif
public FileCallback, // receiving notifications from file player & recorder
public Transport,
public RtpAudioFeedback,
public AudioPacketizationCallback, // receive encoded packets from the ACM
public ACMVADCallback, // receive voice activity from the ACM
#ifdef WEBRTC_DTMF_DETECTION
public AudioCodingFeedback, // inband Dtmf detection in the ACM
#endif
public MixerParticipant // supplies output mixer with audio frames
{
public:
enum {KNumSocketThreads = 1};
enum {KNumberOfSocketBuffers = 8};
static WebRtc_UWord8 numSocketThreads;
public:
virtual ~Channel();
static WebRtc_Word32 CreateChannel(Channel*& channel,
const WebRtc_Word32 channelId,
const WebRtc_UWord32 instanceId);
Channel(const WebRtc_Word32 channelId, const WebRtc_UWord32 instanceId);
WebRtc_Word32 Init();
WebRtc_Word32 SetEngineInformation(
Statistics& engineStatistics,
OutputMixer& outputMixer,
TransmitMixer& transmitMixer,
ProcessThread& moduleProcessThread,
AudioDeviceModule& audioDeviceModule,
VoiceEngineObserver* voiceEngineObserver,
CriticalSectionWrapper* callbackCritSect);
WebRtc_Word32 UpdateLocalTimeStamp();
public:
// API methods
// VoEBase
WebRtc_Word32 StartPlayout();
WebRtc_Word32 StopPlayout();
WebRtc_Word32 StartSend();
WebRtc_Word32 StopSend();
WebRtc_Word32 StartReceiving();
WebRtc_Word32 StopReceiving();
#ifndef WEBRTC_EXTERNAL_TRANSPORT
WebRtc_Word32 SetLocalReceiver(const WebRtc_UWord16 rtpPort,
const WebRtc_UWord16 rtcpPort,
const WebRtc_Word8 ipAddr[64],
const WebRtc_Word8 multicastIpAddr[64]);
WebRtc_Word32 GetLocalReceiver(int& port, int& RTCPport, char ipAddr[]);
WebRtc_Word32 SetSendDestination(const WebRtc_UWord16 rtpPort,
const WebRtc_Word8 ipAddr[64],
const int sourcePort,
const WebRtc_UWord16 rtcpPort);
WebRtc_Word32 GetSendDestination(int& port, char ipAddr[64],
int& sourcePort, int& RTCPport);
#endif
WebRtc_Word32 SetNetEQPlayoutMode(NetEqModes mode);
WebRtc_Word32 GetNetEQPlayoutMode(NetEqModes& mode);
WebRtc_Word32 SetNetEQBGNMode(NetEqBgnModes mode);
WebRtc_Word32 GetNetEQBGNMode(NetEqBgnModes& mode);
WebRtc_Word32 SetOnHoldStatus(bool enable, OnHoldModes mode);
WebRtc_Word32 GetOnHoldStatus(bool& enabled, OnHoldModes& mode);
WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
WebRtc_Word32 DeRegisterVoiceEngineObserver();
// VoECodec
WebRtc_Word32 GetSendCodec(CodecInst& codec);
WebRtc_Word32 GetRecCodec(CodecInst& codec);
WebRtc_Word32 SetSendCodec(const CodecInst& codec);
WebRtc_Word32 SetVADStatus(bool enableVAD, ACMVADMode mode,
bool disableDTX);
WebRtc_Word32 GetVADStatus(bool& enabledVAD, ACMVADMode& mode,
bool& disabledDTX);
WebRtc_Word32 SetRecPayloadType(const CodecInst& codec);
WebRtc_Word32 GetRecPayloadType(CodecInst& codec);
WebRtc_Word32 SetAMREncFormat(AmrMode mode);
WebRtc_Word32 SetAMRDecFormat(AmrMode mode);
WebRtc_Word32 SetAMRWbEncFormat(AmrMode mode);
WebRtc_Word32 SetAMRWbDecFormat(AmrMode mode);
WebRtc_Word32 SetSendCNPayloadType(int type, PayloadFrequencies frequency);
WebRtc_Word32 SetISACInitTargetRate(int rateBps, bool useFixedFrameSize);
WebRtc_Word32 SetISACMaxRate(int rateBps);
WebRtc_Word32 SetISACMaxPayloadSize(int sizeBytes);
// VoENetwork
WebRtc_Word32 RegisterExternalTransport(Transport& transport);
WebRtc_Word32 DeRegisterExternalTransport();
WebRtc_Word32 ReceivedRTPPacket(const WebRtc_Word8* data,
WebRtc_Word32 length);
WebRtc_Word32 ReceivedRTCPPacket(const WebRtc_Word8* data,
WebRtc_Word32 length);
#ifndef WEBRTC_EXTERNAL_TRANSPORT
WebRtc_Word32 GetSourceInfo(int& rtpPort, int& rtcpPort, char ipAddr[64]);
WebRtc_Word32 EnableIPv6();
bool IPv6IsEnabled() const;
WebRtc_Word32 SetSourceFilter(int rtpPort, int rtcpPort,
const char ipAddr[64]);
WebRtc_Word32 GetSourceFilter(int& rtpPort, int& rtcpPort, char ipAddr[64]);
WebRtc_Word32 SetSendTOS(int DSCP, int priority, bool useSetSockopt);
WebRtc_Word32 GetSendTOS(int &DSCP, int& priority, bool &useSetSockopt);
#if defined(_WIN32)
WebRtc_Word32 SetSendGQoS(bool enable, int serviceType, int overrideDSCP);
WebRtc_Word32 GetSendGQoS(bool &enabled, int &serviceType,
int &overrideDSCP);
#endif
#endif
WebRtc_Word32 SetPacketTimeoutNotification(bool enable, int timeoutSeconds);
WebRtc_Word32 GetPacketTimeoutNotification(bool& enabled,
int& timeoutSeconds);
WebRtc_Word32 RegisterDeadOrAliveObserver(VoEConnectionObserver& observer);
WebRtc_Word32 DeRegisterDeadOrAliveObserver();
WebRtc_Word32 SetPeriodicDeadOrAliveStatus(bool enable,
int sampleTimeSeconds);
WebRtc_Word32 GetPeriodicDeadOrAliveStatus(bool& enabled,
int& sampleTimeSeconds);
WebRtc_Word32 SendUDPPacket(const void* data, unsigned int length,
int& transmittedBytes, bool useRtcpSocket);
// VoEFile
int StartPlayingFileLocally(const char* fileName, const bool loop,
const FileFormats format,
const int startPosition,
const float volumeScaling,
const int stopPosition,
const CodecInst* codecInst);
int StartPlayingFileLocally(InStream* stream, const FileFormats format,
const int startPosition,
const float volumeScaling,
const int stopPosition,
const CodecInst* codecInst);
int StopPlayingFileLocally();
int IsPlayingFileLocally() const;
int ScaleLocalFilePlayout(const float scale);
int GetLocalPlayoutPosition(int& positionMs);
int StartPlayingFileAsMicrophone(const char* fileName, const bool loop,
const FileFormats format,
const int startPosition,
const float volumeScaling,
const int stopPosition,
const CodecInst* codecInst);
int StartPlayingFileAsMicrophone(InStream* stream,
const FileFormats format,
const int startPosition,
const float volumeScaling,
const int stopPosition,
const CodecInst* codecInst);
int StopPlayingFileAsMicrophone();
int IsPlayingFileAsMicrophone() const;
int ScaleFileAsMicrophonePlayout(const float scale);
int StartRecordingPlayout(const char* fileName, const CodecInst* codecInst);
int StartRecordingPlayout(OutStream* stream, const CodecInst* codecInst);
int StopRecordingPlayout();
void SetMixWithMicStatus(bool mix);
// VoEExternalMediaProcessing
int RegisterExternalMediaProcessing(ProcessingTypes type,
VoEMediaProcess& processObject);
int DeRegisterExternalMediaProcessing(ProcessingTypes type);
// VoEVolumeControl
int GetSpeechOutputLevel(WebRtc_UWord32& level) const;
int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) const;
int SetMute(const bool enable);
bool Mute() const;
int SetOutputVolumePan(float left, float right);
int GetOutputVolumePan(float& left, float& right) const;
int SetChannelOutputVolumeScaling(float scaling);
int GetChannelOutputVolumeScaling(float& scaling) const;
// VoECallReport
void ResetDeadOrAliveCounters();
int ResetRTCPStatistics();
int GetRoundTripTimeSummary(StatVal& delaysMs) const;
int GetDeadOrAliveCounters(int& countDead, int& countAlive) const;
// VoENetEqStats
int GetNetworkStatistics(NetworkStatistics& stats);
int GetJitterStatistics(JitterStatistics& stats);
int GetPreferredBufferSize(unsigned short& preferredBufferSize);
int ResetJitterStatistics();
// VoEVideoSync
int GetDelayEstimate(int& delayMs) const;
int SetMinimumPlayoutDelay(int delayMs);
int GetPlayoutTimestamp(unsigned int& timestamp);
int SetInitTimestamp(unsigned int timestamp);
int SetInitSequenceNumber(short sequenceNumber);
// VoEVideoSyncExtended
int GetRtpRtcp(RtpRtcp* &rtpRtcpModule) const;
// VoEEncryption
#ifdef WEBRTC_SRTP
int EnableSRTPSend(
CipherTypes cipherType,
int cipherKeyLength,
AuthenticationTypes authType,
int authKeyLength,
int authTagLength,
SecurityLevels level,
const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
bool useForRTCP);
int DisableSRTPSend();
int EnableSRTPReceive(
CipherTypes cipherType,
int cipherKeyLength,
AuthenticationTypes authType,
int authKeyLength,
int authTagLength,
SecurityLevels level,
const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
bool useForRTCP);
int DisableSRTPReceive();
#endif
int RegisterExternalEncryption(Encryption& encryption);
int DeRegisterExternalEncryption();
// VoEDtmf
int SendTelephoneEventOutband(unsigned char eventCode, int lengthMs,
int attenuationDb, bool playDtmfEvent);
int SendTelephoneEventInband(unsigned char eventCode, int lengthMs,
int attenuationDb, bool playDtmfEvent);
int SetDtmfPlayoutStatus(bool enable);
bool DtmfPlayoutStatus() const;
int SetSendTelephoneEventPayloadType(unsigned char type);
int GetSendTelephoneEventPayloadType(unsigned char& type);
#ifdef WEBRTC_DTMF_DETECTION
int RegisterTelephoneEventDetection(
TelephoneEventDetectionMethods detectionMethod,
VoETelephoneEventObserver& observer);
int DeRegisterTelephoneEventDetection();
int GetTelephoneEventDetectionStatus(
bool& enabled,
TelephoneEventDetectionMethods& detectionMethod);
#endif
// VoEAudioProcessingImpl
int UpdateRxVadDetection(AudioFrame& audioFrame);
int RegisterRxVadObserver(VoERxVadCallback &observer);
int DeRegisterRxVadObserver();
int VoiceActivityIndicator(int &activity);
#ifdef WEBRTC_VOICE_ENGINE_AGC
int SetRxAgcStatus(const bool enable, const AgcModes mode);
int GetRxAgcStatus(bool& enabled, AgcModes& mode);
int SetRxAgcConfig(const AgcConfig config);
int GetRxAgcConfig(AgcConfig& config);
#endif
#ifdef WEBRTC_VOICE_ENGINE_NR
int SetRxNsStatus(const bool enable, const NsModes mode);
int GetRxNsStatus(bool& enabled, NsModes& mode);
#endif
// VoERTP_RTCP
int RegisterRTPObserver(VoERTPObserver& observer);
int DeRegisterRTPObserver();
int RegisterRTCPObserver(VoERTCPObserver& observer);
int DeRegisterRTCPObserver();
int SetLocalSSRC(unsigned int ssrc);
int GetLocalSSRC(unsigned int& ssrc);
int GetRemoteSSRC(unsigned int& ssrc);
int GetRemoteCSRCs(unsigned int arrCSRC[15]);
int SetRTPAudioLevelIndicationStatus(bool enable, unsigned char ID);
int GetRTPAudioLevelIndicationStatus(bool& enable, unsigned char& ID);
int SetRTCPStatus(bool enable);
int GetRTCPStatus(bool& enabled);
int SetRTCP_CNAME(const char cName[256]);
int GetRTCP_CNAME(char cName[256]);
int GetRemoteRTCP_CNAME(char cName[256]);
int GetRemoteRTCPData(unsigned int& NTPHigh, unsigned int& NTPLow,
unsigned int& timestamp,
unsigned int& playoutTimestamp, unsigned int* jitter,
unsigned short* fractionLost);
int SendApplicationDefinedRTCPPacket(const unsigned char subType,
unsigned int name, const char* data,
unsigned short dataLengthInBytes);
int GetRTPStatistics(unsigned int& averageJitterMs,
unsigned int& maxJitterMs,
unsigned int& discardedPackets);
int GetRTPStatistics(CallStatistics& stats);
int SetFECStatus(bool enable, int redPayloadtype);
int GetFECStatus(bool& enabled, int& redPayloadtype);
int SetRTPKeepaliveStatus(bool enable, unsigned char unknownPayloadType,
int deltaTransmitTimeSeconds);
int GetRTPKeepaliveStatus(bool& enabled, unsigned char& unknownPayloadType,
int& deltaTransmitTimeSeconds);
int StartRTPDump(const char fileNameUTF8[1024], RTPDirections direction);
int StopRTPDump(RTPDirections direction);
bool RTPDumpIsActive(RTPDirections direction);
int InsertExtraRTPPacket(unsigned char payloadType, bool markerBit,
const char* payloadData,
unsigned short payloadSize);
public:
// From AudioPacketizationCallback in the ACM
WebRtc_Word32 SendData(FrameType frameType,
WebRtc_UWord8 payloadType,
WebRtc_UWord32 timeStamp,
const WebRtc_UWord8* payloadData,
WebRtc_UWord16 payloadSize,
const RTPFragmentationHeader* fragmentation);
// From ACMVADCallback in the ACM
WebRtc_Word32 InFrameType(WebRtc_Word16 frameType);
#ifdef WEBRTC_DTMF_DETECTION
public: // From AudioCodingFeedback in the ACM
int IncomingDtmf(const WebRtc_UWord8 digitDtmf, const bool end);
#endif
public:
WebRtc_Word32 OnRxVadDetected(const int vadDecision);
public:
// From RtpData in the RTP/RTCP module
WebRtc_Word32 OnReceivedPayloadData(const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadSize,
const WebRtcRTPHeader* rtpHeader);
public:
// From RtpFeedback in the RTP/RTCP module
WebRtc_Word32 OnInitializeDecoder(
const WebRtc_Word32 id,
const WebRtc_Word8 payloadType,
const WebRtc_Word8 payloadName[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_UWord32 frequency,
const WebRtc_UWord8 channels,
const WebRtc_UWord32 rate);
void OnPacketTimeout(const WebRtc_Word32 id);
void OnReceivedPacket(const WebRtc_Word32 id,
const RtpRtcpPacketType packetType);
void OnPeriodicDeadOrAlive(const WebRtc_Word32 id,
const RTPAliveType alive);
void OnIncomingSSRCChanged(const WebRtc_Word32 id,
const WebRtc_UWord32 SSRC);
void OnIncomingCSRCChanged(const WebRtc_Word32 id,
const WebRtc_UWord32 CSRC, const bool added);
public:
// From RtcpFeedback in the RTP/RTCP module
void OnLipSyncUpdate(const WebRtc_Word32 id,
const WebRtc_Word32 audioVideoOffset) {};
void OnApplicationDataReceived(const WebRtc_Word32 id,
const WebRtc_UWord8 subType,
const WebRtc_UWord32 name,
const WebRtc_UWord16 length,
const WebRtc_UWord8* data);
void OnRTCPPacketTimeout(const WebRtc_Word32 id) {};
void OnTMMBRReceived(const WebRtc_Word32 id,
const WebRtc_UWord16 bwEstimateKbit) {};
void OnSendReportReceived(const WebRtc_Word32 id,
const WebRtc_UWord32 senderSSRC,
const WebRtc_UWord8* packet,
const WebRtc_UWord16 packetLength) {};
void OnReceiveReportReceived(const WebRtc_Word32 id,
const WebRtc_UWord32 senderSSRC,
const WebRtc_UWord8* packet,
const WebRtc_UWord16 packetLength) {};
public:
// From RtpAudioFeedback in the RTP/RTCP module
void OnReceivedTelephoneEvent(const WebRtc_Word32 id,
const WebRtc_UWord8 event,
const bool endOfEvent);
void OnPlayTelephoneEvent(const WebRtc_Word32 id,
const WebRtc_UWord8 event,
const WebRtc_UWord16 lengthMs,
const WebRtc_UWord8 volume);
public:
// From UdpTransportData in the Socket Transport module
void IncomingRTPPacket(const WebRtc_Word8* incomingRtpPacket,
const WebRtc_Word32 rtpPacketLength,
const WebRtc_Word8* fromIP,
const WebRtc_UWord16 fromPort);
void IncomingRTCPPacket(const WebRtc_Word8* incomingRtcpPacket,
const WebRtc_Word32 rtcpPacketLength,
const WebRtc_Word8* fromIP,
const WebRtc_UWord16 fromPort);
public:
// From Transport (called by the RTP/RTCP module)
int SendPacket(int /*channel*/, const void *data, int len);
int SendRTCPPacket(int /*channel*/, const void *data, int len);
public:
// From MixerParticipant
WebRtc_Word32 GetAudioFrame(const WebRtc_Word32 id,
AudioFrame& audioFrame);
WebRtc_Word32 NeededFrequency(const WebRtc_Word32 id);
public:
// From MonitorObserver
void OnPeriodicProcess();
public:
// From FileCallback
void PlayNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs);
void RecordNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs);
void PlayFileEnded(const WebRtc_Word32 id);
void RecordFileEnded(const WebRtc_Word32 id);
public:
WebRtc_UWord32 InstanceId() const
{
return _instanceId;
};
WebRtc_Word32 ChannelId() const
{
return _channelId;
};
bool Playing() const
{
return _playing;
};
bool Sending() const
{
return _sending;
};
bool Receiving() const
{
return _receiving;
};
bool ExternalTransport() const
{
return _externalTransport;
};
bool OutputIsOnHold() const
{
return _outputIsOnHold;
};
bool InputIsOnHold() const
{
return _inputIsOnHold;
};
RtpRtcp* const RtpRtcpModulePtr()
{
return &_rtpRtcpModule;
};
WebRtc_Word8 const OutputEnergyLevel()
{
return _outputAudioLevel.Level();
};
#ifndef WEBRTC_EXTERNAL_TRANSPORT
bool SendSocketsInitialized() const
{
return _socketTransportModule.SendSocketsInitialized();
};
bool ReceiveSocketsInitialized() const
{
return _socketTransportModule.ReceiveSocketsInitialized();
};
#endif
WebRtc_UWord32 Demultiplex(const AudioFrame& audioFrame,
const WebRtc_UWord8 audioLevel_dBov);
WebRtc_UWord32 PrepareEncodeAndSend(WebRtc_UWord32 mixingFrequency);
WebRtc_UWord32 EncodeAndSend();
private:
int InsertInbandDtmfTone();
WebRtc_Word32
MixOrReplaceAudioWithFile(const WebRtc_UWord32 mixingFrequency);
WebRtc_Word32 MixAudioWithFile(AudioFrame& audioFrame,
const WebRtc_UWord32 mixingFrequency);
WebRtc_Word32 GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp);
void UpdateDeadOrAliveCounters(bool alive);
WebRtc_Word32 SendPacketRaw(const void *data, int len, bool RTCP);
WebRtc_Word32 UpdatePacketDelay(const WebRtc_UWord32 timestamp,
const WebRtc_UWord16 sequenceNumber);
void RegisterReceiveCodecsToRTPModule();
int ApmProcessRx(AudioFrame& audioFrame);
private:
CriticalSectionWrapper& _fileCritSect;
CriticalSectionWrapper& _callbackCritSect;
CriticalSectionWrapper& _transmitCritSect;
WebRtc_UWord32 _instanceId;
WebRtc_Word32 _channelId;
private:
RtpRtcp& _rtpRtcpModule;
AudioCodingModule& _audioCodingModule;
#ifndef WEBRTC_EXTERNAL_TRANSPORT
UdpTransport& _socketTransportModule;
#endif
#ifdef WEBRTC_SRTP
SrtpModule& _srtpModule;
#endif
RtpDump& _rtpDumpIn;
RtpDump& _rtpDumpOut;
private:
AudioLevel _outputAudioLevel;
bool _externalTransport;
AudioFrame _audioFrame;
WebRtc_UWord8 _audioLevel_dBov;
FilePlayer* _inputFilePlayerPtr;
FilePlayer* _outputFilePlayerPtr;
FileRecorder* _outputFileRecorderPtr;
WebRtc_UWord32 _inputFilePlayerId;
WebRtc_UWord32 _outputFilePlayerId;
WebRtc_UWord32 _outputFileRecorderId;
bool _inputFilePlaying;
bool _outputFilePlaying;
bool _outputFileRecording;
DtmfInbandQueue _inbandDtmfQueue;
DtmfInband _inbandDtmfGenerator;
bool _outputExternalMedia;
bool _inputExternalMedia;
VoEMediaProcess* _inputExternalMediaCallbackPtr;
VoEMediaProcess* _outputExternalMediaCallbackPtr;
WebRtc_UWord8* _encryptionRTPBufferPtr;
WebRtc_UWord8* _decryptionRTPBufferPtr;
WebRtc_UWord8* _encryptionRTCPBufferPtr;
WebRtc_UWord8* _decryptionRTCPBufferPtr;
WebRtc_UWord32 _timeStamp;
WebRtc_UWord8 _sendTelephoneEventPayloadType;
WebRtc_UWord32 _playoutTimeStampRTP;
WebRtc_UWord32 _playoutTimeStampRTCP;
WebRtc_UWord32 _numberOfDiscardedPackets;
private:
// Objects this channel uses but does not own (unless noted below):
Statistics* _engineStatisticsPtr;
OutputMixer* _outputMixerPtr;
TransmitMixer* _transmitMixerPtr;
ProcessThread* _moduleProcessThreadPtr;
AudioDeviceModule* _audioDeviceModulePtr;
VoiceEngineObserver* _voiceEngineObserverPtr; // owned by base
CriticalSectionWrapper* _callbackCritSectPtr; // owned by base
Transport* _transportPtr; // WebRtc socket or external transport
Encryption* _encryptionPtr; // WebRtc SRTP or external encryption
AudioProcessing* _rxAudioProcessingModulePtr; // far end AudioProcessing
#ifdef WEBRTC_DTMF_DETECTION
VoETelephoneEventObserver* _telephoneEventDetectionPtr;
#endif
VoERxVadCallback* _rxVadObserverPtr;
WebRtc_Word32 _oldVadDecision;
WebRtc_Word32 _sendFrameType; // Type of last sent frame: 1 = voice, 0 = otherwise
VoERTPObserver* _rtpObserverPtr;
VoERTCPObserver* _rtcpObserverPtr;
private:
// VoEBase
bool _outputIsOnHold;
bool _externalPlayout;
bool _inputIsOnHold;
bool _playing;
bool _sending;
bool _receiving;
bool _mixFileWithMicrophone;
bool _rtpObserver;
bool _rtcpObserver;
// VoEVolumeControl
bool _mute;
float _panLeft;
float _panRight;
float _outputGain;
// VoEEncryption
bool _encrypting;
bool _decrypting;
// VoEDtmf
bool _playOutbandDtmfEvent;
bool _playInbandDtmfEvent;
bool _inbandTelephoneEventDetection;
bool _outOfBandTelephoneEventDetecion;
// VoeRTP_RTCP
WebRtc_UWord8 _extraPayloadType;
bool _insertExtraRTPPacket;
bool _extraMarkerBit;
WebRtc_UWord32 _lastLocalTimeStamp;
WebRtc_Word8 _lastPayloadType;
bool _includeAudioLevelIndication;
// VoENetwork
bool _rtpPacketTimedOut;
bool _rtpPacketTimeOutIsEnabled;
WebRtc_UWord32 _rtpTimeOutSeconds;
bool _connectionObserver;
VoEConnectionObserver* _connectionObserverPtr;
WebRtc_UWord32 _countAliveDetections;
WebRtc_UWord32 _countDeadDetections;
AudioFrame::SpeechType _outputSpeechType;
// VoEVideoSync
WebRtc_UWord32 _averageDelayMs;
WebRtc_UWord16 _previousSequenceNumber;
WebRtc_UWord32 _previousTimestamp;
WebRtc_UWord16 _recPacketDelayMs;
// VoEAudioProcessing
bool _RxVadDetection;
bool _rxApmIsEnabled;
bool _rxAgcIsEnabled;
bool _rxNsIsEnabled;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_CHANNEL_H


@ -1,161 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "channel.h"
#include "channel_manager.h"
namespace webrtc
{
namespace voe
{
ChannelManager::ChannelManager(const WebRtc_UWord32 instanceId) :
ChannelManagerBase(),
_instanceId(instanceId)
{
}
ChannelManager::~ChannelManager()
{
ChannelManagerBase::DestroyAllItems();
}
bool ChannelManager::CreateChannel(WebRtc_Word32& channelId)
{
return ChannelManagerBase::CreateItem(channelId);
}
WebRtc_Word32 ChannelManager::DestroyChannel(const WebRtc_Word32 channelId)
{
Channel* deleteChannel =
static_cast<Channel*> (ChannelManagerBase::RemoveItem(channelId));
if (!deleteChannel)
{
return -1;
}
delete deleteChannel;
return 0;
}
WebRtc_Word32 ChannelManager::NumOfChannels() const
{
return ChannelManagerBase::NumOfItems();
}
WebRtc_Word32 ChannelManager::MaxNumOfChannels() const
{
return ChannelManagerBase::MaxNumOfItems();
}
void* ChannelManager::NewItem(WebRtc_Word32 itemID)
{
Channel* channel;
if (Channel::CreateChannel(channel, itemID, _instanceId) == -1)
{
return NULL;
}
return static_cast<void*> (channel);
}
void ChannelManager::DeleteItem(void* item)
{
Channel* deleteItem = static_cast<Channel*> (item);
delete deleteItem;
}
Channel* ChannelManager::GetChannel(const WebRtc_Word32 channelId) const
{
return static_cast<Channel*> (ChannelManagerBase::GetItem(channelId));
}
void ChannelManager::ReleaseChannel()
{
ChannelManagerBase::ReleaseItem();
}
void ChannelManager::GetChannelIds(WebRtc_Word32* channelsArray,
WebRtc_Word32& numOfChannels) const
{
ChannelManagerBase::GetItemIds(channelsArray, numOfChannels);
}
void ChannelManager::GetChannels(MapWrapper& channels) const
{
ChannelManagerBase::GetChannels(channels);
}
ScopedChannel::ScopedChannel(ChannelManager& chManager) :
_chManager(chManager),
_channelPtr(NULL)
{
// Copy all existing channels to the local map.
// It is not possible to utilize the ChannelPtr() API after
// this constructor. The intention is that this constructor
// is used in combination with the scoped iterator.
_chManager.GetChannels(_channels);
}
ScopedChannel::ScopedChannel(ChannelManager& chManager,
WebRtc_Word32 channelId) :
_chManager(chManager),
_channelPtr(NULL)
{
_channelPtr = _chManager.GetChannel(channelId);
}
ScopedChannel::~ScopedChannel()
{
if (_channelPtr != NULL || _channels.Size() != 0)
{
_chManager.ReleaseChannel();
}
// Delete the map
while (_channels.Erase(_channels.First()) == 0)
;
}
Channel* ScopedChannel::ChannelPtr()
{
return _channelPtr;
}
Channel* ScopedChannel::GetFirstChannel(void*& iterator) const
{
MapItem* it = _channels.First();
iterator = (void*) it;
if (!it)
{
return NULL;
}
return static_cast<Channel*> (it->GetItem());
}
Channel* ScopedChannel::GetNextChannel(void*& iterator) const
{
MapItem* it = (MapItem*) iterator;
if (!it)
{
iterator = NULL;
return NULL;
}
it = _channels.Next(it);
iterator = (void*) it;
if (!it)
{
return NULL;
}
return static_cast<Channel*> (it->GetItem());
}
} // namespace voe
} // namespace webrtc


@ -1,89 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
#define WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
#include "channel_manager_base.h"
#include "typedefs.h"
namespace webrtc
{
namespace voe
{
class ScopedChannel;
class Channel;
class ChannelManager: private ChannelManagerBase
{
friend class ScopedChannel;
public:
bool CreateChannel(WebRtc_Word32& channelId);
WebRtc_Word32 DestroyChannel(const WebRtc_Word32 channelId);
WebRtc_Word32 MaxNumOfChannels() const;
WebRtc_Word32 NumOfChannels() const;
void GetChannelIds(WebRtc_Word32* channelsArray,
WebRtc_Word32& numOfChannels) const;
ChannelManager(const WebRtc_UWord32 instanceId);
~ChannelManager();
private:
ChannelManager(const ChannelManager&);
ChannelManager& operator=(const ChannelManager&);
Channel* GetChannel(const WebRtc_Word32 channelId) const;
void GetChannels(MapWrapper& channels) const;
void ReleaseChannel();
virtual void* NewItem(WebRtc_Word32 itemID);
virtual void DeleteItem(void* item);
WebRtc_UWord32 _instanceId;
};
class ScopedChannel
{
public:
// Can only be created by the channel manager
ScopedChannel(ChannelManager& chManager);
ScopedChannel(ChannelManager& chManager, WebRtc_Word32 channelId);
Channel* ChannelPtr();
Channel* GetFirstChannel(void*& iterator) const;
Channel* GetNextChannel(void*& iterator) const;
~ScopedChannel();
private:
ChannelManager& _chManager;
Channel* _channelPtr;
MapWrapper _channels;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H


@ -1,227 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "channel_manager_base.h"
#include "critical_section_wrapper.h"
#include "rw_lock_wrapper.h"
#include <cassert>
namespace webrtc
{
namespace voe
{
ChannelManagerBase::ChannelManagerBase() :
_itemsCritSectPtr(CriticalSectionWrapper::CreateCriticalSection()),
_itemsRWLockPtr(RWLockWrapper::CreateRWLock())
{
for (int i = 0; i < KMaxNumberOfItems; i++)
{
_freeItemIds[i] = true;
}
}
ChannelManagerBase::~ChannelManagerBase()
{
if (_itemsRWLockPtr)
{
delete _itemsRWLockPtr;
_itemsRWLockPtr = NULL;
}
if (_itemsCritSectPtr)
{
delete _itemsCritSectPtr;
_itemsCritSectPtr = NULL;
}
}
bool ChannelManagerBase::GetFreeItemId(WebRtc_Word32& itemId)
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
WebRtc_Word32 i(0);
while (i < KMaxNumberOfItems)
{
if (_freeItemIds[i])
{
itemId = i;
_freeItemIds[i] = false;
return true;
}
i++;
}
return false;
}
void ChannelManagerBase::AddFreeItemId(WebRtc_Word32 itemId)
{
assert(itemId < KMaxNumberOfItems);
_freeItemIds[itemId] = true;
}
void ChannelManagerBase::RemoveFreeItemIds()
{
for (int i = 0; i < KMaxNumberOfItems; i++)
{
_freeItemIds[i] = false;
}
}
bool ChannelManagerBase::CreateItem(WebRtc_Word32& itemId)
{
_itemsCritSectPtr->Enter();
void* itemPtr;
itemId = -1;
const bool success = GetFreeItemId(itemId);
if (!success)
{
_itemsCritSectPtr->Leave();
return false;
}
itemPtr = NewItem(itemId);
if (!itemPtr)
{
_itemsCritSectPtr->Leave();
return false;
}
_itemsCritSectPtr->Leave();
InsertItem(itemId, itemPtr);
return true;
}
void ChannelManagerBase::InsertItem(WebRtc_Word32 itemId, void* item)
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
assert(!_items.Find(itemId));
_items.Insert(itemId, item);
}
void*
ChannelManagerBase::RemoveItem(WebRtc_Word32 itemId)
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
WriteLockScoped wlock(*_itemsRWLockPtr);
MapItem* it = _items.Find(itemId);
if (!it)
{
return 0;
}
void* returnItem = it->GetItem();
_items.Erase(it);
AddFreeItemId(itemId);
return returnItem;
}
void ChannelManagerBase::DestroyAllItems()
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
MapItem* it = _items.First();
while (it)
{
DeleteItem(it->GetItem());
_items.Erase(it);
it = _items.First();
}
RemoveFreeItemIds();
}
WebRtc_Word32 ChannelManagerBase::NumOfItems() const
{
return _items.Size();
}
WebRtc_Word32 ChannelManagerBase::MaxNumOfItems() const
{
return static_cast<WebRtc_Word32> (KMaxNumberOfItems);
}
void*
ChannelManagerBase::GetItem(WebRtc_Word32 itemId) const
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
MapItem* it = _items.Find(itemId);
if (!it)
{
return 0;
}
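// On success the shared lock is acquired and intentionally kept; the
// caller releases it later through ReleaseItem() (see ScopedChannel).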
_itemsRWLockPtr->AcquireLockShared();
return it->GetItem();
}
void*
ChannelManagerBase::GetFirstItem(void*& iterator) const
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
MapItem* it = _items.First();
iterator = (void*) it;
if (!it)
{
return 0;
}
return it->GetItem();
}
void*
ChannelManagerBase::GetNextItem(void*& iterator) const
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
MapItem* it = (MapItem*) iterator;
if (!it)
{
iterator = 0;
return 0;
}
it = _items.Next(it);
iterator = (void*) it;
if (!it)
{
return 0;
}
return it->GetItem();
}
void ChannelManagerBase::ReleaseItem()
{
_itemsRWLockPtr->ReleaseLockShared();
}
void ChannelManagerBase::GetItemIds(WebRtc_Word32* channelsArray,
WebRtc_Word32& numOfChannels) const
{
// Protect concurrent access to |_items|, as the other accessors do.
CriticalSectionScoped cs(*_itemsCritSectPtr);
MapItem* it = _items.First();
numOfChannels = (numOfChannels <= _items.Size()) ?
numOfChannels : _items.Size();
for (int i = 0; i < numOfChannels; i++)
{
channelsArray[i] = it->GetId();
it = _items.Next(it);
}
}
void ChannelManagerBase::GetChannels(MapWrapper& channels) const
{
CriticalSectionScoped cs(*_itemsCritSectPtr);
if (_items.Size() == 0)
{
return;
}
_itemsRWLockPtr->AcquireLockShared();
for (MapItem* it = _items.First(); it != NULL; it = _items.Next(it))
{
channels.Insert(it->GetId(), it->GetItem());
}
}
} // namespace voe
} // namespace webrtc


@ -1,90 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
#define WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
#include "typedefs.h"
#include "map_wrapper.h"
#include "voice_engine_defines.h"
namespace webrtc
{
class CriticalSectionWrapper;
class RWLockWrapper;
namespace voe
{
class ScopedChannel;
class Channel;
class ChannelManagerBase
{
public:
enum {KMaxNumberOfItems = kVoiceEngineMaxNumOfChannels};
protected:
bool CreateItem(WebRtc_Word32& itemId);
void InsertItem(WebRtc_Word32 itemId, void* item);
void* RemoveItem(WebRtc_Word32 itemId);
void* GetItem(WebRtc_Word32 itemId) const;
void* GetFirstItem(void*& iterator) const;
void* GetNextItem(void*& iterator) const;
void ReleaseItem();
void AddFreeItemId(WebRtc_Word32 itemId);
bool GetFreeItemId(WebRtc_Word32& itemId);
void RemoveFreeItemIds();
void DestroyAllItems();
WebRtc_Word32 NumOfItems() const;
WebRtc_Word32 MaxNumOfItems() const;
void GetItemIds(WebRtc_Word32* channelsArray,
WebRtc_Word32& numOfChannels) const;
void GetChannels(MapWrapper& channels) const;
virtual void* NewItem(WebRtc_Word32 itemId) = 0;
virtual void DeleteItem(void* item) = 0;
ChannelManagerBase();
virtual ~ChannelManagerBase();
private:
// Protects _items and _freeItemIds
CriticalSectionWrapper* _itemsCritSectPtr;
MapWrapper _items;
bool _freeItemIds[KMaxNumberOfItems];
// Protects channels from being destroyed while being used
RWLockWrapper* _itemsRWLockPtr;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H


@ -1,389 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "dtmf_inband.h"
#include "critical_section_wrapper.h"
#include "trace.h"
#include <cassert>
namespace webrtc {
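// First table is 2*cos(2*pi*f/fs) in Q14, the second-order oscillator
// coefficient for the eight DTMF frequencies: rows 697, 770, 852, 941 Hz
// followed by columns 1209, 1336, 1477, 1633 Hz.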
const WebRtc_Word16 Dtmf_a_times2Tab8Khz[8]=
{
27978, 26956, 25701, 24219,
19073, 16325, 13085, 9314
};
const WebRtc_Word16 Dtmf_a_times2Tab16Khz[8]=
{
31548, 31281, 30951, 30556,
29144, 28361, 27409, 26258
};
const WebRtc_Word16 Dtmf_a_times2Tab32Khz[8]=
{
32462, 32394, 32311, 32210, 31849, 31647, 31400, 31098
};
// Second table is sin(2*pi*f/fs) in Q14
const WebRtc_Word16 Dtmf_ym2Tab8Khz[8]=
{
8527, 9315, 10163, 11036,
13322, 14206, 15021, 15708
};
const WebRtc_Word16 Dtmf_ym2Tab16Khz[8]=
{
4429, 4879, 5380, 5918,
7490, 8207, 8979, 9801
};
const WebRtc_Word16 Dtmf_ym2Tab32Khz[8]=
{
2235, 2468, 2728, 3010, 3853, 4249, 4685, 5164
};
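// Amplitude scale factors in Q14, indexed by attenuation in dB
// (0 to 36 dBm0); successive entries differ by a factor of 10^(-1/20).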
const WebRtc_Word16 Dtmf_dBm0kHz[37]=
{
16141, 14386, 12821, 11427, 10184, 9077,
8090, 7210, 6426, 5727, 5104, 4549,
4054, 3614, 3221, 2870, 2558, 2280,
2032, 1811, 1614, 1439, 1282, 1143,
1018, 908, 809, 721, 643, 573,
510, 455, 405, 361, 322, 287,
256
};
DtmfInband::DtmfInband(const WebRtc_Word32 id) :
_id(id),
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_outputFrequencyHz(8000),
_reinit(true),
_remainingSamples(0),
_frameLengthSamples(0),
_eventCode(0),
_attenuationDb(0),
_playing(false),
_delaySinceLastToneMS(1000),
_lengthMs(0)
{
memset(_oldOutputLow, 0, sizeof(_oldOutputLow));
memset(_oldOutputHigh, 0, sizeof(_oldOutputHigh));
}
DtmfInband::~DtmfInband()
{
delete &_critSect;
}
int
DtmfInband::SetSampleRate(const WebRtc_UWord16 frequency)
{
if (frequency != 8000 &&
frequency != 16000 &&
frequency != 32000)
{
// invalid sample rate
assert(false);
return -1;
}
_outputFrequencyHz = frequency;
return 0;
}
int
DtmfInband::GetSampleRate(WebRtc_UWord16& frequency)
{
frequency = _outputFrequencyHz;
return 0;
}
void
DtmfInband::Init()
{
_remainingSamples = 0;
_frameLengthSamples = 0;
_eventCode = 0;
_attenuationDb = 0;
_lengthMs = 0;
_reinit = true;
_oldOutputLow[0] = 0;
_oldOutputLow[1] = 0;
_oldOutputHigh[0] = 0;
_oldOutputHigh[1] = 0;
_delaySinceLastToneMS = 1000;
}
int
DtmfInband::AddTone(const WebRtc_UWord8 eventCode,
WebRtc_Word32 lengthMs,
WebRtc_Word32 attenuationDb)
{
CriticalSectionScoped lock(_critSect);
if (attenuationDb > 36 || eventCode > 15)
{
assert(false);
return -1;
}
if (IsAddingTone())
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_id,-1),
"DtmfInband::AddTone() new tone interrupts ongoing tone");
}
ReInit();
_frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
_eventCode = static_cast<WebRtc_Word16> (eventCode);
_attenuationDb = static_cast<WebRtc_Word16> (attenuationDb);
_remainingSamples = static_cast<WebRtc_Word32>
(lengthMs * (_outputFrequencyHz / 1000));
_lengthMs = lengthMs;
return 0;
}
int
DtmfInband::ResetTone()
{
CriticalSectionScoped lock(_critSect);
ReInit();
_frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
_remainingSamples = static_cast<WebRtc_Word32>
(_lengthMs * (_outputFrequencyHz / 1000));
return 0;
}
int
DtmfInband::StartTone(const WebRtc_UWord8 eventCode,
WebRtc_Word32 attenuationDb)
{
CriticalSectionScoped lock(_critSect);
if (attenuationDb > 36 || eventCode > 15)
{
assert(false);
return -1;
}
if (IsAddingTone())
{
return -1;
}
ReInit();
_frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
_eventCode = static_cast<WebRtc_Word16> (eventCode);
_attenuationDb = static_cast<WebRtc_Word16> (attenuationDb);
_playing = true;
return 0;
}
int
DtmfInband::StopTone()
{
CriticalSectionScoped lock(_critSect);
if (!_playing)
{
return 0;
}
_playing = false;
return 0;
}
// Shall be called between tones
void
DtmfInband::ReInit()
{
_reinit = true;
}
bool
DtmfInband::IsAddingTone()
{
CriticalSectionScoped lock(_critSect);
return (_remainingSamples > 0 || _playing);
}
int
DtmfInband::Get10msTone(WebRtc_Word16 output[320],
WebRtc_UWord16& outputSizeInSamples)
{
CriticalSectionScoped lock(_critSect);
if (DtmfFix_generate(output,
_eventCode,
_attenuationDb,
_frameLengthSamples,
_outputFrequencyHz) == -1)
{
return -1;
}
_remainingSamples -= _frameLengthSamples;
outputSizeInSamples = _frameLengthSamples;
_delaySinceLastToneMS = 0;
return 0;
}
void
DtmfInband::UpdateDelaySinceLastTone()
{
_delaySinceLastToneMS += kDtmfFrameSizeMs;
// avoid wraparound
if (_delaySinceLastToneMS > (1<<30))
{
_delaySinceLastToneMS = 1000;
}
}
WebRtc_UWord32
DtmfInband::DelaySinceLastTone() const
{
return _delaySinceLastToneMS;
}
WebRtc_Word16
DtmfInband::DtmfFix_generate(WebRtc_Word16 *decoded,
const WebRtc_Word16 value,
const WebRtc_Word16 volume,
const WebRtc_Word16 frameLen,
const WebRtc_Word16 fs)
{
const WebRtc_Word16 *a_times2Tbl;
const WebRtc_Word16 *y2_Table;
WebRtc_Word16 a1_times2 = 0, a2_times2 = 0;
if (fs==8000) {
a_times2Tbl=Dtmf_a_times2Tab8Khz;
y2_Table=Dtmf_ym2Tab8Khz;
} else if (fs==16000) {
a_times2Tbl=Dtmf_a_times2Tab16Khz;
y2_Table=Dtmf_ym2Tab16Khz;
} else if (fs==32000) {
a_times2Tbl=Dtmf_a_times2Tab32Khz;
y2_Table=Dtmf_ym2Tab32Khz;
} else {
return(-1);
}
if ((value==1)||(value==2)||(value==3)||(value==12)) {
a1_times2=a_times2Tbl[0];
if (_reinit) {
_oldOutputLow[0]=y2_Table[0];
_oldOutputLow[1]=0;
}
} else if ((value==4)||(value==5)||(value==6)||(value==13)) {
a1_times2=a_times2Tbl[1];
if (_reinit) {
_oldOutputLow[0]=y2_Table[1];
_oldOutputLow[1]=0;
}
} else if ((value==7)||(value==8)||(value==9)||(value==14)) {
a1_times2=a_times2Tbl[2];
if (_reinit) {
_oldOutputLow[0]=y2_Table[2];
_oldOutputLow[1]=0;
}
} else if ((value==10)||(value==0)||(value==11)||(value==15)) {
a1_times2=a_times2Tbl[3];
if (_reinit) {
_oldOutputLow[0]=y2_Table[3];
_oldOutputLow[1]=0;
}
}
if ((value==1)||(value==4)||(value==7)||(value==10)) {
a2_times2=a_times2Tbl[4];
if (_reinit) {
_oldOutputHigh[0]=y2_Table[4];
_oldOutputHigh[1]=0;
_reinit=false;
}
} else if ((value==2)||(value==5)||(value==8)||(value==0)) {
a2_times2=a_times2Tbl[5];
if (_reinit) {
_oldOutputHigh[0]=y2_Table[5];
_oldOutputHigh[1]=0;
_reinit=false;
}
} else if ((value==3)||(value==6)||(value==9)||(value==11)) {
a2_times2=a_times2Tbl[6];
if (_reinit) {
_oldOutputHigh[0]=y2_Table[6];
_oldOutputHigh[1]=0;
_reinit=false;
}
} else if ((value==12)||(value==13)||(value==14)||(value==15)) {
a2_times2=a_times2Tbl[7];
if (_reinit) {
_oldOutputHigh[0]=y2_Table[7];
_oldOutputHigh[1]=0;
_reinit=false;
}
}
return (DtmfFix_generateSignal(a1_times2,
a2_times2,
volume,
decoded,
frameLen));
}
WebRtc_Word16
DtmfInband::DtmfFix_generateSignal(const WebRtc_Word16 a1_times2,
const WebRtc_Word16 a2_times2,
const WebRtc_Word16 volume,
WebRtc_Word16 *signal,
const WebRtc_Word16 length)
{
int i;
/* Generate Signal */
for (i=0;i<length;i++) {
WebRtc_Word32 tempVal;
WebRtc_Word16 tempValLow, tempValHigh;
/* Use recursion formula y[n] = a*2*y[n-1] - y[n-2] */
tempValLow = (WebRtc_Word16)(((( (WebRtc_Word32)(a1_times2 *
_oldOutputLow[1])) + 8192) >> 14) - _oldOutputLow[0]);
tempValHigh = (WebRtc_Word16)(((( (WebRtc_Word32)(a2_times2 *
_oldOutputHigh[1])) + 8192) >> 14) - _oldOutputHigh[0]);
/* Update memory */
_oldOutputLow[0]=_oldOutputLow[1];
_oldOutputLow[1]=tempValLow;
_oldOutputHigh[0]=_oldOutputHigh[1];
_oldOutputHigh[1]=tempValHigh;
tempVal = (WebRtc_Word32)(kDtmfAmpLow * tempValLow) +
(WebRtc_Word32)(kDtmfAmpHigh * tempValHigh);
/* Norm the signal to Q14 */
tempVal=(tempVal+16384)>>15;
/* Scale the signal to correct dbM0 value */
signal[i]=(WebRtc_Word16)((tempVal*Dtmf_dBm0kHz[volume]+8192)>>14);
}
return(0);
}
} // namespace webrtc
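A standalone illustration of the oscillator used above: with a1_times2 equal to 2*cos(2*pi*f/fs) and the state seeded with sin(2*pi*f/fs), the recursion y[n] = a*2*y[n-1] - y[n-2] reproduces sin(2*pi*f/fs*n). Floating point is used here for clarity; the class does the same arithmetic in Q14.
#include <cmath>
#include <cstdio>
int main()
{
    const double kPi = 3.14159265358979323846;
    const double fs = 8000.0; // sample rate
    const double f = 697.0;   // low-group frequency of DTMF event 1
    const double aTimes2 = 2.0 * std::cos(2.0 * kPi * f / fs);
    double y2 = 0.0;                          // y[n-2]
    double y1 = std::sin(2.0 * kPi * f / fs); // y[n-1]
    for (int n = 2; n < 10; n++)
    {
        double y0 = aTimes2 * y1 - y2; // the recursion
        std::printf("y[%d]=% .6f  sin=% .6f\n", n, y0,
                    std::sin(2.0 * kPi * f / fs * n));
        y2 = y1;
        y1 = y0;
    }
    return 0;
}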

View File

@ -1,93 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
#define WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
#if _MSC_VER > 1000
#pragma once
#endif
#include "typedefs.h"
#include "voice_engine_defines.h"
namespace webrtc {
class CriticalSectionWrapper;
class DtmfInband
{
public:
DtmfInband(const WebRtc_Word32 id);
virtual ~DtmfInband();
void Init();
int SetSampleRate(const WebRtc_UWord16 frequency);
int GetSampleRate(WebRtc_UWord16& frequency);
int AddTone(const WebRtc_UWord8 eventCode,
WebRtc_Word32 lengthMs,
WebRtc_Word32 attenuationDb);
int ResetTone();
int StartTone(const WebRtc_UWord8 eventCode,
WebRtc_Word32 attenuationDb);
int StopTone();
bool IsAddingTone();
int Get10msTone(WebRtc_Word16 output[320],
WebRtc_UWord16& outputSizeInSamples);
WebRtc_UWord32 DelaySinceLastTone() const;
void UpdateDelaySinceLastTone();
private:
void ReInit();
WebRtc_Word16 DtmfFix_generate(WebRtc_Word16* decoded,
const WebRtc_Word16 value,
const WebRtc_Word16 volume,
const WebRtc_Word16 frameLen,
const WebRtc_Word16 fs);
private:
enum {kDtmfFrameSizeMs = 10};
enum {kDtmfAmpHigh = 32768};
enum {kDtmfAmpLow = 23171}; // 3 dB lower than the high frequency
WebRtc_Word16 DtmfFix_generateSignal(const WebRtc_Word16 a1_times2,
const WebRtc_Word16 a2_times2,
const WebRtc_Word16 volume,
WebRtc_Word16* signal,
const WebRtc_Word16 length);
private:
CriticalSectionWrapper& _critSect;
WebRtc_Word32 _id;
WebRtc_UWord16 _outputFrequencyHz; // {8000, 16000, 32000}
WebRtc_Word16 _oldOutputLow[2]; // Data needed for oscillator model
WebRtc_Word16 _oldOutputHigh[2]; // Data needed for oscillator model
WebRtc_Word16 _frameLengthSamples; // {80, 160, 320}
WebRtc_Word32 _remainingSamples;
WebRtc_Word16 _eventCode; // [0, 15]
WebRtc_Word16 _attenuationDb; // [0, 36]
WebRtc_Word32 _lengthMs;
bool _reinit; // 'true' if the oscillator should be reinitialized for the next event
bool _playing;
WebRtc_UWord32 _delaySinceLastToneMS; // time since last generated tone [ms]
};
} // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
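A usage sketch, assuming the class above is compiled and linked: queue a 200 ms DTMF event 5 at 10 dB attenuation and drain it in 10 ms blocks, the same pattern OutputMixer::InsertInbandDtmfTone() follows.
#include "dtmf_inband.h"
void PlayEventFive(webrtc::DtmfInband& generator)
{
    generator.SetSampleRate(8000);
    generator.AddTone(5, 200, 10); // event 5, 200 ms, 10 dB attenuation
    WebRtc_Word16 buffer[320];
    WebRtc_UWord16 samples(0);
    while (generator.IsAddingTone())
    {
        if (generator.Get10msTone(buffer, samples) == -1)
        {
            break;
        }
        // ... hand 'samples' samples in 'buffer' to the playout path ...
    }
}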

View File

@ -1,88 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "dtmf_inband_queue.h"
#include "trace.h"
namespace webrtc {
DtmfInbandQueue::DtmfInbandQueue(const WebRtc_Word32 id):
_id(id),
_DtmfCritsect(*CriticalSectionWrapper::CreateCriticalSection()),
_nextEmptyIndex(0)
{
memset(_DtmfKey,0, sizeof(_DtmfKey));
memset(_DtmfLen,0, sizeof(_DtmfLen));
memset(_DtmfLevel,0, sizeof(_DtmfLevel));
}
DtmfInbandQueue::~DtmfInbandQueue()
{
delete &_DtmfCritsect;
}
int
DtmfInbandQueue::AddDtmf(WebRtc_UWord8 key,
WebRtc_UWord16 len,
WebRtc_UWord8 level)
{
CriticalSectionScoped lock(_DtmfCritsect);
if (_nextEmptyIndex >= kDtmfInbandMax)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_id,-1),
"DtmfInbandQueue::AddDtmf() unable to add Dtmf tone");
return -1;
}
WebRtc_Word32 index = _nextEmptyIndex;
_DtmfKey[index] = key;
_DtmfLen[index] = len;
_DtmfLevel[index] = level;
_nextEmptyIndex++;
return 0;
}
WebRtc_Word8
DtmfInbandQueue::NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level)
{
CriticalSectionScoped lock(_DtmfCritsect);
if(!PendingDtmf())
{
return -1;
}
WebRtc_Word8 nextDtmf = _DtmfKey[0];
*len=_DtmfLen[0];
*level=_DtmfLevel[0];
// Shift the remaining events one step towards the head of the queue.
// Only _nextEmptyIndex - 1 entries remain once the first is consumed.
memmove(&(_DtmfKey[0]), &(_DtmfKey[1]),
(_nextEmptyIndex - 1) * sizeof(WebRtc_UWord8));
memmove(&(_DtmfLen[0]), &(_DtmfLen[1]),
(_nextEmptyIndex - 1) * sizeof(WebRtc_UWord16));
memmove(&(_DtmfLevel[0]), &(_DtmfLevel[1]),
(_nextEmptyIndex - 1) * sizeof(WebRtc_UWord8));
_nextEmptyIndex--;
return nextDtmf;
}
bool
DtmfInbandQueue::PendingDtmf()
{
return(_nextEmptyIndex>0);
}
void
DtmfInbandQueue::ResetDtmf()
{
_nextEmptyIndex = 0;
}
} // namespace webrtc

View File

@ -1,52 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
#define WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
#include "critical_section_wrapper.h"
#include "typedefs.h"
#include "voice_engine_defines.h"
namespace webrtc {
class DtmfInbandQueue
{
public:
DtmfInbandQueue(const WebRtc_Word32 id);
virtual ~DtmfInbandQueue();
int AddDtmf(WebRtc_UWord8 DtmfKey,
WebRtc_UWord16 len,
WebRtc_UWord8 level);
WebRtc_Word8 NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level);
bool PendingDtmf();
void ResetDtmf();
private:
enum {kDtmfInbandMax = 20};
WebRtc_Word32 _id;
CriticalSectionWrapper& _DtmfCritsect;
WebRtc_UWord8 _nextEmptyIndex;
WebRtc_UWord8 _DtmfKey[kDtmfInbandMax];
WebRtc_UWord16 _DtmfLen[kDtmfInbandMax];
WebRtc_UWord8 _DtmfLevel[kDtmfInbandMax];
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
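A usage sketch, assuming the class above is linked: events are queued as they arrive from the API and drained one at a time by the tone generator.
#include "dtmf_inband_queue.h"
void DrainQueue(webrtc::DtmfInbandQueue& queue)
{
    queue.AddDtmf(5, 200, 10); // key 5, 200 ms, level 10
    queue.AddDtmf(9, 160, 10);
    WebRtc_UWord16 len(0);
    WebRtc_UWord8 level(0);
    while (queue.PendingDtmf())
    {
        WebRtc_Word8 key = queue.NextDtmf(&len, &level);
        // ... feed 'key' with duration 'len' to a DtmfInband generator ...
        (void)key;
    }
}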

View File

@ -1,99 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "level_indicator.h"
#include "module_common_types.h"
#include "signal_processing_library.h"
namespace webrtc {
namespace voe {
// Maps the scaled absolute maximum to the number of bars on the indicator.
// Note that the number of elements is 33 because the table is indexed
// in the range 0-32.
const WebRtc_Word8 permutation[33] =
{0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9};
AudioLevel::AudioLevel() :
_absMax(0),
_count(0),
_currentLevel(0),
_currentLevelFullRange(0)
{
}
AudioLevel::~AudioLevel()
{
}
void
AudioLevel::Clear()
{
_absMax = 0;
_count = 0;
_currentLevel = 0;
_currentLevelFullRange = 0;
}
void
AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
{
WebRtc_Word16 absValue(0);
// Check speech level (works for 2 channels as well)
absValue = WebRtcSpl_MaxAbsValueW16(
audioFrame._payloadData,
audioFrame._payloadDataLengthInSamples*audioFrame._audioChannel);
if (absValue > _absMax)
_absMax = absValue;
// Update level approximately 10 times per second
if (_count++ == kUpdateFrequency)
{
_currentLevelFullRange = _absMax;
_count = 0;
// Highest value for a WebRtc_Word16 is 0x7fff = 32767
// Divide by 1000 to get into the range 0-32, which is the index range
// of the permutation vector
WebRtc_Word32 position = _absMax/1000;
// Make it less likely that the bar stays at position 0, i.e. only
// when the maximum is in the range 0-250 (instead of 0-1000)
if ((position == 0) && (_absMax > 250))
{
position = 1;
}
_currentLevel = permutation[position];
// Decay the absolute maximum (divide by 4)
_absMax >>= 2;
}
}
WebRtc_Word8
AudioLevel::Level() const
{
return _currentLevel;
}
WebRtc_Word16
AudioLevel::LevelFullRange() const
{
return _currentLevelFullRange;
}
} // namespace voe
} // namespace webrtc
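A standalone sketch of the mapping above: the running absolute maximum is divided by 1000 to index the 33-entry permutation table, then decayed by a factor of four so the bar falls back between update intervals.
#include <cstdio>
#include <cstdlib>
int main()
{
    static const char permutation[33] =
        {0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9};
    short absMax = 0;
    const short samples[5] = {1200, -30000, 400, -250, 12000};
    for (int i = 0; i < 5; i++)
    {
        short a = (short)std::abs((int)samples[i]);
        if (a > absMax)
            absMax = a;
    }
    int position = absMax / 1000; // 0-32
    if ((position == 0) && (absMax > 250))
        position = 1;
    std::printf("bar level = %d (full range %d)\n",
                permutation[position], absMax);
    absMax >>= 2; // decay towards silence for the next interval
    return 0;
}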

View File

@ -1,49 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
#define WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
#include "typedefs.h"
#include "voice_engine_defines.h"
namespace webrtc {
class AudioFrame;
namespace voe {
class AudioLevel
{
public:
AudioLevel();
virtual ~AudioLevel();
void ComputeLevel(const AudioFrame& audioFrame);
WebRtc_Word8 Level() const;
WebRtc_Word16 LevelFullRange() const;
void Clear();
private:
enum { kUpdateFrequency = 10};
WebRtc_Word16 _absMax;
WebRtc_Word16 _count;
WebRtc_Word8 _currentLevel;
WebRtc_Word16 _currentLevelFullRange;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H

View File

@ -1,91 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "critical_section_wrapper.h"
#include "monitor_module.h"
namespace webrtc {
namespace voe {
MonitorModule::MonitorModule() :
_callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_observerPtr(NULL),
_lastProcessTime(GET_TIME_IN_MS())
{
}
MonitorModule::~MonitorModule()
{
delete &_callbackCritSect;
}
WebRtc_Word32
MonitorModule::RegisterObserver(MonitorObserver& observer)
{
CriticalSectionScoped lock(_callbackCritSect);
if (_observerPtr)
{
return -1;
}
_observerPtr = &observer;
return 0;
}
WebRtc_Word32
MonitorModule::DeRegisterObserver()
{
CriticalSectionScoped lock(_callbackCritSect);
if (!_observerPtr)
{
return 0;
}
_observerPtr = NULL;
return 0;
}
WebRtc_Word32
MonitorModule::Version(WebRtc_Word8* version,
WebRtc_UWord32& remainingBufferInBytes,
WebRtc_UWord32& position) const
{
return 0;
}
WebRtc_Word32
MonitorModule::ChangeUniqueId(const WebRtc_Word32 id)
{
return 0;
}
WebRtc_Word32
MonitorModule::TimeUntilNextProcess()
{
WebRtc_UWord32 now = GET_TIME_IN_MS();
WebRtc_Word32 timeToNext =
kAverageProcessUpdateTimeMs - (now - _lastProcessTime);
return (timeToNext);
}
WebRtc_Word32
MonitorModule::Process()
{
_lastProcessTime = GET_TIME_IN_MS();
if (_observerPtr)
{
CriticalSectionScoped lock(_callbackCritSect);
_observerPtr->OnPeriodicProcess();
}
return 0;
}
} // namespace voe
} // namespace webrtc

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
#define WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
#include "module.h"
#include "typedefs.h"
#include "voice_engine_defines.h"
class MonitorObserver
{
public:
virtual void OnPeriodicProcess() = 0;
protected:
virtual ~MonitorObserver() {}
};
namespace webrtc {
class CriticalSectionWrapper;
namespace voe {
class MonitorModule : public Module
{
public:
WebRtc_Word32 RegisterObserver(MonitorObserver& observer);
WebRtc_Word32 DeRegisterObserver();
MonitorModule();
virtual ~MonitorModule();
public: // module
WebRtc_Word32 Version(WebRtc_Word8* version,
WebRtc_UWord32& remainingBufferInBytes,
WebRtc_UWord32& position) const;
WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
WebRtc_Word32 TimeUntilNextProcess();
WebRtc_Word32 Process();
private:
enum { kAverageProcessUpdateTimeMs = 1000 };
MonitorObserver* _observerPtr;
CriticalSectionWrapper& _callbackCritSect;
WebRtc_Word32 _lastProcessTime;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
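A usage sketch, assuming the module above is linked: an observer that is called back roughly once per second when a process thread drives the module's Process() function.
#include <cstdio>
#include "monitor_module.h"
class PrintingObserver : public MonitorObserver
{
public:
    virtual void OnPeriodicProcess() { std::printf("periodic tick\n"); }
};
void DriveMonitor(webrtc::voe::MonitorModule& monitor)
{
    PrintingObserver observer;
    monitor.RegisterObserver(observer);
    // A ProcessThread normally makes these calls; done by hand here.
    if (monitor.TimeUntilNextProcess() <= 0)
    {
        monitor.Process(); // invokes observer.OnPeriodicProcess()
    }
    monitor.DeRegisterObserver();
}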

View File

@ -1,748 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "output_mixer.h"
#include "audio_processing.h"
#include "audio_frame_operations.h"
#include "critical_section_wrapper.h"
#include "file_wrapper.h"
#include "trace.h"
#include "statistics.h"
#include "voe_external_media.h"
namespace webrtc {
namespace voe {
void
OutputMixer::NewMixedAudio(const WebRtc_Word32 id,
const AudioFrame& generalAudioFrame,
const AudioFrame** uniqueAudioFrames,
const WebRtc_UWord32 size)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);
_audioFrame = generalAudioFrame;
_audioFrame._id = id;
}
void OutputMixer::MixedParticipants(
const WebRtc_Word32 id,
const ParticipantStatistics* participantStatistics,
const WebRtc_UWord32 size)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::MixedParticipants(id=%d, size=%u)", id, size);
}
void OutputMixer::VADPositiveParticipants(
const WebRtc_Word32 id,
const ParticipantStatistics* participantStatistics,
const WebRtc_UWord32 size)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::VADPositiveParticipants(id=%d, size=%u)",
id, size);
}
void OutputMixer::MixedAudioLevel(const WebRtc_Word32 id,
const WebRtc_UWord32 level)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::MixedAudioLevel(id=%d, level=%u)", id, level);
}
void OutputMixer::PlayNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::PlayNotification(id=%d, durationMs=%d)",
id, durationMs);
// Not implemented yet
}
void OutputMixer::RecordNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::RecordNotification(id=%d, durationMs=%d)",
id, durationMs);
// Not implemented yet
}
void OutputMixer::PlayFileEnded(const WebRtc_Word32 id)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::PlayFileEnded(id=%d)", id);
// not needed
}
void OutputMixer::RecordFileEnded(const WebRtc_Word32 id)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::RecordFileEnded(id=%d)", id);
assert(id == _instanceId);
CriticalSectionScoped cs(_fileCritSect);
_outputFileRecording = false;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::RecordFileEnded() =>"
"output file recorder module is shutdown");
}
WebRtc_Word32
OutputMixer::Create(OutputMixer*& mixer, const WebRtc_UWord32 instanceId)
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
"OutputMixer::Create(instanceId=%d)", instanceId);
mixer = new OutputMixer(instanceId);
if (mixer == NULL)
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
"OutputMixer::Create() unable to allocate memory for"
"mixer");
return -1;
}
return 0;
}
OutputMixer::OutputMixer(const WebRtc_UWord32 instanceId) :
_callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_instanceId(instanceId),
_outputFileRecorderPtr(NULL),
_outputFileRecording(false),
_dtmfGenerator(instanceId),
_mixerModule(*AudioConferenceMixer::
CreateAudioConferenceMixer(instanceId)),
_externalMediaCallbackPtr(NULL),
_audioLevel(),
_externalMedia(false),
_panLeft(1.0f),
_panRight(1.0f),
_mixingFrequencyHz(8000)
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::OutputMixer() - ctor");
if ((_mixerModule.RegisterMixedStreamCallback(*this) == -1) ||
(_mixerModule.RegisterMixerStatusCallback(*this, 100) == -1))
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::OutputMixer() failed to register mixer"
"callbacks");
}
_dtmfGenerator.Init();
}
void
OutputMixer::Destroy(OutputMixer*& mixer)
{
if (mixer)
{
delete mixer;
mixer = NULL;
}
}
OutputMixer::~OutputMixer()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::~OutputMixer() - dtor");
if (_externalMedia)
{
DeRegisterExternalMediaProcessing();
}
{
CriticalSectionScoped cs(_fileCritSect);
if (_outputFileRecorderPtr)
{
_outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
_outputFileRecorderPtr->StopRecording();
FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
_outputFileRecorderPtr = NULL;
}
}
_mixerModule.UnRegisterMixerStatusCallback();
_mixerModule.UnRegisterMixedStreamCallback();
delete &_mixerModule;
delete &_callbackCritSect;
delete &_fileCritSect;
}
WebRtc_Word32
OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::SetEngineInformation()");
_engineStatisticsPtr = &engineStatistics;
return 0;
}
WebRtc_Word32
OutputMixer::SetAudioProcessingModule(
AudioProcessing* audioProcessingModule)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::SetAudioProcessingModule("
"audioProcessingModule=0x%x)", audioProcessingModule);
_audioProcessingModulePtr = audioProcessingModule;
return 0;
}
int OutputMixer::RegisterExternalMediaProcessing(
VoEMediaProcess& process_object)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::RegisterExternalMediaProcessing()");
CriticalSectionScoped cs(_callbackCritSect);
_externalMediaCallbackPtr = &process_object;
_externalMedia = true;
return 0;
}
int OutputMixer::DeRegisterExternalMediaProcessing()
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::DeRegisterExternalMediaProcessing()");
CriticalSectionScoped cs(_callbackCritSect);
_externalMedia = false;
_externalMediaCallbackPtr = NULL;
return 0;
}
int OutputMixer::PlayDtmfTone(WebRtc_UWord8 eventCode, int lengthMs,
int attenuationDb)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"OutputMixer::PlayDtmfTone()");
if (_dtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb) != 0)
{
_engineStatisticsPtr->SetLastError(VE_STILL_PLAYING_PREV_DTMF,
kTraceError,
"OutputMixer::PlayDtmfTone()");
return -1;
}
return 0;
}
int OutputMixer::StartPlayingDtmfTone(WebRtc_UWord8 eventCode,
int attenuationDb)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"OutputMixer::StartPlayingDtmfTone()");
if (_dtmfGenerator.StartTone(eventCode, attenuationDb) != 0)
{
_engineStatisticsPtr->SetLastError(
VE_STILL_PLAYING_PREV_DTMF,
kTraceError,
"OutputMixer::StartPlayingDtmfTone())");
return -1;
}
return 0;
}
int OutputMixer::StopPlayingDtmfTone()
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"OutputMixer::StopPlayingDtmfTone()");
return (_dtmfGenerator.StopTone());
}
WebRtc_Word32
OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
const bool mixable)
{
return _mixerModule.SetMixabilityStatus(participant, mixable);
}
WebRtc_Word32
OutputMixer::MixActiveChannels()
{
return _mixerModule.Process();
}
int
OutputMixer::GetSpeechOutputLevel(WebRtc_UWord32& level)
{
WebRtc_Word8 currentLevel = _audioLevel.Level();
level = static_cast<WebRtc_UWord32> (currentLevel);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechOutputLevel() => level=%u", level);
return 0;
}
int
OutputMixer::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level)
{
WebRtc_Word16 currentLevel = _audioLevel.LevelFullRange();
level = static_cast<WebRtc_UWord32> (currentLevel);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechOutputLevelFullRange() => level=%u", level);
return 0;
}
int
OutputMixer::SetOutputVolumePan(float left, float right)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::SetOutputVolumePan()");
_panLeft = left;
_panRight = right;
return 0;
}
int
OutputMixer::GetOutputVolumePan(float& left, float& right)
{
left = _panLeft;
right = _panRight;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetOutputVolumePan() => left=%2.1f, right=%2.1f",
left, right);
return 0;
}
int OutputMixer::StartRecordingPlayout(const char* fileName,
const CodecInst* codecInst)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::StartRecordingPlayout(fileName=%s)", fileName);
if (_outputFileRecording)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
"StartRecordingPlayout() is already recording");
return 0;
}
FileFormats format;
const WebRtc_UWord32 notificationTime(0);
CodecInst dummyCodec={100,"L16",16000,320,1,320000};
if (codecInst != NULL && codecInst->channels != 1)
{
_engineStatisticsPtr->SetLastError(
VE_BAD_ARGUMENT, kTraceError,
"StartRecordingPlayout() invalid compression");
return(-1);
}
if(codecInst == NULL)
{
format = kFileFormatPcm16kHzFile;
codecInst=&dummyCodec;
}
else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
(STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
(STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
{
format = kFileFormatWavFile;
}
else
{
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(_fileCritSect);
// Destroy the old instance
if (_outputFileRecorderPtr)
{
_outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
_outputFileRecorderPtr = NULL;
}
_outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
_instanceId,
(const FileFormats)format);
if (_outputFileRecorderPtr == NULL)
{
_engineStatisticsPtr->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"StartRecordingPlayout() fileRecorder format isnot correct");
return -1;
}
if (_outputFileRecorderPtr->StartRecordingAudioFile(
fileName,
(const CodecInst&)*codecInst,
notificationTime) != 0)
{
_engineStatisticsPtr->SetLastError(
VE_BAD_FILE, kTraceError,
"StartRecordingAudioFile() failed to start file recording");
_outputFileRecorderPtr->StopRecording();
FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
_outputFileRecorderPtr = NULL;
return -1;
}
_outputFileRecorderPtr->RegisterModuleFileCallback(this);
_outputFileRecording = true;
return 0;
}
int OutputMixer::StartRecordingPlayout(OutStream* stream,
const CodecInst* codecInst)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::StartRecordingPlayout()");
if (_outputFileRecording)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
"StartRecordingPlayout() is already recording");
return 0;
}
FileFormats format;
const WebRtc_UWord32 notificationTime(0);
CodecInst dummyCodec={100,"L16",16000,320,1,320000};
if (codecInst != NULL && codecInst->channels != 1)
{
_engineStatisticsPtr->SetLastError(
VE_BAD_ARGUMENT, kTraceError,
"StartRecordingPlayout() invalid compression");
return(-1);
}
if(codecInst == NULL)
{
format = kFileFormatPcm16kHzFile;
codecInst=&dummyCodec;
}
else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
(STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
(STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
{
format = kFileFormatWavFile;
}
else
{
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(_fileCritSect);
// Destroy the old instance
if (_outputFileRecorderPtr)
{
_outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
_outputFileRecorderPtr = NULL;
}
_outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
_instanceId,
(const FileFormats)format);
if (_outputFileRecorderPtr == NULL)
{
_engineStatisticsPtr->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"StartRecordingPlayout() fileRecorder format isnot correct");
return -1;
}
if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream,
*codecInst,
notificationTime) != 0)
{
_engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
"StartRecordingAudioFile() failed to start file recording");
_outputFileRecorderPtr->StopRecording();
FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
_outputFileRecorderPtr = NULL;
return -1;
}
_outputFileRecorderPtr->RegisterModuleFileCallback(this);
_outputFileRecording = true;
return 0;
}
int OutputMixer::StopRecordingPlayout()
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::StopRecordingPlayout()");
if (!_outputFileRecording)
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"StopRecordingPlayout() file isnot recording");
return -1;
}
CriticalSectionScoped cs(_fileCritSect);
if (_outputFileRecorderPtr->StopRecording() != 0)
{
_engineStatisticsPtr->SetLastError(
VE_STOP_RECORDING_FAILED, kTraceError,
"StopRecording(), could not stop recording");
return -1;
}
_outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
_outputFileRecorderPtr = NULL;
_outputFileRecording = false;
return 0;
}
WebRtc_Word32
OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
const WebRtc_UWord8 channels,
AudioFrame& audioFrame)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::GetMixedAudio(desiredFreqHz=%d, channels=&d)",
desiredFreqHz, channels);
audioFrame = _audioFrame;
// --- Record playout if enabled
{
CriticalSectionScoped cs(_fileCritSect);
if (_outputFileRecording)
{
assert(audioFrame._audioChannel == 1);
if (_outputFileRecorderPtr)
{
_outputFileRecorderPtr->RecordAudioToFile(audioFrame);
}
}
}
int outLen(0);
if (audioFrame._audioChannel == 1)
{
if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
desiredFreqHz,
kResamplerSynchronous) != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::GetMixedAudio() unable to resample - 1");
return -1;
}
}
else
{
if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
desiredFreqHz,
kResamplerSynchronousStereo) != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::GetMixedAudio() unable to resample - 2");
return -1;
}
}
if (_resampler.Push(
_audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
audioFrame._payloadData,
AudioFrame::kMaxAudioFrameSizeSamples,
outLen) == 0)
{
// Ensure that output from resampler matches the audio-frame format.
// Example: 10ms stereo output at 48kHz => outLen = 960 =>
// convert _payloadDataLengthInSamples to 480
audioFrame._payloadDataLengthInSamples =
(outLen / _audioFrame._audioChannel);
audioFrame._frequencyInHz = desiredFreqHz;
}
else
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::GetMixedAudio() resampling failed");
return -1;
}
if ((channels == 2) && (audioFrame._audioChannel == 1))
{
AudioFrameOperations::MonoToStereo(audioFrame);
}
return 0;
}
WebRtc_Word32
OutputMixer::DoOperationsOnCombinedSignal()
{
if (_audioFrame._frequencyInHz != _mixingFrequencyHz)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::DoOperationsOnCombinedSignal() => "
"mixing frequency = %d", _audioFrame._frequencyInHz);
_mixingFrequencyHz = _audioFrame._frequencyInHz;
}
// --- Insert inband Dtmf tone
if (_dtmfGenerator.IsAddingTone())
{
InsertInbandDtmfTone();
}
// Scale left and/or right channel(s) if balance is active
if (_panLeft != 1.0 || _panRight != 1.0)
{
if (_audioFrame._audioChannel == 1)
{
AudioFrameOperations::MonoToStereo(_audioFrame);
}
else
{
// Pure stereo mode (we are receiving a stereo signal).
}
assert(_audioFrame._audioChannel == 2);
AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
}
// --- Far-end Voice Quality Enhancement (AudioProcessing Module)
APMAnalyzeReverseStream();
// --- External media processing
if (_externalMedia)
{
CriticalSectionScoped cs(_callbackCritSect);
const bool isStereo = (_audioFrame._audioChannel == 2);
if (_externalMediaCallbackPtr)
{
_externalMediaCallbackPtr->Process(
-1,
kPlaybackAllChannelsMixed,
(WebRtc_Word16*)_audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples,
_audioFrame._frequencyInHz,
isStereo);
}
}
// --- Measure audio level (0-9) for the combined signal
_audioLevel.ComputeLevel(_audioFrame);
return 0;
}
// ----------------------------------------------------------------------------
// Private methods
// ----------------------------------------------------------------------------
int
OutputMixer::APMAnalyzeReverseStream()
{
int outLen(0);
AudioFrame audioFrame = _audioFrame;
// Convert from mixing frequency to APM frequency.
// Sending side determines APM frequency.
if (audioFrame._audioChannel == 1)
{
_apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
_audioProcessingModulePtr->sample_rate_hz(),
kResamplerSynchronous);
}
else
{
_apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
_audioProcessingModulePtr->sample_rate_hz(),
kResamplerSynchronousStereo);
}
if (_apmResampler.Push(
_audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
audioFrame._payloadData,
AudioFrame::kMaxAudioFrameSizeSamples,
outLen) == 0)
{
audioFrame._payloadDataLengthInSamples =
(outLen / _audioFrame._audioChannel);
audioFrame._frequencyInHz = _audioProcessingModulePtr->sample_rate_hz();
}
if (audioFrame._audioChannel == 2)
{
AudioFrameOperations::StereoToMono(audioFrame);
}
// Perform far-end APM analyze
if (_audioProcessingModulePtr->AnalyzeReverseStream(&audioFrame) == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
"AudioProcessingModule::AnalyzeReverseStream() => error");
}
return 0;
}
int
OutputMixer::InsertInbandDtmfTone()
{
WebRtc_UWord16 sampleRate(0);
_dtmfGenerator.GetSampleRate(sampleRate);
if (sampleRate != _audioFrame._frequencyInHz)
{
// Update sample rate of Dtmf tone since the mixing frequency changed.
_dtmfGenerator.SetSampleRate(
(WebRtc_UWord16)(_audioFrame._frequencyInHz));
// Reset the tone to be added taking the new sample rate into account.
_dtmfGenerator.ResetTone();
}
WebRtc_Word16 toneBuffer[320];
WebRtc_UWord16 toneSamples(0);
if (_dtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
"OutputMixer::InsertInbandDtmfTone() inserting Dtmf"
"tone failed");
return -1;
}
// replace mixed audio with Dtmf tone
if (_audioFrame._audioChannel == 1)
{
// mono
memcpy(_audioFrame._payloadData, toneBuffer, sizeof(WebRtc_Word16)
* toneSamples);
} else
{
// stereo
for (int i = 0; i < _audioFrame._payloadDataLengthInSamples; i++)
{
_audioFrame._payloadData[2 * i] = toneBuffer[i];
_audioFrame._payloadData[2 * i + 1] = 0;
}
}
assert(_audioFrame._payloadDataLengthInSamples == toneSamples);
return 0;
}
} // namespace voe
} // namespace webrtc
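A standalone sketch of the balance step above: once the frame is stereo, each channel of the interleaved buffer is scaled by its own pan factor, which is what AudioFrameOperations::Scale() is expected to do here.
#include <cstdio>
void ScaleInterleaved(short* data, int samplesPerChannel,
                      float panLeft, float panRight)
{
    for (int i = 0; i < samplesPerChannel; i++)
    {
        data[2 * i] = (short)(panLeft * data[2 * i]);
        data[2 * i + 1] = (short)(panRight * data[2 * i + 1]);
    }
}
int main()
{
    short frame[8] = {1000, 1000, -2000, -2000, 3000, 3000, -4000, -4000};
    ScaleInterleaved(frame, 4, 1.0f, 0.5f); // attenuate the right channel
    for (int i = 0; i < 8; i++)
    {
        std::printf("%d ", frame[i]);
    }
    std::printf("\n");
    return 0;
}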

View File

@ -1,159 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H
#define WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H
#include "audio_conference_mixer.h"
#include "audio_conference_mixer_defines.h"
#include "common_types.h"
#include "dtmf_inband.h"
#include "file_recorder.h"
#include "level_indicator.h"
#include "resampler.h"
#include "voice_engine_defines.h"
namespace webrtc {
class AudioProcessing;
class CriticalSectionWrapper;
class FileWrapper;
class VoEMediaProcess;
namespace voe {
class Statistics;
class OutputMixer : public AudioMixerOutputReceiver,
public AudioMixerStatusReceiver,
public FileCallback
{
public:
static WebRtc_Word32 Create(OutputMixer*& mixer,
const WebRtc_UWord32 instanceId);
static void Destroy(OutputMixer*& mixer);
WebRtc_Word32 SetEngineInformation(Statistics& engineStatistics);
WebRtc_Word32 SetAudioProcessingModule(
AudioProcessing* audioProcessingModule);
// VoEExternalMedia
int RegisterExternalMediaProcessing(
VoEMediaProcess& process_object);
int DeRegisterExternalMediaProcessing();
// VoEDtmf
int PlayDtmfTone(WebRtc_UWord8 eventCode,
int lengthMs,
int attenuationDb);
int StartPlayingDtmfTone(WebRtc_UWord8 eventCode,
int attenuationDb);
int StopPlayingDtmfTone();
WebRtc_Word32 MixActiveChannels();
WebRtc_Word32 DoOperationsOnCombinedSignal();
WebRtc_Word32 SetMixabilityStatus(MixerParticipant& participant,
const bool mixable);
WebRtc_Word32 GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
const WebRtc_UWord8 channels,
AudioFrame& audioFrame);
// VoEVolumeControl
int GetSpeechOutputLevel(WebRtc_UWord32& level);
int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level);
int SetOutputVolumePan(float left, float right);
int GetOutputVolumePan(float& left, float& right);
// VoEFile
int StartRecordingPlayout(const char* fileName,
const CodecInst* codecInst);
int StartRecordingPlayout(OutStream* stream,
const CodecInst* codecInst);
int StopRecordingPlayout();
virtual ~OutputMixer();
public: // from AudioMixerOutputReceiver
virtual void NewMixedAudio(
const WebRtc_Word32 id,
const AudioFrame& generalAudioFrame,
const AudioFrame** uniqueAudioFrames,
const WebRtc_UWord32 size);
public: // from AudioMixerStatusReceiver
virtual void MixedParticipants(
const WebRtc_Word32 id,
const ParticipantStatistics* participantStatistics,
const WebRtc_UWord32 size);
virtual void VADPositiveParticipants(
const WebRtc_Word32 id,
const ParticipantStatistics* participantStatistics,
const WebRtc_UWord32 size);
virtual void MixedAudioLevel(const WebRtc_Word32 id,
const WebRtc_UWord32 level);
public: // For file recording
void PlayNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs);
void RecordNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs);
void PlayFileEnded(const WebRtc_Word32 id);
void RecordFileEnded(const WebRtc_Word32 id);
private:
OutputMixer(const WebRtc_UWord32 instanceId);
int APMAnalyzeReverseStream();
int InsertInbandDtmfTone();
private: // uses
Statistics* _engineStatisticsPtr;
AudioProcessing* _audioProcessingModulePtr;
private: // owns
CriticalSectionWrapper& _callbackCritSect;
// Protects _outputFileRecorderPtr and _outputFileRecording
CriticalSectionWrapper& _fileCritSect;
AudioConferenceMixer& _mixerModule;
AudioFrame _audioFrame;
Resampler _resampler; // converts mixed audio to fit ADM format
Resampler _apmResampler; // converts mixed audio to fit APM rate
AudioLevel _audioLevel; // measures audio level for the combined signal
DtmfInband _dtmfGenerator;
WebRtc_UWord32 _instanceId;
VoEMediaProcess* _externalMediaCallbackPtr;
bool _externalMedia;
float _panLeft;
float _panRight;
WebRtc_UWord32 _mixingFrequencyHz;
FileRecorder* _outputFileRecorderPtr;
bool _outputFileRecording;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H

View File

@ -1,60 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "critical_section_wrapper.h"
#include "ref_count.h"
namespace webrtc {
namespace voe {
RefCount::RefCount() :
_count(0),
_crit(*CriticalSectionWrapper::CreateCriticalSection())
{
}
RefCount::~RefCount()
{
delete &_crit;
}
RefCount&
RefCount::operator++(int)
{
CriticalSectionScoped lock(_crit);
_count++;
return *this;
}
RefCount&
RefCount::operator--(int)
{
CriticalSectionScoped lock(_crit);
_count--;
return *this;
}
void
RefCount::Reset()
{
CriticalSectionScoped lock(_crit);
_count = 0;
}
int
RefCount::GetCount() const
{
return _count;
}
} // namespace voe
} // namespace webrtc

View File

@ -1,36 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_REF_COUNT_H
#define WEBRTC_VOICE_ENGINE_REF_COUNT_H
namespace webrtc {
class CriticalSectionWrapper;
namespace voe {
class RefCount
{
public:
RefCount();
~RefCount();
RefCount& operator++(int);
RefCount& operator--(int);
void Reset();
int GetCount() const;
private:
volatile int _count;
CriticalSectionWrapper& _crit;
};
} // namespace voe
} // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_REF_COUNT_H
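An aside: the same counter can be written lock-free today. A sketch using std::atomic (C++11, not available to the original code) that keeps the postfix-operator interface of the class above.
#include <atomic>
class AtomicRefCount
{
public:
    AtomicRefCount() : _count(0) {}
    AtomicRefCount& operator++(int)
    {
        _count.fetch_add(1);
        return *this;
    }
    AtomicRefCount& operator--(int)
    {
        _count.fetch_sub(1);
        return *this;
    }
    void Reset() { _count.store(0); }
    int GetCount() const { return _count.load(); }
private:
    std::atomic<int> _count;
};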

View File

@ -1,98 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "shared_data.h"
#include "audio_processing.h"
#include "critical_section_wrapper.h"
#include "channel.h"
#include "output_mixer.h"
#include "trace.h"
#include "transmit_mixer.h"
namespace webrtc {
namespace voe {
static WebRtc_Word32 _gInstanceCounter = 0;
SharedData::SharedData() :
_instanceId(++_gInstanceCounter),
_channelManager(_gInstanceCounter),
_engineStatistics(_gInstanceCounter),
_usingExternalAudioDevice(false),
_audioDevicePtr(NULL),
_audioProcessingModulePtr(NULL),
_moduleProcessThreadPtr(ProcessThread::CreateProcessThread()),
_apiCritPtr(CriticalSectionWrapper::CreateCriticalSection()),
_externalRecording(false),
_externalPlayout(false)
{
Trace::CreateTrace();
Trace::SetLevelFilter(WEBRTC_VOICE_ENGINE_DEFAULT_TRACE_FILTER);
if (OutputMixer::Create(_outputMixerPtr, _gInstanceCounter) == 0)
{
_outputMixerPtr->SetEngineInformation(_engineStatistics);
}
if (TransmitMixer::Create(_transmitMixerPtr, _gInstanceCounter) == 0)
{
_transmitMixerPtr->SetEngineInformation(*_moduleProcessThreadPtr,
_engineStatistics,
_channelManager);
}
_audioDeviceLayer = AudioDeviceModule::kPlatformDefaultAudio;
}
SharedData::~SharedData()
{
OutputMixer::Destroy(_outputMixerPtr);
TransmitMixer::Destroy(_transmitMixerPtr);
if (!_usingExternalAudioDevice)
{
AudioDeviceModule::Destroy(_audioDevicePtr);
}
AudioProcessing::Destroy(_audioProcessingModulePtr);
delete _apiCritPtr;
ProcessThread::DestroyProcessThread(_moduleProcessThreadPtr);
Trace::ReturnTrace();
}
WebRtc_UWord16
SharedData::NumOfSendingChannels()
{
WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
if (numOfChannels <= 0)
{
return 0;
}
WebRtc_UWord16 nChannelsSending(0);
WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
_channelManager.GetChannelIds(channelsArray, numOfChannels);
for (int i = 0; i < numOfChannels; i++)
{
voe::ScopedChannel sc(_channelManager, channelsArray[i]);
Channel* chPtr = sc.ChannelPtr();
if (chPtr)
{
if (chPtr->Sending())
{
nChannelsSending++;
}
}
}
delete [] channelsArray;
return nChannelsSending;
}
} // namespace voe
} // namespace webrtc

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_SHARED_DATA_H
#define WEBRTC_VOICE_ENGINE_SHARED_DATA_H
#include "voice_engine_defines.h"
#include "channel_manager.h"
#include "statistics.h"
#include "process_thread.h"
#include "audio_device.h"
#include "audio_processing.h"
class ProcessThread;
namespace webrtc {
class CriticalSectionWrapper;
namespace voe {
class TransmitMixer;
class OutputMixer;
class SharedData
{
protected:
WebRtc_UWord16 NumOfSendingChannels();
protected:
const WebRtc_UWord32 _instanceId;
CriticalSectionWrapper* _apiCritPtr;
ChannelManager _channelManager;
Statistics _engineStatistics;
bool _usingExternalAudioDevice;
AudioDeviceModule* _audioDevicePtr;
OutputMixer* _outputMixerPtr;
TransmitMixer* _transmitMixerPtr;
AudioProcessing* _audioProcessingModulePtr;
ProcessThread* _moduleProcessThreadPtr;
protected:
bool _externalRecording;
bool _externalPlayout;
AudioDeviceModule::AudioLayer _audioDeviceLayer;
protected:
SharedData();
virtual ~SharedData();
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_SHARED_DATA_H

View File

@ -1,99 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <cassert>
#include <cstring> // strlen
#include <stdio.h>
#include "statistics.h"
#include "trace.h"
#include "critical_section_wrapper.h"
namespace webrtc {
namespace voe {
Statistics::Statistics(const WebRtc_UWord32 instanceId) :
_critPtr(CriticalSectionWrapper::CreateCriticalSection()),
_instanceId(instanceId),
_isInitialized(false),
_lastError(0)
{
}
Statistics::~Statistics()
{
if (_critPtr)
{
delete _critPtr;
_critPtr = NULL;
}
}
WebRtc_Word32 Statistics::SetInitialized()
{
_isInitialized = true;
return 0;
}
WebRtc_Word32 Statistics::SetUnInitialized()
{
_isInitialized = false;
return 0;
}
bool Statistics::Initialized() const
{
return _isInitialized;
}
WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error) const
{
CriticalSectionScoped cs(*_critPtr);
_lastError = error;
return 0;
}
WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error,
const TraceLevel level) const
{
CriticalSectionScoped cs(*_critPtr);
_lastError = error;
WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1),
"error code is set to %d",
_lastError);
return 0;
}
WebRtc_Word32 Statistics::SetLastError(
const WebRtc_Word32 error,
const TraceLevel level, const char* msg) const
{
CriticalSectionScoped cs(*_critPtr);
char traceMessage[KTraceMaxMessageSize];
assert(strlen(msg) < KTraceMaxMessageSize);
_lastError = error;
sprintf(traceMessage, "%s (error=%d)", msg, error);
WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1), "%s",
traceMessage);
return 0;
}
WebRtc_Word32 Statistics::LastError() const
{
CriticalSectionScoped cs(*_critPtr);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"LastError() => %d", _lastError);
return _lastError;
}
} // namespace voe
} // namespace webrtc
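An aside: SetLastError() above relies on an assert to keep sprintf() within bounds. A bounded sketch of the same formatting with snprintf (C99/C++11), which truncates instead of overflowing when 'msg' plus the suffix would exceed the buffer.
#include <cstdio>
void FormatError(char* out, unsigned outSize, const char* msg, int error)
{
    // snprintf NUL-terminates (for outSize > 0) and never writes more
    // than outSize bytes, so no length assert is needed.
    std::snprintf(out, outSize, "%s (error=%d)", msg, error);
}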

View File

@ -1,54 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_STATISTICS_H
#define WEBRTC_VOICE_ENGINE_STATISTICS_H
#include "common_types.h"
#include "typedefs.h"
#include "voice_engine_defines.h"
#include "voe_errors.h"
namespace webrtc {
class CriticalSectionWrapper;
namespace voe {
class Statistics
{
public:
enum {KTraceMaxMessageSize = 256};
public:
Statistics(const WebRtc_UWord32 instanceId);
~Statistics();
WebRtc_Word32 SetInitialized();
WebRtc_Word32 SetUnInitialized();
bool Initialized() const;
WebRtc_Word32 SetLastError(const WebRtc_Word32 error) const;
WebRtc_Word32 SetLastError(const WebRtc_Word32 error,
const TraceLevel level) const;
WebRtc_Word32 SetLastError(const WebRtc_Word32 error,
const TraceLevel level,
const char* msg) const;
WebRtc_Word32 LastError() const;
private:
CriticalSectionWrapper* _critPtr;
const WebRtc_UWord32 _instanceId;
mutable WebRtc_Word32 _lastError;
bool _isInitialized;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_STATISTICS_H

File diff suppressed because it is too large

View File

@ -1,227 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
#define WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
#include "common_types.h"
#include "voe_base.h"
#include "file_player.h"
#include "file_recorder.h"
#include "level_indicator.h"
#include "module_common_types.h"
#include "monitor_module.h"
#include "resampler.h"
#include "voice_engine_defines.h"
namespace webrtc {
class AudioProcessing;
class ProcessThread;
class VoEExternalMedia;
class VoEMediaProcess;
namespace voe {
class ChannelManager;
class MixedAudio;
class Statistics;
class TransmitMixer : public MonitorObserver,
public FileCallback
{
public:
static WebRtc_Word32 Create(TransmitMixer*& mixer,
const WebRtc_UWord32 instanceId);
static void Destroy(TransmitMixer*& mixer);
WebRtc_Word32 SetEngineInformation(ProcessThread& processThread,
Statistics& engineStatistics,
ChannelManager& channelManager);
WebRtc_Word32 SetAudioProcessingModule(
AudioProcessing* audioProcessingModule);
WebRtc_Word32 PrepareDemux(const WebRtc_Word8* audioSamples,
const WebRtc_UWord32 nSamples,
const WebRtc_UWord8 nChannels,
const WebRtc_UWord32 samplesPerSec,
const WebRtc_UWord16 totalDelayMS,
const WebRtc_Word32 clockDrift,
const WebRtc_UWord16 currentMicLevel);
WebRtc_Word32 DemuxAndMix();
WebRtc_Word32 EncodeAndSend();
WebRtc_UWord32 CaptureLevel() const;
WebRtc_Word32 StopSend();
void SetRTPAudioLevelIndicationStatus(bool enable)
{ _includeAudioLevelIndication = enable; }
// VoEDtmf
void UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs);
// VoEExternalMedia
int RegisterExternalMediaProcessing(VoEMediaProcess& process_object);
int DeRegisterExternalMediaProcessing();
WebRtc_UWord32 GetMixingFrequency();
// VoEVolumeControl
int SetMute(const bool enable);
bool Mute() const;
WebRtc_Word8 AudioLevel() const;
WebRtc_Word16 AudioLevelFullRange() const;
bool IsRecordingCall();
bool IsRecordingMic();
int StartPlayingFileAsMicrophone(const char* fileName,
const bool loop,
const FileFormats format,
const int startPosition,
const float volumeScaling,
const int stopPosition,
const CodecInst* codecInst);
int StartPlayingFileAsMicrophone(InStream* stream,
const FileFormats format,
const int startPosition,
const float volumeScaling,
const int stopPosition,
const CodecInst* codecInst);
int StopPlayingFileAsMicrophone();
int IsPlayingFileAsMicrophone() const;
int ScaleFileAsMicrophonePlayout(const float scale);
int StartRecordingMicrophone(const char* fileName,
const CodecInst* codecInst);
int StartRecordingMicrophone(OutStream* stream,
const CodecInst* codecInst);
int StopRecordingMicrophone();
int StartRecordingCall(const char* fileName, const CodecInst* codecInst);
int StartRecordingCall(OutStream* stream, const CodecInst* codecInst);
int StopRecordingCall();
void SetMixWithMicStatus(bool mix);
WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
virtual ~TransmitMixer();
public: // MonitorObserver
void OnPeriodicProcess();
public: // FileCallback
void PlayNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs);
void RecordNotification(const WebRtc_Word32 id,
const WebRtc_UWord32 durationMs);
void PlayFileEnded(const WebRtc_Word32 id);
void RecordFileEnded(const WebRtc_Word32 id);
private:
TransmitMixer(const WebRtc_UWord32 instanceId);
private:
WebRtc_Word32 GenerateAudioFrame(const WebRtc_Word16 audioSamples[],
const WebRtc_UWord32 nSamples,
const WebRtc_UWord8 nChannels,
const WebRtc_UWord32 samplesPerSec,
const WebRtc_UWord32 mixingFrequency);
WebRtc_Word32 RecordAudioToFile(const WebRtc_UWord32 mixingFrequency);
WebRtc_Word32 MixOrReplaceAudioWithFile(
const WebRtc_UWord32 mixingFrequency);
WebRtc_Word32 APMProcessStream(const WebRtc_UWord16 totalDelayMS,
const WebRtc_Word32 clockDrift,
const WebRtc_UWord16 currentMicLevel);
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TypingDetection();
#endif
private: // uses
Statistics* _engineStatisticsPtr;
ChannelManager* _channelManagerPtr;
AudioProcessing* _audioProcessingModulePtr;
VoiceEngineObserver* _voiceEngineObserverPtr;
ProcessThread* _processThreadPtr;
private: // owns
MonitorModule _monitorModule;
AudioFrame _audioFrame;
Resampler _audioResampler; // ADM sample rate -> mixing rate
FilePlayer* _filePlayerPtr;
FileRecorder* _fileRecorderPtr;
FileRecorder* _fileCallRecorderPtr;
WebRtc_UWord32 _filePlayerId;
WebRtc_UWord32 _fileRecorderId;
WebRtc_UWord32 _fileCallRecorderId;
bool _filePlaying;
bool _fileRecording;
bool _fileCallRecording;
voe::AudioLevel _audioLevel;
// protect file instances and their variables in MixedParticipants()
CriticalSectionWrapper& _critSect;
CriticalSectionWrapper& _callbackCritSect;
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
WebRtc_Word32 _timeActive;
WebRtc_Word32 _penaltyCounter;
WebRtc_UWord32 _typingNoiseWarning;
#endif
WebRtc_UWord32 _saturationWarning;
WebRtc_UWord32 _noiseWarning;
private:
WebRtc_UWord32 _instanceId;
bool _mixFileWithMicrophone;
WebRtc_UWord32 _captureLevel;
bool _externalMedia;
VoEMediaProcess* _externalMediaCallbackPtr;
bool _mute;
WebRtc_Word32 _remainingMuteMicTimeMs;
WebRtc_UWord32 _mixingFrequency;
bool _includeAudioLevelIndication;
WebRtc_UWord8 _audioLevel_dBov;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H

View File

@ -1,120 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "utility.h"
#include "module.h"
#include "trace.h"
namespace webrtc
{
namespace voe
{
void Utility::MixWithSat(WebRtc_Word16 target[],
const WebRtc_Word16 source[],
WebRtc_UWord16 len)
{
WebRtc_Word32 temp(0);
for (int i = 0; i < len; i++)
{
temp = source[i] + target[i];
if (temp > 32767)
target[i] = 32767;
else if (temp < -32768)
target[i] = -32768;
else
target[i] = (WebRtc_Word16) temp;
}
}
void Utility::MixSubtractWithSat(WebRtc_Word16 target[],
const WebRtc_Word16 source[],
WebRtc_UWord16 len)
{
WebRtc_Word32 temp(0);
for (int i = 0; i < len; i++)
{
temp = target[i] - source[i];
if (temp > 32767)
target[i] = 32767;
else if (temp < -32768)
target[i] = -32768;
else
target[i] = (WebRtc_Word16) temp;
}
}
void Utility::MixAndScaleWithSat(WebRtc_Word16 target[],
const WebRtc_Word16 source[], float scale,
WebRtc_UWord16 len)
{
WebRtc_Word32 temp(0);
for (int i = 0; i < len; i++)
{
temp = (WebRtc_Word32) (target[i] + scale * source[i]);
if (temp > 32767)
target[i] = 32767;
else if (temp < -32768)
target[i] = -32768;
else
target[i] = (WebRtc_Word16) temp;
}
}
void Utility::Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len)
{
for (int i = 0; i < len; i++)
{
vector[i] = (WebRtc_Word16) (scale * vector[i]);
}
}
void Utility::ScaleWithSat(WebRtc_Word16 vector[], float scale,
WebRtc_UWord16 len)
{
WebRtc_Word32 temp(0);
for (int i = 0; i < len; i++)
{
temp = (WebRtc_Word32) (scale * vector[i]);
if (temp > 32767)
vector[i] = 32767;
else if (temp < -32768)
vector[i] = -32768;
else
vector[i] = (WebRtc_Word16) temp;
}
}
void Utility::TraceModuleVersion(const WebRtc_Word32 id,
const Module& module)
{
WebRtc_Word8 version[Utility::kMaxVersionSize] = { 0 };
WebRtc_UWord32 remainingBufferInBytes = Utility::kMaxVersionSize;
WebRtc_UWord32 position = 0;
if (module.Version(version, remainingBufferInBytes, position) == 0)
{
WebRtc_Word8* ptr(NULL);
while ((ptr = strchr(version, '\t')) != NULL)
{
*ptr = ' ';
}
while ((ptr = strchr(version, '\n')) != NULL)
{
*ptr = ' ';
}
WEBRTC_TRACE(kTraceInfo, kTraceVoice, id, "%s", version);
}
}
} // namespace voe
} // namespace webrtc
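A minimal usage sketch for the saturating helpers above (the buffer names and the 10 ms / 16 kHz frame size are illustrative assumptions, not taken from the tree):

    // Mix one 10 ms mono frame at 16 kHz (160 samples) into a capture buffer.
    // MixWithSat() clamps the sum to the WebRtc_Word16 range [-32768, 32767]
    // instead of letting it wrap around.
    WebRtc_Word16 capture[160] = {0};
    WebRtc_Word16 fileAudio[160] = {0};
    webrtc::voe::Utility::MixWithSat(capture, fileAudio, 160);
    // Attenuate the mixed frame by half; the cast back to 16 bits saturates too.
    webrtc::voe::Utility::ScaleWithSat(capture, 0.5f, 160);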

View File

@ -1,62 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* Contains functions often used by different parts of VoiceEngine.
*/
#ifndef WEBRTC_VOICE_ENGINE_UTILITY_H
#define WEBRTC_VOICE_ENGINE_UTILITY_H
#include "typedefs.h"
#include "voice_engine_defines.h"
namespace webrtc
{
class Module;
namespace voe
{
class Utility
{
public:
static void MixWithSat(WebRtc_Word16 target[],
const WebRtc_Word16 source[],
WebRtc_UWord16 len);
static void MixSubtractWithSat(WebRtc_Word16 target[],
const WebRtc_Word16 source[],
WebRtc_UWord16 len);
static void MixAndScaleWithSat(WebRtc_Word16 target[],
const WebRtc_Word16 source[],
float scale,
WebRtc_UWord16 len);
static void Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len);
static void ScaleWithSat(WebRtc_Word16 vector[],
float scale,
WebRtc_UWord16 len);
static void TraceModuleVersion(const WebRtc_Word32 id,
const Module& module);
private:
enum {kMaxVersionSize = 640};
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_UTILITY_H

File diff suppressed because it is too large

View File

@ -1,102 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
#include "voe_audio_processing.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc {
class VoEAudioProcessingImpl : public virtual voe::SharedData,
public VoEAudioProcessing,
public voe::RefCount
{
public:
virtual int Release();
virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged);
virtual int GetNsStatus(bool& enabled, NsModes& mode);
virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged);
virtual int GetAgcStatus(bool& enabled, AgcModes& mode);
virtual int SetAgcConfig(const AgcConfig config);
virtual int GetAgcConfig(AgcConfig& config);
virtual int SetRxNsStatus(int channel,
bool enable,
NsModes mode = kNsUnchanged);
virtual int GetRxNsStatus(int channel, bool& enabled, NsModes& mode);
virtual int SetRxAgcStatus(int channel,
bool enable,
AgcModes mode = kAgcUnchanged);
virtual int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode);
virtual int SetRxAgcConfig(int channel, const AgcConfig config);
virtual int GetRxAgcConfig(int channel, AgcConfig& config);
virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged);
virtual int GetEcStatus(bool& enabled, EcModes& mode);
virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
bool enableCNG = true);
virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG);
virtual int RegisterRxVadObserver(int channel,
VoERxVadCallback& observer);
virtual int DeRegisterRxVadObserver(int channel);
virtual int VoiceActivityIndicator(int channel);
virtual int SetMetricsStatus(bool enable);
virtual int GetMetricsStatus(bool& enabled);
virtual int GetSpeechMetrics(int& levelTx, int& levelRx);
virtual int GetNoiseMetrics(int& levelTx, int& levelRx);
virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP);
virtual int StartDebugRecording(const char* fileNameUTF8);
virtual int StopDebugRecording();
virtual int SetTypingDetectionStatus(bool enable);
virtual int GetTypingDetectionStatus(bool& enabled);
protected:
VoEAudioProcessingImpl();
virtual ~VoEAudioProcessingImpl();
private:
bool _isAecMode;
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
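A hedged sketch of wiring up the receiving-side VAD hooks declared above; the observer class, the `voe` engine pointer, and the channel id are assumptions for illustration, with error checking omitted as in the interface headers:

    // Observer whose OnRxVad() is invoked with the latest VAD decision for
    // the registered channel.
    class RxVadLogger : public webrtc::VoERxVadCallback
    {
    public:
        virtual void OnRxVad(int channel, int vadDecision)
        {
            // vadDecision reflects voice activity in the received stream.
        }
    };
    // VoEAudioProcessing* ap = VoEAudioProcessing::GetInterface(voe);
    // RxVadLogger logger;
    // ap->RegisterRxVadObserver(channel, logger);
    // ...
    // ap->DeRegisterRxVadObserver(channel);
    // ap->Release();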

File diff suppressed because it is too large

View File

@ -1,166 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
#include "voe_base.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc
{
class ProcessThread;
class VoEBaseImpl: public virtual voe::SharedData,
public VoEBase,
public voe::RefCount,
public AudioTransport,
public AudioDeviceObserver
{
public:
virtual int Release();
virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
virtual int DeRegisterVoiceEngineObserver();
virtual int RegisterAudioDeviceModule(AudioDeviceModule& adm);
virtual int DeRegisterAudioDeviceModule();
virtual int Init();
virtual int Terminate();
virtual int MaxNumOfChannels();
virtual int CreateChannel();
virtual int DeleteChannel(int channel);
virtual int SetLocalReceiver(int channel, int port,
int RTCPport = kVoEDefault,
const char ipAddr[64] = NULL,
const char multiCastAddr[64] = NULL);
virtual int GetLocalReceiver(int channel, int& port, int& RTCPport,
char ipAddr[64]);
virtual int SetSendDestination(int channel, int port,
const char ipAddr[64],
int sourcePort = kVoEDefault,
int RTCPport = kVoEDefault);
virtual int GetSendDestination(int channel,
int& port,
char ipAddr[64],
int& sourcePort,
int& RTCPport);
virtual int StartReceive(int channel);
virtual int StartPlayout(int channel);
virtual int StartSend(int channel);
virtual int StopReceive(int channel);
virtual int StopPlayout(int channel);
virtual int StopSend(int channel);
virtual int SetNetEQPlayoutMode(int channel, NetEqModes mode);
virtual int GetNetEQPlayoutMode(int channel, NetEqModes& mode);
virtual int SetNetEQBGNMode(int channel, NetEqBgnModes mode);
virtual int GetNetEQBGNMode(int channel, NetEqBgnModes& mode);
virtual int SetOnHoldStatus(int channel,
bool enable,
OnHoldModes mode = kHoldSendAndPlay);
virtual int GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode);
virtual int GetVersion(char version[1024]);
virtual int LastError();
// AudioTransport
virtual WebRtc_Word32
RecordedDataIsAvailable(const WebRtc_Word8* audioSamples,
const WebRtc_UWord32 nSamples,
const WebRtc_UWord8 nBytesPerSample,
const WebRtc_UWord8 nChannels,
const WebRtc_UWord32 samplesPerSec,
const WebRtc_UWord32 totalDelayMS,
const WebRtc_Word32 clockDrift,
const WebRtc_UWord32 currentMicLevel,
WebRtc_UWord32& newMicLevel);
virtual WebRtc_Word32 NeedMorePlayData(const WebRtc_UWord32 nSamples,
const WebRtc_UWord8 nBytesPerSample,
const WebRtc_UWord8 nChannels,
const WebRtc_UWord32 samplesPerSec,
WebRtc_Word8* audioSamples,
WebRtc_UWord32& nSamplesOut);
// AudioDeviceObserver
virtual void OnErrorIsReported(const ErrorCode error);
virtual void OnWarningIsReported(const WarningCode warning);
protected:
VoEBaseImpl();
virtual ~VoEBaseImpl();
private:
WebRtc_Word32 StartPlayout();
WebRtc_Word32 StopPlayout();
WebRtc_Word32 StartSend();
WebRtc_Word32 StopSend();
WebRtc_Word32 TerminateInternal();
WebRtc_Word32 AddBuildInfo(char* str) const;
WebRtc_Word32 AddVoEVersion(char* str) const;
#ifdef WEBRTC_EXTERNAL_TRANSPORT
WebRtc_Word32 AddExternalTransportBuild(char* str) const;
#else
WebRtc_Word32 AddSocketModuleVersion(char* str) const;
#endif
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
WebRtc_Word32 AddExternalRecAndPlayoutBuild(char* str) const;
#endif
WebRtc_Word32 AddModuleVersion(Module* module, char* str) const;
WebRtc_Word32 AddADMVersion(char* str) const;
int AddAudioProcessingModuleVersion(char* str) const;
WebRtc_Word32 AddACMVersion(char* str) const;
WebRtc_Word32 AddConferenceMixerVersion(char* str) const;
#ifdef WEBRTC_SRTP
WebRtc_Word32 AddSRTPModuleVersion(char* str) const;
#endif
WebRtc_Word32 AddRtpRtcpModuleVersion(char* str) const;
WebRtc_Word32 AddSPLIBVersion(char* str) const;
VoiceEngineObserver* _voiceEngineObserverPtr;
CriticalSectionWrapper& _callbackCritSect;
bool _voiceEngineObserver;
WebRtc_UWord32 _oldVoEMicLevel;
WebRtc_UWord32 _oldMicLevel;
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
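A rough end-to-end sketch of driving the base API declared above, in the same omit-error-checking style as the interface headers; the port numbers and loopback address are placeholder assumptions:

    // VoEBase* base = VoEBase::GetInterface(voe);
    // base->Init();
    // int ch = base->CreateChannel();
    // base->SetLocalReceiver(ch, 8000);                 // listen on port 8000
    // base->SetSendDestination(ch, 8000, "127.0.0.1");  // loopback for testing
    // base->StartReceive(ch);
    // base->StartPlayout(ch);
    // base->StartSend(ch);
    // ...
    // base->StopSend(ch);
    // base->StopPlayout(ch);
    // base->StopReceive(ch);
    // base->DeleteChannel(ch);
    // base->Terminate();
    // base->Release();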

View File

@ -1,564 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_call_report_impl.h"
#include "audio_processing.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "file_wrapper.h"
#include "trace.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc
{
VoECallReport* VoECallReport::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoECallReportImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
VoECallReportImpl::VoECallReportImpl() :
_file(*FileWrapper::Create())
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"VoECallReportImpl() - ctor");
}
VoECallReportImpl::~VoECallReportImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"~VoECallReportImpl() - dtor");
delete &_file;
}
int VoECallReportImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"VoECallReportImpl::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset();
_engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"VoECallReportImpl reference counter = %d", refCount);
return (refCount);
}
int VoECallReportImpl::ResetCallReportStatistics(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"ResetCallReportStatistics(channel=%d)", channel);
    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
assert(_audioProcessingModulePtr != NULL);
bool levelMode =
_audioProcessingModulePtr->level_estimator()->is_enabled();
bool echoMode =
_audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
    // We always set the same mode for the level and echo metrics
if (levelMode != echoMode)
{
_engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
"ResetCallReportStatistics() level mode "
"and echo mode are not the same");
return -1;
}
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
" current AudioProcessingModule metric currentState %d",
levelMode);
// Reset the APM statistics
if ((_audioProcessingModulePtr->level_estimator()->Enable(true) != 0)
|| (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(true)
!= 0))
{
_engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
"ResetCallReportStatistics() unable to "
"set the AudioProcessingModule metrics "
"state");
return -1;
}
// Restore metric states
_audioProcessingModulePtr->level_estimator()->Enable(levelMode);
_audioProcessingModulePtr->echo_cancellation()->enable_metrics(echoMode);
// Reset channel dependent statistics
if (channel != -1)
{
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"ResetCallReportStatistics() failed "
"to locate channel");
return -1;
}
channelPtr->ResetDeadOrAliveCounters();
channelPtr->ResetRTCPStatistics();
}
else
{
WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
if (numOfChannels <= 0)
{
return 0;
}
WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
_channelManager.GetChannelIds(channelsArray, numOfChannels);
for (int i = 0; i < numOfChannels; i++)
{
voe::ScopedChannel sc(_channelManager, channelsArray[i]);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr)
{
channelPtr->ResetDeadOrAliveCounters();
channelPtr->ResetRTCPStatistics();
}
}
delete[] channelsArray;
}
return 0;
}
int VoECallReportImpl::GetSpeechAndNoiseSummary(LevelStatistics& stats)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSpeechAndNoiseSummary()");
    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
assert(_audioProcessingModulePtr != NULL);
return (GetSpeechAndNoiseSummaryInternal(stats));
}
int VoECallReportImpl::GetSpeechAndNoiseSummaryInternal(LevelStatistics& stats)
{
int ret(0);
bool mode(false);
LevelEstimator::Metrics metrics;
LevelEstimator::Metrics reverseMetrics;
// Ensure that level metrics is enabled
mode = _audioProcessingModulePtr->level_estimator()->is_enabled();
if (mode != false)
{
ret = _audioProcessingModulePtr->level_estimator()->GetMetrics(
&metrics, &reverseMetrics);
if (ret != 0)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" GetSpeechAndNoiseSummary(), AudioProcessingModule "
"level metrics error");
}
}
else
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" GetSpeechAndNoiseSummary(), AudioProcessingModule level "
"metrics is not enabled");
}
if ((ret != 0) || (mode == false))
{
// Mark complete struct as invalid (-100 dBm0)
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" unable to retrieve level metrics from the "
"AudioProcessingModule");
stats.noise_rx.min = -100;
stats.noise_rx.max = -100;
stats.noise_rx.average = -100;
stats.speech_rx.min = -100;
stats.speech_rx.max = -100;
stats.speech_rx.average = -100;
stats.noise_tx.min = -100;
stats.noise_tx.max = -100;
stats.noise_tx.average = -100;
stats.speech_tx.min = -100;
stats.speech_tx.max = -100;
stats.speech_tx.average = -100;
}
else
{
// Deliver output results to user
stats.noise_rx.min = reverseMetrics.noise.minimum;
stats.noise_rx.max = reverseMetrics.noise.maximum;
stats.noise_rx.average = reverseMetrics.noise.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" noise_rx: min=%d, max=%d, avg=%d", stats.noise_rx.min,
stats.noise_rx.max, stats.noise_rx.average);
stats.noise_tx.min = metrics.noise.minimum;
stats.noise_tx.max = metrics.noise.maximum;
stats.noise_tx.average = metrics.noise.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" noise_tx: min=%d, max=%d, avg=%d", stats.noise_tx.min,
stats.noise_tx.max, stats.noise_tx.average);
stats.speech_rx.min = reverseMetrics.speech.minimum;
stats.speech_rx.max = reverseMetrics.speech.maximum;
stats.speech_rx.average = reverseMetrics.speech.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" speech_rx: min=%d, max=%d, avg=%d", stats.speech_rx.min,
stats.speech_rx.max, stats.speech_rx.average);
stats.speech_tx.min = metrics.speech.minimum;
stats.speech_tx.max = metrics.speech.maximum;
stats.speech_tx.average = metrics.speech.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" speech_tx: min=%d, max=%d, avg=%d", stats.speech_tx.min,
stats.speech_tx.max, stats.speech_tx.average);
}
return 0;
}
int VoECallReportImpl::GetEchoMetricSummary(EchoStatistics& stats)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetEchoMetricSummary()");
    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
assert(_audioProcessingModulePtr != NULL);
return (GetEchoMetricSummaryInternal(stats));
}
int VoECallReportImpl::GetEchoMetricSummaryInternal(EchoStatistics& stats)
{
// Retrieve echo metrics from the AudioProcessingModule
int ret(0);
bool mode(false);
EchoCancellation::Metrics metrics;
// Ensure that echo metrics is enabled
mode =
_audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
if (mode != false)
{
ret =
_audioProcessingModulePtr->echo_cancellation()->GetMetrics(&metrics);
if (ret != 0)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" AudioProcessingModule GetMetrics() => error");
}
}
else
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" AudioProcessingModule echo metrics is not enabled");
}
if ((ret != 0) || (mode == false))
{
// Mark complete struct as invalid (-100 dB)
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" unable to retrieve echo metrics from the "
"AudioProcessingModule");
stats.erl.min = -100;
stats.erl.max = -100;
stats.erl.average = -100;
stats.erle.min = -100;
stats.erle.max = -100;
stats.erle.average = -100;
stats.rerl.min = -100;
stats.rerl.max = -100;
stats.rerl.average = -100;
stats.a_nlp.min = -100;
stats.a_nlp.max = -100;
stats.a_nlp.average = -100;
}
else
{
// Deliver output results to user
stats.erl.min = metrics.echo_return_loss.minimum;
stats.erl.max = metrics.echo_return_loss.maximum;
stats.erl.average = metrics.echo_return_loss.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" erl: min=%d, max=%d, avg=%d", stats.erl.min,
stats.erl.max, stats.erl.average);
stats.erle.min = metrics.echo_return_loss_enhancement.minimum;
stats.erle.max = metrics.echo_return_loss_enhancement.maximum;
stats.erle.average = metrics.echo_return_loss_enhancement.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" erle: min=%d, max=%d, avg=%d", stats.erle.min,
stats.erle.max, stats.erle.average);
stats.rerl.min = metrics.residual_echo_return_loss.minimum;
stats.rerl.max = metrics.residual_echo_return_loss.maximum;
stats.rerl.average = metrics.residual_echo_return_loss.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" rerl: min=%d, max=%d, avg=%d", stats.rerl.min,
stats.rerl.max, stats.rerl.average);
stats.a_nlp.min = metrics.a_nlp.minimum;
stats.a_nlp.max = metrics.a_nlp.maximum;
stats.a_nlp.average = metrics.a_nlp.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" a_nlp: min=%d, max=%d, avg=%d", stats.a_nlp.min,
stats.a_nlp.max, stats.a_nlp.average);
}
return 0;
}
int VoECallReportImpl::GetRoundTripTimeSummary(int channel, StatVal& delaysMs)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetRoundTripTimeSummary()");
    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRoundTripTimeSummary() failed to "
"locate channel");
return -1;
}
return channelPtr->GetRoundTripTimeSummary(delaysMs);
}
int VoECallReportImpl::GetDeadOrAliveSummary(int channel,
int& numOfDeadDetections,
int& numOfAliveDetections)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetDeadOrAliveSummary(channel=%d)", channel);
    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
return (GetDeadOrAliveSummaryInternal(channel, numOfDeadDetections,
numOfAliveDetections));
}
int VoECallReportImpl::GetDeadOrAliveSummaryInternal(int channel,
int& numOfDeadDetections,
int& numOfAliveDetections)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetDeadOrAliveSummary(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRoundTripTimeSummary() failed to "
"locate channel");
return -1;
}
return channelPtr->GetDeadOrAliveCounters(numOfDeadDetections,
numOfAliveDetections);
}
int VoECallReportImpl::WriteReportToFile(const char* fileNameUTF8)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"WriteReportToFile(fileNameUTF8=%s)", fileNameUTF8);
    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (NULL == fileNameUTF8)
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"WriteReportToFile() invalid filename");
return -1;
}
if (_file.Open())
{
_file.CloseFile();
}
// Open text file in write mode
if (_file.OpenFile(fileNameUTF8, false, false, true) != 0)
{
_engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
"WriteReportToFile() unable to open the "
"file");
return -1;
}
// Summarize information and add it to the open file
//
_file.WriteText("WebRtc VoiceEngine Call Report\n");
_file.WriteText("==============================\n");
_file.WriteText("\nNetwork Packet Round Trip Time (RTT)\n");
_file.WriteText("------------------------------------\n\n");
WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
if (numOfChannels <= 0)
{
return 0;
}
WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
_channelManager.GetChannelIds(channelsArray, numOfChannels);
for (int ch = 0; ch < numOfChannels; ch++)
{
voe::ScopedChannel sc(_channelManager, channelsArray[ch]);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr)
{
StatVal delaysMs;
_file.WriteText("channel %d:\n", ch);
channelPtr->GetRoundTripTimeSummary(delaysMs);
_file.WriteText(" min:%5d [ms]\n", delaysMs.min);
_file.WriteText(" max:%5d [ms]\n", delaysMs.max);
_file.WriteText(" avg:%5d [ms]\n", delaysMs.average);
}
}
_file.WriteText("\nDead-or-Alive Connection Detections\n");
_file.WriteText("------------------------------------\n\n");
for (int ch = 0; ch < numOfChannels; ch++)
{
voe::ScopedChannel sc(_channelManager, channelsArray[ch]);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr)
{
int nDead(0);
int nAlive(0);
_file.WriteText("channel %d:\n", ch);
GetDeadOrAliveSummary(ch, nDead, nAlive);
_file.WriteText(" #dead :%6d\n", nDead);
_file.WriteText(" #alive:%6d\n", nAlive);
}
}
delete[] channelsArray;
LevelStatistics stats;
GetSpeechAndNoiseSummary(stats);
_file.WriteText("\nLong-term Speech Levels\n");
_file.WriteText("-----------------------\n\n");
_file.WriteText("Transmitting side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.speech_tx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.speech_tx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.speech_tx.average);
_file.WriteText("\nReceiving side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.speech_rx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.speech_rx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.speech_rx.average);
_file.WriteText("\nLong-term Noise Levels\n");
_file.WriteText("----------------------\n\n");
_file.WriteText("Transmitting side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.noise_tx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.noise_tx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.noise_tx.average);
_file.WriteText("\nReceiving side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.noise_rx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.noise_rx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.noise_rx.average);
EchoStatistics echo;
GetEchoMetricSummary(echo);
_file.WriteText("\nEcho Metrics\n");
_file.WriteText("------------\n\n");
_file.WriteText("erl:\n");
_file.WriteText(" min:%5d [dB]\n", echo.erl.min);
_file.WriteText(" max:%5d [dB]\n", echo.erl.max);
_file.WriteText(" avg:%5d [dB]\n", echo.erl.average);
_file.WriteText("\nerle:\n");
_file.WriteText(" min:%5d [dB]\n", echo.erle.min);
_file.WriteText(" max:%5d [dB]\n", echo.erle.max);
_file.WriteText(" avg:%5d [dB]\n", echo.erle.average);
_file.WriteText("rerl:\n");
_file.WriteText(" min:%5d [dB]\n", echo.rerl.min);
_file.WriteText(" max:%5d [dB]\n", echo.rerl.max);
_file.WriteText(" avg:%5d [dB]\n", echo.rerl.average);
_file.WriteText("a_nlp:\n");
_file.WriteText(" min:%5d [dB]\n", echo.a_nlp.min);
_file.WriteText(" max:%5d [dB]\n", echo.a_nlp.max);
_file.WriteText(" avg:%5d [dB]\n", echo.a_nlp.average);
_file.WriteText("\n<END>");
_file.Flush();
_file.CloseFile();
return 0;
}
#endif // WEBRTC_VOICE_ENGINE_CALL_REPORT_API
} // namespace webrtc
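A short usage sketch for the call-report API implemented above, assuming an initialized engine with an active call on channel 0; the output file name is an arbitrary assumption:

    // VoECallReport* report = VoECallReport::GetInterface(voe);
    // EchoStatistics echo;
    // report->GetEchoMetricSummary(echo);         // min/max/avg ERL, ERLE, ...
    // StatVal rtt;
    // report->GetRoundTripTimeSummary(0, rtt);    // RTT summary for channel 0
    // report->WriteReportToFile("call_report.txt");
    // report->Release();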

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
#include "voe_call_report.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc
{
class FileWrapper;
class VoECallReportImpl: public virtual voe::SharedData,
public VoECallReport,
public voe::RefCount
{
public:
virtual int Release();
virtual int ResetCallReportStatistics(int channel);
virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats);
virtual int GetEchoMetricSummary(EchoStatistics& stats);
virtual int GetRoundTripTimeSummary(int channel,
StatVal& delaysMs);
virtual int GetDeadOrAliveSummary(int channel, int& numOfDeadDetections,
int& numOfAliveDetections);
virtual int WriteReportToFile(const char* fileNameUTF8);
protected:
VoECallReportImpl();
virtual ~VoECallReportImpl();
private:
int GetDeadOrAliveSummaryInternal(int channel,
int& numOfDeadDetections,
int& numOfAliveDetections);
int GetEchoMetricSummaryInternal(EchoStatistics& stats);
int GetSpeechAndNoiseSummaryInternal(LevelStatistics& stats);
FileWrapper& _file;
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H

View File

@ -1,717 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_codec_impl.h"
#include "audio_coding_module.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "trace.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc
{
VoECodec* VoECodec::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_CODEC_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoECodecImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
VoECodecImpl::VoECodecImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"VoECodecImpl() - ctor");
}
VoECodecImpl::~VoECodecImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"~VoECodecImpl() - dtor");
}
int VoECodecImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"VoECodecImpl::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset();
_engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"VoECodecImpl reference counter = %d", refCount);
return (refCount);
}
int VoECodecImpl::NumOfCodecs()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"NumOfCodecs()");
// Number of supported codecs in the ACM
WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"NumOfCodecs() => %u", nSupportedCodecs);
return (nSupportedCodecs);
}
int VoECodecImpl::GetCodec(int index, CodecInst& codec)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetCodec(index=%d, codec=?)", index);
CodecInst acmCodec;
if (AudioCodingModule::Codec(index, (CodecInst&) acmCodec)
== -1)
{
_engineStatistics.SetLastError(VE_INVALID_LISTNR, kTraceError,
"GetCodec() invalid index");
return -1;
}
ACMToExternalCodecRepresentation(codec, acmCodec);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"GetCodec() => plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
"channels=%d, rate=%d", codec.plname, codec.pacsize,
codec.plfreq, codec.pltype, codec.channels, codec.rate);
return 0;
}
int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec)
{
CodecInst copyCodec;
ExternalToACMCodecRepresentation(copyCodec, codec);
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetSendCodec(channel=%d, codec)", channel);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
"channels=%d, rate=%d", codec.plname, codec.pacsize,
codec.plfreq, codec.pltype, codec.channels, codec.rate);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
// External sanity checks performed outside the ACM
if ((STR_CASE_CMP(copyCodec.plname, "L16") == 0) &&
(copyCodec.pacsize >= 960))
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetSendCodec() invalid L16 packet "
"size");
return -1;
}
if (!STR_CASE_CMP(copyCodec.plname, "CN")
|| !STR_CASE_CMP(copyCodec.plname, "TELEPHONE-EVENT")
|| !STR_CASE_CMP(copyCodec.plname, "RED"))
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetSendCodec() invalid codec name");
return -1;
}
if (copyCodec.channels != 1)
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetSendCodec() invalid number of "
"channels");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetSendCodec() failed to locate "
"channel");
return -1;
}
if (!AudioCodingModule::IsCodecValid(
(CodecInst&) copyCodec))
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetSendCodec() invalid codec");
return -1;
}
if (channelPtr->SetSendCodec(copyCodec) != 0)
{
_engineStatistics.SetLastError(VE_CANNOT_SET_SEND_CODEC,
kTraceError,
"SetSendCodec() failed to set send "
"codec");
return -1;
}
return 0;
}
int VoECodecImpl::GetSendCodec(int channel, CodecInst& codec)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSendCodec(channel=%d, codec=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetSendCodec() failed to locate "
"channel");
return -1;
}
CodecInst acmCodec;
if (channelPtr->GetSendCodec(acmCodec) != 0)
{
_engineStatistics.SetLastError(VE_CANNOT_GET_SEND_CODEC, kTraceError,
"GetSendCodec() failed to get send "
"codec");
return -1;
}
ACMToExternalCodecRepresentation(codec, acmCodec);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"GetSendCodec() => plname=%s, pacsize=%d, plfreq=%d, "
"channels=%d, rate=%d", codec.plname, codec.pacsize,
codec.plfreq, codec.channels, codec.rate);
return 0;
}
int VoECodecImpl::GetRecCodec(int channel, CodecInst& codec)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetRecCodec(channel=%d, codec=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRecCodec() failed to locate "
"channel");
return -1;
}
CodecInst acmCodec;
if (channelPtr->GetRecCodec(acmCodec) != 0)
{
_engineStatistics.SetLastError(VE_CANNOT_GET_REC_CODEC, kTraceError,
"GetRecCodec() failed to get received "
"codec");
return -1;
}
ACMToExternalCodecRepresentation(codec, acmCodec);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"GetRecCodec() => plname=%s, pacsize=%d, plfreq=%d, "
"channels=%d, rate=%d", codec.plname, codec.pacsize,
codec.plfreq, codec.channels, codec.rate);
return 0;
}
int VoECodecImpl::SetAMREncFormat(int channel, AmrMode mode)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetAMREncFormat(channel=%d, mode=%d)", channel, mode);
#ifdef WEBRTC_CODEC_GSMAMR
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetAMREncFormat() failed to locate "
"channel");
return -1;
}
return channelPtr->SetAMREncFormat(mode);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetAMREncFormat() AMR codec is not "
"supported");
return -1;
#endif
}
int VoECodecImpl::SetAMRDecFormat(int channel, AmrMode mode)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetAMRDecFormat(channel=%i, mode=%i)", channel, mode);
#ifdef WEBRTC_CODEC_GSMAMR
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetAMRDecFormat() failed to locate "
"channel");
return -1;
}
return channelPtr->SetAMRDecFormat(mode);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetAMRDecFormat() AMR codec is not "
"supported");
return -1;
#endif
}
int VoECodecImpl::SetAMRWbEncFormat(int channel, AmrMode mode)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetAMRWbEncFormat(channel=%d, mode=%d)", channel, mode);
ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_CODEC_GSMAMRWB
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetAMRWbEncFormat() failed to locate "
"channel");
return -1;
}
return channelPtr->SetAMRWbEncFormat(mode);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetAMRWbEncFormat() AMR-wb codec is not "
"supported");
return -1;
#endif
}
int VoECodecImpl::SetAMRWbDecFormat(int channel, AmrMode mode)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetAMRWbDecFormat(channel=%i, mode=%i)", channel, mode);
ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_CODEC_GSMAMRWB
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetAMRWbDecFormat() failed to locate "
"channel");
return -1;
}
return channelPtr->SetAMRWbDecFormat(mode);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetAMRWbDecFormat() AMR-wb codec is not "
"supported");
return -1;
#endif
}
int VoECodecImpl::SetRecPayloadType(int channel, const CodecInst& codec)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetRecPayloadType(channel=%d, codec)", channel);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"codec: plname=%s, plfreq=%d, pltype=%d, channels=%u, "
"pacsize=%d, rate=%d", codec.plname, codec.plfreq, codec.pltype,
codec.channels, codec.pacsize, codec.rate);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRecPayloadType() failed to locate "
"channel");
return -1;
}
return channelPtr->SetRecPayloadType(codec);
}
int VoECodecImpl::GetRecPayloadType(int channel, CodecInst& codec)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetRecPayloadType(channel=%d, codec)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRecPayloadType() failed to locate "
"channel");
return -1;
}
return channelPtr->GetRecPayloadType(codec);
}
int VoECodecImpl::SetSendCNPayloadType(int channel, int type,
PayloadFrequencies frequency)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetSendCNPayloadType(channel=%d, type=%d, frequency=%d)",
channel, type, frequency);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (type < 96 || type > 127)
{
// Only allow dynamic range: 96 to 127
_engineStatistics.SetLastError(VE_INVALID_PLTYPE, kTraceError,
"SetSendCNPayloadType() invalid payload "
"type");
return -1;
}
if ((frequency != kFreq16000Hz) && (frequency != kFreq32000Hz))
{
// It is not possible to modify the payload type for CN/8000.
// We only allow modification of the CN payload type for CN/16000
// and CN/32000.
_engineStatistics.SetLastError(VE_INVALID_PLFREQ, kTraceError,
"SetSendCNPayloadType() invalid payload"
" frequency");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetSendCNPayloadType() failed to "
"locate channel");
return -1;
}
if (channelPtr->Sending())
{
_engineStatistics.SetLastError(VE_SENDING, kTraceError,
"SetSendCNPayloadType unable so set "
"payload type while sending");
return -1;
}
return channelPtr->SetSendCNPayloadType(type, frequency);
}
int VoECodecImpl::SetISACInitTargetRate(int channel, int rateBps,
bool useFixedFrameSize)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetISACInitTargetRate(channel=%d, rateBps=%d, "
"useFixedFrameSize=%d)", channel, rateBps, useFixedFrameSize);
ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_CODEC_ISAC
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetISACInitTargetRate() failed to "
"locate channel");
return -1;
}
return channelPtr->SetISACInitTargetRate(rateBps, useFixedFrameSize);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetISACInitTargetRate() iSAC codec is not "
"supported");
return -1;
#endif
}
int VoECodecImpl::SetISACMaxRate(int channel, int rateBps)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetISACMaxRate(channel=%d, rateBps=%d)", channel, rateBps);
ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_CODEC_ISAC
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetISACMaxRate() failed to locate "
"channel");
return -1;
}
return channelPtr->SetISACMaxRate(rateBps);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetISACMaxRate() iSAC codec is not "
"supported");
return -1;
#endif
}
int VoECodecImpl::SetISACMaxPayloadSize(int channel, int sizeBytes)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetISACMaxPayloadSize(channel=%d, sizeBytes=%d)", channel,
sizeBytes);
ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_CODEC_ISAC
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetISACMaxPayloadSize() failed to "
"locate channel");
return -1;
}
return channelPtr->SetISACMaxPayloadSize(sizeBytes);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetISACMaxPayloadSize() iSAC codec is not "
"supported");
return -1;
#endif
}
int VoECodecImpl::SetVADStatus(int channel, bool enable, VadModes mode,
bool disableDTX)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetVADStatus(channel=%i, enable=%i, mode=%i, disableDTX=%i)",
channel, enable, mode, disableDTX);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetVADStatus failed to locate channel");
return -1;
}
ACMVADMode vadMode(VADNormal);
switch (mode)
{
case kVadConventional:
vadMode = VADNormal;
break;
case kVadAggressiveLow:
vadMode = VADLowBitrate;
break;
case kVadAggressiveMid:
vadMode = VADAggr;
break;
case kVadAggressiveHigh:
vadMode = VADVeryAggr;
break;
default:
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetVADStatus() invalid VAD mode");
return -1;
}
return channelPtr->SetVADStatus(enable, vadMode, disableDTX);
}
int VoECodecImpl::GetVADStatus(int channel, bool& enabled, VadModes& mode,
bool& disabledDTX)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetVADStatus(channel=%i)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetVADStatus failed to locate channel");
return -1;
}
ACMVADMode vadMode;
int ret = channelPtr->GetVADStatus(enabled, vadMode, disabledDTX);
if (ret != 0)
{
_engineStatistics.SetLastError(VE_INVALID_OPERATION, kTraceError,
"GetVADStatus failed to get VAD mode");
return -1;
}
switch (vadMode)
{
case VADNormal:
mode = kVadConventional;
break;
case VADLowBitrate:
mode = kVadAggressiveLow;
break;
case VADAggr:
mode = kVadAggressiveMid;
break;
case VADVeryAggr:
mode = kVadAggressiveHigh;
break;
default:
_engineStatistics.SetLastError(VE_AUDIO_CODING_MODULE_ERROR,
kTraceError,
"GetVADStatus() invalid VAD mode");
return -1;
}
return 0;
}
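// Note on the SILK conversions below (a reading of the code, not a spec):
// the pacsize values differ because the ACM appears to count samples at
// 16 kHz for SILK 12 kHz and at 32 kHz for SILK 24 kHz, while the external
// representation counts samples at the codec's nominal rate. For example,
// 320 samples @ 16 kHz and 240 samples @ 12 kHz both describe a 20 ms packet.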
void VoECodecImpl::ACMToExternalCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst)
{
toInst = fromInst;
if (STR_CASE_CMP(fromInst.plname,"SILK") == 0)
{
if (fromInst.plfreq == 12000)
{
if (fromInst.pacsize == 320)
{
toInst.pacsize = 240;
}
else if (fromInst.pacsize == 640)
{
toInst.pacsize = 480;
}
else if (fromInst.pacsize == 960)
{
toInst.pacsize = 720;
}
}
else if (fromInst.plfreq == 24000)
{
if (fromInst.pacsize == 640)
{
toInst.pacsize = 480;
}
else if (fromInst.pacsize == 1280)
{
toInst.pacsize = 960;
}
else if (fromInst.pacsize == 1920)
{
toInst.pacsize = 1440;
}
}
}
}
void VoECodecImpl::ExternalToACMCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst)
{
toInst = fromInst;
if (STR_CASE_CMP(fromInst.plname,"SILK") == 0)
{
if (fromInst.plfreq == 12000)
{
if (fromInst.pacsize == 240)
{
toInst.pacsize = 320;
}
else if (fromInst.pacsize == 480)
{
toInst.pacsize = 640;
}
else if (fromInst.pacsize == 720)
{
toInst.pacsize = 960;
}
}
else if (fromInst.plfreq == 24000)
{
if (fromInst.pacsize == 480)
{
toInst.pacsize = 640;
}
else if (fromInst.pacsize == 960)
{
toInst.pacsize = 1280;
}
else if (fromInst.pacsize == 1440)
{
toInst.pacsize = 1920;
}
}
}
}
#endif // WEBRTC_VOICE_ENGINE_CODEC_API
} // namespace webrtc
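A hedged sketch of the usual enumerate-then-select pattern over this interface; the iSAC/16 kHz selection and the `ch` channel id are illustrative assumptions:

    // VoECodec* codec = VoECodec::GetInterface(voe);
    // CodecInst inst;
    // for (int i = 0; i < codec->NumOfCodecs(); i++)
    // {
    //     codec->GetCodec(i, inst);
    //     if (STR_CASE_CMP(inst.plname, "ISAC") == 0 && inst.plfreq == 16000)
    //     {
    //         codec->SetSendCodec(ch, inst);  // ch: an existing channel id
    //         break;
    //     }
    // }
    // codec->Release();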

View File

@ -1,92 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
#include "voe_codec.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc
{
class VoECodecImpl: public virtual voe::SharedData,
public VoECodec,
public voe::RefCount
{
public:
virtual int Release();
virtual int NumOfCodecs();
virtual int GetCodec(int index, CodecInst& codec);
virtual int SetSendCodec(int channel, const CodecInst& codec);
virtual int GetSendCodec(int channel, CodecInst& codec);
virtual int GetRecCodec(int channel, CodecInst& codec);
virtual int SetAMREncFormat(int channel,
AmrMode mode = kRfc3267BwEfficient);
virtual int SetAMRDecFormat(int channel,
AmrMode mode = kRfc3267BwEfficient);
virtual int SetAMRWbEncFormat(int channel,
AmrMode mode = kRfc3267BwEfficient);
virtual int SetAMRWbDecFormat(int channel,
AmrMode mode = kRfc3267BwEfficient);
virtual int SetSendCNPayloadType(
int channel, int type,
PayloadFrequencies frequency = kFreq16000Hz);
virtual int SetRecPayloadType(int channel,
const CodecInst& codec);
virtual int GetRecPayloadType(int channel, CodecInst& codec);
virtual int SetISACInitTargetRate(int channel,
int rateBps,
bool useFixedFrameSize = false);
virtual int SetISACMaxRate(int channel, int rateBps);
virtual int SetISACMaxPayloadSize(int channel, int sizeBytes);
virtual int SetVADStatus(int channel,
bool enable,
VadModes mode = kVadConventional,
bool disableDTX = false);
virtual int GetVADStatus(int channel,
bool& enabled,
VadModes& mode,
bool& disabledDTX);
protected:
VoECodecImpl();
virtual ~VoECodecImpl();
private:
void ACMToExternalCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst);
void ExternalToACMCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst);
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H

View File

@ -1,473 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_dtmf_impl.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "output_mixer.h"
#include "trace.h"
#include "transmit_mixer.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc {
VoEDtmf* VoEDtmf::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_DTMF_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoEDtmfImpl* d = s;
    (*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
VoEDtmfImpl::VoEDtmfImpl() :
_dtmfFeedback(true),
_dtmfDirectFeedback(false)
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1 ),
"VoEDtmfImpl::VoEDtmfImpl() - ctor");
}
VoEDtmfImpl::~VoEDtmfImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEDtmfImpl::~VoEDtmfImpl() - dtor");
}
int VoEDtmfImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"VoEDtmf::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset(); // reset reference counter to zero => OK to delete VE
_engineStatistics.SetLastError(
VE_INTERFACE_NOT_FOUND, kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"VoEDtmf reference counter = %d", refCount);
return (refCount);
}
int VoEDtmfImpl::SendTelephoneEvent(int channel,
unsigned char eventCode,
bool outOfBand,
int lengthMs,
int attenuationDb)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SendTelephoneEvent(channel=%d, eventCode=%d, outOfBand=%d,"
"length=%d, attenuationDb=%d)",
channel, eventCode, (int)outOfBand, lengthMs, attenuationDb);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SendTelephoneEvent() failed to locate channel");
return -1;
}
if (!channelPtr->Sending())
{
_engineStatistics.SetLastError(
VE_NOT_SENDING, kTraceError,
"SendTelephoneEvent() sending is not active");
return -1;
}
// Sanity check
const int maxEventCode = outOfBand ?
static_cast<int>(kMaxTelephoneEventCode) :
static_cast<int>(kMaxDtmfEventCode);
const bool testFailed = ((eventCode < 0) ||
(eventCode > maxEventCode) ||
(lengthMs < kMinTelephoneEventDuration) ||
(lengthMs > kMaxTelephoneEventDuration) ||
(attenuationDb < kMinTelephoneEventAttenuation) ||
(attenuationDb > kMaxTelephoneEventAttenuation));
if (testFailed)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SendTelephoneEvent() invalid parameter(s)");
return -1;
}
const bool isDtmf =
(eventCode >= 0) && (eventCode <= kMaxDtmfEventCode);
const bool playDtmfToneDirect =
isDtmf && (_dtmfFeedback && _dtmfDirectFeedback);
if (playDtmfToneDirect)
{
// Mute the microphone signal while playing back the tone directly.
// This is to reduce the risk of introducing echo from the added output.
_transmitMixerPtr->UpdateMuteMicrophoneTime(lengthMs);
// Play out local feedback tone directly (same approach for both inband
// and outband).
        // Reduce the length of the tone by 80 ms to reduce the risk of echo.
// For non-direct feedback, outband and inband cases are handled
// differently.
_outputMixerPtr->PlayDtmfTone(eventCode, lengthMs-80, attenuationDb);
}
if (outOfBand)
{
// The RTP/RTCP module will always deliver OnPlayTelephoneEvent when
// an event is transmitted. It is up to the VoE to utilize it or not.
// This flag ensures that feedback/playout is enabled; however, the
// channel object must still parse out the Dtmf events (0-15) from
// all possible events (0-255).
        const bool playDtmfEvent = (_dtmfFeedback && !_dtmfDirectFeedback);
        return channelPtr->SendTelephoneEventOutband(eventCode,
                                                     lengthMs,
                                                     attenuationDb,
                                                     playDtmfEvent);
}
else
{
// For Dtmf tones, we want to ensure that inband tones are played out
// in sync with the transmitted audio. This flag is utilized by the
        // channel object to determine if the queued Dtmf event shall also
        // be fed to the output mixer in the same step as input audio is
        // replaced by inband Dtmf tones.
        const bool playDtmfEvent =
            (isDtmf && _dtmfFeedback && !_dtmfDirectFeedback);
        return channelPtr->SendTelephoneEventInband(eventCode,
                                                    lengthMs,
                                                    attenuationDb,
                                                    playDtmfEvent);
}
}
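// Summary of the feedback handling above (derived from this code, not a
// normative description): with _dtmfFeedback && _dtmfDirectFeedback the tone
// is played out locally right away and the microphone is muted for the event
// duration; with feedback enabled but direct feedback disabled, the
// playDtmfEvent flag asks the channel to render the tone in sync with the
// transmitted audio instead.
//
// Illustrative call, assuming sending is active on `channel` (the duration
// and attenuation values are arbitrary examples):
// dtmf->SendTelephoneEvent(channel, 1, true, 160, 10);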
int VoEDtmfImpl::SetSendTelephoneEventPayloadType(int channel,
unsigned char type)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetSendTelephoneEventPayloadType(channel=%d, type=%u)",
channel, type);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetSendTelephoneEventPayloadType() failed to locate channel");
return -1;
}
return channelPtr->SetSendTelephoneEventPayloadType(type);
}
int VoEDtmfImpl::GetSendTelephoneEventPayloadType(int channel,
unsigned char& type)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSendTelephoneEventPayloadType(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetSendTelephoneEventPayloadType() failed to locate channel");
return -1;
}
return channelPtr->GetSendTelephoneEventPayloadType(type);
}
int VoEDtmfImpl::PlayDtmfTone(unsigned char eventCode,
int lengthMs,
int attenuationDb)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"PlayDtmfTone(eventCode=%d, lengthMs=%d, attenuationDb=%d)",
eventCode, lengthMs, attenuationDb);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (!_audioDevicePtr->Playing())
{
_engineStatistics.SetLastError(
VE_NOT_PLAYING, kTraceError,
"PlayDtmfTone() no channel is playing out");
return -1;
}
if ((eventCode < kMinDtmfEventCode) ||
(eventCode > kMaxDtmfEventCode) ||
(lengthMs < kMinTelephoneEventDuration) ||
(lengthMs > kMaxTelephoneEventDuration) ||
        (attenuationDb < kMinTelephoneEventAttenuation) ||
(attenuationDb > kMaxTelephoneEventAttenuation))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"PlayDtmfTone() invalid tone parameter(s)");
return -1;
}
return _outputMixerPtr->PlayDtmfTone(eventCode, lengthMs, attenuationDb);
}
int VoEDtmfImpl::StartPlayingDtmfTone(unsigned char eventCode,
int attenuationDb)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"StartPlayingDtmfTone(eventCode=%d, attenuationDb=%d)",
eventCode, attenuationDb);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (!_audioDevicePtr->Playing())
{
_engineStatistics.SetLastError(
VE_NOT_PLAYING, kTraceError,
"StartPlayingDtmfTone() no channel is playing out");
return -1;
}
if ((eventCode < kMinDtmfEventCode) ||
(eventCode > kMaxDtmfEventCode) ||
(attenuationDb < kMinTelephoneEventAttenuation) ||
(attenuationDb > kMaxTelephoneEventAttenuation))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"StartPlayingDtmfTone() invalid tone parameter(s)");
return -1;
}
return _outputMixerPtr->StartPlayingDtmfTone(eventCode, attenuationDb);
}
int VoEDtmfImpl::StopPlayingDtmfTone()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"StopPlayingDtmfTone()");
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
return _outputMixerPtr->StopPlayingDtmfTone();
}
int VoEDtmfImpl::RegisterTelephoneEventDetection(
int channel,
TelephoneEventDetectionMethods detectionMethod,
VoETelephoneEventObserver& observer)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"RegisterTelephoneEventDetection(channel=%d, detectionMethod=%d,"
"observer=0x%x)", channel, detectionMethod, &observer);
#ifdef WEBRTC_DTMF_DETECTION
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterTelephoneEventDetection() failed to locate channel");
return -1;
}
return channelPtr->RegisterTelephoneEventDetection(detectionMethod,
observer);
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetTelephoneEventDetectionStatus() Dtmf detection is not supported");
return -1;
#endif
}
int VoEDtmfImpl::DeRegisterTelephoneEventDetection(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"DeRegisterTelephoneEventDetection(channel=%d)", channel);
#ifdef WEBRTC_DTMF_DETECTION
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterTelephoneEventDe tection() failed to locate channel");
return -1;
}
return channelPtr->DeRegisterTelephoneEventDetection();
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"DeRegisterTelephoneEventDetection() Dtmf detection is not supported");
return -1;
#endif
}
int VoEDtmfImpl::GetTelephoneEventDetectionStatus(
int channel,
bool& enabled,
TelephoneEventDetectionMethods& detectionMethod)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetTelephoneEventDetectionStatus(channel=%d)", channel);
#ifdef WEBRTC_DTMF_DETECTION
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetTelephoneEventDetectionStatus() failed to locate channel");
return -1;
}
    return channelPtr->GetTelephoneEventDetectionStatus(enabled,
                                                        detectionMethod);
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetTelephoneEventDetectionStatus() Dtmf detection is not supported");
return -1;
#endif
}
int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetDtmfFeedbackStatus(enable=%d, directFeeback=%d)",
(int)enable, (int)directFeedback);
CriticalSectionScoped sc(*_apiCritPtr);
_dtmfFeedback = enable;
_dtmfDirectFeedback = directFeedback;
return 0;
}
int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetDtmfFeedbackStatus()");
CriticalSectionScoped sc(*_apiCritPtr);
enabled = _dtmfFeedback;
directFeedback = _dtmfDirectFeedback;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetDtmfFeedbackStatus() => enabled=%d, directFeedback=%d",
enabled, directFeedback);
return 0;
}
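// Usage sketch (illustrative only): enables local DTMF feedback so that
// sent events are also heard by the local user. The exact effect of
// directFeedback is assumed here from the flag name.
//
//   dtmf->SetDtmfFeedbackStatus(true, false);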
int VoEDtmfImpl::SetDtmfPlayoutStatus(int channel, bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetDtmfPlayoutStatus(channel=%d, enable=%d)",
channel, enable);
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetDtmfPlayoutStatus() failed to locate channel");
return -1;
}
return channelPtr->SetDtmfPlayoutStatus(enable);
}
int VoEDtmfImpl::GetDtmfPlayoutStatus(int channel, bool& enabled)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetDtmfPlayoutStatus(channel=%d, enabled=?)", channel);
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetDtmfPlayoutStatus() failed to locate channel");
return -1;
}
enabled = channelPtr->DtmfPlayoutStatus();
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetDtmfPlayoutStatus() => enabled=%d", enabled);
return 0;
}
#endif // #ifdef WEBRTC_VOICE_ENGINE_DTMF_API
} // namespace webrtc

View File

@ -1,83 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
#include "voe_dtmf.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc
{
class VoEDtmfImpl : public virtual voe::SharedData,
public VoEDtmf,
public voe::RefCount
{
public:
virtual int Release();
virtual int SendTelephoneEvent(
int channel,
unsigned char eventCode,
bool outOfBand = true,
int lengthMs = 160,
int attenuationDb = 10);
virtual int SetSendTelephoneEventPayloadType(int channel,
unsigned char type);
virtual int GetSendTelephoneEventPayloadType(int channel,
unsigned char& type);
virtual int SetDtmfFeedbackStatus(bool enable,
bool directFeedback = false);
virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback);
virtual int PlayDtmfTone(unsigned char eventCode,
int lengthMs = 200,
int attenuationDb = 10);
virtual int StartPlayingDtmfTone(unsigned char eventCode,
int attenuationDb = 10);
virtual int StopPlayingDtmfTone();
virtual int RegisterTelephoneEventDetection(
int channel,
TelephoneEventDetectionMethods detectionMethod,
VoETelephoneEventObserver& observer);
virtual int DeRegisterTelephoneEventDetection(int channel);
virtual int GetTelephoneEventDetectionStatus(
int channel,
bool& enabled,
TelephoneEventDetectionMethods& detectionMethod);
virtual int SetDtmfPlayoutStatus(int channel, bool enable);
virtual int GetDtmfPlayoutStatus(int channel, bool& enabled);
protected:
VoEDtmfImpl();
virtual ~VoEDtmfImpl();
private:
bool _dtmfFeedback;
bool _dtmfDirectFeedback;
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H

View File

@ -1,275 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_encryption_impl.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "trace.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc {
VoEEncryption* VoEEncryption::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoEEncryptionImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
VoEEncryptionImpl::VoEEncryptionImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEEncryptionImpl::VoEEncryptionImpl() - ctor");
}
VoEEncryptionImpl::~VoEEncryptionImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEEncryptionImpl::~VoEEncryptionImpl() - dtor");
}
int VoEEncryptionImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"VoEEncryption::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
// reset reference counter to zero => OK to delete VE
Reset();
_engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"VoEEncryption reference counter = %d", refCount);
return (refCount);
}
int VoEEncryptionImpl::EnableSRTPSend(
int channel,
CipherTypes cipherType,
int cipherKeyLength,
AuthenticationTypes authType,
int authKeyLength,
int authTagLength,
SecurityLevels level,
const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
bool useForRTCP)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"EnableSRTPSend(channel=%i, cipherType=%i, cipherKeyLength=%i,"
" authType=%i, authKeyLength=%i, authTagLength=%i, level=%i, "
"key=?, useForRTCP=%d)",
channel, cipherType, cipherKeyLength, authType,
authKeyLength, authTagLength, level, useForRTCP);
#ifdef WEBRTC_SRTP
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"EnableSRTPSend() failed to locate channel");
return -1;
}
return channelPtr->EnableSRTPSend(cipherType,
cipherKeyLength,
authType,
authKeyLength,
authTagLength,
level,
key,
useForRTCP);
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"EnableSRTPSend() SRTP is not supported");
return -1;
#endif
}
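// Usage sketch (illustrative only): enables SRTP protection on the
// sending side. Requires a build with WEBRTC_SRTP defined; the enum
// values and key/tag lengths below are assumptions based on
// common_types.h and must match the receiving side.
//
//   unsigned char key[kVoiceEngineMaxSrtpKeyLength];  // filled by the app
//   VoEEncryption* crypto = VoEEncryption::GetInterface(voe);
//   crypto->EnableSRTPSend(channel, kCipherAes128CounterMode, 30,
//                          kAuthHmacSha1, 20, 4,
//                          kEncryptionAndAuthentication, key, true);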
int VoEEncryptionImpl::DisableSRTPSend(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"DisableSRTPSend(channel=%i)",channel);
#ifdef WEBRTC_SRTP
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DisableSRTPSend() failed to locate channel");
return -1;
}
return channelPtr->DisableSRTPSend();
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"DisableSRTPSend() SRTP is not supported");
return -1;
#endif
}
int VoEEncryptionImpl::EnableSRTPReceive(
int channel,
CipherTypes cipherType,
int cipherKeyLength,
AuthenticationTypes authType,
int authKeyLength,
int authTagLength,
SecurityLevels level,
const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
bool useForRTCP)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"EnableSRTPReceive(channel=%i, cipherType=%i, "
"cipherKeyLength=%i, authType=%i, authKeyLength=%i, "
"authTagLength=%i, level=%i, key=?, useForRTCP=%d)",
channel, cipherType, cipherKeyLength, authType,
authKeyLength, authTagLength, level, useForRTCP);
#ifdef WEBRTC_SRTP
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"EnableSRTPReceive() failed to locate channel");
return -1;
}
return channelPtr->EnableSRTPReceive(cipherType,
cipherKeyLength,
authType,
authKeyLength,
authTagLength,
level,
key,
useForRTCP);
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"EnableSRTPReceive() SRTP is not supported");
return -1;
#endif
}
int VoEEncryptionImpl::DisableSRTPReceive(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"DisableSRTPReceive(channel=%i)", channel);
#ifdef WEBRTC_SRTP
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DisableSRTPReceive() failed to locate channel");
return -1;
}
return channelPtr->DisableSRTPReceive();
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"DisableSRTPReceive() SRTP is not supported");
return -1;
#endif
}
int VoEEncryptionImpl::RegisterExternalEncryption(int channel,
Encryption& encryption)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"RegisterExternalEncryption(channel=%d, encryption=0x%x)",
channel, &encryption);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterExternalEncryption() failed to locate channel");
return -1;
}
return channelPtr->RegisterExternalEncryption(encryption);
}
int VoEEncryptionImpl::DeRegisterExternalEncryption(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"DeRegisterExternalEncryption(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterExternalEncryption() failed to locate channel");
return -1;
}
return channelPtr->DeRegisterExternalEncryption();
}
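// Usage sketch (illustrative only): routes RTP/RTCP through an
// application-supplied encryption object instead of SRTP.
// "MyEncryption" is a hypothetical subclass of webrtc::Encryption.
//
//   MyEncryption myCrypto;
//   crypto->RegisterExternalEncryption(channel, myCrypto);
//   // ...
//   crypto->DeRegisterExternalEncryption(channel);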
#endif // #ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
// EOF
} // namespace webrtc

View File

@ -1,70 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
#include "voe_encryption.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc {
class VoEEncryptionImpl : public virtual voe::SharedData,
public VoEEncryption,
public voe::RefCount
{
public:
virtual int Release();
// SRTP
virtual int EnableSRTPSend(
int channel,
CipherTypes cipherType,
int cipherKeyLength,
AuthenticationTypes authType,
int authKeyLength,
int authTagLength,
SecurityLevels level,
const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
bool useForRTCP = false);
virtual int DisableSRTPSend(int channel);
virtual int EnableSRTPReceive(
int channel,
CipherTypes cipherType,
int cipherKeyLength,
AuthenticationTypes authType,
int authKeyLength,
int authTagLength,
SecurityLevels level,
const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
bool useForRTCP = false);
virtual int DisableSRTPReceive(int channel);
// External encryption
virtual int RegisterExternalEncryption(
int channel,
Encryption& encryption);
virtual int DeRegisterExternalEncryption(int channel);
protected:
VoEEncryptionImpl();
virtual ~VoEEncryptionImpl();
};
} // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H

View File

@ -1,406 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_external_media_impl.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "output_mixer.h"
#include "trace.h"
#include "transmit_mixer.h"
#include "voice_engine_impl.h"
#include "voe_errors.h"
namespace webrtc {
VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoEExternalMediaImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
VoEExternalMediaImpl::VoEExternalMediaImpl()
: playout_delay_ms_(0)
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEExternalMediaImpl() - ctor");
}
VoEExternalMediaImpl::~VoEExternalMediaImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"~VoEExternalMediaImpl() - dtor");
}
int VoEExternalMediaImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"VoEExternalMedia::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset();
_engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"VoEExternalMedia reference counter = %d", refCount);
return (refCount);
}
int VoEExternalMediaImpl::RegisterExternalMediaProcessing(
int channel,
ProcessingTypes type,
VoEMediaProcess& processObject)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"RegisterExternalMediaProcessing(channel=%d, type=%d, "
"processObject=0x%x)", channel, type, &processObject);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
switch (type)
{
case kPlaybackPerChannel:
case kRecordingPerChannel:
{
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterExternalMediaProcessing() "
"failed to locate channel");
return -1;
}
return channelPtr->RegisterExternalMediaProcessing(type,
processObject);
}
case kPlaybackAllChannelsMixed:
{
return _outputMixerPtr->RegisterExternalMediaProcessing(
processObject);
}
case kRecordingAllChannelsMixed:
{
return _transmitMixerPtr->RegisterExternalMediaProcessing(
processObject);
}
default:
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"RegisterExternalMediaProcessing() invalid process type");
return -1;
}
}
return 0;
}
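// Usage sketch (illustrative only): taps the mixed playout signal.
// "MyProcess" is a hypothetical VoEMediaProcess subclass; for the
// *AllChannelsMixed types the channel argument is ignored by the
// switch above.
//
//   MyProcess proc;
//   VoEExternalMedia* xmedia = VoEExternalMedia::GetInterface(voe);
//   xmedia->RegisterExternalMediaProcessing(-1, kPlaybackAllChannelsMixed,
//                                           proc);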
int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing(
int channel,
ProcessingTypes type)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"DeRegisterExternalMediaProcessing(channel=%d)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
switch (type)
{
case kPlaybackPerChannel:
case kRecordingPerChannel:
{
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterExternalMediaProcessing() "
"failed to locate channel");
return -1;
}
return channelPtr->DeRegisterExternalMediaProcessing(type);
}
case kPlaybackAllChannelsMixed:
{
return _outputMixerPtr->DeRegisterExternalMediaProcessing();
}
case kRecordingAllChannelsMixed:
{
return _transmitMixerPtr->DeRegisterExternalMediaProcessing();
}
default:
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"RegisterExternalMediaProcessing() invalid process type");
return -1;
}
}
}
int VoEExternalMediaImpl::SetExternalRecordingStatus(bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetExternalRecordingStatus(enable=%d)", enable);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
if (_audioDevicePtr->Recording())
{
_engineStatistics.SetLastError(
VE_ALREADY_SENDING,
kTraceError,
"SetExternalRecordingStatus() cannot set state while sending");
return -1;
}
_externalRecording = enable;
return 0;
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED,
kTraceError,
"SetExternalRecordingStatus() external recording is not supported");
return -1;
#endif
}
int VoEExternalMediaImpl::ExternalRecordingInsertData(
const WebRtc_Word16 speechData10ms[],
int lengthSamples,
int samplingFreqHz,
int current_delay_ms)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"ExternalRecordingInsertData(speechData10ms=0x%x,"
" lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)",
&speechData10ms[0], lengthSamples, samplingFreqHz,
current_delay_ms);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (!_externalRecording)
{
_engineStatistics.SetLastError(
VE_INVALID_OPERATION,
kTraceError,
"ExternalRecordingInsertData() external recording is not enabled");
return -1;
}
if (NumOfSendingChannels() == 0)
{
_engineStatistics.SetLastError(
VE_ALREADY_SENDING,
kTraceError,
"SetExternalRecordingStatus() no channel is sending");
return -1;
}
if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
(48000 != samplingFreqHz) && (44000 != samplingFreqHz))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT,
kTraceError,
"SetExternalRecordingStatus() invalid sample rate");
return -1;
}
if ((0 == lengthSamples) ||
((lengthSamples % (samplingFreqHz / 100)) != 0))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT,
kTraceError,
"SetExternalRecordingStatus() invalid buffer size");
return -1;
}
if (current_delay_ms < 0)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT,
kTraceError,
"SetExternalRecordingStatus() invalid delay)");
return -1;
}
WebRtc_UWord16 blockSize = samplingFreqHz / 100;
WebRtc_UWord32 nBlocks = lengthSamples / blockSize;
WebRtc_Word16 totalDelayMS = 0;
WebRtc_UWord16 playoutDelayMS = 0;
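    // Example: a 480-sample buffer at 16000 Hz gives blockSize = 160
    // samples (10 ms) and nBlocks = 3; each block is demuxed, mixed and
    // encoded separately below.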
for (WebRtc_UWord32 i = 0; i < nBlocks; i++)
{
if (!_externalPlayout)
{
// Use real playout delay if external playout is not enabled.
_audioDevicePtr->PlayoutDelay(&playoutDelayMS);
totalDelayMS = current_delay_ms + playoutDelayMS;
}
else
{
// Use stored delay value given the last call
// to ExternalPlayoutGetData.
totalDelayMS = current_delay_ms + playout_delay_ms_;
// Compensate for block sizes larger than 10ms
totalDelayMS -= (WebRtc_Word16)(i*10);
if (totalDelayMS < 0)
totalDelayMS = 0;
}
_transmitMixerPtr->PrepareDemux(
(const WebRtc_Word8*)(&speechData10ms[i*blockSize]),
blockSize,
1,
samplingFreqHz,
totalDelayMS,
0,
0);
_transmitMixerPtr->DemuxAndMix();
_transmitMixerPtr->EncodeAndSend();
}
return 0;
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED,
kTraceError,
"ExternalRecordingInsertData() external recording is not supported");
return -1;
#endif
}
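// Usage sketch (illustrative only): feeds application-captured audio
// into VoE. External recording must be enabled before the ADM starts
// recording, a channel must be sending, and buffers must be multiples
// of 10 ms (160 samples == 10 ms at 16 kHz).
//
//   xmedia->SetExternalRecordingStatus(true);
//   // ... start sending via VoEBase ...
//   xmedia->ExternalRecordingInsertData(pcm, 160, 16000, 0);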
int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetExternalPlayoutStatus(enable=%d)", enable);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
if (_audioDevicePtr->Playing())
{
_engineStatistics.SetLastError(
VE_ALREADY_SENDING,
kTraceError,
"SetExternalPlayoutStatus() cannot set state while playing");
return -1;
}
_externalPlayout = enable;
return 0;
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED,
kTraceError,
"SetExternalPlayoutStatus() external playout is not supported");
return -1;
#endif
}
int VoEExternalMediaImpl::ExternalPlayoutGetData(
WebRtc_Word16 speechData10ms[],
int samplingFreqHz,
int current_delay_ms,
int& lengthSamples)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
"ExternalPlayoutGetData(speechData10ms=0x%x, samplingFreqHz=%d"
", current_delay_ms=%d)", &speechData10ms[0], samplingFreqHz,
current_delay_ms);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (!_externalPlayout)
{
_engineStatistics.SetLastError(
VE_INVALID_OPERATION,
kTraceError,
"ExternalPlayoutGetData() external playout is not enabled");
return -1;
}
if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
(48000 != samplingFreqHz) && (44000 != samplingFreqHz))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT,
kTraceError,
"ExternalPlayoutGetData() invalid sample rate");
return -1;
}
if (current_delay_ms < 0)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT,
kTraceError,
"ExternalPlayoutGetData() invalid delay)");
return -1;
}
AudioFrame audioFrame;
// Retrieve mixed output at the specified rate
_outputMixerPtr->MixActiveChannels();
_outputMixerPtr->DoOperationsOnCombinedSignal();
_outputMixerPtr->GetMixedAudio(samplingFreqHz, 1, audioFrame);
// Deliver audio (PCM) samples to the external sink
memcpy(speechData10ms,
audioFrame._payloadData,
sizeof(WebRtc_Word16)*(audioFrame._payloadDataLengthInSamples));
lengthSamples = audioFrame._payloadDataLengthInSamples;
// Store current playout delay (to be used by ExternalRecordingInsertData).
playout_delay_ms_ = current_delay_ms;
return 0;
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED,
kTraceError,
"ExternalPlayoutGetData() external playout is not supported");
return -1;
#endif
}
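// Usage sketch (illustrative only): pulls mixed playout audio every
// 10 ms instead of rendering through a sound card; the frame size
// assumes 16 kHz mono.
//
//   xmedia->SetExternalPlayoutStatus(true);
//   // ... start playout via VoEBase ...
//   WebRtc_Word16 frame[160];
//   int lengthSamples(0);
//   xmedia->ExternalPlayoutGetData(frame, 16000, 0, lengthSamples);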
#endif // WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
} // namespace webrtc

View File

@ -1,62 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
#include "voe_external_media.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc {
class VoEExternalMediaImpl : public virtual voe::SharedData,
public VoEExternalMedia,
public voe::RefCount
{
public:
virtual int Release();
virtual int RegisterExternalMediaProcessing(
int channel,
ProcessingTypes type,
VoEMediaProcess& processObject);
virtual int DeRegisterExternalMediaProcessing(
int channel,
ProcessingTypes type);
virtual int SetExternalRecordingStatus(bool enable);
virtual int SetExternalPlayoutStatus(bool enable);
virtual int ExternalRecordingInsertData(
const WebRtc_Word16 speechData10ms[],
int lengthSamples,
int samplingFreqHz,
int current_delay_ms);
virtual int ExternalPlayoutGetData(WebRtc_Word16 speechData10ms[],
int samplingFreqHz,
int current_delay_ms,
int& lengthSamples);
protected:
VoEExternalMediaImpl();
virtual ~VoEExternalMediaImpl();
private:
int playout_delay_ms_;
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H

File diff suppressed because it is too large

View File

@ -1,143 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
#include "voe_file.h"
#include "shared_data.h"
#include "ref_count.h"
namespace webrtc {
class VoEFileImpl : public virtual voe::SharedData,
public VoEFile, public voe::RefCount
{
public:
virtual int Release();
// Playout file locally
virtual int StartPlayingFileLocally(
int channel,
const char fileNameUTF8[1024],
bool loop = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0,
int startPointMs = 0,
int stopPointMs = 0);
virtual int StartPlayingFileLocally(
int channel,
InStream* stream,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0,
int startPointMs = 0, int stopPointMs = 0);
virtual int StopPlayingFileLocally(int channel);
virtual int IsPlayingFileLocally(int channel);
virtual int ScaleLocalFilePlayout(int channel, float scale);
// Use file as microphone input
virtual int StartPlayingFileAsMicrophone(
int channel,
const char fileNameUTF8[1024],
        bool loop = false,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0);
virtual int StartPlayingFileAsMicrophone(
int channel,
InStream* stream,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0);
virtual int StopPlayingFileAsMicrophone(int channel);
virtual int IsPlayingFileAsMicrophone(int channel);
virtual int ScaleFileAsMicrophonePlayout(int channel, float scale);
// Record speaker signal to file
virtual int StartRecordingPlayout(int channel,
const char* fileNameUTF8,
CodecInst* compression = NULL,
int maxSizeBytes = -1);
virtual int StartRecordingPlayout(int channel,
OutStream* stream,
CodecInst* compression = NULL);
virtual int StopRecordingPlayout(int channel);
// Record microphone signal to file
virtual int StartRecordingMicrophone(const char* fileNameUTF8,
CodecInst* compression = NULL,
int maxSizeBytes = -1);
virtual int StartRecordingMicrophone(OutStream* stream,
CodecInst* compression = NULL);
virtual int StopRecordingMicrophone();
// Conversion between different file formats
virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
const char* fileNameOutUTF8);
virtual int ConvertPCMToWAV(InStream* streamIn,
OutStream* streamOut);
virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
const char* fileNameOutUTF8);
virtual int ConvertWAVToPCM(InStream* streamIn,
OutStream* streamOut);
virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
const char* fileNameOutUTF8,
CodecInst* compression);
virtual int ConvertPCMToCompressed(InStream* streamIn,
OutStream* streamOut,
CodecInst* compression);
virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
const char* fileNameOutUTF8);
virtual int ConvertCompressedToPCM(InStream* streamIn,
OutStream* streamOut);
// Misc file functions
virtual int GetFileDuration(
const char* fileNameUTF8,
int& durationMs,
FileFormats format = kFileFormatPcm16kHzFile);
virtual int GetPlaybackPosition(int channel, int& positionMs);
protected:
VoEFileImpl();
virtual ~VoEFileImpl();
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H

View File

@ -1,820 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_hardware_impl.h"
#include <cassert>
#include "cpu_wrapper.h"
#include "critical_section_wrapper.h"
#include "trace.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc
{
VoEHardware* VoEHardware::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoEHardwareImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
VoEHardwareImpl::VoEHardwareImpl() :
_cpu(NULL)
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"VoEHardwareImpl() - ctor");
_cpu = CpuWrapper::CreateCpu();
if (_cpu)
{
_cpu->CpuUsage(); // init cpu usage
}
}
VoEHardwareImpl::~VoEHardwareImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"~VoEHardwareImpl() - dtor");
if (_cpu)
{
delete _cpu;
_cpu = NULL;
}
}
int VoEHardwareImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"VoEHardwareImpl::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset();
_engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"VoEHardwareImpl reference counter = %d", refCount);
return (refCount);
}
int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetAudioDeviceLayer(audioLayer=%d)", audioLayer);
// Don't allow a change if VoE is initialized
if (_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_ALREADY_INITED, kTraceError);
return -1;
}
// Map to AudioDeviceModule::AudioLayer
AudioDeviceModule::AudioLayer
wantedLayer(AudioDeviceModule::kPlatformDefaultAudio);
switch (audioLayer)
{
case kAudioPlatformDefault:
// already set above
break;
case kAudioWindowsCore:
wantedLayer = AudioDeviceModule::kWindowsCoreAudio;
break;
case kAudioWindowsWave:
wantedLayer = AudioDeviceModule::kWindowsWaveAudio;
break;
case kAudioLinuxAlsa:
wantedLayer = AudioDeviceModule::kLinuxAlsaAudio;
break;
case kAudioLinuxPulse:
wantedLayer = AudioDeviceModule::kLinuxPulseAudio;
break;
default:
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
" invalid audio layer");
return -1;
}
// Save the audio device layer for Init()
_audioDeviceLayer = wantedLayer;
return 0;
}
int VoEHardwareImpl::GetAudioDeviceLayer(AudioLayers& audioLayer)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetAudioDeviceLayer(devices=?)");
// Can always be called regardless of VoE state
AudioDeviceModule::AudioLayer
activeLayer(AudioDeviceModule::kPlatformDefaultAudio);
if (_audioDevicePtr)
{
// Get active audio layer from ADM
if (_audioDevicePtr->ActiveAudioLayer(&activeLayer) != 0)
{
_engineStatistics.SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
" Audio Device error");
return -1;
}
}
else
{
// Return VoE's internal layer setting
activeLayer = _audioDeviceLayer;
}
// Map to AudioLayers
switch (activeLayer)
{
case AudioDeviceModule::kPlatformDefaultAudio:
audioLayer = kAudioPlatformDefault;
break;
case AudioDeviceModule::kWindowsCoreAudio:
audioLayer = kAudioWindowsCore;
break;
case AudioDeviceModule::kWindowsWaveAudio:
audioLayer = kAudioWindowsWave;
break;
case AudioDeviceModule::kLinuxAlsaAudio:
audioLayer = kAudioLinuxAlsa;
break;
case AudioDeviceModule::kLinuxPulseAudio:
audioLayer = kAudioLinuxPulse;
break;
default:
_engineStatistics.SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
" unknown audio layer");
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: audioLayer=%d", audioLayer);
return 0;
}
int VoEHardwareImpl::GetNumOfRecordingDevices(int& devices)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetNumOfRecordingDevices(devices=?)");
    ANDROID_NOT_SUPPORTED();
    IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
devices = static_cast<int> (_audioDevicePtr->RecordingDevices());
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: devices=%d", devices);
return 0;
}
int VoEHardwareImpl::GetNumOfPlayoutDevices(int& devices)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetNumOfPlayoutDevices(devices=?)");
    ANDROID_NOT_SUPPORTED();
    IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
devices = static_cast<int> (_audioDevicePtr->PlayoutDevices());
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: devices=%d", devices);
return 0;
}
int VoEHardwareImpl::GetRecordingDeviceName(int index,
char strNameUTF8[128],
char strGuidUTF8[128])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetRecordingDeviceName(index=%d)", index);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (strNameUTF8 == NULL)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"GetRecordingDeviceName() invalid argument");
return -1;
}
// Note that strGuidUTF8 is allowed to be NULL
// Init len variable to length of supplied vectors
const WebRtc_UWord16 strLen = 128;
// Check if length has been changed in module
assert(strLen == kAdmMaxDeviceNameSize);
assert(strLen == kAdmMaxGuidSize);
WebRtc_Word8 name[strLen];
WebRtc_Word8 guid[strLen];
// Get names from module
if (_audioDevicePtr->RecordingDeviceName(index, name, guid) != 0)
{
_engineStatistics.SetLastError(
VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
"GetRecordingDeviceName() failed to get device name");
return -1;
}
// Copy to vectors supplied by user
strncpy(strNameUTF8, name, strLen);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: strNameUTF8=%s", strNameUTF8);
if (strGuidUTF8 != NULL)
{
        strncpy(strGuidUTF8, guid, strLen);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: strGuidUTF8=%s", strGuidUTF8);
}
return 0;
}
int VoEHardwareImpl::GetPlayoutDeviceName(int index,
char strNameUTF8[128],
char strGuidUTF8[128])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetPlayoutDeviceName(index=%d)", index);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (strNameUTF8 == NULL)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"GetPlayoutDeviceName() invalid argument");
return -1;
}
// Note that strGuidUTF8 is allowed to be NULL
// Init len variable to length of supplied vectors
const WebRtc_UWord16 strLen = 128;
// Check if length has been changed in module
assert(strLen == kAdmMaxDeviceNameSize);
assert(strLen == kAdmMaxGuidSize);
WebRtc_Word8 name[strLen];
WebRtc_Word8 guid[strLen];
// Get names from module
if (_audioDevicePtr->PlayoutDeviceName(index, name, guid) != 0)
{
_engineStatistics.SetLastError(
VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
"GetPlayoutDeviceName() failed to get device name");
return -1;
}
// Copy to vectors supplied by user
strncpy(strNameUTF8, name, strLen);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: strNameUTF8=%s", strNameUTF8);
if (strGuidUTF8 != NULL)
{
        strncpy(strGuidUTF8, guid, strLen);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: strGuidUTF8=%s", strGuidUTF8);
}
return 0;
}
int VoEHardwareImpl::SetRecordingDevice(int index,
StereoChannel recordingChannel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetRecordingDevice(index=%d, recordingChannel=%d)",
index, (int) recordingChannel);
CriticalSectionScoped cs(*_apiCritPtr);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
bool isRecording(false);
// Store state about activated recording to be able to restore it after the
// recording device has been modified.
if (_audioDevicePtr->Recording())
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"SetRecordingDevice() device is modified while recording"
" is active...");
isRecording = true;
if (_audioDevicePtr->StopRecording() == -1)
{
_engineStatistics.SetLastError(
VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetRecordingDevice() unable to stop recording");
return -1;
}
}
// We let the module do the index sanity
// Set recording channel
AudioDeviceModule::ChannelType recCh =
AudioDeviceModule::kChannelBoth;
switch (recordingChannel)
{
case kStereoLeft:
recCh = AudioDeviceModule::kChannelLeft;
break;
case kStereoRight:
recCh = AudioDeviceModule::kChannelRight;
break;
case kStereoBoth:
// default setting kChannelBoth (<=> mono)
break;
default:
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetRecordingDevice() unknown recording channel");
return -1;
}
// Cannot return error because of sanity above
_audioDevicePtr->RecordingChannel(&recCh);
// Map indices to unsigned since underlying functions need that
WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index);
WebRtc_Word32 res(0);
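    // Index -1 selects the platform default communication device and
    // -2 the default device; any other index is passed to the ADM as-is.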
if (index == -1)
{
res = _audioDevicePtr->SetRecordingDevice(
AudioDeviceModule::kDefaultCommunicationDevice);
}
else if (index == -2)
{
res = _audioDevicePtr->SetRecordingDevice(
AudioDeviceModule::kDefaultDevice);
}
else
{
res = _audioDevicePtr->SetRecordingDevice(indexU);
}
if (res != 0)
{
_engineStatistics.SetLastError(
VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetRecordingDevice() unable to set the recording device");
return -1;
}
// Init microphone, so user can do volume settings etc
if (_audioDevicePtr->InitMicrophone() == -1)
{
_engineStatistics.SetLastError(
VE_CANNOT_ACCESS_MIC_VOL, kTraceWarning,
"SetRecordingDevice() cannot access microphone");
}
// Set number of channels
bool available(false);
_audioDevicePtr->StereoRecordingIsAvailable(&available);
    if (_audioDevicePtr->SetStereoRecording(available) != 0)
{
_engineStatistics.SetLastError(
VE_SOUNDCARD_ERROR, kTraceWarning,
"SetRecordingDevice() failed to set mono recording mode");
}
// Restore recording if it was enabled already when calling this function.
if (isRecording)
{
if (!_externalRecording)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"SetRecordingDevice() recording is now being "
"restored...");
if (_audioDevicePtr->InitRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_instanceId, -1),
"SetRecordingDevice() failed to initialize "
"recording");
return -1;
}
if (_audioDevicePtr->StartRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_instanceId, -1),
"SetRecordingDevice() failed to start recording");
return -1;
}
}
}
return 0;
}
int VoEHardwareImpl::SetPlayoutDevice(int index)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetPlayoutDevice(index=%d)", index);
CriticalSectionScoped cs(*_apiCritPtr);
    ANDROID_NOT_SUPPORTED();
    IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
bool isPlaying(false);
// Store state about activated playout to be able to restore it after the
// playout device has been modified.
if (_audioDevicePtr->Playing())
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"SetPlayoutDevice() device is modified while playout is "
"active...");
isPlaying = true;
if (_audioDevicePtr->StopPlayout() == -1)
{
_engineStatistics.SetLastError(
VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetPlayoutDevice() unable to stop playout");
return -1;
}
}
// We let the module do the index sanity
// Map indices to unsigned since underlying functions need that
WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index);
WebRtc_Word32 res(0);
if (index == -1)
{
res = _audioDevicePtr->SetPlayoutDevice(
AudioDeviceModule::kDefaultCommunicationDevice);
}
else if (index == -2)
{
res = _audioDevicePtr->SetPlayoutDevice(
AudioDeviceModule::kDefaultDevice);
}
else
{
res = _audioDevicePtr->SetPlayoutDevice(indexU);
}
if (res != 0)
{
_engineStatistics.SetLastError(
VE_SOUNDCARD_ERROR, kTraceError,
"SetPlayoutDevice() unable to set the playout device");
return -1;
}
// Init speaker, so user can do volume settings etc
if (_audioDevicePtr->InitSpeaker() == -1)
{
_engineStatistics.SetLastError(
VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceWarning,
"SetPlayoutDevice() cannot access speaker");
}
// Set number of channels
bool available(false);
_audioDevicePtr->StereoPlayoutIsAvailable(&available);
    if (_audioDevicePtr->SetStereoPlayout(available) != 0)
{
_engineStatistics.SetLastError(
VE_SOUNDCARD_ERROR, kTraceWarning,
"SetPlayoutDevice() failed to set stereo playout mode");
}
// Restore playout if it was enabled already when calling this function.
if (isPlaying)
{
if (!_externalPlayout)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"SetPlayoutDevice() playout is now being restored...");
if (_audioDevicePtr->InitPlayout() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_instanceId, -1),
"SetPlayoutDevice() failed to initialize playout");
return -1;
}
if (_audioDevicePtr->StartPlayout() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_instanceId, -1),
"SetPlayoutDevice() failed to start playout");
return -1;
}
}
}
return 0;
}
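// Usage sketch (illustrative only; error checking omitted): enumerates
// the playout devices and selects the first one.
//
//   VoEHardware* hw = VoEHardware::GetInterface(voe);
//   int devices(0);
//   hw->GetNumOfPlayoutDevices(devices);
//   char name[128], guid[128];
//   for (int i = 0; i < devices; i++)
//       hw->GetPlayoutDeviceName(i, name, guid);
//   hw->SetPlayoutDevice(0);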
int VoEHardwareImpl::GetRecordingDeviceStatus(bool& isAvailable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetRecordingDeviceStatus()");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
// We let the module do isRecording sanity
bool available(false);
// Check availability
if (_audioDevicePtr->RecordingIsAvailable(&available) != 0)
{
_engineStatistics.SetLastError(VE_UNDEFINED_SC_REC_ERR, kTraceError,
" Audio Device error");
return -1;
}
isAvailable = available;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: isAvailable = %d)", (int) isAvailable);
return 0;
}
int VoEHardwareImpl::GetPlayoutDeviceStatus(bool& isAvailable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetPlayoutDeviceStatus()");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
// We let the module do isPlaying sanity
bool available(false);
// Check availability
if (_audioDevicePtr->PlayoutIsAvailable(&available) != 0)
{
_engineStatistics.SetLastError(VE_PLAY_UNDEFINED_SC_ERR,
kTraceError, " Audio Device error");
return -1;
}
isAvailable = available;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: isAvailable = %d)", (int) isAvailable);
return 0;
}
int VoEHardwareImpl::ResetAudioDevice()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"ResetAudioDevice()");
ANDROID_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
#if defined(MAC_IPHONE)
if (_audioDevicePtr->ResetAudioDevice() < 0)
{
_engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
" Failed to reset sound device");
return -1;
}
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
" no support for resetting sound device");
return -1;
#endif
return 0;
}
int VoEHardwareImpl::AudioDeviceControl(unsigned int par1, unsigned int par2,
unsigned int par3)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"AudioDeviceControl(%i, %i, %i)", par1, par2, par3);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
" no support for resetting sound device");
return -1;
}
int VoEHardwareImpl::SetLoudspeakerStatus(bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetLoudspeakerStatus(enable=%i)", (int) enable);
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
#if defined(ANDROID)
if (_audioDevicePtr->SetLoudspeakerStatus(enable) < 0)
{
_engineStatistics.SetLastError(VE_IGNORED_FUNCTION, kTraceError,
" Failed to set loudspeaker status");
return -1;
}
return 0;
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
" no support for setting loudspeaker"
" status");
return -1;
#endif
}
int VoEHardwareImpl::GetLoudspeakerStatus(bool& enabled)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetLoudspeakerStatus()");
IPHONE_NOT_SUPPORTED();
#if defined(ANDROID)
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (_audioDevicePtr->GetLoudspeakerStatus(&enabled) < 0)
{
_engineStatistics.SetLastError(VE_IGNORED_FUNCTION, kTraceError,
" Failed to get loudspeaker status");
return -1;
}
return 0;
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
" no support for setting loudspeaker "
"status");
return -1;
#endif
}
int VoEHardwareImpl::GetCPULoad(int& loadPercent)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetCPULoad()");
    ANDROID_NOT_SUPPORTED();
    IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
// Get CPU load from ADM
WebRtc_UWord16 load(0);
if (_audioDevicePtr->CPULoad(&load) != 0)
{
_engineStatistics.SetLastError(VE_CPU_INFO_ERROR, kTraceError,
" error getting system CPU load");
return -1;
}
loadPercent = static_cast<int> (load);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: loadPercent = %d", loadPercent);
return 0;
}
int VoEHardwareImpl::GetSystemCPULoad(int& loadPercent)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSystemCPULoad(loadPercent=?)");
    ANDROID_NOT_SUPPORTED();
    IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
// Check if implemented for this platform
if (!_cpu)
{
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
" no support for getting system CPU "
"load");
return -1;
}
// Get CPU load
WebRtc_Word32 load = _cpu->CpuUsage();
if (load < 0)
{
_engineStatistics.SetLastError(VE_CPU_INFO_ERROR, kTraceError,
" error getting system CPU load");
return -1;
}
loadPercent = static_cast<int> (load);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" Output: loadPercent = %d", loadPercent);
return 0;
}
#endif // WEBRTC_VOICE_ENGINE_HARDWARE_API
} // namespace webrtc

View File

@ -1,80 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
#include "voe_hardware.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc
{
class CpuWrapper;
class VoEHardwareImpl: public virtual voe::SharedData,
public VoEHardware,
public voe::RefCount
{
public:
virtual int Release();
virtual int GetNumOfRecordingDevices(int& devices);
virtual int GetNumOfPlayoutDevices(int& devices);
virtual int GetRecordingDeviceName(int index,
char strNameUTF8[128],
char strGuidUTF8[128]);
virtual int GetPlayoutDeviceName(int index,
char strNameUTF8[128],
char strGuidUTF8[128]);
virtual int GetRecordingDeviceStatus(bool& isAvailable);
virtual int GetPlayoutDeviceStatus(bool& isAvailable);
virtual int SetRecordingDevice(
int index,
StereoChannel recordingChannel = kStereoBoth);
virtual int SetPlayoutDevice(int index);
virtual int SetAudioDeviceLayer(AudioLayers audioLayer);
virtual int GetAudioDeviceLayer(AudioLayers& audioLayer);
virtual int GetCPULoad(int& loadPercent);
virtual int GetSystemCPULoad(int& loadPercent);
virtual int ResetAudioDevice();
virtual int AudioDeviceControl(unsigned int par1,
unsigned int par2,
unsigned int par3);
virtual int SetLoudspeakerStatus(bool enable);
virtual int GetLoudspeakerStatus(bool& enabled);
protected:
VoEHardwareImpl();
virtual ~VoEHardwareImpl();
private:
CpuWrapper* _cpu;
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H

View File

@ -1,178 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_neteq_stats_impl.h"
#include "audio_coding_module.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "trace.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc {
VoENetEqStats* VoENetEqStats::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoENetEqStatsImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
VoENetEqStatsImpl::VoENetEqStatsImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoENetEqStatsImpl::VoENetEqStatsImpl() - ctor");
}
VoENetEqStatsImpl::~VoENetEqStatsImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoENetEqStatsImpl::~VoENetEqStatsImpl() - dtor");
}
int VoENetEqStatsImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"VoENetEqStats::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset(); // reset reference counter to zero => OK to delete VE
_engineStatistics.SetLastError(
VE_INTERFACE_NOT_FOUND, kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"VoENetEqStats reference counter = %d", refCount);
return (refCount);
}
int VoENetEqStatsImpl::GetNetworkStatistics(int channel,
NetworkStatistics& stats)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetNetworkStatistics(channel=%d, stats=?)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetNetworkStatistics() failed to locate channel");
return -1;
}
return channelPtr->GetNetworkStatistics(stats);
}
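// Usage sketch (illustrative only): polls jitter-buffer statistics for
// a receiving channel; the NetworkStatistics struct is assumed to be
// the one declared in common_types.h.
//
//   VoENetEqStats* neteq = VoENetEqStats::GetInterface(voe);
//   NetworkStatistics stats;
//   neteq->GetNetworkStatistics(channel, stats);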
int VoENetEqStatsImpl::GetJitterStatistics(int channel,
JitterStatistics& stats)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetJitterStatistics(channel=%i)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetJitterStatistics() failed to locate channel");
return -1;
}
return channelPtr->GetJitterStatistics(stats);
}
int VoENetEqStatsImpl::GetPreferredBufferSize(
int channel,
unsigned short& preferredBufferSize)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetPreferredBufferSize(channel=%i, ?)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetPreferredBufferSize() failed to locate channel");
return -1;
}
return channelPtr->GetPreferredBufferSize(preferredBufferSize);
}
int VoENetEqStatsImpl::ResetJitterStatistics(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"ResetJitterStatistics(channel=%i)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"ResetJitterStatistics() failed to locate channel");
return -1;
}
return channelPtr->ResetJitterStatistics();
}
#endif // #ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
} // namespace webrtc

View File

@ -1,46 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
#include "voe_neteq_stats.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc {
class VoENetEqStatsImpl : public virtual voe::SharedData,
public VoENetEqStats,
public voe::RefCount
{
public:
virtual int Release();
virtual int GetNetworkStatistics(int channel,
NetworkStatistics& stats);
virtual int GetJitterStatistics(int channel,
JitterStatistics& stats);
virtual int GetPreferredBufferSize(int channel,
unsigned short& preferredBufferSize);
virtual int ResetJitterStatistics(int channel);
protected:
VoENetEqStatsImpl();
virtual ~VoENetEqStatsImpl();
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
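
// A minimal usage sketch (assumed client code, not from the original tree):
// polling NetEQ statistics through this sub-API, in the style of the usage
// examples in the other VoE headers. Assumes a created and initialized
// VoiceEngine `voe` and an existing channel id; error checking is omitted.
//
// using namespace webrtc;
// VoENetEqStats* neteq = VoENetEqStats::GetInterface(voe);
// NetworkStatistics stats;
// neteq->GetNetworkStatistics(channel, stats);
// unsigned short preferredBufferSize(0);
// neteq->GetPreferredBufferSize(channel, preferredBufferSize);
// neteq->ResetJitterStatistics(channel);
// neteq->Release();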

View File

@ -1,944 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_network_impl.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "trace.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc
{
VoENetwork* VoENetwork::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_NETWORK_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoENetworkImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
VoENetworkImpl::VoENetworkImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"VoENetworkImpl() - ctor");
}
VoENetworkImpl::~VoENetworkImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
"~VoENetworkImpl() - dtor");
}
int VoENetworkImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"VoENetworkImpl::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset();
_engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"VoENetworkImpl reference counter = %d", refCount);
return (refCount);
}
int VoENetworkImpl::RegisterExternalTransport(int channel,
Transport& transport)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetExternalTransport(channel=%d, transport=0x%x)",
channel, &transport);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetExternalTransport() failed to locate channel");
return -1;
}
return channelPtr->RegisterExternalTransport(transport);
}
int VoENetworkImpl::DeRegisterExternalTransport(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"DeRegisterExternalTransport(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterExternalTransport() failed to locate channel");
return -1;
}
return channelPtr->DeRegisterExternalTransport();
}
int VoENetworkImpl::ReceivedRTPPacket(int channel,
const void* data,
unsigned int length)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
"ReceivedRTPPacket(channel=%d, length=%u)", channel, length);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if ((length < 12) || (length > 807))
{
_engineStatistics.SetLastError(
VE_INVALID_PACKET, kTraceError,
"ReceivedRTPPacket() invalid packet length");
return -1;
}
if (NULL == data)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"ReceivedRTPPacket() invalid data vector");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"ReceivedRTPPacket() failed to locate channel");
return -1;
}
if (!channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_INVALID_OPERATION, kTraceError,
"ReceivedRTPPacket() external transport is not enabled");
return -1;
}
return channelPtr->ReceivedRTPPacket((const WebRtc_Word8*) data, length);
}
int VoENetworkImpl::ReceivedRTCPPacket(int channel, const void* data,
unsigned int length)
{
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
"ReceivedRTCPPacket(channel=%d, length=%u)", channel, length);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (length < 4)
{
_engineStatistics.SetLastError(
VE_INVALID_PACKET, kTraceError,
"ReceivedRTCPPacket() invalid packet length");
return -1;
}
if (NULL == data)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"ReceivedRTCPPacket() invalid data vector");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"ReceivedRTCPPacket() failed to locate channel");
return -1;
}
if (!channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_INVALID_OPERATION, kTraceError,
"ReceivedRTCPPacket() external transport is not enabled");
return -1;
}
return channelPtr->ReceivedRTCPPacket((const WebRtc_Word8*) data, length);
}
int VoENetworkImpl::GetSourceInfo(int channel,
int& rtpPort,
int& rtcpPort,
char ipAddr[64])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSourceInfo(channel=%d, rtpPort=?, rtcpPort=?, ipAddr[]=?)",
channel);
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (NULL == ipAddr)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"GetSourceInfo() invalid IP-address buffer");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetSourceInfo() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"GetSourceInfo() external transport is enabled");
return -1;
}
return channelPtr->GetSourceInfo(rtpPort, rtcpPort, ipAddr);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"GetSourceInfo() VoE is built for external transport");
return -1;
#endif
}
int VoENetworkImpl::GetLocalIP(char ipAddr[64], bool ipv6)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetLocalIP(ipAddr[]=?, ipv6=%d)", ipv6);
IPHONE_NOT_SUPPORTED();
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (NULL == ipAddr)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"GetLocalIP() invalid IP-address buffer");
return -1;
}
    // Create a temporary socket module so that this method can be called
    // even when no channels have been created.
WebRtc_UWord8 numSockThreads(1);
UdpTransport* socketPtr =
UdpTransport::Create(
-1,
numSockThreads);
if (NULL == socketPtr)
{
_engineStatistics.SetLastError(
VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
"GetLocalIP() failed to create socket module");
return -1;
}
WebRtc_Word8 localIPAddr[64];
if (ipv6)
{
WebRtc_UWord8 localIP[16];
if (socketPtr->LocalHostAddressIPV6(localIP) != 0)
{
_engineStatistics.SetLastError(
VE_INVALID_IP_ADDRESS, kTraceError,
"GetLocalIP() failed to retrieve local IP - 1");
return -1;
}
// Convert 128-bit address to character string (a:b:c:d:e:f:g:h)
sprintf(localIPAddr,
"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x"
"%.2x:%.2x%.2x",
localIP[0], localIP[1], localIP[2], localIP[3], localIP[4],
localIP[5], localIP[6], localIP[7], localIP[8], localIP[9],
localIP[10], localIP[11], localIP[12], localIP[13],
localIP[14], localIP[15]);
}
else
{
WebRtc_UWord32 localIP(0);
// Read local IP (as 32-bit address) from the socket module
if (socketPtr->LocalHostAddress(localIP) != 0)
{
_engineStatistics.SetLastError(
VE_INVALID_IP_ADDRESS, kTraceError,
"GetLocalIP() failed to retrieve local IP - 2");
return -1;
}
// Convert 32-bit address to character string (x.y.z.w)
sprintf(localIPAddr, "%d.%d.%d.%d", (int) ((localIP >> 24) & 0x0ff),
(int) ((localIP >> 16) & 0x0ff),
(int) ((localIP >> 8) & 0x0ff),
(int) (localIP & 0x0ff));
}
strcpy(ipAddr, localIPAddr);
UdpTransport::Destroy(socketPtr);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"GetLocalIP() => ipAddr=%s", ipAddr);
return 0;
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"GetLocalIP() VoE is built for external transport");
return -1;
#endif
}
int VoENetworkImpl::EnableIPv6(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"EnableIPv6(channel=%d)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"EnableIPv6() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"EnableIPv6() external transport is enabled");
return -1;
}
return channelPtr->EnableIPv6();
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"EnableIPv6() VoE is built for external transport");
return -1;
#endif
}
bool VoENetworkImpl::IPv6IsEnabled(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"IPv6IsEnabled(channel=%d)", channel);
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return false;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"IPv6IsEnabled() failed to locate channel");
return false;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"IPv6IsEnabled() external transport is enabled");
return false;
}
return channelPtr->IPv6IsEnabled();
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"IPv6IsEnabled() VoE is built for external transport");
return false;
#endif
}
int VoENetworkImpl::SetSourceFilter(int channel,
int rtpPort,
int rtcpPort,
const char ipAddr[64])
{
(ipAddr == NULL) ? WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
VoEId(_instanceId, -1),
"SetSourceFilter(channel=%d, rtpPort=%d,"
" rtcpPort=%d)",
channel, rtpPort, rtcpPort)
: WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
VoEId(_instanceId, -1),
"SetSourceFilter(channel=%d, rtpPort=%d,"
" rtcpPort=%d, ipAddr=%s)",
channel, rtpPort, rtcpPort, ipAddr);
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if ((rtpPort < 0) || (rtpPort > 65535))
{
_engineStatistics.SetLastError(
VE_INVALID_PORT_NMBR, kTraceError,
"SetSourceFilter() invalid RTP port");
return -1;
}
if ((rtcpPort < 0) || (rtcpPort > 65535))
{
_engineStatistics.SetLastError(
VE_INVALID_PORT_NMBR, kTraceError,
"SetSourceFilter() invalid RTCP port");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetSourceFilter() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"SetSourceFilter() external transport is enabled");
return -1;
}
return channelPtr->SetSourceFilter(rtpPort, rtcpPort, ipAddr);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"SetSourceFilter() VoE is built for external transport");
return -1;
#endif
}
int VoENetworkImpl::GetSourceFilter(int channel,
int& rtpPort,
int& rtcpPort,
char ipAddr[64])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSourceFilter(channel=%d, rtpPort=?, rtcpPort=?, "
"ipAddr[]=?)",
channel);
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (NULL == ipAddr)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"GetSourceFilter() invalid IP-address buffer");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetSourceFilter() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"GetSourceFilter() external transport is enabled");
return -1;
}
return channelPtr->GetSourceFilter(rtpPort, rtcpPort, ipAddr);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"GetSourceFilter() VoE is built for external transport");
return -1;
#endif
}
int VoENetworkImpl::SetSendTOS(int channel,
int DSCP,
int priority,
bool useSetSockopt)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetSendTOS(channel=%d, DSCP=%d, useSetSockopt=%d)",
channel, DSCP, useSetSockopt);
#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceWarning,
"SetSendTOS() is not supported on this platform");
return -1;
#endif
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if ((DSCP < 0) || (DSCP > 63))
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetSendTOS() Invalid DSCP value");
return -1;
}
#if defined(_WIN32) || defined(WEBRTC_LINUX)
if ((priority < -1) || (priority > 7))
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetSendTOS() Invalid priority value");
return -1;
}
#else
if (-1 != priority)
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetSendTOS() priority not supported");
return -1;
}
#endif
#if defined(_WIN32)
if ((priority >= 0) && useSetSockopt)
{
// On Windows, priority and useSetSockopt cannot be combined
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetSendTOS() priority and useSetSockopt conflict");
return -1;
}
#endif
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetSendTOS() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"SetSendTOS() external transport is enabled");
return -1;
}
#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
useSetSockopt = true;
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
" force useSetSockopt=true since there is no alternative"
" implementation");
#endif
return channelPtr->SetSendTOS(DSCP, priority, useSetSockopt);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"SetSendTOS() VoE is built for external transport");
return -1;
#endif
}
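// Illustrative call sequence (an assumption, not part of the original file):
// marking voice packets with DSCP EF (46) where setsockopt-based TOS is
// available; the channel id is a placeholder. On non-Windows platforms the
// 802.1p priority must be left at -1, per the checks above.
//
// VoENetwork* network = VoENetwork::GetInterface(voe);
// network->SetSendTOS(channel, 46, -1, true);  // DSCP EF for voice
// int dscp(0), priority(0);
// bool useSetSockopt(false);
// network->GetSendTOS(channel, dscp, priority, useSetSockopt);
// network->Release();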
int VoENetworkImpl::GetSendTOS(int channel,
int& DSCP,
int& priority,
bool& useSetSockopt)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSendTOS(channel=%d)", channel);
#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceWarning,
"GetSendTOS() is not supported on this platform");
return -1;
#endif
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetSendTOS() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"GetSendTOS() external transport is enabled");
return -1;
}
return channelPtr->GetSendTOS(DSCP, priority, useSetSockopt);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"GetSendTOS() VoE is built for external transport");
return -1;
#endif
}
int VoENetworkImpl::SetSendGQoS(int channel,
bool enable,
int serviceType,
int overrideDSCP)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetSendGQOS(channel=%d, enable=%d, serviceType=%d,"
" overrideDSCP=%d)",
channel, (int) enable, serviceType, overrideDSCP);
    ANDROID_NOT_SUPPORTED();
    IPHONE_NOT_SUPPORTED();
#if !defined(_WIN32)
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceWarning,
"SetSendGQOS() is not supported on this platform");
return -1;
#elif !defined(WEBRTC_EXTERNAL_TRANSPORT)
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetSendGQOS() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"SetSendGQOS() external transport is enabled");
return -1;
}
return channelPtr->SetSendGQoS(enable, serviceType, overrideDSCP);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"SetSendGQOS() VoE is built for external transport");
return -1;
#endif
}
int VoENetworkImpl::GetSendGQoS(int channel,
bool& enabled,
int& serviceType,
int& overrideDSCP)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSendGQOS(channel=%d)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
#if !defined(_WIN32)
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceWarning,
"GetSendGQOS() is not supported on this platform");
return -1;
#elif !defined(WEBRTC_EXTERNAL_TRANSPORT)
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetSendGQOS() failed to locate channel");
return -1;
}
if (channelPtr->ExternalTransport())
{
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
"GetSendGQOS() external transport is enabled");
return -1;
}
return channelPtr->GetSendGQoS(enabled, serviceType, overrideDSCP);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"GetSendGQOS() VoE is built for external transport");
return -1;
#endif
}
int VoENetworkImpl::SetPacketTimeoutNotification(int channel,
bool enable,
int timeoutSeconds)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetPacketTimeoutNotification(channel=%d, enable=%d, "
"timeoutSeconds=%d)",
channel, (int) enable, timeoutSeconds);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (enable &&
((timeoutSeconds < kVoiceEngineMinPacketTimeoutSec) ||
(timeoutSeconds > kVoiceEngineMaxPacketTimeoutSec)))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetPacketTimeoutNotification() invalid timeout size");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetPacketTimeoutNotification() failed to locate channel");
return -1;
}
return channelPtr->SetPacketTimeoutNotification(enable, timeoutSeconds);
}
int VoENetworkImpl::GetPacketTimeoutNotification(int channel,
bool& enabled,
int& timeoutSeconds)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetPacketTimeoutNotification(channel=%d, enabled=?,"
" timeoutSeconds=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetPacketTimeoutNotification() failed to locate channel");
return -1;
}
return channelPtr->GetPacketTimeoutNotification(enabled, timeoutSeconds);
}
int VoENetworkImpl::RegisterDeadOrAliveObserver(int channel,
VoEConnectionObserver&
observer)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"RegisterDeadOrAliveObserver(channel=%d, observer=0x%x)",
channel, &observer);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterDeadOrAliveObserver() failed to locate channel");
return -1;
}
return channelPtr->RegisterDeadOrAliveObserver(observer);
}
int VoENetworkImpl::DeRegisterDeadOrAliveObserver(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"DeRegisterDeadOrAliveObserver(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterDeadOrAliveObserver() failed to locate channel");
return -1;
}
return channelPtr->DeRegisterDeadOrAliveObserver();
}
int VoENetworkImpl::SetPeriodicDeadOrAliveStatus(int channel, bool enable,
int sampleTimeSeconds)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetPeriodicDeadOrAliveStatus(channel=%d, enable=%d,"
" sampleTimeSeconds=%d)",
channel, enable, sampleTimeSeconds);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (enable &&
((sampleTimeSeconds < kVoiceEngineMinSampleTimeSec) ||
(sampleTimeSeconds > kVoiceEngineMaxSampleTimeSec)))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetPeriodicDeadOrAliveStatus() invalid sample time");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetPeriodicDeadOrAliveStatus() failed to locate channel");
return -1;
}
return channelPtr->SetPeriodicDeadOrAliveStatus(enable, sampleTimeSeconds);
}
int VoENetworkImpl::GetPeriodicDeadOrAliveStatus(int channel,
bool& enabled,
int& sampleTimeSeconds)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetPeriodicDeadOrAliveStatus(channel=%d, enabled=?,"
" sampleTimeSeconds=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetPeriodicDeadOrAliveStatus() failed to locate channel");
return -1;
}
return channelPtr->GetPeriodicDeadOrAliveStatus(enabled,
sampleTimeSeconds);
}
int VoENetworkImpl::SendUDPPacket(int channel,
const void* data,
unsigned int length,
int& transmittedBytes,
bool useRtcpSocket)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SendUDPPacket(channel=%d, data=0x%x, length=%u, useRTCP=%d)",
channel, data, length, useRtcpSocket);
#ifndef WEBRTC_EXTERNAL_TRANSPORT
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (NULL == data)
{
_engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SendUDPPacket() invalid data buffer");
return -1;
}
if (0 == length)
{
_engineStatistics.SetLastError(VE_INVALID_PACKET, kTraceError,
"SendUDPPacket() invalid packet size");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SendUDPPacket() failed to locate channel");
return -1;
}
return channelPtr->SendUDPPacket(data,
length,
transmittedBytes,
useRtcpSocket);
#else
_engineStatistics.SetLastError(
VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
"SendUDPPacket() VoE is built for external transport");
return -1;
#endif
}
#endif // WEBRTC_VOICE_ENGINE_NETWORK_API
} // namespace webrtc

View File

@ -1,117 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
#include "voe_network.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc
{
class VoENetworkImpl: public virtual voe::SharedData,
public VoENetwork,
public voe::RefCount
{
public:
virtual int Release();
virtual int RegisterExternalTransport(int channel, Transport& transport);
virtual int DeRegisterExternalTransport(int channel);
virtual int ReceivedRTPPacket(int channel,
const void* data,
unsigned int length);
virtual int ReceivedRTCPPacket(int channel,
const void* data,
unsigned int length);
virtual int GetSourceInfo(int channel,
int& rtpPort,
int& rtcpPort,
char ipAddr[64]);
virtual int GetLocalIP(char ipAddr[64], bool ipv6 = false);
virtual int EnableIPv6(int channel);
virtual bool IPv6IsEnabled(int channel);
virtual int SetSourceFilter(int channel,
int rtpPort,
int rtcpPort,
const char ipAddr[64] = 0);
virtual int GetSourceFilter(int channel,
int& rtpPort,
int& rtcpPort,
char ipAddr[64]);
virtual int SetSendTOS(int channel,
int DSCP,
int priority = -1,
bool useSetSockopt = false);
virtual int GetSendTOS(int channel,
int& DSCP,
int& priority,
bool& useSetSockopt);
virtual int SetSendGQoS(int channel,
bool enable,
int serviceType,
int overrideDSCP);
virtual int GetSendGQoS(int channel,
bool& enabled,
int& serviceType,
int& overrideDSCP);
virtual int SetPacketTimeoutNotification(int channel,
bool enable,
int timeoutSeconds = 2);
virtual int GetPacketTimeoutNotification(int channel,
bool& enabled,
int& timeoutSeconds);
virtual int RegisterDeadOrAliveObserver(int channel,
VoEConnectionObserver& observer);
virtual int DeRegisterDeadOrAliveObserver(int channel);
virtual int SetPeriodicDeadOrAliveStatus(int channel,
bool enable,
int sampleTimeSeconds = 2);
virtual int GetPeriodicDeadOrAliveStatus(int channel,
bool& enabled,
int& sampleTimeSeconds);
virtual int SendUDPPacket(int channel,
const void* data,
unsigned int length,
int& transmittedBytes,
bool useRtcpSocket = false);
protected:
VoENetworkImpl();
virtual ~VoENetworkImpl();
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
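
// A minimal external-transport sketch (assumed usage, not from the original
// tree): the application implements webrtc::Transport to take over packet
// sending, then feeds received packets back in through ReceivedRTPPacket()
// and ReceivedRTCPPacket(). MyTransport and the channel id are placeholders.
//
// class MyTransport : public webrtc::Transport
// {
// public:
//     virtual int SendPacket(int channel, const void* data, int len)
//     {
//         // Hand the RTP packet to the application's own socket layer.
//         return len;
//     }
//     virtual int SendRTCPPacket(int channel, const void* data, int len)
//     {
//         // Hand the RTCP packet to the application's own socket layer.
//         return len;
//     }
// };
//
// MyTransport transport;
// VoENetwork* network = VoENetwork::GetInterface(voe);
// network->RegisterExternalTransport(channel, transport);
// // For each packet the application receives from the network:
// network->ReceivedRTPPacket(channel, packet, packetLength);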

View File

@ -1,747 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_rtp_rtcp_impl.h"
#include "trace.h"
#include "file_wrapper.h"
#include "critical_section_wrapper.h"
#include "voice_engine_impl.h"
#include "voe_errors.h"
#include "channel.h"
#include "transmit_mixer.h"
namespace webrtc {
VoERTP_RTCP* VoERTP_RTCP::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoERTP_RTCPImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
VoERTP_RTCPImpl::VoERTP_RTCPImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoERTP_RTCPImpl::VoERTP_RTCPImpl() - ctor");
}
VoERTP_RTCPImpl::~VoERTP_RTCPImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoERTP_RTCPImpl::~VoERTP_RTCPImpl() - dtor");
}
int VoERTP_RTCPImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"VoERTP_RTCP::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset(); // reset reference counter to zero => OK to delete VE
_engineStatistics.SetLastError(
VE_INTERFACE_NOT_FOUND, kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"VoERTP_RTCP reference counter = %d", refCount);
return (refCount);
}
int VoERTP_RTCPImpl::RegisterRTPObserver(int channel, VoERTPObserver& observer)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"RegisterRTPObserver(channel=%d observer=0x%x)",
channel, &observer);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterRTPObserver() failed to locate channel");
return -1;
}
return channelPtr->RegisterRTPObserver(observer);
}
int VoERTP_RTCPImpl::DeRegisterRTPObserver(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"DeRegisterRTPObserver(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterRTPObserver() failed to locate channel");
return -1;
}
return channelPtr->DeRegisterRTPObserver();
}
int VoERTP_RTCPImpl::RegisterRTCPObserver(int channel, VoERTCPObserver& observer)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"RegisterRTCPObserver(channel=%d observer=0x%x)",
channel, &observer);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterRTPObserver() failed to locate channel");
return -1;
}
return channelPtr->RegisterRTCPObserver(observer);
}
int VoERTP_RTCPImpl::DeRegisterRTCPObserver(int channel)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"DeRegisterRTCPObserver(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterRTCPObserver() failed to locate channel");
return -1;
}
return channelPtr->DeRegisterRTCPObserver();
}
int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetLocalSSRC(channel=%d, %lu)", channel, ssrc);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetLocalSSRC() failed to locate channel");
return -1;
}
return channelPtr->SetLocalSSRC(ssrc);
}
int VoERTP_RTCPImpl::GetLocalSSRC(int channel, unsigned int& ssrc)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetLocalSSRC(channel=%d, ssrc=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetLocalSSRC() failed to locate channel");
return -1;
}
return channelPtr->GetLocalSSRC(ssrc);
}
int VoERTP_RTCPImpl::GetRemoteSSRC(int channel, unsigned int& ssrc)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRemoteSSRC(channel=%d, ssrc=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRemoteSSRC() failed to locate channel");
return -1;
}
return channelPtr->GetRemoteSSRC(ssrc);
}
int VoERTP_RTCPImpl::GetRemoteCSRCs(int channel, unsigned int arrCSRC[15])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRemoteCSRCs(channel=%d, arrCSRC=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRemoteCSRCs() failed to locate channel");
return -1;
}
return channelPtr->GetRemoteCSRCs(arrCSRC);
}
int VoERTP_RTCPImpl::SetRTPAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char ID)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetRTPAudioLevelIndicationStatus(channel=%d, enable=%d,"
" ID=%u)", channel, enable, ID);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (ID < kVoiceEngineMinRtpExtensionId ||
ID > kVoiceEngineMaxRtpExtensionId)
{
// [RFC5285] The 4-bit ID is the local identifier of this element in
// the range 1-14 inclusive.
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetRTPAudioLevelIndicationStatus() invalid ID parameter");
return -1;
}
// Set AudioProcessingModule level-metric mode based on user input.
// Note that this setting may conflict with the
// AudioProcessing::SetMetricsStatus API.
if (_audioProcessingModulePtr->level_estimator()->Enable(enable) != 0)
{
_engineStatistics.SetLastError(
VE_APM_ERROR, kTraceError,
"SetRTPAudioLevelIndicationStatus() failed to set level-metric"
"mode");
return -1;
}
// Ensure that the transmit mixer reads the audio-level metric for each
// 10ms packet and copies the same value to all active channels.
// The metric is derived within the AudioProcessingModule.
_transmitMixerPtr->SetRTPAudioLevelIndicationStatus(enable);
// Set state and ID for the specified channel.
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetRTPAudioLevelIndicationStatus() failed to locate channel");
return -1;
}
return channelPtr->SetRTPAudioLevelIndicationStatus(enable, ID);
}
int VoERTP_RTCPImpl::GetRTPAudioLevelIndicationStatus(int channel,
bool& enabled,
unsigned char& ID)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRTPAudioLevelIndicationStatus(channel=%d, enable=?, ID=?)",
channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRTPAudioLevelIndicationStatus() failed to locate channel");
return -1;
}
return channelPtr->GetRTPAudioLevelIndicationStatus(enabled, ID);
}
int VoERTP_RTCPImpl::SetRTCPStatus(int channel, bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetRTCPStatus(channel=%d, enable=%d)", channel, enable);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetRTCPStatus() failed to locate channel");
return -1;
}
return channelPtr->SetRTCPStatus(enable);
}
int VoERTP_RTCPImpl::GetRTCPStatus(int channel, bool& enabled)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRTCPStatus(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRTCPStatus() failed to locate channel");
return -1;
}
return channelPtr->GetRTCPStatus(enabled);
}
int VoERTP_RTCPImpl::SetRTCP_CNAME(int channel, const char cName[256])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetRTCP_CNAME(channel=%d, cName=%s)", channel, cName);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetRTCP_CNAME() failed to locate channel");
return -1;
}
return channelPtr->SetRTCP_CNAME(cName);
}
int VoERTP_RTCPImpl::GetRTCP_CNAME(int channel, char cName[256])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRTCP_CNAME(channel=%d, cName=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRTCP_CNAME() failed to locate channel");
return -1;
}
return channelPtr->GetRTCP_CNAME(cName);
}
int VoERTP_RTCPImpl::GetRemoteRTCP_CNAME(int channel, char cName[256])
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRemoteRTCP_CNAME(channel=%d, cName=?)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRemoteRTCP_CNAME() failed to locate channel");
return -1;
}
return channelPtr->GetRemoteRTCP_CNAME(cName);
}
int VoERTP_RTCPImpl::GetRemoteRTCPData(
int channel,
unsigned int& NTPHigh, // from sender info in SR
unsigned int& NTPLow, // from sender info in SR
unsigned int& timestamp, // from sender info in SR
unsigned int& playoutTimestamp, // derived locally
unsigned int* jitter, // from report block 1 in SR/RR
unsigned short* fractionLost) // from report block 1 in SR/RR
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRemoteRTCPData(channel=%d,...)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRemoteRTCP_CNAME() failed to locate channel");
return -1;
}
return channelPtr->GetRemoteRTCPData(NTPHigh,
NTPLow,
timestamp,
playoutTimestamp,
jitter,
fractionLost);
}
int VoERTP_RTCPImpl::SendApplicationDefinedRTCPPacket(
int channel,
const unsigned char subType,
unsigned int name,
const char* data,
unsigned short dataLengthInBytes)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SendApplicationDefinedRTCPPacket(channel=%d, subType=%u,"
"name=%u, data=?, dataLengthInBytes=%u)",
channel, subType, name, dataLengthInBytes);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SendApplicationDefinedRTCPPacket() failed to locate channel");
return -1;
}
return channelPtr->SendApplicationDefinedRTCPPacket(subType,
name,
data,
dataLengthInBytes);
}
int VoERTP_RTCPImpl::GetRTPStatistics(int channel,
unsigned int& averageJitterMs,
unsigned int& maxJitterMs,
unsigned int& discardedPackets)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRTPStatistics(channel=%d,....)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRTPStatistics() failed to locate channel");
return -1;
}
return channelPtr->GetRTPStatistics(averageJitterMs,
maxJitterMs,
discardedPackets);
}
int VoERTP_RTCPImpl::GetRTCPStatistics(int channel, CallStatistics& stats)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRTCPStatistics(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRTPStatistics() failed to locate channel");
return -1;
}
return channelPtr->GetRTPStatistics(stats);
}
int VoERTP_RTCPImpl::SetFECStatus(int channel, bool enable, int redPayloadtype)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetFECStatus(channel=%d, enable=%d, redPayloadtype=%d)",
channel, enable, redPayloadtype);
#ifdef WEBRTC_CODEC_RED
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetFECStatus() failed to locate channel");
return -1;
}
return channelPtr->SetFECStatus(enable, redPayloadtype);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetFECStatus() RED is not supported");
return -1;
#endif
}
int VoERTP_RTCPImpl::GetFECStatus(int channel,
bool& enabled,
int& redPayloadtype)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetFECStatus(channel=%d, enabled=?, redPayloadtype=?)",
channel);
#ifdef WEBRTC_CODEC_RED
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetFECStatus() failed to locate channel");
return -1;
}
return channelPtr->GetFECStatus(enabled, redPayloadtype);
#else
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetFECStatus() RED is not supported");
return -1;
#endif
}
int VoERTP_RTCPImpl::SetRTPKeepaliveStatus(int channel,
bool enable,
unsigned char unknownPayloadType,
int deltaTransmitTimeSeconds)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetRTPKeepaliveStatus(channel=%d, enable=%d,"
" unknownPayloadType=%u, deltaTransmitTimeSeconds=%d)",
channel, enable, unknownPayloadType, deltaTransmitTimeSeconds);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetRTPKeepaliveStatus() failed to locate channel");
return -1;
}
return channelPtr->SetRTPKeepaliveStatus(enable,
unknownPayloadType,
deltaTransmitTimeSeconds);
}
int VoERTP_RTCPImpl::GetRTPKeepaliveStatus(int channel,
bool& enabled,
unsigned char& unknownPayloadType,
int& deltaTransmitTimeSeconds)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRTPKeepaliveStatus(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRTPKeepaliveStatus() failed to locate channel");
return -1;
}
return channelPtr->GetRTPKeepaliveStatus(enabled,
unknownPayloadType,
deltaTransmitTimeSeconds);
}
int VoERTP_RTCPImpl::StartRTPDump(int channel,
const char fileNameUTF8[1024],
RTPDirections direction)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"StartRTPDump(channel=%d, fileNameUTF8=%s, direction=%d)",
channel, fileNameUTF8, direction);
assert(1024 == FileWrapper::kMaxFileNameSize);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"StartRTPDump() failed to locate channel");
return -1;
}
return channelPtr->StartRTPDump(fileNameUTF8, direction);
}
int VoERTP_RTCPImpl::StopRTPDump(int channel, RTPDirections direction)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"StopRTPDump(channel=%d, direction=%d)", channel, direction);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"StopRTPDump() failed to locate channel");
return -1;
}
return channelPtr->StopRTPDump(direction);
}
int VoERTP_RTCPImpl::RTPDumpIsActive(int channel, RTPDirections direction)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"RTPDumpIsActive(channel=%d, direction=%d)",
channel, direction);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"StopRTPDump() failed to locate channel");
return -1;
}
return channelPtr->RTPDumpIsActive(direction);
}
int VoERTP_RTCPImpl::InsertExtraRTPPacket(int channel,
unsigned char payloadType,
bool markerBit,
const char* payloadData,
unsigned short payloadSize)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"InsertExtraRTPPacket(channel=%d, payloadType=%u,"
" markerBit=%u, payloadSize=%u)",
channel, payloadType, markerBit, payloadSize);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"StopRTPDump() failed to locate channel");
return -1;
}
return channelPtr->InsertExtraRTPPacket(payloadType,
markerBit,
payloadData,
payloadSize);
}
#endif // #ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
} // namespace webrtc

View File

@ -1,134 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
#include "voe_rtp_rtcp.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc {
class VoERTP_RTCPImpl : public virtual voe::SharedData,
public VoERTP_RTCP,
public voe::RefCount
{
public:
virtual int Release();
// Registration of observers for RTP and RTCP callbacks
virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer);
virtual int DeRegisterRTPObserver(int channel);
virtual int RegisterRTCPObserver(int channel, VoERTCPObserver& observer);
virtual int DeRegisterRTCPObserver(int channel);
// RTCP
virtual int SetRTCPStatus(int channel, bool enable);
virtual int GetRTCPStatus(int channel, bool& enabled);
virtual int SetRTCP_CNAME(int channel, const char cName[256]);
virtual int GetRTCP_CNAME(int channel, char cName[256]);
virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]);
virtual int GetRemoteRTCPData(int channel,
unsigned int& NTPHigh,
unsigned int& NTPLow,
unsigned int& timestamp,
unsigned int& playoutTimestamp,
unsigned int* jitter = NULL,
unsigned short* fractionLost = NULL);
virtual int SendApplicationDefinedRTCPPacket(
int channel,
const unsigned char subType,
unsigned int name,
const char* data,
unsigned short dataLengthInBytes);
// SSRC
virtual int SetLocalSSRC(int channel, unsigned int ssrc);
virtual int GetLocalSSRC(int channel, unsigned int& ssrc);
virtual int GetRemoteSSRC(int channel, unsigned int& ssrc);
// RTP Header Extension for Client-to-Mixer Audio Level Indication
virtual int SetRTPAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char ID);
virtual int GetRTPAudioLevelIndicationStatus(int channel,
bool& enabled,
unsigned char& ID);
// CSRC
virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]);
// Statistics
virtual int GetRTPStatistics(int channel,
unsigned int& averageJitterMs,
unsigned int& maxJitterMs,
unsigned int& discardedPackets);
virtual int GetRTCPStatistics(int channel, CallStatistics& stats);
    // RTP keepalive mechanism (maintains NAT mappings associated with RTP flows)
virtual int SetRTPKeepaliveStatus(int channel,
bool enable,
unsigned char unknownPayloadType,
int deltaTransmitTimeSeconds = 15);
virtual int GetRTPKeepaliveStatus(int channel,
bool& enabled,
unsigned char& unknownPayloadType,
int& deltaTransmitTimeSeconds);
// FEC
virtual int SetFECStatus(int channel,
bool enable,
int redPayloadtype = -1);
virtual int GetFECStatus(int channel, bool& enabled, int& redPayloadtype);
// Store RTP and RTCP packets and dump to file (compatible with rtpplay)
virtual int StartRTPDump(int channel,
const char fileNameUTF8[1024],
RTPDirections direction = kRtpIncoming);
virtual int StopRTPDump(int channel,
RTPDirections direction = kRtpIncoming);
virtual int RTPDumpIsActive(int channel,
RTPDirections direction = kRtpIncoming);
    // Inserts (and transmits) an extra RTP packet into an active RTP audio stream
virtual int InsertExtraRTPPacket(int channel,
unsigned char payloadType,
bool markerBit,
const char* payloadData,
unsigned short payloadSize);
protected:
VoERTP_RTCPImpl();
virtual ~VoERTP_RTCPImpl();
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
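
// Illustrative use of the RTP/RTCP sub-API (assumed client code under an
// initialized engine; the channel id and file path are placeholders):
//
// VoERTP_RTCP* rtp_rtcp = VoERTP_RTCP::GetInterface(voe);
// rtp_rtcp->SetRTCPStatus(channel, true);
// rtp_rtcp->SetRTCP_CNAME(channel, "alice@example.com");
// unsigned int ssrc(0);
// rtp_rtcp->GetLocalSSRC(channel, ssrc);
// // Record incoming RTP/RTCP to a file readable by rtpplay:
// rtp_rtcp->StartRTPDump(channel, "/tmp/incoming.rtp", kRtpIncoming);
// ...
// rtp_rtcp->StopRTPDump(channel, kRtpIncoming);
// rtp_rtcp->Release();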

View File

@ -1,249 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_video_sync_impl.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "trace.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc {
VoEVideoSync* VoEVideoSync::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoEVideoSyncImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
VoEVideoSyncImpl::VoEVideoSyncImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEVideoSyncImpl::VoEVideoSyncImpl() - ctor");
}
VoEVideoSyncImpl::~VoEVideoSyncImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEVideoSyncImpl::~VoEVideoSyncImpl() - dtor");
}
int VoEVideoSyncImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"VoEVideoSync::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset(); // reset reference counter to zero => OK to delete VE
_engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"VoEVideoSync reference counter = %d", refCount);
return (refCount);
}
int VoEVideoSyncImpl::GetPlayoutTimestamp(int channel, unsigned int& timestamp)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetPlayoutTimestamp(channel=%d, timestamp=?)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetPlayoutTimestamp() failed to locate channel");
return -1;
}
return channelPtr->GetPlayoutTimestamp(timestamp);
}
int VoEVideoSyncImpl::SetInitTimestamp(int channel,
unsigned int timestamp)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetInitTimestamp(channel=%d, timestamp=%lu)",
channel, timestamp);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetInitTimestamp() failed to locate channel");
return -1;
}
return channelPtr->SetInitTimestamp(timestamp);
}
int VoEVideoSyncImpl::SetInitSequenceNumber(int channel,
short sequenceNumber)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetInitSequenceNumber(channel=%d, sequenceNumber=%hd)",
channel, sequenceNumber);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetInitSequenceNumber() failed to locate channel");
return -1;
}
return channelPtr->SetInitSequenceNumber(sequenceNumber);
}
int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel, int delayMs)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetMinimumPlayoutDelay(channel=%d, delayMs=%d)",
channel, delayMs);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetMinimumPlayoutDelay() failed to locate channel");
return -1;
}
return channelPtr->SetMinimumPlayoutDelay(delayMs);
}
int VoEVideoSyncImpl::GetDelayEstimate(int channel, int& delayMs)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetDelayEstimate(channel=%d, delayMs=?)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetDelayEstimate() failed to locate channel");
return -1;
}
return channelPtr->GetDelayEstimate(delayMs);
}
int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetPlayoutBufferSize(bufferMs=?)");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
AudioDeviceModule::BufferType type
(AudioDeviceModule::kFixedBufferSize);
WebRtc_UWord16 sizeMS(0);
if (_audioDevicePtr->PlayoutBuffer(&type, &sizeMS) != 0)
{
_engineStatistics.SetLastError(
VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"GetPlayoutBufferSize() failed to read buffer size");
return -1;
}
bufferMs = sizeMS;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetPlayoutBufferSize() => bufferMs=%d", bufferMs);
return 0;
}
int VoEVideoSyncImpl::GetRtpRtcp(int channel,
RtpRtcp* &rtpRtcpModule)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetRtpRtcp(channel=%i)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetPlayoutTimestamp() failed to locate channel");
return -1;
}
return channelPtr->GetRtpRtcp(rtpRtcpModule);
}
#endif // #ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
} // namespace webrtc

View File

@ -1,50 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
#include "voe_video_sync.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc {
class VoEVideoSyncImpl : public virtual voe::SharedData,
public VoEVideoSync,
public voe::RefCount
{
public:
virtual int Release();
virtual int GetPlayoutBufferSize(int& bufferMs);
virtual int SetMinimumPlayoutDelay(int channel, int delayMs);
virtual int GetDelayEstimate(int channel, int& delayMs);
virtual int SetInitTimestamp(int channel, unsigned int timestamp);
virtual int SetInitSequenceNumber(int channel, short sequenceNumber);
virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp);
    virtual int GetRtpRtcp(int channel,
RtpRtcp* &rtpRtcpModule);
protected:
VoEVideoSyncImpl();
virtual ~VoEVideoSyncImpl();
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H

View File

@ -1,661 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voe_volume_control_impl.h"
#include "channel.h"
#include "critical_section_wrapper.h"
#include "output_mixer.h"
#include "trace.h"
#include "transmit_mixer.h"
#include "voe_errors.h"
#include "voice_engine_impl.h"
namespace webrtc {
VoEVolumeControl* VoEVolumeControl::GetInterface(VoiceEngine* voiceEngine)
{
#ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
return NULL;
#else
if (NULL == voiceEngine)
{
return NULL;
}
VoiceEngineImpl* s =
reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoEVolumeControlImpl* d = s;
(*d)++;
return (d);
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
VoEVolumeControlImpl::VoEVolumeControlImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEVolumeControlImpl::VoEVolumeControlImpl() - ctor");
}
VoEVolumeControlImpl::~VoEVolumeControlImpl()
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
"VoEVolumeControlImpl::~VoEVolumeControlImpl() - dtor");
}
int VoEVolumeControlImpl::Release()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"VoEVolumeControl::Release()");
(*this)--;
int refCount = GetCount();
if (refCount < 0)
{
Reset(); // reset reference counter to zero => OK to delete VE
_engineStatistics.SetLastError(
VE_INTERFACE_NOT_FOUND, kTraceWarning);
return (-1);
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"VoEVolumeControl reference counter = %d", refCount);
return (refCount);
}
int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetSpeakerVolume(volume=%u)", volume);
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (volume > kMaxVolumeLevel)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetSpeakerVolume() invalid argument");
return -1;
}
WebRtc_UWord32 maxVol(0);
WebRtc_UWord32 spkrVol(0);
// scale: [0,kMaxVolumeLevel] -> [0,MaxSpeakerVolume]
if (_audioDevicePtr->MaxSpeakerVolume(&maxVol) != 0)
{
_engineStatistics.SetLastError(
VE_MIC_VOL_ERROR, kTraceError,
"SetSpeakerVolume() failed to get max volume");
return -1;
}
// round the value and avoid floating computation
spkrVol = (WebRtc_UWord32)((volume * maxVol +
(int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
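    // Worked example of the rounding above (illustrative values): with
    // volume=128 and maxVol=65535, spkrVol = (128*65535 + 127)/255 = 32896,
    // i.e. the [0,255] API range maps linearly onto the device range.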
// set the actual volume using the audio mixer
if (_audioDevicePtr->SetSpeakerVolume(spkrVol) != 0)
{
_engineStatistics.SetLastError(
VE_MIC_VOL_ERROR, kTraceError,
"SetSpeakerVolume() failed to set speaker volume");
return -1;
}
return 0;
}
int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeakerVolume()");
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
WebRtc_UWord32 spkrVol(0);
WebRtc_UWord32 maxVol(0);
if (_audioDevicePtr->SpeakerVolume(&spkrVol) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"GetSpeakerVolume() unable to get speaker volume");
return -1;
}
// scale: [0, MaxSpeakerVolume] -> [0, kMaxVolumeLevel]
if (_audioDevicePtr->MaxSpeakerVolume(&maxVol) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"GetSpeakerVolume() unable to get max speaker volume");
return -1;
}
// round the value and avoid floating computation
volume = (WebRtc_UWord32) ((spkrVol * kMaxVolumeLevel +
(int)(maxVol / 2)) / (maxVol));
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeakerVolume() => volume=%d", volume);
return 0;
}
int VoEVolumeControlImpl::SetSystemOutputMute(bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSystemOutputMute(enabled=%d)", enable);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (_audioDevicePtr->SetSpeakerMute(enable) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"SpeakerMute() unable to Set speaker mute");
return -1;
}
return 0;
}
int VoEVolumeControlImpl::GetSystemOutputMute(bool& enabled)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSystemOutputMute(enabled=?)");
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (_audioDevicePtr->SpeakerMute(&enabled) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"SpeakerMute() unable to get speaker mute state");
return -1;
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSystemOutputMute() => %d", enabled);
return 0;
}
int VoEVolumeControlImpl::SetMicVolume(unsigned int volume)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetMicVolume(volume=%u)", volume);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (volume > kMaxVolumeLevel)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetMicVolume() invalid argument");
return -1;
}
WebRtc_UWord32 maxVol(0);
WebRtc_UWord32 micVol(0);
// scale: [0, kMaxVolumeLevel] -> [0,MaxMicrophoneVolume]
if (_audioDevicePtr->MaxMicrophoneVolume(&maxVol) != 0)
{
_engineStatistics.SetLastError(
VE_MIC_VOL_ERROR, kTraceError,
"SetMicVolume() failed to get max volume");
return -1;
}
// round the value and avoid floating point computation
micVol = (WebRtc_UWord32) ((volume * maxVol +
(int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
// set the actual volume using the audio mixer
if (_audioDevicePtr->SetMicrophoneVolume(micVol) != 0)
{
_engineStatistics.SetLastError(
VE_MIC_VOL_ERROR, kTraceError,
"SetMicVolume() failed to set mic volume");
return -1;
}
return 0;
}
int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetMicVolume()");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
WebRtc_UWord32 micVol(0);
WebRtc_UWord32 maxVol(0);
if (_audioDevicePtr->MicrophoneVolume(&micVol) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"GetMicVolume() unable to get microphone volume");
return -1;
}
// scale: [0, MaxMicrophoneVolume] -> [0, kMaxVolumeLevel]
if (_audioDevicePtr->MaxMicrophoneVolume(&maxVol) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"GetMicVolume() unable to get max microphone volume");
return -1;
}
// round the value and avoid floating point calculation
volume = (WebRtc_UWord32) ((micVol * kMaxVolumeLevel +
(int)(maxVol / 2)) / (maxVol));
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetMicVolume() => volume=%d", volume);
return 0;
}
int VoEVolumeControlImpl::SetInputMute(int channel, bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetInputMute(channel=%d, enable=%d)", channel, enable);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (channel == -1)
{
// Mute before demultiplexing <=> affects all channels
return _transmitMixerPtr->SetMute(enable);
}
else
{
// Mute after demultiplexing <=> affects one channel only
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetInputMute() failed to locate channel");
return -1;
}
return channelPtr->SetMute(enable);
}
return 0;
}
int VoEVolumeControlImpl::GetInputMute(int channel, bool& enabled)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetInputMute(channel=%d)", channel);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (channel == -1)
{
enabled = _transmitMixerPtr->Mute();
}
else
{
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetInputMute() failed to locate channel");
return -1;
}
enabled = channelPtr->Mute();
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetInputMute() => enabled = %d", (int)enabled);
return 0;
}
int VoEVolumeControlImpl::SetSystemInputMute(bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetSystemInputMute(enabled=%d)", enable);
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (_audioDevicePtr->SetMicrophoneMute(enable) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"MicrophoneMute() unable to set microphone mute state");
return -1;
}
return 0;
}
int VoEVolumeControlImpl::GetSystemInputMute(bool& enabled)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSystemInputMute(enabled=?)");
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (_audioDevicePtr->MicrophoneMute(&enabled) != 0)
{
_engineStatistics.SetLastError(
VE_GET_MIC_VOL_ERROR, kTraceError,
"MicrophoneMute() unable to get microphone mute state");
return -1;
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSystemInputMute() => %d", enabled);
return 0;
}
int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechInputLevel()");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
WebRtc_Word8 currentLevel = _transmitMixerPtr->AudioLevel();
level = static_cast<unsigned int> (currentLevel);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechInputLevel() => %d", level);
return 0;
}
int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel,
unsigned int& level)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechOutputLevel(channel=%d, level=?)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (channel == -1)
{
return _outputMixerPtr->GetSpeechOutputLevel((WebRtc_UWord32&)level);
}
else
{
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetSpeechOutputLevel() failed to locate channel");
return -1;
}
channelPtr->GetSpeechOutputLevel((WebRtc_UWord32&)level);
}
return 0;
}
int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechInputLevelFullRange(level=?)");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
WebRtc_Word16 currentLevel = _transmitMixerPtr->AudioLevelFullRange();
level = static_cast<unsigned int> (currentLevel);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechInputLevelFullRange() => %d", level);
return 0;
}
int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel,
unsigned int& level)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechOutputLevelFullRange(channel=%d, level=?)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (channel == -1)
{
return _outputMixerPtr->GetSpeechOutputLevelFullRange(
(WebRtc_UWord32&)level);
}
else
{
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetSpeechOutputLevelFullRange() failed to locate channel");
return -1;
}
channelPtr->GetSpeechOutputLevelFullRange((WebRtc_UWord32&)level);
}
return 0;
}
int VoEVolumeControlImpl::SetChannelOutputVolumeScaling(int channel,
float scaling)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetChannelOutputVolumeScaling(channel=%d, scaling=%3.2f)",
channel, scaling);
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (scaling < kMinOutputVolumeScaling ||
scaling > kMaxOutputVolumeScaling)
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetChannelOutputVolumeScaling() invalid parameter");
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetChannelOutputVolumeScaling() failed to locate channel");
return -1;
}
return channelPtr->SetChannelOutputVolumeScaling(scaling);
}
int VoEVolumeControlImpl::GetChannelOutputVolumeScaling(int channel,
float& scaling)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetChannelOutputVolumeScaling(channel=%d, scaling=?)", channel);
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetChannelOutputVolumeScaling() failed to locate channel");
return -1;
}
return channelPtr->GetChannelOutputVolumeScaling(scaling);
}
int VoEVolumeControlImpl::SetOutputVolumePan(int channel,
float left,
float right)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)",
channel, left, right);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
bool available(false);
_audioDevicePtr->StereoPlayoutIsAvailable(&available);
if (!available)
{
_engineStatistics.SetLastError(
VE_FUNC_NO_STEREO, kTraceError,
"SetOutputVolumePan() stereo playout not supported");
return -1;
}
if ((left < kMinOutputVolumePanning) ||
(left > kMaxOutputVolumePanning) ||
(right < kMinOutputVolumePanning) ||
(right > kMaxOutputVolumePanning))
{
_engineStatistics.SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetOutputVolumePan() invalid parameter");
return -1;
}
if (channel == -1)
{
        // Master balance (affects the signal after output mixing)
return _outputMixerPtr->SetOutputVolumePan(left, right);
}
else
{
// Per-channel balance (affects the signal before output mixing)
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetOutputVolumePan() failed to locate channel");
return -1;
}
return channelPtr->SetOutputVolumePan(left, right);
}
return 0;
}
int VoEVolumeControlImpl::GetOutputVolumePan(int channel,
float& left,
float& right)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetOutputVolumePan(channel=%d, left=?, right=?)", channel);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
bool available(false);
_audioDevicePtr->StereoPlayoutIsAvailable(&available);
if (!available)
{
_engineStatistics.SetLastError(
VE_FUNC_NO_STEREO, kTraceError,
"GetOutputVolumePan() stereo playout not supported");
return -1;
}
if (channel == -1)
{
return _outputMixerPtr->GetOutputVolumePan(left, right);
}
else
{
voe::ScopedChannel sc(_channelManager, channel);
voe::Channel* channelPtr = sc.ChannelPtr();
if (channelPtr == NULL)
{
_engineStatistics.SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetOutputVolumePan() failed to locate channel");
return -1;
}
return channelPtr->GetOutputVolumePan(left, right);
}
return 0;
}
#endif // #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
} // namespace webrtc

View File

@ -1,74 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
#include "voe_volume_control.h"
#include "ref_count.h"
#include "shared_data.h"
namespace webrtc {
class VoEVolumeControlImpl : public virtual voe::SharedData,
public VoEVolumeControl,
public voe::RefCount
{
public:
virtual int Release();
virtual int SetSpeakerVolume(unsigned int volume);
virtual int GetSpeakerVolume(unsigned int& volume);
virtual int SetSystemOutputMute(bool enable);
virtual int GetSystemOutputMute(bool& enabled);
virtual int SetMicVolume(unsigned int volume);
virtual int GetMicVolume(unsigned int& volume);
virtual int SetInputMute(int channel, bool enable);
virtual int GetInputMute(int channel, bool& enabled);
virtual int SetSystemInputMute(bool enable);
virtual int GetSystemInputMute(bool& enabled);
virtual int GetSpeechInputLevel(unsigned int& level);
virtual int GetSpeechOutputLevel(int channel, unsigned int& level);
virtual int GetSpeechInputLevelFullRange(unsigned int& level);
virtual int GetSpeechOutputLevelFullRange(int channel,
unsigned int& level);
virtual int SetChannelOutputVolumeScaling(int channel, float scaling);
virtual int GetChannelOutputVolumeScaling(int channel, float& scaling);
virtual int SetOutputVolumePan(int channel, float left, float right);
virtual int GetOutputVolumePan(int channel, float& left, float& right);
protected:
VoEVolumeControlImpl();
virtual ~VoEVolumeControlImpl();
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H

View File

@ -1,127 +0,0 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../common_settings.gypi',
],
'targets': [
{
'target_name': 'voice_engine_core',
'type': '<(library)',
'dependencies': [
'../../../common_audio/resampler/main/source/resampler.gyp:resampler',
'../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
'../../../modules/audio_coding/main/source/audio_coding_module.gyp:audio_coding_module',
'../../../modules/audio_conference_mixer/source/audio_conference_mixer.gyp:audio_conference_mixer',
'../../../modules/audio_device/main/source/audio_device.gyp:audio_device',
'../../../modules/audio_processing/main/source/apm.gyp:audio_processing',
'../../../modules/media_file/source/media_file.gyp:media_file',
'../../../modules/rtp_rtcp/source/rtp_rtcp.gyp:rtp_rtcp',
'../../../modules/udp_transport/source/udp_transport.gyp:udp_transport',
'../../../modules/utility/source/utility.gyp:webrtc_utility',
'../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
],
'include_dirs': [
'../../..',
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../../..',
'../interface',
],
},
'sources': [
'../../../common_types.h',
'../../../engine_configurations.h',
'../../../typedefs.h',
'../interface/voe_audio_processing.h',
'../interface/voe_base.h',
'../interface/voe_call_report.h',
'../interface/voe_codec.h',
'../interface/voe_dtmf.h',
'../interface/voe_encryption.h',
'../interface/voe_errors.h',
'../interface/voe_external_media.h',
'../interface/voe_file.h',
'../interface/voe_hardware.h',
'../interface/voe_neteq_stats.h',
'../interface/voe_network.h',
'../interface/voe_rtp_rtcp.h',
'../interface/voe_video_sync.h',
'../interface/voe_volume_control.h',
'audio_frame_operations.cc',
'audio_frame_operations.h',
'channel.cc',
'channel.h',
'channel_manager.cc',
'channel_manager.h',
'channel_manager_base.cc',
'channel_manager_base.h',
'dtmf_inband.cc',
'dtmf_inband.h',
'dtmf_inband_queue.cc',
'dtmf_inband_queue.h',
'level_indicator.cc',
'level_indicator.h',
'monitor_module.cc',
'monitor_module.h',
'output_mixer.cc',
'output_mixer.h',
'ref_count.cc',
'ref_count.h',
'shared_data.cc',
'shared_data.h',
'statistics.cc',
'statistics.h',
'transmit_mixer.cc',
'transmit_mixer.h',
'utility.cc',
'utility.h',
'voe_audio_processing_impl.cc',
'voe_audio_processing_impl.h',
'voe_base_impl.cc',
'voe_base_impl.h',
'voe_call_report_impl.cc',
'voe_call_report_impl.h',
'voe_codec_impl.cc',
'voe_codec_impl.h',
'voe_dtmf_impl.cc',
'voe_dtmf_impl.h',
'voe_encryption_impl.cc',
'voe_encryption_impl.h',
'voe_external_media_impl.cc',
'voe_external_media_impl.h',
'voe_file_impl.cc',
'voe_file_impl.h',
'voe_hardware_impl.cc',
'voe_hardware_impl.h',
'voe_neteq_stats_impl.cc',
'voe_neteq_stats_impl.h',
'voe_network_impl.cc',
'voe_network_impl.h',
'voe_rtp_rtcp_impl.cc',
'voe_rtp_rtcp_impl.h',
'voe_video_sync_impl.cc',
'voe_video_sync_impl.h',
'voe_volume_control_impl.cc',
'voe_volume_control_impl.h',
'voice_engine_defines.h',
'voice_engine_impl.cc',
'voice_engine_impl.h',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:

View File

@ -1,598 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* This file contains common constants for VoiceEngine, as well as
* platform specific settings and include files.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
#define WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
#include "engine_configurations.h"
// ----------------------------------------------------------------------------
// Enumerators
// ----------------------------------------------------------------------------
namespace webrtc
{
// VolumeControl
enum { kMinVolumeLevel = 0 };
enum { kMaxVolumeLevel = 255 };
// Min scale factor for per-channel volume scaling
const float kMinOutputVolumeScaling = 0.0f;
// Max scale factor for per-channel volume scaling
const float kMaxOutputVolumeScaling = 10.0f;
// Min scale factor for output volume panning
const float kMinOutputVolumePanning = 0.0f;
// Max scale factor for output volume panning
const float kMaxOutputVolumePanning = 1.0f;
// DTMF
enum { kMinDtmfEventCode = 0 }; // DTMF digit "0"
enum { kMaxDtmfEventCode = 15 }; // DTMF digit "D"
enum { kMinTelephoneEventCode = 0 }; // RFC4733 (Section 2.3.1)
enum { kMaxTelephoneEventCode = 255 }; // RFC4733 (Section 2.3.1)
enum { kMinTelephoneEventDuration = 100 };
enum { kMaxTelephoneEventDuration = 60000 }; // Actual limit is 2^16
enum { kMinTelephoneEventAttenuation = 0 }; // 0 dBm0
enum { kMaxTelephoneEventAttenuation = 36 }; // -36 dBm0
enum { kMinTelephoneEventSeparationMs = 100 }; // Min delta time between two
// telephone events
enum { EcAec = 0 }; // AEC mode
enum { EcAecm = 1 }; // AECM mode
enum { kVoiceEngineMaxIpPacketSizeBytes = 1500 }; // assumes Ethernet
enum { kVoiceEngineMaxModuleVersionSize = 960 };
// Base
enum { kVoiceEngineVersionMaxMessageSize = 1024 };
// Encryption
// SRTP uses 30 bytes key length
enum { kVoiceEngineMaxSrtpKeyLength = 30 };
// SRTP minimum key/tag length for encryption level
enum { kVoiceEngineMinSrtpEncryptLength = 16 };
// SRTP maximum key/tag length for encryption level
enum { kVoiceEngineMaxSrtpEncryptLength = 256 };
// SRTP maximum key/tag length for authentication level,
// HMAC SHA1 authentication type
enum { kVoiceEngineMaxSrtpAuthSha1Length = 20 };
// SRTP maximum tag length for authentication level,
// null authentication type
enum { kVoiceEngineMaxSrtpTagAuthNullLength = 12 };
// SRTP maximum key length for authentication level,
// null authentication type
enum { kVoiceEngineMaxSrtpKeyAuthNullLength = 256 };
// Audio processing
enum { kVoiceEngineAudioProcessingDeviceSampleRateHz = 48000 };
// Codec
// Min init target rate for iSAC-wb
enum { kVoiceEngineMinIsacInitTargetRateBpsWb = 10000 };
// Max init target rate for iSAC-wb
enum { kVoiceEngineMaxIsacInitTargetRateBpsWb = 32000 };
// Min init target rate for iSAC-swb
enum { kVoiceEngineMinIsacInitTargetRateBpsSwb = 10000 };
// Max init target rate for iSAC-swb
enum { kVoiceEngineMaxIsacInitTargetRateBpsSwb = 56000 };
// Lowest max rate for iSAC-wb
enum { kVoiceEngineMinIsacMaxRateBpsWb = 32000 };
// Highest max rate for iSAC-wb
enum { kVoiceEngineMaxIsacMaxRateBpsWb = 53400 };
// Lowest max rate for iSAC-swb
enum { kVoiceEngineMinIsacMaxRateBpsSwb = 32000 };
// Highest max rate for iSAC-swb
enum { kVoiceEngineMaxIsacMaxRateBpsSwb = 107000 };
// Lowest max payload size for iSAC-wb
enum { kVoiceEngineMinIsacMaxPayloadSizeBytesWb = 120 };
// Highest max payload size for iSAC-wb
enum { kVoiceEngineMaxIsacMaxPayloadSizeBytesWb = 400 };
// Lowest max payload size for iSAC-swb
enum { kVoiceEngineMinIsacMaxPayloadSizeBytesSwb = 120 };
// Highest max payload size for iSAC-swb
enum { kVoiceEngineMaxIsacMaxPayloadSizeBytesSwb = 600 };
// VideoSync
// Lowest minimum playout delay
enum { kVoiceEngineMinMinPlayoutDelayMs = 0 };
// Highest minimum playout delay
enum { kVoiceEngineMaxMinPlayoutDelayMs = 1000 };
// Network
// Min packet-timeout time for received RTP packets
enum { kVoiceEngineMinPacketTimeoutSec = 1 };
// Max packet-timeout time for received RTP packets
enum { kVoiceEngineMaxPacketTimeoutSec = 150 };
// Min sample time for dead-or-alive detection
enum { kVoiceEngineMinSampleTimeSec = 1 };
// Max sample time for dead-or-alive detection
enum { kVoiceEngineMaxSampleTimeSec = 150 };
// RTP/RTCP
// Min 4-bit ID for RTP extension (see section 4.2 in RFC 5285)
enum { kVoiceEngineMinRtpExtensionId = 1 };
// Max 4-bit ID for RTP extension
enum { kVoiceEngineMaxRtpExtensionId = 14 };
} // namespace webrtc
#define WEBRTC_AUDIO_PROCESSING_OFF false
#define WEBRTC_VOICE_ENGINE_HP_DEFAULT_STATE true
// AudioProcessing HP is ON
#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing NS off
#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE true
// AudioProcessing AGC on
#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing EC off
#define WEBRTC_VOICE_ENGINE_LEVEL_ESTIMATOR_DEFAULT_STATE \
WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing level estimator off
#define WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing VAD off
#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing RX AGC off
#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing RX NS off
#define WEBRTC_VOICE_ENGINE_RX_HP_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing RX High Pass Filter off
#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE NoiseSuppression::kModerate
// AudioProcessing NS moderate suppression
#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE GainControl::kAdaptiveAnalog
// AudioProcessing AGC adaptive analog
#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE EcAec
// AudioProcessing EC AEC
#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_MODE GainControl::kAdaptiveDigital
// AudioProcessing RX AGC mode
#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE NoiseSuppression::kModerate
// AudioProcessing RX NS mode
// Macros
// Comparison of two strings without regard to case
#define STR_CASE_CMP(x,y) ::_stricmp(x,y)
// Compares characters of two strings without regard to case
#define STR_NCASE_CMP(x,y,n) ::_strnicmp(x,y,n)
// ----------------------------------------------------------------------------
// Build information macros
// ----------------------------------------------------------------------------
#if defined(_DEBUG)
#define BUILDMODE "d"
#elif defined(DEBUG)
#define BUILDMODE "d"
#elif defined(NDEBUG)
#define BUILDMODE "r"
#else
#define BUILDMODE "?"
#endif
#define BUILDTIME __TIME__
#define BUILDDATE __DATE__
// Example: "Oct 10 2002 12:05:30 r"
#define BUILDINFO BUILDDATE " " BUILDTIME " " BUILDMODE
// ----------------------------------------------------------------------------
// Macros
// ----------------------------------------------------------------------------
#if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
#include <windows.h>
#include <stdio.h>
#define DEBUG_PRINT(...) \
{ \
char msg[256]; \
sprintf(msg, __VA_ARGS__); \
OutputDebugStringA(msg); \
}
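// Note: DEBUG_PRINT() formats into a fixed 256-byte stack buffer, so very
// long messages would overflow it; the macro is only active in Windows
// debug builds (see the guard above).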
#else
// special fix for Visual Studio 2003
#define DEBUG_PRINT(exp) ((void)0)
#endif // defined(_DEBUG) && defined(_WIN32)
#define CHECK_CHANNEL(channel) if (CheckChannel(channel) == -1) return -1;
// ----------------------------------------------------------------------------
// Default Trace filter
// ----------------------------------------------------------------------------
#define WEBRTC_VOICE_ENGINE_DEFAULT_TRACE_FILTER \
kTraceStateInfo | kTraceWarning | kTraceError | kTraceCritical | \
kTraceApiCall
// ----------------------------------------------------------------------------
// Inline functions
// ----------------------------------------------------------------------------
namespace webrtc
{
inline int VoEId(const int veId, const int chId)
{
if (chId == -1)
{
const int dummyChannel(99);
return (int) ((veId << 16) + dummyChannel);
}
return (int) ((veId << 16) + chId);
}
inline int VoEModuleId(const int veId, const int chId)
{
return (int) ((veId << 16) + chId);
}
// Convert module ID to internal VoE channel ID
inline int VoEChannelId(const int moduleId)
{
return (int) (moduleId & 0xffff);
}
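// Example of the ID packing above: VoEModuleId(1, 2) == (1 << 16) + 2
// == 0x00010002, and VoEChannelId(0x00010002) == 2. VoEId() substitutes
// the dummy channel 99 when a trace has no associated channel.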
} // namespace webrtc
// ----------------------------------------------------------------------------
// Platform settings
// ----------------------------------------------------------------------------
// *** WINDOWS ***
#if defined(_WIN32)
#pragma comment( lib, "winmm.lib" )
#ifndef WEBRTC_EXTERNAL_TRANSPORT
#pragma comment( lib, "ws2_32.lib" )
#endif
// ----------------------------------------------------------------------------
// Enumerators
// ----------------------------------------------------------------------------
namespace webrtc
{
// Max number of supported channels
enum { kVoiceEngineMaxNumOfChannels = 32 };
// Max number of channels which can be played out simultaneously
enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
} // namespace webrtc
// ----------------------------------------------------------------------------
// Defines
// ----------------------------------------------------------------------------
#include <windows.h>
#include <mmsystem.h> // timeGetTime
#define GET_TIME_IN_MS() ::timeGetTime()
#define SLEEP(x) ::Sleep(x)
// Comparison of two strings without regard to case
#define STR_CASE_CMP(x,y) ::_stricmp(x,y)
// Compares characters of two strings without regard to case
#define STR_NCASE_CMP(x,y,n) ::_strnicmp(x,y,n)
// Default device for Windows PC
#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \
AudioDeviceModule::kDefaultCommunicationDevice
#endif // defined(_WIN32)
// *** LINUX ***
#ifdef WEBRTC_LINUX
#include <pthread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#ifndef QNX
#include <linux/net.h>
#ifndef ANDROID
#include <sys/soundcard.h>
#endif // ANDROID
#endif // QNX
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <time.h>
#include <sys/time.h>
#define DWORD unsigned long int
#define WINAPI
#define LPVOID void *
#define FALSE 0
#define TRUE 1
#define UINT unsigned int
#define UCHAR unsigned char
#define TCHAR char
#ifdef QNX
#define _stricmp stricmp
#else
#define _stricmp strcasecmp
#endif
#define GetLastError() errno
#define WSAGetLastError() errno
#define LPCTSTR const char*
#define LPCSTR const char*
#define wsprintf sprintf
#define TEXT(a) a
#define _ftprintf fprintf
#define _tcslen strlen
#define FAR
#define __cdecl
#define LPSOCKADDR struct sockaddr *
namespace
{
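// Minimal shims for the Win32 calls used by the macros below: Sleep() is
// emulated with nanosleep() and timeGetTime() with gettimeofday(), both at
// millisecond resolution.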
void Sleep(unsigned long x)
{
timespec t;
t.tv_sec = x/1000;
t.tv_nsec = (x-(x/1000)*1000)*1000000;
nanosleep(&t,NULL);
}
DWORD timeGetTime()
{
struct timeval tv;
struct timezone tz;
unsigned long val;
gettimeofday(&tv, &tz);
val= tv.tv_sec*1000+ tv.tv_usec/1000;
return(val);
}
}
#define SLEEP(x) ::Sleep(x)
#define GET_TIME_IN_MS timeGetTime
// Default device for Linux and Android
#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
#ifdef ANDROID
// ----------------------------------------------------------------------------
// Enumerators
// ----------------------------------------------------------------------------
namespace webrtc
{
// Max number of supported channels
enum { kVoiceEngineMaxNumOfChannels = 2 };
// Max number of channels which can be played out simultaneously
enum { kVoiceEngineMaxNumOfActiveChannels = 2 };
} // namespace webrtc
// ----------------------------------------------------------------------------
// Defines
// ----------------------------------------------------------------------------
// Always excluded for Android builds
#undef WEBRTC_CODEC_ISAC
#undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
#undef WEBRTC_CONFERENCING
#undef WEBRTC_TYPING_DETECTION
// Default audio processing states
#undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
#undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
#undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// Default audio processing modes
#undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
#undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
#undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE
#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE \
NoiseSuppression::kModerate
#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
GainControl::kAdaptiveDigital
#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE EcAecm
#define ANDROID_NOT_SUPPORTED() \
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, \
"API call not supported"); \
return -1;
#else // LINUX PC
// ----------------------------------------------------------------------------
// Enumerators
// ----------------------------------------------------------------------------
namespace webrtc
{
// Max number of supported channels
enum { kVoiceEngineMaxNumOfChannels = 32 };
// Max number of channels which can be played out simultaneously
enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
} // namespace webrtc
// ----------------------------------------------------------------------------
// Defines
// ----------------------------------------------------------------------------
#define ANDROID_NOT_SUPPORTED()
#endif // ANDROID - LINUX PC
#else
#define ANDROID_NOT_SUPPORTED()
#endif // #ifdef WEBRTC_LINUX
// *** WEBRTC_MAC ***
// including iPhone
#ifdef WEBRTC_MAC
#include <pthread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <sys/time.h>
#include <time.h>
#include <AudioUnit/AudioUnit.h>
#if !defined(MAC_IPHONE) && !defined(MAC_IPHONE_SIM)
#include <CoreServices/CoreServices.h>
#include <CoreAudio/CoreAudio.h>
#include <AudioToolbox/DefaultAudioOutput.h>
#include <AudioToolbox/AudioConverter.h>
#include <CoreAudio/HostTime.h>
#endif
#define DWORD unsigned long int
#define WINAPI
#define LPVOID void *
#define FALSE 0
#define TRUE 1
#define SOCKADDR_IN struct sockaddr_in
#define UINT unsigned int
#define UCHAR unsigned char
#define TCHAR char
#define _stricmp strcasecmp
#define GetLastError() errno
#define WSAGetLastError() errno
#define LPCTSTR const char*
#define wsprintf sprintf
#define TEXT(a) a
#define _ftprintf fprintf
#define _tcslen strlen
#define FAR
#define __cdecl
#define LPSOCKADDR struct sockaddr *
#define LPCSTR const char*
#define ULONG unsigned long
namespace
{
void Sleep(unsigned long x)
{
timespec t;
t.tv_sec = x/1000;
t.tv_nsec = (x-(x/1000)*1000)*1000000;
nanosleep(&t,NULL);
}
DWORD WebRtcTimeGetTime()
{
struct timeval tv;
struct timezone tz;
unsigned long val;
gettimeofday(&tv, &tz);
val= tv.tv_sec*1000+ tv.tv_usec/1000;
return(val);
}
}
#define SLEEP(x) ::Sleep(x)
#define GET_TIME_IN_MS WebRtcTimeGetTime
// Default device for Mac and iPhone
#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
// iPhone specific
#if defined(MAC_IPHONE) || defined(MAC_IPHONE_SIM)
// ----------------------------------------------------------------------------
// Enumerators
// ----------------------------------------------------------------------------
namespace webrtc
{
// Max number of supported channels
enum { kVoiceEngineMaxNumOfChannels = 2 };
// Max number of channels which can be played out simultaneously
enum { kVoiceEngineMaxNumOfActiveChannels = 2 };
} // namespace webrtc
// ----------------------------------------------------------------------------
// Defines
// ----------------------------------------------------------------------------
// Always excluded for iPhone builds
#undef WEBRTC_CODEC_ISAC
#undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
#undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
#undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
#undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
#undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
#undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
#undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE
#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE \
NoiseSuppression::kModerate
#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
GainControl::kAdaptiveDigital
#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE EcAecm
#define IPHONE_NOT_SUPPORTED() \
_engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, \
"API call not supported"); \
return -1;
#else // Non-iPhone
// ----------------------------------------------------------------------------
// Enumerators
// ----------------------------------------------------------------------------
namespace webrtc
{
// Max number of supported channels
enum { kVoiceEngineMaxNumOfChannels = 32 };
// Max number of channels which can be played out simultaneously
enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
} // namespace webrtc
// ----------------------------------------------------------------------------
// Defines
// ----------------------------------------------------------------------------
#define IPHONE_NOT_SUPPORTED()
#endif
#else
#define IPHONE_NOT_SUPPORTED()
#endif // #ifdef WEBRTC_MAC
#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H

View File

@ -1,310 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "voice_engine_impl.h"
#include "trace.h"
#ifdef ANDROID
#include "audio_device.h" // SetAndroidObjects
#endif
namespace webrtc
{
// Counter to ensure that we can add a correct ID to all static trace
// methods. It is not the nicest solution, especially not since we already
// have a counter in VoEBaseImpl. In other words, there is room for
// improvement here.
static WebRtc_Word32 gVoiceEngineInstanceCounter = 0;
extern "C"
{
WEBRTC_DLLEXPORT VoiceEngine* GetVoiceEngine();
VoiceEngine* GetVoiceEngine()
{
VoiceEngineImpl* self = new VoiceEngineImpl();
VoiceEngine* ve = reinterpret_cast<VoiceEngine*> (self);
if (ve != NULL)
{
gVoiceEngineInstanceCounter++;
}
return ve;
}
} // extern "C"
VoiceEngine* VoiceEngine::Create()
{
#if (defined _WIN32)
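    // Test hook: if VoiceEngineTestingDynamic.dll is present, delegate
    // creation to its exported GetVoiceEngine() instead of the built-in
    // factory below.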
HMODULE hmod_ = LoadLibrary(TEXT("VoiceEngineTestingDynamic.dll"));
if (hmod_)
{
typedef VoiceEngine* (*PfnGetVoiceEngine)(void);
PfnGetVoiceEngine pfn = (PfnGetVoiceEngine)GetProcAddress(
hmod_,"GetVoiceEngine");
if (pfn)
{
VoiceEngine* self = pfn();
return (self);
}
}
#endif
return GetVoiceEngine();
}
int VoiceEngine::SetTraceFilter(const unsigned int filter)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
VoEId(gVoiceEngineInstanceCounter, -1),
"SetTraceFilter(filter=0x%x)", filter);
// Remember old filter
WebRtc_UWord32 oldFilter = 0;
Trace::LevelFilter(oldFilter);
// Set new filter
WebRtc_Word32 ret = Trace::SetLevelFilter(filter);
// If previous log was ignored, log again after changing filter
if (kTraceNone == oldFilter)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
"SetTraceFilter(filter=0x%x)", filter);
}
return (ret);
}
int VoiceEngine::SetTraceFile(const char* fileNameUTF8,
const bool addFileCounter)
{
int ret = Trace::SetTraceFile(fileNameUTF8, addFileCounter);
WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
VoEId(gVoiceEngineInstanceCounter, -1),
"SetTraceFile(fileNameUTF8=%s, addFileCounter=%d)",
fileNameUTF8, addFileCounter);
return (ret);
}
int VoiceEngine::SetTraceCallback(TraceCallback* callback)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
VoEId(gVoiceEngineInstanceCounter, -1),
"SetTraceCallback(callback=0x%x)", callback);
return (Trace::SetTraceCallback(callback));
}
bool VoiceEngine::Delete(VoiceEngine*& voiceEngine, bool ignoreRefCounters)
{
if (voiceEngine == NULL)
{
return false;
}
VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
VoEBaseImpl* base = s;
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
"VoiceEngine::Delete(voiceEngine=0x%p, ignoreRefCounters=%d)",
voiceEngine, ignoreRefCounters);
if (!ignoreRefCounters)
{
if (base->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEBase reference counter is %d => memory will not "
"be released properly!", base->GetCount());
return false;
}
#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
VoECodecImpl* codec = s;
if (codec->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoECodec reference counter is %d => memory will not "
"be released properly!", codec->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
VoEDtmfImpl* dtmf = s;
if (dtmf->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEDtmf reference counter is %d =>"
"memory will not be released properly!",
dtmf->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
VoEEncryptionImpl* encrypt = s;
if (encrypt->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEEncryption reference counter is %d => "
"memory will not be released properly!",
encrypt->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
VoEExternalMediaImpl* extmedia = s;
if (extmedia->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEExternalMedia reference counter is %d => "
"memory will not be released properly!",
extmedia->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
VoECallReportImpl* report = s;
if (report->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoECallReport reference counter is %d => memory "
"will not be released properly!",
report->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_FILE_API
VoEFileImpl* file = s;
if (file->GetCount() != 0)
{
WEBRTC_TRACE(
kTraceCritical,
kTraceVoice,
-1,
"VoEFile reference counter is %d => memory will not "
"be released properly!",
file->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
VoEHardwareImpl* hware = s;
if (hware->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEHardware reference counter is %d => memory will "
"not be released properly!", hware->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
VoENetEqStatsImpl* neteqst = s;
if (neteqst->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoENetEqStats reference counter is %d => "
"memory will not be released properly!",
neteqst->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
VoENetworkImpl* netw = s;
if (netw->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoENetworkImpl reference counter is %d => memory "
"will not be released properly!", netw->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
VoERTP_RTCPImpl* rtcp = s;
if (rtcp->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoERTP_RTCP reference counter is %d =>"
"memory will not be released properly!",
rtcp->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
VoEVideoSyncImpl* vsync = s;
if (vsync->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEVideoSync reference counter is %d => "
"memory will not be released properly!",
vsync->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
VoEVolumeControlImpl* volume = s;
if (volume->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEVolumeControl reference counter is %d =>"
"memory will not be released properly!",
volume->GetCount());
return false;
}
#endif
#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
VoEAudioProcessingImpl* apm = s;
if (apm->GetCount() != 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
"VoEAudioProcessing reference counter is %d => "
"memory will not be released properly!",
apm->GetCount());
return false;
}
#endif
WEBRTC_TRACE(kTraceInfo, kTraceVoice, -1,
"all reference counters are zero => deleting the "
"VoiceEngine instance...");
} // if (!ignoreRefCounters)
else
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, -1,
"reference counters are ignored => deleting the "
"VoiceEngine instance...");
}
delete s;
voiceEngine = NULL;
return true;
}
int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
{
#ifdef ANDROID
return AudioDeviceModule::SetAndroidObjects(javaVM, env, context);
#else
return -1;
#endif
}
} // namespace webrtc

View File

@ -1,113 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
#include "engine_configurations.h"
#include "voe_base_impl.h"
#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
#include "voe_audio_processing_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
#include "voe_call_report_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
#include "voe_codec_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
#include "voe_dtmf_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
#include "voe_encryption_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
#include "voe_external_media_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_FILE_API
#include "voe_file_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
#include "voe_hardware_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
#include "voe_neteq_stats_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
#include "voe_network_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
#include "voe_rtp_rtcp_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
#include "voe_video_sync_impl.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
#include "voe_volume_control_impl.h"
#endif
namespace webrtc
{
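// VoiceEngineImpl aggregates every compiled-in sub-API implementation via
// multiple inheritance. The per-interface GetInterface() factories rely on
// this layout: they reinterpret_cast the opaque VoiceEngine* back to a
// VoiceEngineImpl* and upcast it to the requested sub-API base class.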
class VoiceEngineImpl :
#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
public VoEAudioProcessingImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
public VoECallReportImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
public VoECodecImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
public VoEDtmfImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
public VoEEncryptionImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
public VoEExternalMediaImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_FILE_API
public VoEFileImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
public VoEHardwareImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
public VoENetEqStatsImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
public VoENetworkImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
public VoERTP_RTCPImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
public VoEVideoSyncImpl,
#endif
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
public VoEVolumeControlImpl,
#endif
public VoEBaseImpl
{
public:
VoiceEngineImpl()
{
};
virtual ~VoiceEngineImpl()
{
};
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H

View File

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
<classpathentry kind="src" path="src"/>
<classpathentry kind="src" path="gen"/>
<classpathentry kind="output" path="bin"/>
</classpath>

View File

@ -1,33 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>AndroidTest</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.ApkBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>com.android.ide.eclipse.adt.AndroidNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>

View File

@ -1,30 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
<!-- -->
<!-- Use of this source code is governed by a BSD-style license -->
<!-- that can be found in the LICENSE file in the root of the source -->
<!-- tree. An additional intellectual property rights grant can be found -->
<!-- in the file PATENTS. All contributing project authors may -->
<!-- be found in the AUTHORS file in the root of the source tree. -->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
android:versionCode="1"
android:versionName="1.0" package="org.webrtc.voiceengine.test">
<application android:icon="@drawable/icon" android:label="@string/app_name" android:debuggable="true">
<activity android:name=".AndroidTest"
android:label="@string/app_name"
android:screenOrientation="portrait">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
<uses-sdk android:minSdkVersion="3" />
<uses-permission android:name="android.permission.MODIFY_AUDIO_SETTINGS" />
<uses-permission android:name="android.permission.RECORD_AUDIO" />
<uses-permission android:name="android.permission.INTERNET" />
</manifest>

View File

@ -1,11 +0,0 @@
# This file is automatically generated by Android Tools.
# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
#
# This file must be checked in Version Control Systems.
#
# To customize properties used by the Ant build system use,
# "build.properties", and override values to adapt the script to your
# project structure.
# Project target.
target=android-3

View File

@ -1,30 +0,0 @@
/* AUTO-GENERATED FILE. DO NOT MODIFY.
*
* This class was automatically generated by the
* aapt tool from the resource data it found. It
* should not be modified by hand.
*/
package org.webrtc.voiceengine.test;
public final class R {
public static final class attr {
}
public static final class drawable {
public static final int icon=0x7f020000;
}
public static final class id {
public static final int Button01=0x7f050002;
public static final int Button02=0x7f050005;
public static final int EditText01=0x7f050001;
public static final int Spinner01=0x7f050003;
public static final int Spinner02=0x7f050004;
public static final int TextView01=0x7f050000;
}
public static final class layout {
public static final int main=0x7f030000;
}
public static final class string {
public static final int app_name=0x7f040000;
}
}
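The hex constants aapt emits follow Android's standard resource-id layout 0xPPTTEEEE (package, type, entry), with application resources in package 0x7f. A small standalone C++ sketch, purely illustrative, that decodes one of the ids above:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t id = 0x7f050002;  // R.id.Button01 from the class above
    // Split into package / type / entry fields.
    std::printf("package=0x%02x type=0x%02x entry=0x%04x\n",
                id >> 24, (id >> 16) & 0xff, id & 0xffff);
    return 0;  // prints: package=0x7f type=0x05 entry=0x0002
}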

View File

@@ -1,29 +0,0 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# Android makefile for webrtc VoiceEngine Java API wrapper
# This setup assumes two libs built outside Android makefile structure.
LOCAL_PATH := $(call my-dir)
WEBRTC_INTERFACES_PATH := $(LOCAL_PATH)/../../../../../../../../build/interface
WEBRTC_LIBS_PATH := $(LOCAL_PATH)/../../../../../../../../build/libraries
WEBRTC_AUTO_TEST_PATH := $(LOCAL_PATH)/../../../auto_test
include $(CLEAR_VARS)
LOCAL_MODULE := android_test
LOCAL_SRC_FILES := android_test.cc
LOCAL_CFLAGS := -DWEBRTC_TARGET_PC # For typedefs.h
LOCAL_C_INCLUDES := $(WEBRTC_INTERFACES_PATH) $(WEBRTC_AUTO_TEST_PATH)
LOCAL_LDLIBS := \
$(WEBRTC_LIBS_PATH)/VoiceEngine_android_gcc.a \
$(WEBRTC_AUTO_TEST_PATH)/auto_test_android_gcc.a \
-llog -lgcc
include $(BUILD_SHARED_LIBRARY)

File diff suppressed because it is too large.

View File

@@ -1,261 +0,0 @@
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_webrtc_voiceengine_test_AndroidTest */
#ifndef _Included_org_webrtc_voiceengine_test_AndroidTest
#define _Included_org_webrtc_voiceengine_test_AndroidTest
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: NativeInit
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NativeInit
(JNIEnv *, jclass);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: RunAutoTest
* Signature: (II)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_RunAutoTest
(JNIEnv *, jobject, jint, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: Create
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Create
(JNIEnv *, jobject);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: Delete
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Delete
(JNIEnv *, jobject);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: Authenticate
* Signature: (Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Authenticate
(JNIEnv *, jobject, jstring);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: Init
* Signature: (IIIZZ)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Init
(JNIEnv *, jobject, jint, jint, jint, jboolean, jboolean);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: Terminate
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Terminate
(JNIEnv *, jobject);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: CreateChannel
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_CreateChannel
(JNIEnv *, jobject);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: DeleteChannel
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_DeleteChannel
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetLocalReceiver
* Signature: (II)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetLocalReceiver
(JNIEnv *, jobject, jint, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetSendDestination
* Signature: (IILjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSendDestination
(JNIEnv *, jobject, jint, jint, jstring);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StartListen
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartListen
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StartPlayout
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayout
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StartSend
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartSend
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StopListen
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopListen
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StopPlayout
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayout
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StopSend
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopSend
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StartPlayingFileLocally
* Signature: (ILjava/lang/String;Z)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileLocally
(JNIEnv *, jobject, jint, jstring, jboolean);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StopPlayingFileLocally
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileLocally
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StartRecordingPlayout
* Signature: (ILjava/lang/String;Z)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartRecordingPlayout
(JNIEnv *, jobject, jint, jstring, jboolean);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StopRecordingPlayout
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopRecordingPlayout
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StartPlayingFileAsMicrophone
* Signature: (ILjava/lang/String;Z)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileAsMicrophone
(JNIEnv *, jobject, jint, jstring, jboolean);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: StopPlayingFileAsMicrophone
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileAsMicrophone
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: NumOfCodecs
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NumOfCodecs
(JNIEnv *, jobject);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetSendCodec
* Signature: (II)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSendCodec
(JNIEnv *, jobject, jint, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetVADStatus
* Signature: (IZI)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetVADStatus
(JNIEnv *, jobject, jint, jboolean, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetNSStatus
* Signature: (ZI)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetNSStatus
(JNIEnv *, jobject, jboolean, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetAGCStatus
* Signature: (ZI)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetAGCStatus
(JNIEnv *, jobject, jboolean, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetECStatus
* Signature: (ZI)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetECStatus
(JNIEnv *, jobject, jboolean, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetSpeakerVolume
* Signature: (I)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSpeakerVolume
(JNIEnv *, jobject, jint);
/*
* Class: org_webrtc_voiceengine_test_AndroidTest
* Method: SetLoudspeakerStatus
* Signature: (Z)I
*/
JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetLoudspeakerStatus
(JNIEnv *, jobject, jboolean);
#ifdef __cplusplus
}
#endif
#endif
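Each declaration above pairs with a definition in the test's native library (presumably the suppressed android_test.cc diff earlier). A minimal, hedged sketch of what the Create export's definition might look like; VoiceEngine::Create() is the factory from the public API, everything else here is illustrative:

/* Sketch only; error handling and the engine's later use are omitted. */
#include <jni.h>
#include "voe_base.h"

static webrtc::VoiceEngine* ve = NULL;

JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Create
  (JNIEnv*, jobject)
{
    ve = webrtc::VoiceEngine::Create();
    return (ve != NULL) ? JNI_TRUE : JNI_FALSE;
}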

Binary file not shown (3.1 KiB before deletion).

View File

@@ -1,22 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
<!-- -->
<!-- Use of this source code is governed by a BSD-style license -->
<!-- that can be found in the LICENSE file in the root of the source -->
<!-- tree. An additional intellectual property rights grant can be found -->
<!-- in the file PATENTS. All contributing project authors may -->
<!-- be found in the AUTHORS file in the root of the source tree. -->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:orientation="vertical"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
>
<TextView android:text="@+id/TextView01" android:id="@+id/TextView01" android:layout_width="wrap_content" android:layout_height="wrap_content"></TextView>
<EditText android:text="@+id/EditText01" android:id="@+id/EditText01" android:layout_width="wrap_content" android:layout_height="wrap_content"></EditText>
<Button android:text="@+id/Button01" android:id="@+id/Button01" android:layout_width="wrap_content" android:layout_height="wrap_content"></Button>
<Spinner android:id="@+id/Spinner01" android:layout_width="wrap_content" android:layout_height="wrap_content"></Spinner>
<Spinner android:id="@+id/Spinner02" android:layout_width="wrap_content" android:layout_height="wrap_content"></Spinner>
<Button android:text="@+id/Button02" android:id="@+id/Button02" android:layout_width="wrap_content" android:layout_height="wrap_content"></Button>
</LinearLayout>

View File

@@ -1,13 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
<!-- -->
<!-- Use of this source code is governed by a BSD-style license -->
<!-- that can be found in the LICENSE file in the root of the source -->
<!-- tree. An additional intellectual property rights grant can be found -->
<!-- in the file PATENTS. All contributing project authors may -->
<!-- be found in the AUTHORS file in the root of the source tree. -->
<resources>
<string name="app_name">WebRtc VoiceEngine Android Test</string>
</resources>

View File

@@ -1,47 +0,0 @@
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
LOCAL_PATH:= $(call my-dir)
# voice engine test app
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := tests
LOCAL_CPP_EXTENSION := .cc
LOCAL_SRC_FILES:= \
voe_cpu_test.cc \
voe_standard_test.cc \
voe_stress_test.cc \
voe_unit_test.cc \
voe_extended_test.cc
# Flags passed to both C and C++ files.
LOCAL_CFLAGS := \
'-DWEBRTC_TARGET_PC' \
'-DWEBRTC_LINUX' \
'-DWEBRTC_ANDROID' \
'-DANDROID'
LOCAL_CPPFLAGS :=
LOCAL_LDFLAGS :=
LOCAL_C_INCLUDES := \
external/gtest/include \
$(LOCAL_PATH)/../../interface \
$(LOCAL_PATH)/../../../.. \
$(LOCAL_PATH)/../../../../system_wrappers/interface
LOCAL_STATIC_LIBRARIES :=
LOCAL_SHARED_LIBRARIES := \
libutils \
libstlport \
libwebrtc
LOCAL_MODULE:= webrtc_voe_autotest
include external/stlport/libstlport.mk
include $(BUILD_EXECUTABLE)

View File

@@ -1,106 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cassert>
#if defined(_WIN32)
#include <conio.h>
#endif
#include "voe_cpu_test.h"
using namespace webrtc;
namespace voetest {
#ifdef MAC_IPHONE
extern char micFile[256];
#else
extern const char* micFile;
#endif
#define CHECK(expr) \
if (expr) \
{ \
printf("Error at line: %i, %s \n", __LINE__, #expr); \
printf("Error code: %i \n", base->LastError()); \
PAUSE \
return -1; \
}
extern char* GetFilename(char* filename);
extern const char* GetFilename(const char* filename);
extern int GetResource(char* resource, char* dest, int destLen);
extern char* GetResource(char* resource);
extern const char* GetResource(const char* resource);
VoECpuTest::VoECpuTest(VoETestManager& mgr) :
_mgr(mgr)
{
}
int VoECpuTest::DoTest()
{
printf("------------------------------------------------\n");
printf(" CPU Reference Test\n");
printf("------------------------------------------------\n");
VoEBase* base = _mgr.BasePtr();
VoEFile* file = _mgr.FilePtr();
VoECodec* codec = _mgr.CodecPtr();
VoEAudioProcessing* apm = _mgr.APMPtr();
int channel(-1);
CodecInst isac;
isac.pltype = 104;
strcpy(isac.plname, "ISAC");
isac.pacsize = 960;
isac.plfreq = 32000;
isac.channels = 1;
isac.rate = -1;
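// (illustrative note: rate -1 selects iSAC's default/adaptive bit rate;
// 960 samples at 32 kHz corresponds to 30 ms frames)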
CHECK(base->Init());
channel = base->CreateChannel();
CHECK(base->SetLocalReceiver(channel, 5566));
CHECK(base->SetSendDestination(channel, 5566, "127.0.0.1"));
CHECK(codec->SetRecPayloadType(channel, isac));
CHECK(codec->SetSendCodec(channel, isac));
CHECK(base->StartReceive(channel));
CHECK(base->StartPlayout(channel));
CHECK(base->StartSend(channel));
CHECK(file->StartPlayingFileAsMicrophone(channel, micFile, true, true));
CHECK(codec->SetVADStatus(channel, true));
CHECK(apm->SetAgcStatus(true, kAgcAdaptiveAnalog));
CHECK(apm->SetNsStatus(true, kNsModerateSuppression));
CHECK(apm->SetEcStatus(true, kEcAec));
TEST_LOG("\nMeasure CPU and memory while running a full-duplex"
" iSAC-swb call.\n\n");
PAUSE
CHECK(base->StopSend(channel));
CHECK(base->StopPlayout(channel));
CHECK(base->StopReceive(channel));
base->DeleteChannel(channel);
CHECK(base->Terminate());
return 0;
}
} // namespace voetest

View File

@@ -1,32 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
#define WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
#include "voe_standard_test.h"
namespace voetest {
class VoETestManager;
class VoECpuTest
{
public:
VoECpuTest(VoETestManager& mgr);
~VoECpuTest() {};
int DoTest();
private:
VoETestManager& _mgr;
};
} // namespace voetest
#endif // WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
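A hedged sketch of a driver for this test, using only VoECpuTest's declared interface and the VoETestManager methods declared in voe_standard_test.h; the function name RunCpuTest is hypothetical:

#include "voe_cpu_test.h"

int RunCpuTest()
{
    voetest::VoETestManager mgr;
    mgr.GetInterfaces();               // acquire all sub-API pointers
    voetest::VoECpuTest cpuTest(mgr);
    const int result = cpuTest.DoTest();
    mgr.ReleaseInterfaces();           // refs must reach zero before shutdown
    return result;
}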

File diff suppressed because it is too large.

View File

@@ -1,140 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
#define WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
#include "voe_standard_test.h"
namespace voetest {
class VoETestManager;
// ----------------------------------------------------------------------------
// Transport
// ----------------------------------------------------------------------------
class ExtendedTestTransport : public Transport
{
public:
ExtendedTestTransport(VoENetwork* ptr);
~ExtendedTestTransport();
VoENetwork* myNetw;
protected:
virtual int SendPacket(int channel,const void *data,int len);
virtual int SendRTCPPacket(int channel, const void *data, int len);
private:
static bool Run(void* ptr);
bool Process();
private:
ThreadWrapper* _thread;
CriticalSectionWrapper* _lock;
EventWrapper* _event;
private:
unsigned char _packetBuffer[1612];
int _length;
int _channel;
};
class XTransport : public Transport
{
public:
XTransport(VoENetwork* netw, VoEFile* file);
VoENetwork* _netw;
VoEFile* _file;
public:
virtual int SendPacket(int channel, const void *data, int len);
virtual int SendRTCPPacket(int channel, const void *data, int len);
};
class XRTPObserver : public VoERTPObserver
{
public:
XRTPObserver();
~XRTPObserver();
virtual void OnIncomingCSRCChanged(const int channel,
const unsigned int CSRC,
const bool added);
virtual void OnIncomingSSRCChanged(const int channel,
const unsigned int SSRC);
public:
unsigned int _SSRC;
};
// ----------------------------------------------------------------------------
// VoEExtendedTest
// ----------------------------------------------------------------------------
class VoEExtendedTest : public VoiceEngineObserver,
public VoEConnectionObserver
{
public:
VoEExtendedTest(VoETestManager& mgr);
~VoEExtendedTest();
int PrepareTest(const char* str) const;
int TestPassed(const char* str) const;
int TestBase();
int TestCallReport();
int TestCodec();
int TestDtmf();
int TestEncryption();
int TestExternalMedia();
int TestFile();
int TestHardware();
int TestNetEqStats();
int TestNetwork();
int TestRTP_RTCP();
int TestVideoSync();
int TestVolumeControl();
int TestAPM();
public:
int ErrorCode() const
{
return _errCode;
}
void ClearErrorCode()
{
_errCode = 0;
}
protected:
// from VoiceEngineObserver
void CallbackOnError(const int errCode, const int channel);
void CallbackOnTrace(const TraceLevel level,
const char* message,
const int length);
protected:
// from VoEConnectionObserver
void OnPeriodicDeadOrAlive(const int channel, const bool alive);
private:
void Play(int channel,
unsigned int timeMillisec,
bool addFileAsMicrophone = false,
bool addTimeMarker = false);
void Sleep(unsigned int timeMillisec, bool addMarker = false);
void StartMedia(int channel,
int rtpPort,
bool listen,
bool playout,
bool send);
void StopMedia(int channel);
private:
VoETestManager& _mgr;
private:
int _errCode;
bool _alive;
bool _listening[32];
bool _playing[32];
bool _sending[32];
};
} // namespace voetest
#endif // WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
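ExtendedTestTransport and XTransport both implement webrtc::Transport so the test can short-circuit RTP/RTCP instead of using real sockets. A minimal loopback variant, assuming the ReceivedRTPPacket/ReceivedRTCPPacket external-transport entry points that VoENetwork exposed in this API generation:

// Sketch: feed every outgoing packet straight back into the engine.
class LoopbackTransport : public Transport
{
public:
    explicit LoopbackTransport(VoENetwork* netw) : _netw(netw) {}
    virtual int SendPacket(int channel, const void* data, int len)
    {
        return _netw->ReceivedRTPPacket(channel, data, len);
    }
    virtual int SendRTCPPacket(int channel, const void* data, int len)
    {
        return _netw->ReceivedRTCPPacket(channel, data, len);
    }
private:
    VoENetwork* _netw;
};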

File diff suppressed because it is too large.

View File

@@ -1,375 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
#define WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
#include "voe_test_defines.h"
#include "voe_test_interface.h"
#include "voe_errors.h"
#include "voe_base.h"
#include "voe_file.h"
#include "voe_dtmf.h"
#include "voe_rtp_rtcp.h"
#include "voe_audio_processing.h"
#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
#include "voe_call_report.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
#include "voe_codec.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
#include "voe_encryption.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
#include "voe_external_media.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
#include "voe_hardware.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
#include "voe_network.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
#include "voe_video_sync.h"
#endif
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
#include "voe_volume_control.h"
#endif
#ifdef _TEST_NETEQ_STATS_
namespace webrtc
{
class CriticalSectionWrapper;
class ThreadWrapper;
class VoENetEqStats;
}
#endif
#if defined(ANDROID)
extern char mobileLogMsg[640];
#endif
namespace voetest
{
void createSummary(VoiceEngine* ve);
void prepareDelivery();
class MyRTPObserver: public VoERTPObserver
{
public:
MyRTPObserver();
~MyRTPObserver();
virtual void OnIncomingCSRCChanged(const int channel,
const unsigned int CSRC,
const bool added);
virtual void OnIncomingSSRCChanged(const int channel,
const unsigned int SSRC);
void Reset();
public:
unsigned int _SSRC[2];
unsigned int _CSRC[2][2]; // stores 2 CSRCs for each channel
bool _added[2][2];
int _size[2];
};
class MyTraceCallback: public TraceCallback
{
public:
void Print(const TraceLevel level, const char *traceString,
const int length);
};
class MyDeadOrAlive: public VoEConnectionObserver
{
public:
void OnPeriodicDeadOrAlive(const int channel, const bool alive);
};
class ErrorObserver: public VoiceEngineObserver
{
public:
ErrorObserver();
void CallbackOnError(const int channel, const int errCode);
public:
int code;
};
class RtcpAppHandler: public VoERTCPObserver
{
public:
void OnApplicationDataReceived(const int channel,
const unsigned char subType,
const unsigned int name,
const unsigned char* data,
const unsigned short dataLengthInBytes);
void Reset();
~RtcpAppHandler()
{
};
unsigned short _lengthBytes;
unsigned char _data[256];
unsigned char _subType;
unsigned int _name;
};
class DtmfCallback: public VoETelephoneEventObserver
{
public:
int counter;
DtmfCallback()
{
counter = 0;
}
virtual void OnReceivedTelephoneEventInband(const int channel,
const unsigned char eventCode,
const bool endOfEvent)
{
char msg[128];
if (endOfEvent)
sprintf(msg, "(event=%d, [END])", eventCode);
else
sprintf(msg, "(event=%d, [START])", eventCode);
TEST_LOG("%s", msg);
if (!endOfEvent)
counter++; // count start of event only
fflush(NULL);
}
virtual void OnReceivedTelephoneEventOutOfBand(
const int channel,
const unsigned char eventCode,
const bool endOfEvent)
{
char msg[128];
if (endOfEvent)
sprintf(msg, "(event=%d, [END])", eventCode);
else
sprintf(msg, "(event=%d, [START])", eventCode);
TEST_LOG("%s", msg);
if (!endOfEvent)
counter++; // count start of event only
fflush(NULL);
}
};
class my_encryption: public Encryption
{
void encrypt(int channel_no, unsigned char * in_data,
unsigned char * out_data, int bytes_in, int * bytes_out);
void decrypt(int channel_no, unsigned char * in_data,
unsigned char * out_data, int bytes_in, int * bytes_out);
void encrypt_rtcp(int channel_no, unsigned char * in_data,
unsigned char * out_data, int bytes_in, int * bytes_out);
void decrypt_rtcp(int channel_no, unsigned char * in_data,
unsigned char * out_data, int bytes_in, int * bytes_out);
};
class RxCallback: public VoERxVadCallback
{
public:
RxCallback() :
_vadDecision(-1)
{
};
virtual void OnRxVad(int, int vadDecision)
{
char msg[128];
sprintf(msg, "RX VAD detected decision %d \n", vadDecision);
TEST_LOG("%s", msg);
_vadDecision = vadDecision;
}
int _vadDecision;
};
#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
class MyMedia: public VoEMediaProcess
{
public:
virtual void Process(const int channel, const ProcessingTypes type,
WebRtc_Word16 audio_10ms[], const int length,
const int samplingFreqHz, const bool stereo);
private:
int f;
};
#endif
class SubAPIManager
{
public:
SubAPIManager() :
_base(true),
_callReport(false),
_codec(false),
_dtmf(false),
_encryption(false),
_externalMedia(false),
_file(false),
_hardware(false),
_netEqStats(false),
_network(false),
_rtp_rtcp(false),
_videoSync(false),
_volumeControl(false),
_apm(false),
_xsel(XSEL_Invalid)
{
#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
_callReport = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
_codec = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
_dtmf = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
_encryption = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
_externalMedia = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_FILE_API
_file = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
_hardware = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
_netEqStats = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
_network = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
_rtp_rtcp = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
_videoSync = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
_volumeControl = true;
#endif
#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
_apm = true;
#endif
};
void DisplayStatus() const;
bool GetExtendedMenuSelection(ExtendedSelection& sel);
private:
bool _base, _callReport, _codec, _dtmf, _encryption;
bool _externalMedia, _file, _hardware;
bool _netEqStats, _network, _rtp_rtcp, _videoSync, _volumeControl, _apm;
ExtendedSelection _xsel;
};
class VoETestManager
{
public:
VoETestManager();
~VoETestManager();
void GetInterfaces();
int ReleaseInterfaces();
int DoStandardTest();
VoiceEngine* VoiceEnginePtr() const
{
return ve;
};
VoEBase* BasePtr() const
{
return base;
};
VoECodec* CodecPtr() const
{
return codec;
};
VoEVolumeControl* VolumeControlPtr() const
{
return volume;
};
VoEDtmf* DtmfPtr() const
{
return dtmf;
};
VoERTP_RTCP* RTP_RTCPPtr() const
{
return rtp_rtcp;
};
VoEAudioProcessing* APMPtr() const
{
return apm;
};
VoENetwork* NetworkPtr() const
{
return netw;
};
VoEFile* FilePtr() const
{
return file;
};
VoEHardware* HardwarePtr() const
{
return hardware;
};
VoEVideoSync* VideoSyncPtr() const
{
return vsync;
};
VoEEncryption* EncryptionPtr() const
{
return encrypt;
};
VoEExternalMedia* ExternalMediaPtr() const
{
return xmedia;
};
VoECallReport* CallReportPtr() const
{
return report;
};
#ifdef _TEST_NETEQ_STATS_
VoENetEqStats* NetEqStatsPtr() const
{
return neteqst;
};
#endif
private:
VoiceEngine* ve;
VoEBase* base;
VoECodec* codec;
VoEVolumeControl* volume;
VoEDtmf* dtmf;
VoERTP_RTCP* rtp_rtcp;
VoEAudioProcessing* apm;
VoENetwork* netw;
VoEFile* file;
VoEHardware* hardware;
VoEVideoSync* vsync;
VoEEncryption* encrypt;
VoEExternalMedia* xmedia;
VoECallReport* report;
#ifdef _TEST_NETEQ_STATS_
VoENetEqStats* neteqst;
#endif
int instanceCount;
};
} // namespace voetest
#endif // WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
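RxCallback above implements the receiving-side VAD observer from voe_audio_processing.h. Hooking it up would look roughly like the following; RegisterRxVadObserver/DeRegisterRxVadObserver are assumed from that sub-API of this era, and HookRxVad is a hypothetical helper:

void HookRxVad(voetest::VoETestManager& mgr, int channel)
{
    static voetest::RxCallback rxVad;           // must outlive the registration
    VoEAudioProcessing* apm = mgr.APMPtr();
    apm->RegisterRxVadObserver(channel, rxVad); // name assumed from this API era
    // ... run the call; OnRxVad() reports the 0/1 decision per frame ...
    apm->DeRegisterRxVadObserver(channel);
}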

View File

@@ -1,454 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Some ideas for improvement:
// Break out common init and maybe terminate to separate function(s).
// How much trace should we have enabled?
// API error counter, to print info and return -1 if any error.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cassert>
#if defined(_WIN32)
#include <conio.h>
#endif
#include "voe_stress_test.h"
#include "voe_standard_test.h"
#include "../../source/voice_engine_defines.h" // defines build macros
#include "thread_wrapper.h"
using namespace webrtc;
namespace voetest {
#define VALIDATE_STRESS(expr) \
if (expr) \
{ \
printf("Error at line: %i, %s \n", __LINE__, #expr); \
printf("Error code: %i \n", base->LastError()); \
}
#ifdef _WIN32
// Pause if supported
#define PAUSE_OR_SLEEP(x) PAUSE;
#else
// Sleep a bit instead if pause not supported
#define PAUSE_OR_SLEEP(x) SLEEP(x);
#endif
extern char* GetFilename(char* filename);
extern const char* GetFilename(const char* filename);
extern int GetResource(char* resource, char* dest, int destLen);
extern char* GetResource(char* resource);
extern const char* GetResource(const char* resource);
const char* VoEStressTest::_key = "====YUtFWRAAAAADBtIHgAAAAAEAAAAcAAAAAQBHU0ds"
"b2JhbCBJUCBTb3VuZAAC\nAAAAIwAAAExpY2Vuc2VkIHRvIE5vcnRlbCBOZXR3cm9rcwAAAAA"
"xAAAAZxZ7/u0M\niFYyTwSwko5Uutf7mh8S0O4rYZYTFidbzQeuGonuL17F/2oD/2pfDp3jL4"
"Rf3z/A\nnlJsEJgEtASkDNFuwLILjGY0pzjjAYQp3pCl6z6k2MtE06AirdjGLYCjENpq/opX"
"\nOrs3sIuwdYK5va/aFcsjBDmlsGCUM48RDYG9s23bIHYafXUC4ofOaubbZPWiPTmL\nEVJ8WH"
"4F9pgNjALc14oJXfON7r/3\n=EsLx";
int VoEStressTest::DoTest()
{
int test(-1);
while (test != 0)
{
test = MenuSelection();
switch (test)
{
case 0:
// Quit stress test
break;
case 1:
// All tests
StartStopTest();
CreateDeleteChannelsTest();
MultipleThreadsTest();
break;
case 2:
StartStopTest();
break;
case 3:
CreateDeleteChannelsTest();
break;
case 4:
MultipleThreadsTest();
break;
default:
// Should not be possible
printf("Invalid selection! (Test code error)\n");
assert(false);
} // switch
} // while
return 0;
}
int VoEStressTest::MenuSelection()
{
printf("------------------------------------------------\n");
printf("Select stress test\n\n");
printf(" (0) Quit\n");
printf(" (1) All\n");
printf("- - - - - - - - - - - - - - - - - - - - - - - - \n");
printf(" (2) Start/stop\n");
printf(" (3) Create/delete channels\n");
printf(" (4) Multiple threads\n");
const int maxMenuSelection = 4;
int selection(-1);
int dummy(0);
while ((selection < 0) || (selection > maxMenuSelection))
{
printf("\n: ");
dummy = scanf("%d", &selection);
if ((selection < 0) || (selection > maxMenuSelection))
{
printf("Invalid selection!\n");
}
}
return selection;
}
int VoEStressTest::StartStopTest()
{
printf("------------------------------------------------\n");
printf("Running start/stop test\n");
printf("------------------------------------------------\n");
printf("\nNOTE: this thest will fail after a while if Core audio is used\n");
printf("because MS returns AUDCLNT_E_CPUUSAGE_EXCEEDED (VoE Error 10013).\n");
// Get sub-API pointers
VoEBase* base = _mgr.BasePtr();
// Set trace
// VALIDATE_STRESS(base->SetTraceFileName(
// GetFilename("VoEStressTest_StartStop_trace.txt")));
// VALIDATE_STRESS(base->SetDebugTraceFileName(
// GetFilename("VoEStressTest_StartStop_trace_debug.txt")));
// VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
// kTraceWarning | kTraceError |
// kTraceCritical | kTraceApiCall |
// kTraceMemory | kTraceInfo));
VALIDATE_STRESS(base->Init());
VALIDATE_STRESS(base->CreateChannel());
///////////// Start test /////////////
int numberOfLoops(2000);
int loopSleep(200);
int i(0);
int markInterval(20);
printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
numberOfLoops, loopSleep, markInterval);
printf("Test will take approximately %d minutes. \n",
numberOfLoops*loopSleep/1000/60+1);
for (i=0; i<numberOfLoops; ++i)
{
VALIDATE_STRESS(base->SetLocalReceiver(0, 4800));
VALIDATE_STRESS(base->SetSendDestination(0, 4800, "127.0.0.1"));
VALIDATE_STRESS(base->StartReceive(0));
VALIDATE_STRESS(base->StartPlayout(0));
VALIDATE_STRESS(base->StartSend(0));
if (!(i % markInterval)) MARK();
SLEEP(loopSleep);
VALIDATE_STRESS(base->StopSend(0));
VALIDATE_STRESS(base->StopPlayout(0));
VALIDATE_STRESS(base->StopReceive(0));
}
ANL();
VALIDATE_STRESS(base->SetLocalReceiver(0, 4800));
VALIDATE_STRESS(base->SetSendDestination(0, 4800, "127.0.0.1"));
VALIDATE_STRESS(base->StartReceive(0));
VALIDATE_STRESS(base->StartPlayout(0));
VALIDATE_STRESS(base->StartSend(0));
printf("Verify that audio is good. \n");
PAUSE_OR_SLEEP(20000);
VALIDATE_STRESS(base->StopSend(0));
VALIDATE_STRESS(base->StopPlayout(0));
VALIDATE_STRESS(base->StopReceive(0));
///////////// End test /////////////
// Terminate
VALIDATE_STRESS(base->DeleteChannel(0));
VALIDATE_STRESS(base->Terminate());
printf("Test finished \n");
return 0;
}
int VoEStressTest::CreateDeleteChannelsTest()
{
printf("------------------------------------------------\n");
printf("Running create/delete channels test\n");
printf("------------------------------------------------\n");
// Get sub-API pointers
VoEBase* base = _mgr.BasePtr();
// Set trace
// VALIDATE_STRESS(base->SetTraceFileName(
// GetFilename("VoEStressTest_CreateChannels_trace.txt")));
// VALIDATE_STRESS(base->SetDebugTraceFileName(
// GetFilename("VoEStressTest_CreateChannels_trace_debug.txt")));
// VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
// kTraceWarning | kTraceError |
// kTraceCritical | kTraceApiCall |
// kTraceMemory | kTraceInfo));
VALIDATE_STRESS(base->Init());
///////////// Start test /////////////
int numberOfLoops(10000);
int loopSleep(10);
int i(0);
int markInterval(200);
printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
numberOfLoops, loopSleep, markInterval);
printf("Test will take approximately %d minutes. \n",
numberOfLoops * loopSleep / 1000 / 60 + 1);
// Some possible extensions include:
// Different sleep times (fixed or random) or zero.
// Start call on all or some channels.
// Two parts: first have a slight overweight to creating channels,
// then to deleting. (To ensure we hit max channels and go to zero.)
// Make sure audio is OK after test has finished.
// Set up, start with maxChannels/2 channels
const int maxChannels = base->MaxNumOfChannels();
VALIDATE_STRESS(maxChannels < 1); // Should always have at least one channel
bool* channelState = new bool[maxChannels];
memset(channelState, 0, maxChannels*sizeof(bool));
int channel(0);
int noOfActiveChannels(0);
for (i=0; i<(maxChannels/2); ++i)
{
channel = base->CreateChannel();
VALIDATE_STRESS(channel < 0);
if (channel >= 0)
{
channelState[channel] = true;
++noOfActiveChannels;
}
}
srand((unsigned int)time(NULL));
bool action(false);
double rnd(0.0);
int res(0);
// Create/delete channels with a slight overweight to creation
for (i=0; i<numberOfLoops; ++i)
{
// Randomize action (create or delete channel)
action = rand() <= (RAND_MAX / 2);
if (action)
{
if (noOfActiveChannels < maxChannels)
{
// Create new channel
channel = base->CreateChannel();
VALIDATE_STRESS(channel < 0);
if (channel >= 0)
{
channelState[channel] = true;
++noOfActiveChannels;
}
}
}
else
{
if (noOfActiveChannels > 0)
{
// Delete random channel that's created [0, maxChannels - 1]
do
{
rnd = static_cast<double>(rand());
channel = static_cast<int>(rnd /
(static_cast<double>(RAND_MAX) + 1.0f) * maxChannels);
} while (!channelState[channel]); // Must find a created channel
res = base->DeleteChannel(channel);
VALIDATE_STRESS(0 != res);
if (0 == res)
{
channelState[channel] = false;
--noOfActiveChannels;
}
}
}
if (!(i % markInterval)) MARK();
SLEEP(loopSleep);
}
ANL();
delete [] channelState;
///////////// End test /////////////
// Terminate
VALIDATE_STRESS(base->Terminate()); // Deletes all channels
printf("Test finished \n");
return 0;
}
int VoEStressTest::MultipleThreadsTest()
{
printf("------------------------------------------------\n");
printf("Running multiple threads test\n");
printf("------------------------------------------------\n");
// Get sub-API pointers
VoEBase* base = _mgr.BasePtr();
// Set trace
// VALIDATE_STRESS(base->SetTraceFileName(
// GetFilename("VoEStressTest_MultipleThreads_trace.txt")));
// VALIDATE_STRESS(base->SetDebugTraceFileName(
// GetFilename("VoEStressTest_MultipleThreads_trace_debug.txt")));
// VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
// kTraceWarning | kTraceError |
// kTraceCritical | kTraceApiCall |
// kTraceMemory | kTraceInfo));
// Init
VALIDATE_STRESS(base->Init());
VALIDATE_STRESS(base->CreateChannel());
///////////// Start test /////////////
int numberOfLoops(10000);
int loopSleep(0);
int i(0);
int markInterval(1000);
printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
numberOfLoops, loopSleep, markInterval);
printf("Test will take approximately %d minutes. \n",
numberOfLoops * loopSleep / 1000 / 60 + 1);
srand((unsigned int)time(NULL));
int rnd(0);
// Start extra thread
const char* threadName = "StressTest Extra API Thread";
_ptrExtraApiThread = ThreadWrapper::CreateThread(
RunExtraApi, this, kNormalPriority, threadName);
unsigned int id(0);
VALIDATE_STRESS(!_ptrExtraApiThread->Start(id));
// Some possible extensions include:
// Add more API calls to randomize
// More threads
// Different sleep times (fixed or random).
// Make sure audio is OK after test has finished.
// Call random API functions here and in extra thread, ignore any error
for (i=0; i<numberOfLoops; ++i)
{
// This part should be equal to the marked part in the extra thread
// --- BEGIN ---
rnd = rand();
if (rnd < (RAND_MAX / 2))
{
// Start playout
base->StartPlayout(0);
}
else
{
// Stop playout
base->StopPlayout(0);
}
// --- END ---
if (!(i % markInterval)) MARK();
SLEEP(loopSleep);
}
ANL();
// Stop extra thread
VALIDATE_STRESS(!_ptrExtraApiThread->Stop());
delete _ptrExtraApiThread;
///////////// End test /////////////
// Terminate
VALIDATE_STRESS(base->Terminate()); // Deletes all channels
printf("Test finished \n");
return 0;
}
// Thread functions
bool VoEStressTest::RunExtraApi(void* ptr)
{
return static_cast<VoEStressTest*>(ptr)->ProcessExtraApi();
}
bool VoEStressTest::ProcessExtraApi()
{
// Prepare
VoEBase* base = _mgr.BasePtr();
int rnd(0);
// Call random API function, ignore any error
// This part should be equal to the marked part in the main thread
// --- BEGIN ---
rnd = rand();
if (rnd < (RAND_MAX / 2))
{
// Start playout
base->StartPlayout(0);
}
else
{
// Stop playout
base->StopPlayout(0);
}
// --- END ---
return true;
}
} // namespace voetest
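The RunExtraApi/ProcessExtraApi pair is the static-trampoline idiom that ThreadWrapper's C-style bool(*)(void*) callback forces. Stripped to its essentials (hypothetical Worker class, no WebRTC dependencies):

class Worker
{
public:
    // C-compatible entry point: forward the opaque pointer to the object.
    static bool Run(void* ptr)
    {
        return static_cast<Worker*>(ptr)->Process();
    }
private:
    bool Process()
    {
        // one unit of work per invocation; true means "call me again"
        return true;
    }
};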

View File

@@ -1,50 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
#define WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
namespace webrtc
{
class ThreadWrapper;
}
using namespace webrtc;
namespace voetest
{
class VoETestManager;
class VoEStressTest
{
public:
VoEStressTest(VoETestManager& mgr) : _mgr(mgr), _ptrExtraApiThread(NULL) {};
~VoEStressTest() {};
int DoTest();
private:
int MenuSelection();
int StartStopTest();
int CreateDeleteChannelsTest();
int MultipleThreadsTest();
static bool RunExtraApi(void* ptr);
bool ProcessExtraApi();
VoETestManager& _mgr;
static const char* _key;
ThreadWrapper* _ptrExtraApiThread;
};
} // namespace voetest
#endif // WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H

View File

@@ -1,185 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
#define WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
// Read WEBRTC_VOICE_ENGINE_XXX_API compiler flags
#include "engine_configurations.h"
#ifdef ANDROID
#include <android/log.h>
#define ANDROID_LOG_TAG "VoiceEngine Auto Test"
#define TEST_LOG(...) \
__android_log_print(ANDROID_LOG_DEBUG, ANDROID_LOG_TAG, __VA_ARGS__)
#define TEST_LOG_ERROR(...) \
__android_log_print(ANDROID_LOG_ERROR, ANDROID_LOG_TAG, __VA_ARGS__)
#else
#define TEST_LOG printf
#define TEST_LOG_ERROR printf
#endif
// Select the tests to execute; the list order below is the same as the
// execution order. Note that all settings below will be overridden by the
// sub-API settings in engine_configurations.h.
#define _TEST_BASE_
#define _TEST_RTP_RTCP_
#define _TEST_HARDWARE_
#define _TEST_CODEC_
#define _TEST_DTMF_
#define _TEST_VOLUME_
#define _TEST_AUDIO_PROCESSING_
#define _TEST_FILE_
#define _TEST_NETWORK_
#define _TEST_CALL_REPORT_
#define _TEST_VIDEO_SYNC_
#define _TEST_ENCRYPT_
#define _TEST_NETEQ_STATS_
#define _TEST_XMEDIA_
#define TESTED_AUDIO_LAYER kAudioPlatformDefault
//#define TESTED_AUDIO_LAYER kAudioLinuxPulse
// #define _ENABLE_VISUAL_LEAK_DETECTOR_ // Enables VLD to find memory leaks
// #define _ENABLE_IPV6_TESTS_ // Enables IPv6 tests in network xtest
// #define _USE_EXTENDED_TRACE_ // Adds unique trace files for extended test
// #define _MEMORY_TEST_
// Enable this when running instrumentation of some kind to exclude tests
// that will not pass due to slowed down execution.
// #define _INSTRUMENTATION_TESTING_
// Exclude (override) API tests given preprocessor settings in
// engine_configurations.h
#ifndef WEBRTC_VOICE_ENGINE_CODEC_API
#undef _TEST_CODEC_
#endif
#ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
#undef _TEST_VOLUME_
#endif
#ifndef WEBRTC_VOICE_ENGINE_DTMF_API
#undef _TEST_DTMF_
#endif
#ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
#undef _TEST_RTP_RTCP_
#endif
#ifndef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
#undef _TEST_AUDIO_PROCESSING_
#endif
#ifndef WEBRTC_VOICE_ENGINE_FILE_API
#undef _TEST_FILE_
#endif
#ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
#undef _TEST_VIDEO_SYNC_
#endif
#ifndef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
#undef _TEST_ENCRYPT_
#endif
#ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
#undef _TEST_HARDWARE_
#endif
#ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
#undef _TEST_XMEDIA_
#endif
#ifndef WEBRTC_VOICE_ENGINE_NETWORK_API
#undef _TEST_NETWORK_
#endif
#ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
#undef _TEST_NETEQ_STATS_
#endif
#ifndef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
#undef _TEST_CALL_REPORT_
#endif
// Some parts can cause problems while running Insure
#ifdef __INSURE__
#define _INSTRUMENTATION_TESTING_
#undef WEBRTC_SRTP
#endif
// Time in ms to test each packet size for each codec
#define CODEC_TEST_TIME 400
#define MARK() do { TEST_LOG("."); fflush(NULL); } while (0) // Add test marker
#define ANL() TEST_LOG("\n") // Add New Line
#define AOK() do { TEST_LOG("[Test is OK]"); fflush(NULL); } while (0) // Add OK
#if defined(_WIN32)
#define PAUSE \
{ \
TEST_LOG("Press any key to continue..."); \
_getch(); \
TEST_LOG("\n"); \
}
#else
#define PAUSE \
{ \
TEST_LOG("Continuing (pause not supported)\n"); \
}
#endif
#define TEST(s) \
{ \
TEST_LOG("Testing: %s", #s); \
}
#ifdef _INSTRUMENTATION_TESTING_
// Don't stop execution if error occurs
#define TEST_MUSTPASS(expr) \
{ \
if ((expr)) \
{ \
TEST_LOG_ERROR("Error at line:%i, %s \n",__LINE__, #expr); \
TEST_LOG_ERROR("Error code: %i\n",base->LastError()); \
} \
}
#define TEST_ERROR(code) \
{ \
int err = base->LastError(); \
if (err != code) \
{ \
TEST_LOG_ERROR("Invalid error code (%d, should be %d) at line %d\n",
code, err, __LINE__); \
} \
}
#else
#define TEST_MUSTPASS(expr) \
{ \
if ((expr)) \
{ \
TEST_LOG_ERROR("\nError at line:%i, %s \n",__LINE__, #expr); \
TEST_LOG_ERROR("Error code: %i\n",base->LastError()); \
PAUSE \
return -1; \
} \
}
#define TEST_ERROR(code) \
{ \
int err = base->LastError(); \
if (err != code) \
{ \
TEST_LOG_ERROR("Invalid error code (%d, should be %d) at line %d\n", err, code, __LINE__); \
PAUSE \
return -1; \
} \
}
#endif // #ifdef _INSTRUMENTATION_TESTING_
#define EXCLUDE() \
{ \
TEST_LOG("\n>>> Excluding test at line: %i <<<\n\n",__LINE__); \
}
#define INCOMPLETE() \
{ \
TEST_LOG("\n>>> Incomplete test at line: %i <<<\n\n",__LINE__); \
}
#endif // WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
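Both TEST_MUSTPASS and TEST_ERROR expand to calls on base->LastError(), so a VoEBase* named base must be in scope at every call site. Typical usage, illustrative only (mgr stands for a VoETestManager):

VoEBase* base = mgr.BasePtr();
TEST(Init);
TEST_MUSTPASS(base->Init());
TEST_MUSTPASS(base->StartPlayout(0));
AOK(); ANL();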

Some files were not shown because too many files have changed in this diff.