Removes parts of the webrtc::VoEExternalMedia sub-API as part of a clean-up effort whose goal is to remove unused APIs.
BUG=3206
R=niklas.enbom@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/14419004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6102 4adac7df-926f-26a2-2b94-8c16560cd09d
parent 924e81f797
commit 1cec3957b8
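What the change means for callers, as a minimal sketch that is not part of the patch: as the hunks below show, the four external recording/playout methods lose their pure-virtual declarations and reappear as deprecated defaults that simply fail, so legacy code now gets -1 back from them. The lifecycle follows the usage example this patch deletes from the VoEExternalMedia header; the include paths are assumed from the tree layout at this revision.

#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_external_media.h"

// Hypothetical legacy caller: after this patch the deprecated entry points
// still compile but always fail.
void LegacyExternalPlayoutCaller(webrtc::VoiceEngine* voe) {
  webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);
  webrtc::VoEExternalMedia* xmedia =
      webrtc::VoEExternalMedia::GetInterface(voe);
  base->Init();

  // Both calls now hit the "Don't use. To be removed." defaults and return -1
  // instead of toggling external recording/playout.
  int rec_result = xmedia->SetExternalRecordingStatus(true);
  int play_result = xmedia->SetExternalPlayoutStatus(true);
  (void)rec_result;
  (void)play_result;

  base->Terminate();
  xmedia->Release();
  base->Release();
}

GetAudioFrame() and SetExternalMixing() are left in place; a sketch of that still-supported external-mixing path follows the last hunk below.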
@@ -63,7 +63,6 @@
 #define WEBRTC_VOICE_ENGINE_AGC                 // Near-end AGC
 #define WEBRTC_VOICE_ENGINE_ECHO                // Near-end AEC
 #define WEBRTC_VOICE_ENGINE_NR                  // Near-end NS
-#define WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
 
 #if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
 #define WEBRTC_VOICE_ENGINE_TYPING_DETECTION    // Typing detection
@@ -7,28 +7,6 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-
-// In some cases it is desirable to use an audio source or sink which may
-// not be available to the VoiceEngine, such as a DV camera. This sub-API
-// contains functions that allow for the use of such external recording
-// sources and playout sinks. It also describes how recorded data, or data
-// to be played out, can be modified outside the VoiceEngine.
-//
-// Usage example, omitting error checking:
-//
-// using namespace webrtc;
-// VoiceEngine* voe = VoiceEngine::Create();
-// VoEBase* base = VoEBase::GetInterface(voe);
-// VoEMediaProcess media = VoEMediaProcess::GetInterface(voe);
-// base->Init();
-// ...
-// media->SetExternalRecordingStatus(true);
-// ...
-// base->Terminate();
-// base->Release();
-// media->Release();
-// VoiceEngine::Delete(voe);
-//
 #ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
 #define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
 
@@ -83,28 +61,6 @@ public:
     virtual int DeRegisterExternalMediaProcessing(
         int channel, ProcessingTypes type) = 0;
 
-    // Toogles state of external recording.
-    virtual int SetExternalRecordingStatus(bool enable) = 0;
-
-    // Toogles state of external playout.
-    virtual int SetExternalPlayoutStatus(bool enable) = 0;
-
-    // This function accepts externally recorded audio. During transmission,
-    // this method should be called at as regular an interval as possible
-    // with frames of corresponding size.
-    virtual int ExternalRecordingInsertData(
-        const int16_t speechData10ms[], int lengthSamples,
-        int samplingFreqHz, int current_delay_ms) = 0;
-
-    // This function gets audio for an external playout sink.
-    // During transmission, this function should be called every ~10 ms
-    // to obtain a new 10 ms frame of audio. The length of the block will
-    // be 160, 320, 440 or 480 samples (for 16, 32, 44 or 48 kHz sampling
-    // rates respectively).
-    virtual int ExternalPlayoutGetData(
-        int16_t speechData10ms[], int samplingFreqHz,
-        int current_delay_ms, int& lengthSamples) = 0;
-
     // Pulls an audio frame from the specified |channel| for external mixing.
     // If the |desired_sample_rate_hz| is 0, the signal will be returned with
     // its native frequency, otherwise it will be resampled. Valid frequencies
@@ -115,6 +71,16 @@ public:
     // Sets the state of external mixing. Cannot be changed during playback.
     virtual int SetExternalMixing(int channel, bool enable) = 0;
 
+    // Don't use. To be removed.
+    virtual int SetExternalRecordingStatus(bool enable) { return -1; }
+    virtual int SetExternalPlayoutStatus(bool enable) { return -1; }
+    virtual int ExternalRecordingInsertData(
+        const int16_t speechData10ms[], int lengthSamples,
+        int samplingFreqHz, int current_delay_ms) { return -1; }
+    virtual int ExternalPlayoutGetData(
+        int16_t speechData10ms[], int samplingFreqHz,
+        int current_delay_ms, int& lengthSamples) { return -1; }
+
 protected:
     VoEExternalMedia() {}
     virtual ~VoEExternalMedia() {}
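A side effect of the hunk above, sketched below (not from the patch): because SetExternalRecordingStatus() and friends are no longer pure virtual, an implementer of VoEExternalMedia only has to override the members it actually uses. The set of remaining pure virtuals shown here is assumed from the interface at this revision.

#include "webrtc/voice_engine/include/voe_external_media.h"

// Hypothetical test double: only the members assumed to stay pure virtual are
// overridden; the deprecated methods inherit the failing defaults added above.
class FakeExternalMedia : public webrtc::VoEExternalMedia {
 public:
  virtual int RegisterExternalMediaProcessing(
      int channel, webrtc::ProcessingTypes type,
      webrtc::VoEMediaProcess& processObject) { return 0; }
  virtual int DeRegisterExternalMediaProcessing(
      int channel, webrtc::ProcessingTypes type) { return 0; }
  virtual int GetAudioFrame(int channel, int desired_sample_rate_hz,
                            webrtc::AudioFrame* frame) { return 0; }
  virtual int SetExternalMixing(int channel, bool enable) { return 0; }
  virtual int Release() { return 0; }
  // No overrides needed for SetExternalRecordingStatus(),
  // SetExternalPlayoutStatus(), ExternalRecordingInsertData() or
  // ExternalPlayoutGetData(): they now default to { return -1; }.
};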
@@ -28,43 +28,6 @@ class ExternalMediaTest : public AfterStreamingFixture {
   }
 };
 
-TEST_F(ExternalMediaTest, ManualCanRecordAndPlaybackUsingExternalPlayout) {
-  SwitchToManualMicrophone();
-
-  EXPECT_EQ(0, voe_base_->StopSend(channel_));
-  EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
-  EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(true));
-  EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
-  EXPECT_EQ(0, voe_base_->StartSend(channel_));
-
-  TEST_LOG("Recording data for 2 seconds starting now: please speak.\n");
-  int16_t recording[32000];
-  for (int i = 0; i < 200; i++) {
-    int sample_length = 0;
-    EXPECT_EQ(0, voe_xmedia_->ExternalPlayoutGetData(
-        &(recording[i * 160]), 16000, 100, sample_length));
-    EXPECT_EQ(160, sample_length);
-    Sleep(10);
-  }
-
-  EXPECT_EQ(0, voe_base_->StopSend(channel_));
-  EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
-  EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(false));
-  EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
-  EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(true));
-  EXPECT_EQ(0, voe_base_->StartSend(channel_));
-
-  TEST_LOG("Playing back recording, you should hear what you said earlier.\n");
-  for (int i = 0; i < 200; i++) {
-    EXPECT_EQ(0, voe_xmedia_->ExternalRecordingInsertData(
-        &(recording[i * 160]), 160, 16000, 20));
-    Sleep(10);
-  }
-
-  EXPECT_EQ(0, voe_base_->StopSend(channel_));
-  EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(false));
-}
-
 TEST_F(ExternalMediaTest,
     ManualRegisterExternalMediaProcessingOnAllChannelsAffectsPlayout) {
   TEST_LOG("Enabling external media processing: audio should be affected.\n");
@@ -791,16 +791,6 @@ int VoEBaseImpl::GetVersion(char version[1024])
     accLen += len;
     assert(accLen < kVoiceEngineVersionMaxMessageSize);
 #endif
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    len = AddExternalRecAndPlayoutBuild(versionPtr);
-    if (len == -1)
-    {
-        return -1;
-    }
-    versionPtr += len;
-    accLen += len;
-    assert(accLen < kVoiceEngineVersionMaxMessageSize);
-#endif
 
     memcpy(version, versionBuf, accLen);
     version[accLen] = '\0';
@@ -850,13 +840,6 @@ int32_t VoEBaseImpl::AddExternalTransportBuild(char* str) const
 }
 #endif
 
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-int32_t VoEBaseImpl::AddExternalRecAndPlayoutBuild(char* str) const
-{
-    return sprintf(str, "External recording and playout build\n");
-}
-#endif
-
 int VoEBaseImpl::LastError()
 {
     WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
@@ -148,9 +148,6 @@ private:
     int InitializeChannel(voe::ChannelOwner* channel_owner);
 #ifdef WEBRTC_EXTERNAL_TRANSPORT
     int32_t AddExternalTransportBuild(char* str) const;
-#endif
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    int32_t AddExternalRecAndPlayoutBuild(char* str) const;
 #endif
     VoiceEngineObserver* _voiceEngineObserverPtr;
     CriticalSectionWrapper& _callbackCritSect;
@@ -143,203 +143,6 @@ int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing(
     return -1;
 }
 
-int VoEExternalMediaImpl::SetExternalRecordingStatus(bool enable)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
-        "SetExternalRecordingStatus(enable=%d)", enable);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    if (shared_->audio_device()->Recording())
-    {
-        shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
-            "SetExternalRecordingStatus() cannot set state while sending");
-        return -1;
-    }
-    shared_->set_ext_recording(enable);
-    return 0;
-#else
-    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-        "SetExternalRecordingStatus() external recording is not supported");
-    return -1;
-#endif
-}
-
-int VoEExternalMediaImpl::ExternalRecordingInsertData(
-    const int16_t speechData10ms[],
-    int lengthSamples,
-    int samplingFreqHz,
-    int current_delay_ms)
-{
-    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
-        "ExternalRecordingInsertData(speechData10ms=0x%x,"
-        " lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)",
-        &speechData10ms[0], lengthSamples, samplingFreqHz,
-        current_delay_ms);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    if (!shared_->statistics().Initialized())
-    {
-        shared_->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (!shared_->ext_recording())
-    {
-        shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
-            "ExternalRecordingInsertData() external recording is not enabled");
-        return -1;
-    }
-    if (shared_->NumOfSendingChannels() == 0)
-    {
-        shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
-            "SetExternalRecordingStatus() no channel is sending");
-        return -1;
-    }
-    if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
-        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
-    {
-        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetExternalRecordingStatus() invalid sample rate");
-        return -1;
-    }
-    if ((0 == lengthSamples) ||
-        ((lengthSamples % (samplingFreqHz / 100)) != 0))
-    {
-        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetExternalRecordingStatus() invalid buffer size");
-        return -1;
-    }
-    if (current_delay_ms < 0)
-    {
-        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetExternalRecordingStatus() invalid delay)");
-        return -1;
-    }
-
-    uint16_t blockSize = samplingFreqHz / 100;
-    uint32_t nBlocks = lengthSamples / blockSize;
-    int16_t totalDelayMS = 0;
-    uint16_t playoutDelayMS = 0;
-
-    for (uint32_t i = 0; i < nBlocks; i++)
-    {
-        if (!shared_->ext_playout())
-        {
-            // Use real playout delay if external playout is not enabled.
-            if (shared_->audio_device()->PlayoutDelay(&playoutDelayMS) != 0) {
-                shared_->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
-                    "PlayoutDelay() unable to get the playout delay");
-            }
-            totalDelayMS = current_delay_ms + playoutDelayMS;
-        }
-        else
-        {
-            // Use stored delay value given the last call
-            // to ExternalPlayoutGetData.
-            totalDelayMS = current_delay_ms + playout_delay_ms_;
-            // Compensate for block sizes larger than 10ms
-            totalDelayMS -= (int16_t)(i*10);
-            if (totalDelayMS < 0)
-                totalDelayMS = 0;
-        }
-        shared_->transmit_mixer()->PrepareDemux(
-            (const int8_t*)(&speechData10ms[i*blockSize]),
-            blockSize,
-            1,
-            samplingFreqHz,
-            totalDelayMS,
-            0,
-            0,
-            false); // Typing detection not supported
-
-        shared_->transmit_mixer()->DemuxAndMix();
-        shared_->transmit_mixer()->EncodeAndSend();
-    }
-    return 0;
-#else
-    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-        "ExternalRecordingInsertData() external recording is not supported");
-    return -1;
-#endif
-}
-
-int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
-        "SetExternalPlayoutStatus(enable=%d)", enable);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    if (shared_->audio_device()->Playing())
-    {
-        shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
-            "SetExternalPlayoutStatus() cannot set state while playing");
-        return -1;
-    }
-    shared_->set_ext_playout(enable);
-    return 0;
-#else
-    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-        "SetExternalPlayoutStatus() external playout is not supported");
-    return -1;
-#endif
-}
-
-int VoEExternalMediaImpl::ExternalPlayoutGetData(
-    int16_t speechData10ms[],
-    int samplingFreqHz,
-    int current_delay_ms,
-    int& lengthSamples)
-{
-    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
-        "ExternalPlayoutGetData(speechData10ms=0x%x, samplingFreqHz=%d"
-        ", current_delay_ms=%d)", &speechData10ms[0], samplingFreqHz,
-        current_delay_ms);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    if (!shared_->statistics().Initialized())
-    {
-        shared_->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (!shared_->ext_playout())
-    {
-        shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
-            "ExternalPlayoutGetData() external playout is not enabled");
-        return -1;
-    }
-    if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
-        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
-    {
-        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "ExternalPlayoutGetData() invalid sample rate");
-        return -1;
-    }
-    if (current_delay_ms < 0)
-    {
-        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "ExternalPlayoutGetData() invalid delay)");
-        return -1;
-    }
-
-    AudioFrame audioFrame;
-
-    // Retrieve mixed output at the specified rate
-    shared_->output_mixer()->MixActiveChannels();
-    shared_->output_mixer()->DoOperationsOnCombinedSignal(true);
-    shared_->output_mixer()->GetMixedAudio(samplingFreqHz, 1, &audioFrame);
-
-    // Deliver audio (PCM) samples to the external sink
-    memcpy(speechData10ms,
-           audioFrame.data_,
-           sizeof(int16_t)*(audioFrame.samples_per_channel_));
-    lengthSamples = audioFrame.samples_per_channel_;
-
-    // Store current playout delay (to be used by ExternalRecordingInsertData).
-    playout_delay_ms_ = current_delay_ms;
-
-    return 0;
-#else
-    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-        "ExternalPlayoutGetData() external playout is not supported");
-    return -1;
-#endif
-}
-
 int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz,
                                         AudioFrame* frame) {
   WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
@@ -29,20 +29,6 @@ public:
         int channel,
         ProcessingTypes type);
 
-    virtual int SetExternalRecordingStatus(bool enable);
-
-    virtual int SetExternalPlayoutStatus(bool enable);
-
-    virtual int ExternalRecordingInsertData(
-        const int16_t speechData10ms[],
-        int lengthSamples,
-        int samplingFreqHz,
-        int current_delay_ms);
-
-    virtual int ExternalPlayoutGetData(int16_t speechData10ms[],
-                                       int samplingFreqHz,
-                                       int current_delay_ms,
-                                       int& lengthSamples);
 
     virtual int GetAudioFrame(int channel, int desired_sample_rate_hz,
                               AudioFrame* frame);
@@ -54,9 +40,6 @@ protected:
     virtual ~VoEExternalMediaImpl();
 
 private:
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    int playout_delay_ms_;
-#endif
     voe::SharedData* shared_;
 };
 
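For reference, a sketch (not part of the patch) of the path that stays supported after this removal: per-channel external mixing through SetExternalMixing() plus GetAudioFrame(), as described by the header comments kept in the hunks above. The include paths and the 48 kHz choice are assumptions; error handling is omitted.

#include "webrtc/modules/interface/module_common_types.h"  // webrtc::AudioFrame
#include "webrtc/voice_engine/include/voe_external_media.h"

// Mix a channel's decoded audio outside VoiceEngine instead of using the
// removed external playout API.
void DemoExternalMixing(webrtc::VoEExternalMedia* xmedia, int channel) {
  // Must be set while the channel is not playing (header: "Cannot be changed
  // during playback"), so this is assumed to run before StartPlayout().
  xmedia->SetExternalMixing(channel, true);

  // Later, typically every ~10 ms: pull the channel's decoded audio.
  webrtc::AudioFrame frame;
  xmedia->GetAudioFrame(channel, 48000, &frame);  // 0 would mean native rate.
  // frame.data_ now holds frame.samples_per_channel_ samples of 16-bit PCM.
}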