Fix AudioFrame types.

volume_ is not set anywhere so I'm removing it.

BUG=
TEST=trybots

Review URL: https://webrtc-codereview.appspot.com/556004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@2196 4adac7df-926f-26a2-2b94-8c16560cd09d
@@ -1845,7 +1845,6 @@ WebRtc_Word32 AudioCodingModuleImpl::PlayoutData10Ms(
 }
 
 audio_frame.id_ = _id;
-audio_frame.volume_ = -1;
 audio_frame.energy_ = -1;
 audio_frame.timestamp_ = 0;
 
@@ -22,7 +22,7 @@ void SetParticipantStatistics(ParticipantStatistics* stats,
 const AudioFrame& frame)
 {
 stats->participant = frame.id_;
-stats->level = frame.volume_;
+stats->level = 0; // TODO(andrew): to what should this be set?
 }
 } // namespace
 
@@ -1005,9 +1005,9 @@ void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
 _scratchVadPositiveParticipants[
 _scratchVadPositiveParticipantsAmount].participant =
 audioFrame->id_;
+// TODO(andrew): to what should this be set?
 _scratchVadPositiveParticipants[
-_scratchVadPositiveParticipantsAmount].level =
-audioFrame->volume_;
+_scratchVadPositiveParticipantsAmount].level = 0;
 _scratchVadPositiveParticipantsAmount++;
 }
 item = mixList.Next(item);
@@ -694,35 +694,23 @@ VideoFrame::Free()
 }
 
 
-/*************************************************
+/* This class holds up to 60 ms of super-wideband (32 kHz) stereo audio. It
+* allows for adding and subtracting frames while keeping track of the resulting
+* states.
 *
-* AudioFrame class
+* Notes
+* - The total number of samples in |data_| is
+* samples_per_channel_ * num_channels_
 *
-* The AudioFrame class holds up to 60 ms wideband
-* audio. It allows for adding and subtracting frames
-* while keeping track of the resulting states.
+* - Stereo data is interleaved starting with the left channel.
 *
-* Note
-* - The +operator assume that you would never add
-* exact opposite frames when deciding the resulting
-* state. To do this use the -operator.
-*
-* - num_channels_ of 1 indicated mono, and 2
-* indicates stereo.
-*
-* - samples_per_channel_ is the number of
-* samples per channel. Therefore, the total
-* number of samples in data_ is
-* (samples_per_channel_ * num_channels_).
-*
-* - Stereo data is stored in interleaved fashion
-* starting with the left channel.
-*
-*************************************************/
+* - The +operator assume that you would never add exactly opposite frames when
+* deciding the resulting state. To do this use the -operator.
+*/
 class AudioFrame
 {
 public:
-enum { kMaxDataSizeSamples = 3840 }; // stereo 32KHz 60ms 2*32*60
+enum { kMaxDataSizeSamples = 3840 }; // stereo, 32 kHz, 60ms (2*32*60)
 
 enum VADActivity
 {
@@ -742,39 +730,35 @@ public:
 AudioFrame();
 virtual ~AudioFrame();
 
-WebRtc_Word32 UpdateFrame(
-const WebRtc_Word32 id,
-const WebRtc_UWord32 timeStamp,
-const WebRtc_Word16* payloadData,
-const WebRtc_UWord16 payloadDataLengthInSamples,
-const int frequencyInHz,
-const SpeechType speechType,
-const VADActivity vadActivity,
-const WebRtc_UWord8 audioChannel = 1,
-const WebRtc_Word32 volume = -1,
-const WebRtc_Word32 energy = -1);
+int UpdateFrame(
+int id,
+uint32_t timestamp,
+const int16_t* data,
+int samples_per_channel,
+int sample_rate_hz,
+SpeechType speech_type,
+VADActivity vad_activity,
+int num_channels = 1,
+uint32_t energy = -1);
 
 AudioFrame& Append(const AudioFrame& rhs);
 
 void Mute();
 
 AudioFrame& operator=(const AudioFrame& rhs);
-AudioFrame& operator>>=(const WebRtc_Word32 rhs);
+AudioFrame& operator>>=(const int rhs);
 AudioFrame& operator+=(const AudioFrame& rhs);
 AudioFrame& operator-=(const AudioFrame& rhs);
 
-// TODO(andrew): clean up types.
-WebRtc_Word32 id_;
-WebRtc_UWord32 timestamp_;
-
-WebRtc_Word16 data_[kMaxDataSizeSamples];
-WebRtc_UWord16 samples_per_channel_;
+int id_;
+uint32_t timestamp_;
+int16_t data_[kMaxDataSizeSamples];
+int samples_per_channel_;
 int sample_rate_hz_;
-WebRtc_UWord8 num_channels_;
+int num_channels_;
 SpeechType speech_type_;
 VADActivity vad_activity_;
-WebRtc_UWord32 energy_;
-WebRtc_Word32 volume_; // TODO(andrew): investigate removing.
+uint32_t energy_;
 };
 
 inline
@@ -788,8 +772,7 @@ AudioFrame::AudioFrame()
 num_channels_(1),
 speech_type_(kUndefined),
 vad_activity_(kVadUnknown),
-energy_(0xffffffff),
-volume_(0xffffffff)
+energy_(0xffffffff)
 {
 }
 
@@ -799,44 +782,42 @@ AudioFrame::~AudioFrame()
 }
 
 inline
-WebRtc_Word32
+int
 AudioFrame::UpdateFrame(
-const WebRtc_Word32 id,
-const WebRtc_UWord32 timeStamp,
-const WebRtc_Word16* payloadData,
-const WebRtc_UWord16 payloadDataLengthInSamples,
-const int frequencyInHz,
-const SpeechType speechType,
-const VADActivity vadActivity,
-const WebRtc_UWord8 audioChannel,
-const WebRtc_Word32 volume,
-const WebRtc_Word32 energy)
+int id,
+uint32_t timestamp,
+const int16_t* data,
+int samples_per_channel,
+int sample_rate_hz,
+SpeechType speech_type,
+VADActivity vad_activity,
+int num_channels,
+uint32_t energy)
 {
 id_ = id;
-timestamp_ = timeStamp;
-sample_rate_hz_ = frequencyInHz;
-speech_type_ = speechType;
-vad_activity_ = vadActivity;
-volume_ = volume;
-num_channels_ = audioChannel;
+timestamp_ = timestamp;
+sample_rate_hz_ = sample_rate_hz;
+speech_type_ = speech_type;
+vad_activity_ = vad_activity;
+num_channels_ = num_channels;
 energy_ = energy;
 
-if((payloadDataLengthInSamples > kMaxDataSizeSamples) ||
-(audioChannel > 2) || (audioChannel < 1))
+if((samples_per_channel > kMaxDataSizeSamples) ||
+(num_channels > 2) || (num_channels < 1))
 {
 samples_per_channel_ = 0;
 return -1;
 }
-samples_per_channel_ = payloadDataLengthInSamples;
-if(payloadData != NULL)
+samples_per_channel_ = samples_per_channel;
+if(data != NULL)
 {
-memcpy(data_, payloadData, sizeof(WebRtc_Word16) *
-payloadDataLengthInSamples * num_channels_);
+memcpy(data_, data, sizeof(int16_t) *
+samples_per_channel * num_channels_);
 }
 else
 {
-memset(data_,0,sizeof(WebRtc_Word16) *
-payloadDataLengthInSamples * num_channels_);
+memset(data_,0,sizeof(int16_t) *
+samples_per_channel * num_channels_);
 }
 return 0;
 }
@@ -845,7 +826,7 @@ inline
 void
 AudioFrame::Mute()
 {
-memset(data_, 0, samples_per_channel_ * sizeof(WebRtc_Word16));
+memset(data_, 0, samples_per_channel_ * num_channels_ * sizeof(int16_t));
 }
 
 inline
@@ -868,20 +849,19 @@ AudioFrame::operator=(const AudioFrame& rhs)
 sample_rate_hz_ = rhs.sample_rate_hz_;
 speech_type_ = rhs.speech_type_;
 vad_activity_ = rhs.vad_activity_;
-volume_ = rhs.volume_;
 num_channels_ = rhs.num_channels_;
 energy_ = rhs.energy_;
 
 samples_per_channel_ = rhs.samples_per_channel_;
 memcpy(data_, rhs.data_,
-sizeof(WebRtc_Word16) * rhs.samples_per_channel_ * num_channels_);
+sizeof(int16_t) * rhs.samples_per_channel_ * num_channels_);
 
 return *this;
 }
 
 inline
 AudioFrame&
-AudioFrame::operator>>=(const WebRtc_Word32 rhs)
+AudioFrame::operator>>=(const int rhs)
 {
 assert((num_channels_ > 0) && (num_channels_ < 3));
 if((num_channels_ > 2) ||
@@ -889,9 +869,9 @@ AudioFrame::operator>>=(const WebRtc_Word32 rhs)
 {
 return *this;
 }
-for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
+for(int i = 0; i < samples_per_channel_ * num_channels_; i++)
 {
-data_[i] = WebRtc_Word16(data_[i] >> rhs);
+data_[i] = static_cast<int16_t>(data_[i] >> rhs);
 }
 return *this;
 }
@@ -926,8 +906,8 @@ AudioFrame::Append(const AudioFrame& rhs)
 speech_type_ = kUndefined;
 }
 
-WebRtc_UWord16 offset = samples_per_channel_ * num_channels_;
-for(WebRtc_UWord16 i = 0;
+int offset = samples_per_channel_ * num_channels_;
+for(int i = 0;
 i < rhs.samples_per_channel_ * rhs.num_channels_;
 i++)
 {
@@ -986,14 +966,14 @@ AudioFrame::operator+=(const AudioFrame& rhs)
 if(noPrevData)
 {
 memcpy(data_, rhs.data_,
-sizeof(WebRtc_Word16) * rhs.samples_per_channel_ * num_channels_);
+sizeof(int16_t) * rhs.samples_per_channel_ * num_channels_);
 } else
 {
 // IMPROVEMENT this can be done very fast in assembly
-for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
+for(int i = 0; i < samples_per_channel_ * num_channels_; i++)
 {
-WebRtc_Word32 wrapGuard = (WebRtc_Word32)data_[i] +
-(WebRtc_Word32)rhs.data_[i];
+int32_t wrapGuard = static_cast<int32_t>(data_[i]) +
+static_cast<int32_t>(rhs.data_[i]);
 if(wrapGuard < -32768)
 {
 data_[i] = -32768;
@@ -1002,12 +982,11 @@ AudioFrame::operator+=(const AudioFrame& rhs)
 data_[i] = 32767;
 }else
 {
-data_[i] = (WebRtc_Word16)wrapGuard;
+data_[i] = (int16_t)wrapGuard;
 }
 }
 }
 energy_ = 0xffffffff;
-volume_ = 0xffffffff;
 return *this;
 }
 
@@ -1034,10 +1013,10 @@ AudioFrame::operator-=(const AudioFrame& rhs)
 }
 speech_type_ = kUndefined;
 
-for(WebRtc_UWord16 i = 0; i < samples_per_channel_ * num_channels_; i++)
+for(int i = 0; i < samples_per_channel_ * num_channels_; i++)
 {
-WebRtc_Word32 wrapGuard = (WebRtc_Word32)data_[i] -
-(WebRtc_Word32)rhs.data_[i];
+int32_t wrapGuard = static_cast<int32_t>(data_[i]) -
+static_cast<int32_t>(rhs.data_[i]);
 if(wrapGuard < -32768)
 {
 data_[i] = -32768;
@@ -1048,11 +1027,10 @@ AudioFrame::operator-=(const AudioFrame& rhs)
 }
 else
 {
-data_[i] = (WebRtc_Word16)wrapGuard;
+data_[i] = (int16_t)wrapGuard;
 }
 }
 energy_ = 0xffffffff;
-volume_ = 0xffffffff;
 return *this;
 }
 
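For orientation, here is a minimal sketch of how a caller would fill a frame through the retyped interface in the hunks above. The header path, the webrtc namespace, the function name FillPlayoutFrame, and the 10 ms / 16 kHz mono buffer are illustrative assumptions, not part of this patch; the point is that the volume parameter is gone while energy keeps its default argument.

// Sketch only: assumes module_common_types.h declares AudioFrame as shown in
// the hunks above and that the caller already has 10 ms of mono PCM at 16 kHz.
#include <stdint.h>
#include "module_common_types.h"

namespace webrtc {

int FillPlayoutFrame(AudioFrame* frame, const int16_t* pcm) {
    const int kSamplesPerChannel = 160;  // 10 ms of mono audio at 16 kHz.
    return frame->UpdateFrame(-1,        // id
                              0,         // timestamp
                              pcm,
                              kSamplesPerChannel,
                              16000,     // sample_rate_hz
                              AudioFrame::kNormalSpeech,
                              AudioFrame::kVadUnknown,
                              1);        // num_channels; energy keeps its -1 default
}

}  // namespace webrtc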
@@ -33,10 +33,13 @@ public:
 
 static void DestroyFilePlayer(FilePlayer* player);
 
-virtual WebRtc_Word32 Get10msAudioFromFile(
-WebRtc_Word16* decodedDataBuffer,
-WebRtc_UWord32& decodedDataLengthInSamples,
-const WebRtc_UWord32 frequencyInHz) = 0;
+// Read 10 ms of audio at |frequencyInHz| to |outBuffer|. |lengthInSamples|
+// will be set to the number of samples read (not the number of samples per
+// channel).
+virtual int Get10msAudioFromFile(
+int16_t* outBuffer,
+int& lengthInSamples,
+int frequencyInHz) = 0;
 
 // Register callback for receiving file playing notifications.
 virtual WebRtc_Word32 RegisterModuleFileCallback(
@@ -117,9 +117,9 @@ WebRtc_Word32 FilePlayerImpl::AudioCodec(CodecInst& audioCodec) const
 }
 
 WebRtc_Word32 FilePlayerImpl::Get10msAudioFromFile(
-WebRtc_Word16* outBuffer,
-WebRtc_UWord32& lengthInSamples,
-WebRtc_UWord32 frequencyInHz)
+int16_t* outBuffer,
+int& lengthInSamples,
+int frequencyInHz)
 {
 if(_codec.plfreq == 0)
 {
@@ -32,11 +32,10 @@ public:
 FilePlayerImpl(WebRtc_UWord32 instanceID, FileFormats fileFormat);
 ~FilePlayerImpl();
 
-// FilePlayer functions.
-virtual WebRtc_Word32 Get10msAudioFromFile(
-WebRtc_Word16* decodedDataBuffer,
-WebRtc_UWord32& decodedDataLengthInSamples,
-const WebRtc_UWord32 frequencyInHz);
+virtual int Get10msAudioFromFile(
+int16_t* outBuffer,
+int& lengthInSamples,
+int frequencyInHz);
 virtual WebRtc_Word32 RegisterModuleFileCallback(FileCallback* callback);
 virtual WebRtc_Word32 StartPlayingFile(
 const char* fileName,
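The VoEFileImpl convert loops later in this patch drive the retyped interface in the same way as the sketch below. The buffer size and 16 kHz rate mirror those loops; the function name DrainFile, the placeholder consumer, and the assumption that FilePlayer lives in the webrtc namespace and is reachable through the include shown are mine, not the patch's.

// Sketch of a read loop against the new signature; |lengthInSamples| comes
// back as the total number of samples read, not samples per channel.
#include <stdint.h>
#include "file_player.h"  // assumed header path for webrtc::FilePlayer

void ConsumePcm(const int16_t* pcm, int length_in_samples);  // placeholder consumer

void DrainFile(webrtc::FilePlayer* player) {
    int16_t buffer[160];      // enough for 10 ms of mono audio at 16 kHz
    int lengthInSamples = 0;  // filled in by the callee on each call
    while (player->Get10msAudioFromFile(buffer, lengthInSamples, 16000) == 0) {
        ConsumePcm(buffer, lengthInSamples);
    }
}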
@@ -123,7 +123,7 @@ class ViEFilePlayer
 ThreadWrapper* decode_thread_;
 EventWrapper* decode_event_;
 WebRtc_Word16 decoded_audio_[kMaxDecodedAudioLength];
-WebRtc_UWord32 decoded_audio_length_;
+int decoded_audio_length_;
 
 // Trick - list containing VoE buffer reading this file. Used if multiple
 // audio channels are sending.
@@ -6142,11 +6142,13 @@ Channel::GetRtpRtcp(RtpRtcp* &rtpRtcpModule) const
 return 0;
 }
 
+// TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
+// a shared helper.
 WebRtc_Word32
 Channel::MixOrReplaceAudioWithFile(const int mixingFrequency)
 {
 scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]);
-WebRtc_UWord32 fileSamples(0);
+int fileSamples(0);
 
 {
 CriticalSectionScoped cs(&_fileCritSect);
@@ -6186,10 +6188,10 @@ Channel::MixOrReplaceAudioWithFile(const int mixingFrequency)
 // Currently file stream is always mono.
 // TODO(xians): Change the code when FilePlayer supports real stereo.
 Utility::MixWithSat(_audioFrame.data_,
-static_cast<int>(_audioFrame.num_channels_),
+_audioFrame.num_channels_,
 fileBuffer.get(),
 1,
-static_cast<int>(fileSamples));
+fileSamples);
 }
 else
 {
@@ -6199,7 +6201,7 @@ Channel::MixOrReplaceAudioWithFile(const int mixingFrequency)
 _audioFrame.UpdateFrame(_channelId,
 -1,
 fileBuffer.get(),
-static_cast<WebRtc_UWord16>(fileSamples),
+fileSamples,
 mixingFrequency,
 AudioFrame::kNormalSpeech,
 AudioFrame::kVadUnknown,
@@ -6216,7 +6218,7 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
 assert(mixingFrequency <= 32000);
 
 scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]);
-WebRtc_UWord32 fileSamples(0);
+int fileSamples(0);
 
 {
 CriticalSectionScoped cs(&_fileCritSect);
@@ -6246,10 +6248,10 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
 // Currently file stream is always mono.
 // TODO(xians): Change the code when FilePlayer supports real stereo.
 Utility::MixWithSat(audioFrame.data_,
-static_cast<int>(audioFrame.num_channels_),
+audioFrame.num_channels_,
 fileBuffer.get(),
 1,
-static_cast<int>(fileSamples));
+fileSamples);
 }
 else
 {
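The TODO added in the Channel hunk above points at the duplication between Channel::MixOrReplaceAudioWithFile, Channel::MixAudioWithFile, and the TransmitMixer variant that follows. Purely as an illustration of that TODO, and not part of this patch, such a shared helper could look roughly like the sketch below; it assumes it would sit alongside the existing voice_engine code so that FilePlayer, AudioFrame, and Utility::MixWithSat are in scope, and the name, signature, and use of frame.id_ are invented.

// Illustrative only: one helper for the duplicated "read 10 ms from the file
// player, then mix into or replace the frame" blocks.
static int MixOrReplaceWithFile(FilePlayer& player,
                                AudioFrame& frame,
                                int mixing_frequency,
                                bool mix_with_existing_audio) {
    int16_t file_buffer[640];  // same scratch size the existing code allocates
    int file_samples = 0;
    if (player.Get10msAudioFromFile(file_buffer, file_samples,
                                    mixing_frequency) != 0) {
        return -1;
    }
    if (mix_with_existing_audio) {
        // File data is currently always mono; mix it into the (possibly
        // stereo) frame with saturation, as the existing call sites do.
        Utility::MixWithSat(frame.data_, frame.num_channels_,
                            file_buffer, 1, file_samples);
    } else {
        // Replace the frame contents with the file data.
        frame.UpdateFrame(frame.id_, -1, file_buffer, file_samples,
                          mixing_frequency, AudioFrame::kNormalSpeech,
                          AudioFrame::kVadUnknown, 1);
    }
    return 0;
}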
@@ -1229,8 +1229,7 @@ WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
 {
 scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]);
-
-WebRtc_UWord32 fileSamples(0);
+int fileSamples(0);
 
 {
 CriticalSectionScoped cs(&_critSect);
 if (_filePlayerPtr == NULL)
@@ -1260,10 +1259,10 @@ WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
 // Currently file stream is always mono.
 // TODO(xians): Change the code when FilePlayer supports real stereo.
 Utility::MixWithSat(_audioFrame.data_,
-static_cast<int>(_audioFrame.num_channels_),
+_audioFrame.num_channels_,
 fileBuffer.get(),
 1,
-static_cast<int>(fileSamples));
+fileSamples);
 } else
 {
 // Replace ACM audio with file.
@@ -1272,12 +1271,11 @@ WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
 _audioFrame.UpdateFrame(-1,
 -1,
 fileBuffer.get(),
-static_cast<WebRtc_UWord16>(fileSamples),
+fileSamples,
 mixingFrequency,
 AudioFrame::kNormalSpeech,
 AudioFrame::kVadUnknown,
 1);
-
 }
 return 0;
 }
@@ -256,7 +256,7 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
 _shared->output_mixer()->GetMixedAudio(samplesPerSec, nChannels,
 _audioFrame);
 
-assert(nSamples == _audioFrame.samples_per_channel_);
+assert(static_cast<int>(nSamples) == _audioFrame.samples_per_channel_);
 assert(samplesPerSec ==
 static_cast<WebRtc_UWord32>(_audioFrame.sample_rate_hz_));
 
@@ -628,6 +628,8 @@ int VoEFileImpl::StopRecordingMicrophone()
 return _shared->transmit_mixer()->StopRecordingMicrophone();
 }
 
+// TODO(andrew): a cursory inspection suggests there's a large amount of
+// overlap in these convert functions which could be refactored to a helper.
 int VoEFileImpl::ConvertPCMToWAV(const char* fileNameInUTF8,
 const char* fileNameOutUTF8)
 {
@@ -677,7 +679,7 @@ int VoEFileImpl::ConvertPCMToWAV(const char* fileNameInUTF8,
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
@@ -767,7 +769,7 @@ int VoEFileImpl::ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut)
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
@@ -855,7 +857,7 @@ int VoEFileImpl::ConvertWAVToPCM(const char* fileNameInUTF8,
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
@@ -947,7 +949,7 @@ int VoEFileImpl::ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut)
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
@@ -1033,7 +1035,7 @@ int VoEFileImpl::ConvertPCMToCompressed(const char* fileNameInUTF8,
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
@@ -1125,7 +1127,7 @@ int VoEFileImpl::ConvertPCMToCompressed(InStream* streamIn,
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
@@ -1215,7 +1217,7 @@ int VoEFileImpl::ConvertCompressedToPCM(const char* fileNameInUTF8,
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
@@ -1312,7 +1314,7 @@ int VoEFileImpl::ConvertCompressedToPCM(InStream* streamIn,
 // Run throught the file
 AudioFrame audioFrame;
 WebRtc_Word16 decodedData[160];
-WebRtc_UWord32 decLength=0;
+int decLength=0;
 const WebRtc_UWord32 frequency = 16000;
 
 while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))