Simplify AudioBuffer::mixed_low_pass_data API
R=andrew@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/21869004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6715 4adac7df-926f-26a2-2b94-8c16560cd09d
commit 2561d52460
parent af93fc08a1
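
What the API change means at the call sites, sketched from the gain_control_impl.cc and voice_detection_impl.cc hunks further down. This is an illustrative before/after rather than code from the commit; audio stands for the AudioBuffer* those components receive.

  // Before: each caller branched on channel count and triggered the mix itself.
  const int16_t* mixed_data = audio->low_pass_split_data(0);
  if (audio->num_channels() > 1) {
    audio->CopyAndMixLowPass(1);
    mixed_data = audio->mixed_low_pass_data(0);
  }

  // After: a single accessor; AudioBuffer downmixes stereo to mono lazily and
  // caches the result until a non-const accessor invalidates it.
  const int16_t* mixed_data = audio->mixed_low_pass_data();
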
@@ -51,7 +51,6 @@ int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
   return -1;
 }
 
-
 void StereoToMono(const float* left, const float* right, float* out,
                   int samples_per_channel) {
   for (int i = 0; i < samples_per_channel; ++i) {
@@ -155,8 +154,7 @@ AudioBuffer::AudioBuffer(int input_samples_per_channel,
       num_proc_channels_(num_process_channels),
       output_samples_per_channel_(output_samples_per_channel),
       samples_per_split_channel_(proc_samples_per_channel_),
-      num_mixed_channels_(0),
-      num_mixed_low_pass_channels_(0),
+      mixed_low_pass_valid_(false),
       reference_copied_(false),
       activity_(AudioFrame::kVadUnknown),
       keyboard_data_(NULL),
@@ -278,8 +276,7 @@ void AudioBuffer::CopyTo(int samples_per_channel,
 
 void AudioBuffer::InitForNewData() {
   keyboard_data_ = NULL;
-  num_mixed_channels_ = 0;
-  num_mixed_low_pass_channels_ = 0;
+  mixed_low_pass_valid_ = false;
   reference_copied_ = false;
   activity_ = AudioFrame::kVadUnknown;
 }
@@ -289,6 +286,7 @@ const int16_t* AudioBuffer::data(int channel) const {
 }
 
 int16_t* AudioBuffer::data(int channel) {
+  mixed_low_pass_valid_ = false;
   const AudioBuffer* t = this;
   return const_cast<int16_t*>(t->data(channel));
 }
@@ -298,6 +296,7 @@ const float* AudioBuffer::data_f(int channel) const {
 }
 
 float* AudioBuffer::data_f(int channel) {
+  mixed_low_pass_valid_ = false;
   const AudioBuffer* t = this;
   return const_cast<float*>(t->data_f(channel));
 }
@@ -308,6 +307,7 @@ const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
 }
 
 int16_t* AudioBuffer::low_pass_split_data(int channel) {
+  mixed_low_pass_valid_ = false;
   const AudioBuffer* t = this;
   return const_cast<int16_t*>(t->low_pass_split_data(channel));
 }
@@ -318,6 +318,7 @@ const float* AudioBuffer::low_pass_split_data_f(int channel) const {
 }
 
 float* AudioBuffer::low_pass_split_data_f(int channel) {
+  mixed_low_pass_valid_ = false;
   const AudioBuffer* t = this;
   return const_cast<float*>(t->low_pass_split_data_f(channel));
 }
@@ -341,12 +342,26 @@ float* AudioBuffer::high_pass_split_data_f(int channel) {
   return const_cast<float*>(t->high_pass_split_data_f(channel));
 }
 
-const int16_t* AudioBuffer::mixed_data(int channel) const {
-  return mixed_channels_->channel(channel);
-}
-
-const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
-  return mixed_low_pass_channels_->channel(channel);
+const int16_t* AudioBuffer::mixed_low_pass_data() {
+  // Currently only mixing stereo to mono is supported.
+  assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);
+
+  if (num_proc_channels_ == 1) {
+    return low_pass_split_data(0);
+  }
+
+  if (!mixed_low_pass_valid_) {
+    if (!mixed_low_pass_channels_.get()) {
+      mixed_low_pass_channels_.reset(
+          new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
+    }
+    StereoToMono(low_pass_split_data(0),
+                 low_pass_split_data(1),
+                 mixed_low_pass_channels_->data(),
+                 samples_per_split_channel_);
+    mixed_low_pass_valid_ = true;
+  }
+  return mixed_low_pass_channels_->data();
 }
 
 const int16_t* AudioBuffer::low_pass_reference(int channel) const {
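
The new accessor is a compute-on-demand cache: the non-const accessors earlier in the file clear mixed_low_pass_valid_ whenever the samples may change, and the next call to mixed_low_pass_data() remixes and re-validates. A minimal standalone sketch of that pattern follows; MonoMixCache and its members are made-up names, not WebRTC code, and the (left + right) >> 1 average only assumes StereoToMono-style behaviour.

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Hypothetical illustration of the caching scheme: mutable access invalidates
  // the cached mono mix; the read path recomputes it only when needed.
  class MonoMixCache {
   public:
    explicit MonoMixCache(int samples)
        : left_(samples), right_(samples), mix_(samples) {}

    // Non-const access may change the samples, so drop the cached mix.
    int16_t* channel(int idx) {
      mix_valid_ = false;
      return idx == 0 ? left_.data() : right_.data();
    }

    // Returns the mono downmix, remixing only if a channel was touched.
    const int16_t* mono() {
      if (!mix_valid_) {
        for (std::size_t i = 0; i < mix_.size(); ++i) {
          mix_[i] = static_cast<int16_t>(
              (static_cast<int32_t>(left_[i]) + right_[i]) >> 1);
        }
        mix_valid_ = true;
      }
      return mix_.data();
    }

   private:
    std::vector<int16_t> left_, right_, mix_;
    bool mix_valid_ = false;
  };
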
@@ -433,42 +448,6 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
   }
 }
 
-void AudioBuffer::CopyAndMix(int num_mixed_channels) {
-  // We currently only support the stereo to mono case.
-  assert(num_proc_channels_ == 2);
-  assert(num_mixed_channels == 1);
-  if (!mixed_channels_.get()) {
-    mixed_channels_.reset(
-        new ChannelBuffer<int16_t>(proc_samples_per_channel_,
-                                   num_mixed_channels));
-  }
-
-  StereoToMono(channels_->ibuf()->channel(0),
-               channels_->ibuf()->channel(1),
-               mixed_channels_->channel(0),
-               proc_samples_per_channel_);
-
-  num_mixed_channels_ = num_mixed_channels;
-}
-
-void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
-  // We currently only support the stereo to mono case.
-  assert(num_proc_channels_ == 2);
-  assert(num_mixed_channels == 1);
-  if (!mixed_low_pass_channels_.get()) {
-    mixed_low_pass_channels_.reset(
-        new ChannelBuffer<int16_t>(samples_per_split_channel_,
-                                   num_mixed_channels));
-  }
-
-  StereoToMono(low_pass_split_data(0),
-               low_pass_split_data(1),
-               mixed_low_pass_channels_->channel(0),
-               samples_per_split_channel_);
-
-  num_mixed_low_pass_channels_ = num_mixed_channels;
-}
-
 void AudioBuffer::CopyLowPassToReference() {
   reference_copied_ = true;
   if (!low_pass_reference_channels_.get()) {
@@ -63,8 +63,9 @@ class AudioBuffer {
   const int16_t* low_pass_split_data(int channel) const;
   int16_t* high_pass_split_data(int channel);
   const int16_t* high_pass_split_data(int channel) const;
-  const int16_t* mixed_data(int channel) const;
-  const int16_t* mixed_low_pass_data(int channel) const;
+  // Returns a pointer to the low-pass data downmixed to mono. If this data
+  // isn't already available it re-calculates it.
+  const int16_t* mixed_low_pass_data();
   const int16_t* low_pass_reference(int channel) const;
 
   // Float versions of the accessors, with automatic conversion back and forth
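
From a component's point of view the new declaration reads as below. The usage sketch is hypothetical (AnalyzeRenderAudio is made up, and it assumes audio_buffer.h is included); mixed_low_pass_data() and samples_per_split_channel() are the accessors visible in this change.

  // Hypothetical caller: no channel-count branch or explicit CopyAndMixLowPass()
  // step is needed before reading mono low-pass data.
  void AnalyzeRenderAudio(AudioBuffer* audio) {
    const int16_t* mono = audio->mixed_low_pass_data();
    int num_samples = audio->samples_per_split_channel();
    // ... hand |mono| and |num_samples| to a mono-only analysis routine ...
  }
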
@@ -85,7 +86,6 @@ class AudioBuffer {
 
   // Use for int16 interleaved data.
   void DeinterleaveFrom(AudioFrame* audioFrame);
-  void InterleaveTo(AudioFrame* audioFrame) const;
   // If |data_changed| is false, only the non-audio data members will be copied
   // to |frame|.
   void InterleaveTo(AudioFrame* frame, bool data_changed) const;
@@ -97,9 +97,6 @@ class AudioBuffer {
   void CopyTo(int samples_per_channel,
               AudioProcessing::ChannelLayout layout,
               float* const* data);
-
-  void CopyAndMix(int num_mixed_channels);
-  void CopyAndMixLowPass(int num_mixed_channels);
   void CopyLowPassToReference();
 
  private:
@@ -112,8 +109,7 @@ class AudioBuffer {
   const int num_proc_channels_;
   const int output_samples_per_channel_;
   int samples_per_split_channel_;
-  int num_mixed_channels_;
-  int num_mixed_low_pass_channels_;
+  bool mixed_low_pass_valid_;
   bool reference_copied_;
   AudioFrame::VADActivity activity_;
 
@@ -121,7 +117,6 @@ class AudioBuffer {
   scoped_ptr<IFChannelBuffer> channels_;
   scoped_ptr<SplitChannelBuffer> split_channels_;
   scoped_ptr<SplitFilterStates[]> filter_states_;
-  scoped_ptr<ChannelBuffer<int16_t> > mixed_channels_;
   scoped_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
   scoped_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
   scoped_ptr<ChannelBuffer<float> > input_buffer_;
@@ -59,17 +59,11 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
 
   assert(audio->samples_per_split_channel() <= 160);
 
-  const int16_t* mixed_data = audio->low_pass_split_data(0);
-  if (audio->num_channels() > 1) {
-    audio->CopyAndMixLowPass(1);
-    mixed_data = audio->mixed_low_pass_data(0);
-  }
-
   for (int i = 0; i < num_handles(); i++) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
     int err = WebRtcAgc_AddFarend(
         my_handle,
-        mixed_data,
+        audio->mixed_low_pass_data(),
         static_cast<int16_t>(audio->samples_per_split_channel()));
 
     if (err != apm_->kNoError) {
@@ -61,17 +61,11 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
   }
   assert(audio->samples_per_split_channel() <= 160);
 
-  const int16_t* mixed_data = audio->low_pass_split_data(0);
-  if (audio->num_channels() > 1) {
-    audio->CopyAndMixLowPass(1);
-    mixed_data = audio->mixed_low_pass_data(0);
-  }
-
   // TODO(ajm): concatenate data in frame buffer here.
 
   int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
                                   apm_->proc_split_sample_rate_hz(),
-                                  mixed_data,
+                                  audio->mixed_low_pass_data(),
                                   frame_size_samples_);
   if (vad_ret == 0) {
     stream_has_voice_ = false;