Simplify audio_buffer APIs

Now there is only one API to get the data or the channels (one const and one non-const), either merged or by band.
The band is passed in as a parameter, instead of calling different methods.

BUG=webrtc:3146
R=andrew@webrtc.org, bjornv@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/27249004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7790 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
aluebs@webrtc.org 2014-12-03 01:06:35 +00:00
parent ceca014b8b
commit a7384a1126
8 changed files with 109 additions and 171 deletions

View File

@ -199,16 +199,15 @@ void AudioBuffer::InitForNewData() {
activity_ = AudioFrame::kVadUnknown; activity_ = AudioFrame::kVadUnknown;
} }
const int16_t* AudioBuffer::data(int channel) const { const int16_t* AudioBuffer::data_const(int channel) const {
return channels_->ibuf_const()->channel(channel); return channels_const()[channel];
} }
int16_t* AudioBuffer::data(int channel) { int16_t* AudioBuffer::data(int channel) {
mixed_low_pass_valid_ = false; return channels()[channel];
return channels_->ibuf()->channel(channel);
} }
const int16_t* const* AudioBuffer::channels() const { const int16_t* const* AudioBuffer::channels_const() const {
return channels_->ibuf_const()->channels(); return channels_->ibuf_const()->channels();
} }
@ -217,16 +216,42 @@ int16_t* const* AudioBuffer::channels() {
return channels_->ibuf()->channels(); return channels_->ibuf()->channels();
} }
const float* AudioBuffer::data_f(int channel) const { const int16_t* AudioBuffer::split_data_const(int channel, Band band) const {
return channels_->fbuf_const()->channel(channel); const int16_t* const* chs = split_channels_const(band);
return chs ? chs[channel] : NULL;
}
int16_t* AudioBuffer::split_data(int channel, Band band) {
int16_t* const* chs = split_channels(band);
return chs ? chs[channel] : NULL;
}
const int16_t* const* AudioBuffer::split_channels_const(Band band) const {
if (split_channels_.size() > static_cast<size_t>(band)) {
return split_channels_[band]->ibuf_const()->channels();
} else {
return band == kBand0To8kHz ? channels_->ibuf_const()->channels() : NULL;
}
}
int16_t* const* AudioBuffer::split_channels(Band band) {
mixed_low_pass_valid_ = false;
if (split_channels_.size() > static_cast<size_t>(band)) {
return split_channels_[band]->ibuf()->channels();
} else {
return band == kBand0To8kHz ? channels_->ibuf()->channels() : NULL;
}
}
const float* AudioBuffer::data_const_f(int channel) const {
return channels_const_f()[channel];
} }
float* AudioBuffer::data_f(int channel) { float* AudioBuffer::data_f(int channel) {
mixed_low_pass_valid_ = false; return channels_f()[channel];
return channels_->fbuf()->channel(channel);
} }
const float* const* AudioBuffer::channels_f() const { const float* const* AudioBuffer::channels_const_f() const {
return channels_->fbuf_const()->channels(); return channels_->fbuf_const()->channels();
} }
@ -235,114 +260,31 @@ float* const* AudioBuffer::channels_f() {
return channels_->fbuf()->channels(); return channels_->fbuf()->channels();
} }
const int16_t* AudioBuffer::low_pass_split_data(int channel) const { const float* AudioBuffer::split_data_const_f(int channel, Band band) const {
return split_channels_.size() > 0 const float* const* chs = split_channels_const_f(band);
? split_channels_[0]->ibuf_const()->channel(channel) return chs ? chs[channel] : NULL;
: data(channel);
} }
int16_t* AudioBuffer::low_pass_split_data(int channel) { float* AudioBuffer::split_data_f(int channel, Band band) {
float* const* chs = split_channels_f(band);
return chs ? chs[channel] : NULL;
}
const float* const* AudioBuffer::split_channels_const_f(Band band) const {
if (split_channels_.size() > static_cast<size_t>(band)) {
return split_channels_[band]->fbuf_const()->channels();
} else {
return band == kBand0To8kHz ? channels_->fbuf_const()->channels() : NULL;
}
}
float* const* AudioBuffer::split_channels_f(Band band) {
mixed_low_pass_valid_ = false; mixed_low_pass_valid_ = false;
return split_channels_.size() > 0 if (split_channels_.size() > static_cast<size_t>(band)) {
? split_channels_[0]->ibuf()->channel(channel) return split_channels_[band]->fbuf()->channels();
: data(channel); } else {
} return band == kBand0To8kHz ? channels_->fbuf()->channels() : NULL;
}
const int16_t* const* AudioBuffer::low_pass_split_channels() const {
return split_channels_.size() > 0
? split_channels_[0]->ibuf_const()->channels()
: channels();
}
int16_t* const* AudioBuffer::low_pass_split_channels() {
mixed_low_pass_valid_ = false;
return split_channels_.size() > 0 ? split_channels_[0]->ibuf()->channels()
: channels();
}
const float* AudioBuffer::low_pass_split_data_f(int channel) const {
return split_channels_.size() > 0
? split_channels_[0]->fbuf_const()->channel(channel)
: data_f(channel);
}
float* AudioBuffer::low_pass_split_data_f(int channel) {
mixed_low_pass_valid_ = false;
return split_channels_.size() > 0
? split_channels_[0]->fbuf()->channel(channel)
: data_f(channel);
}
const float* const* AudioBuffer::low_pass_split_channels_f() const {
return split_channels_.size() > 0
? split_channels_[0]->fbuf_const()->channels()
: channels_f();
}
float* const* AudioBuffer::low_pass_split_channels_f() {
mixed_low_pass_valid_ = false;
return split_channels_.size() > 0
? split_channels_[0]->fbuf()->channels()
: channels_f();
}
const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
return split_channels_.size() > 1
? split_channels_[1]->ibuf_const()->channel(channel)
: NULL;
}
int16_t* AudioBuffer::high_pass_split_data(int channel) {
return split_channels_.size() > 1
? split_channels_[1]->ibuf()->channel(channel)
: NULL;
}
const int16_t* const* AudioBuffer::high_pass_split_channels() const {
return split_channels_.size() > 1
? split_channels_[1]->ibuf_const()->channels()
: NULL;
}
int16_t* const* AudioBuffer::high_pass_split_channels() {
return split_channels_.size() > 1 ? split_channels_[1]->ibuf()->channels()
: NULL;
}
const float* AudioBuffer::high_pass_split_data_f(int channel) const {
return split_channels_.size() > 1
? split_channels_[1]->fbuf_const()->channel(channel)
: NULL;
}
float* AudioBuffer::high_pass_split_data_f(int channel) {
return split_channels_.size() > 1
? split_channels_[1]->fbuf()->channel(channel)
: NULL;
}
const float* const* AudioBuffer::high_pass_split_channels_f() const {
return split_channels_.size() > 1
? split_channels_[1]->fbuf_const()->channels()
: NULL;
}
float* const* AudioBuffer::high_pass_split_channels_f() {
return split_channels_.size() > 1
? split_channels_[1]->fbuf()->channels()
: NULL;
}
const float* const* AudioBuffer::super_high_pass_split_channels_f() const {
return split_channels_.size() > 2
? split_channels_[2]->fbuf_const()->channels()
: NULL;
}
float* const* AudioBuffer::super_high_pass_split_channels_f() {
return split_channels_.size() > 2
? split_channels_[2]->fbuf()->channels()
: NULL;
} }
const int16_t* AudioBuffer::mixed_low_pass_data() { const int16_t* AudioBuffer::mixed_low_pass_data() {
@ -350,7 +292,7 @@ const int16_t* AudioBuffer::mixed_low_pass_data() {
assert(num_proc_channels_ == 1 || num_proc_channels_ == 2); assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);
if (num_proc_channels_ == 1) { if (num_proc_channels_ == 1) {
return low_pass_split_data(0); return split_data_const(0, kBand0To8kHz);
} }
if (!mixed_low_pass_valid_) { if (!mixed_low_pass_valid_) {
@ -358,8 +300,8 @@ const int16_t* AudioBuffer::mixed_low_pass_data() {
mixed_low_pass_channels_.reset( mixed_low_pass_channels_.reset(
new ChannelBuffer<int16_t>(samples_per_split_channel_, 1)); new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
} }
StereoToMono(low_pass_split_data(0), StereoToMono(split_data_const(0, kBand0To8kHz),
low_pass_split_data(1), split_data_const(1, kBand0To8kHz),
mixed_low_pass_channels_->data(), mixed_low_pass_channels_->data(),
samples_per_split_channel_); samples_per_split_channel_);
mixed_low_pass_valid_ = true; mixed_low_pass_valid_ = true;
@ -462,7 +404,8 @@ void AudioBuffer::CopyLowPassToReference() {
num_proc_channels_)); num_proc_channels_));
} }
for (int i = 0; i < num_proc_channels_; i++) { for (int i = 0; i < num_proc_channels_; i++) {
low_pass_reference_channels_->CopyFrom(low_pass_split_data(i), i); low_pass_reference_channels_->CopyFrom(split_data_const(i, kBand0To8kHz),
i);
} }
} }

View File

@ -27,6 +27,12 @@ namespace webrtc {
class PushSincResampler; class PushSincResampler;
class IFChannelBuffer; class IFChannelBuffer;
enum Band {
kBand0To8kHz = 0,
kBand8To16kHz = 1,
kBand16To24kHz = 2
};
class AudioBuffer { class AudioBuffer {
public: public:
// TODO(ajm): Switch to take ChannelLayouts. // TODO(ajm): Switch to take ChannelLayouts.
@ -46,17 +52,14 @@ class AudioBuffer {
// in memory. Prefer to use the const variants of each accessor when // in memory. Prefer to use the const variants of each accessor when
// possible, since they incur less float<->int16 conversion overhead. // possible, since they incur less float<->int16 conversion overhead.
int16_t* data(int channel); int16_t* data(int channel);
const int16_t* data(int channel) const; const int16_t* data_const(int channel) const;
int16_t* const* channels(); int16_t* const* channels();
const int16_t* const* channels() const; const int16_t* const* channels_const() const;
int16_t* low_pass_split_data(int channel); int16_t* split_data(int channel, Band band);
const int16_t* low_pass_split_data(int channel) const; const int16_t* split_data_const(int channel, Band band) const;
int16_t* high_pass_split_data(int channel); int16_t* const* split_channels(Band band);
const int16_t* high_pass_split_data(int channel) const; const int16_t* const* split_channels_const(Band band) const;
int16_t* const* low_pass_split_channels();
const int16_t* const* low_pass_split_channels() const;
int16_t* const* high_pass_split_channels();
const int16_t* const* high_pass_split_channels() const;
// Returns a pointer to the low-pass data downmixed to mono. If this data // Returns a pointer to the low-pass data downmixed to mono. If this data
// isn't already available it re-calculates it. // isn't already available it re-calculates it.
const int16_t* mixed_low_pass_data(); const int16_t* mixed_low_pass_data();
@ -65,22 +68,13 @@ class AudioBuffer {
// Float versions of the accessors, with automatic conversion back and forth // Float versions of the accessors, with automatic conversion back and forth
// as necessary. The range of the numbers are the same as for int16_t. // as necessary. The range of the numbers are the same as for int16_t.
float* data_f(int channel); float* data_f(int channel);
const float* data_f(int channel) const; const float* data_const_f(int channel) const;
float* const* channels_f(); float* const* channels_f();
const float* const* channels_f() const; const float* const* channels_const_f() const;
float* split_data_f(int channel, Band band);
float* low_pass_split_data_f(int channel); const float* split_data_const_f(int channel, Band band) const;
const float* low_pass_split_data_f(int channel) const; float* const* split_channels_f(Band band);
float* high_pass_split_data_f(int channel); const float* const* split_channels_const_f(Band band) const;
const float* high_pass_split_data_f(int channel) const;
float* const* low_pass_split_channels_f();
const float* const* low_pass_split_channels_f() const;
float* const* high_pass_split_channels_f();
const float* const* high_pass_split_channels_f() const;
float* const* super_high_pass_split_channels_f();
const float* const* super_high_pass_split_channels_f() const;
const float* keyboard_data() const; const float* keyboard_data() const;

View File

@ -89,7 +89,7 @@ int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
Handle* my_handle = static_cast<Handle*>(handle(handle_index)); Handle* my_handle = static_cast<Handle*>(handle(handle_index));
err = WebRtcAec_BufferFarend( err = WebRtcAec_BufferFarend(
my_handle, my_handle,
audio->low_pass_split_data_f(j), audio->split_data_const_f(j, kBand0To8kHz),
static_cast<int16_t>(audio->samples_per_split_channel())); static_cast<int16_t>(audio->samples_per_split_channel()));
if (err != apm_->kNoError) { if (err != apm_->kNoError) {
@ -129,10 +129,10 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
Handle* my_handle = handle(handle_index); Handle* my_handle = handle(handle_index);
err = WebRtcAec_Process( err = WebRtcAec_Process(
my_handle, my_handle,
audio->low_pass_split_data_f(i), audio->split_data_const_f(i, kBand0To8kHz),
audio->high_pass_split_data_f(i), audio->split_data_const_f(i, kBand8To16kHz),
audio->low_pass_split_data_f(i), audio->split_data_f(i, kBand0To8kHz),
audio->high_pass_split_data_f(i), audio->split_data_f(i, kBand8To16kHz),
static_cast<int16_t>(audio->samples_per_split_channel()), static_cast<int16_t>(audio->samples_per_split_channel()),
apm_->stream_delay_ms(), apm_->stream_delay_ms(),
stream_drift_samples_); stream_drift_samples_);

View File

@ -95,7 +95,7 @@ int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
Handle* my_handle = static_cast<Handle*>(handle(handle_index)); Handle* my_handle = static_cast<Handle*>(handle(handle_index));
err = WebRtcAecm_BufferFarend( err = WebRtcAecm_BufferFarend(
my_handle, my_handle,
audio->low_pass_split_data(j), audio->split_data_const(j, kBand0To8kHz),
static_cast<int16_t>(audio->samples_per_split_channel())); static_cast<int16_t>(audio->samples_per_split_channel()));
if (err != apm_->kNoError) { if (err != apm_->kNoError) {
@ -129,7 +129,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
// TODO(ajm): improve how this works, possibly inside AECM. // TODO(ajm): improve how this works, possibly inside AECM.
// This is kind of hacked up. // This is kind of hacked up.
const int16_t* noisy = audio->low_pass_reference(i); const int16_t* noisy = audio->low_pass_reference(i);
int16_t* clean = audio->low_pass_split_data(i); const int16_t* clean = audio->split_data_const(i, kBand0To8kHz);
if (noisy == NULL) { if (noisy == NULL) {
noisy = clean; noisy = clean;
clean = NULL; clean = NULL;
@ -140,7 +140,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
my_handle, my_handle,
noisy, noisy,
clean, clean,
audio->low_pass_split_data(i), audio->split_data(i, kBand0To8kHz),
static_cast<int16_t>(audio->samples_per_split_channel()), static_cast<int16_t>(audio->samples_per_split_channel()),
apm_->stream_delay_ms()); apm_->stream_delay_ms());

View File

@ -90,8 +90,8 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
Handle* my_handle = static_cast<Handle*>(handle(i)); Handle* my_handle = static_cast<Handle*>(handle(i));
err = WebRtcAgc_AddMic( err = WebRtcAgc_AddMic(
my_handle, my_handle,
audio->low_pass_split_data(i), audio->split_data(i, kBand0To8kHz),
audio->high_pass_split_data(i), audio->split_data(i, kBand8To16kHz),
static_cast<int16_t>(audio->samples_per_split_channel())); static_cast<int16_t>(audio->samples_per_split_channel()));
if (err != apm_->kNoError) { if (err != apm_->kNoError) {
@ -106,8 +106,8 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
err = WebRtcAgc_VirtualMic( err = WebRtcAgc_VirtualMic(
my_handle, my_handle,
audio->low_pass_split_data(i), audio->split_data(i, kBand0To8kHz),
audio->high_pass_split_data(i), audio->split_data(i, kBand8To16kHz),
static_cast<int16_t>(audio->samples_per_split_channel()), static_cast<int16_t>(audio->samples_per_split_channel()),
analog_capture_level_, analog_capture_level_,
&capture_level_out); &capture_level_out);
@ -144,11 +144,11 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
int err = WebRtcAgc_Process( int err = WebRtcAgc_Process(
my_handle, my_handle,
audio->low_pass_split_data(i), audio->split_data_const(i, kBand0To8kHz),
audio->high_pass_split_data(i), audio->split_data_const(i, kBand8To16kHz),
static_cast<int16_t>(audio->samples_per_split_channel()), static_cast<int16_t>(audio->samples_per_split_channel()),
audio->low_pass_split_data(i), audio->split_data(i, kBand0To8kHz),
audio->high_pass_split_data(i), audio->split_data(i, kBand8To16kHz),
capture_levels_[i], capture_levels_[i],
&capture_level_out, &capture_level_out,
apm_->echo_cancellation()->stream_has_echo(), apm_->echo_cancellation()->stream_has_echo(),

View File

@ -123,7 +123,7 @@ int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
for (int i = 0; i < num_handles(); i++) { for (int i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i)); Handle* my_handle = static_cast<Handle*>(handle(i));
err = Filter(my_handle, err = Filter(my_handle,
audio->low_pass_split_data(i), audio->split_data(i, kBand0To8kHz),
audio->samples_per_split_channel()); audio->samples_per_split_channel());
if (err != apm_->kNoError) { if (err != apm_->kNoError) {

View File

@ -31,7 +31,8 @@ int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0)); RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
for (int i = 0; i < audio->num_channels(); ++i) { for (int i = 0; i < audio->num_channels(); ++i) {
rms_level->Process(audio->data(i), audio->samples_per_channel()); rms_level->Process(audio->data_const(i),
audio->samples_per_channel());
} }
return AudioProcessing::kNoError; return AudioProcessing::kNoError;

View File

@ -67,7 +67,7 @@ int NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
Handle* my_handle = static_cast<Handle*>(handle(i)); Handle* my_handle = static_cast<Handle*>(handle(i));
int err = WebRtcNs_Analyze(my_handle, int err = WebRtcNs_Analyze(my_handle,
audio->low_pass_split_data_f(i)); audio->split_data_f(i, kBand0To8kHz));
if (err != apm_->kNoError) { if (err != apm_->kNoError) {
return GetHandleError(my_handle); return GetHandleError(my_handle);
} }
@ -89,16 +89,16 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
Handle* my_handle = static_cast<Handle*>(handle(i)); Handle* my_handle = static_cast<Handle*>(handle(i));
#if defined(WEBRTC_NS_FLOAT) #if defined(WEBRTC_NS_FLOAT)
err = WebRtcNs_Process(my_handle, err = WebRtcNs_Process(my_handle,
audio->low_pass_split_data_f(i), audio->split_data_f(i, kBand0To8kHz),
audio->high_pass_split_data_f(i), audio->split_data_f(i, kBand8To16kHz),
audio->low_pass_split_data_f(i), audio->split_data_f(i, kBand0To8kHz),
audio->high_pass_split_data_f(i)); audio->split_data_f(i, kBand8To16kHz));
#elif defined(WEBRTC_NS_FIXED) #elif defined(WEBRTC_NS_FIXED)
err = WebRtcNsx_Process(my_handle, err = WebRtcNsx_Process(my_handle,
audio->low_pass_split_data(i), audio->split_data(i, kBand0To8kHz),
audio->high_pass_split_data(i), audio->split_data(i, kBand8To16kHz),
audio->low_pass_split_data(i), audio->split_data(i, kBand0To8kHz),
audio->high_pass_split_data(i)); audio->split_data(i, kBand8To16kHz));
#endif #endif
if (err != apm_->kNoError) { if (err != apm_->kNoError) {