Implement AudioEncoderPcmU/A classes and convert AudioDecoder tests

BUG=3926
R=kjellander@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/29799004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7481 4adac7df-926f-26a2-2b94-8c16560cd09d
henrik.lundin@webrtc.org
2014-10-21 12:48:29 +00:00
parent 78ea06dd34
commit def1e97ed2
6 changed files with 247 additions and 56 deletions
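For orientation, a minimal sketch of how the new encoder classes are meant to be constructed and queried (illustrative only, not part of this change; the 20 ms frame size is just an example):

#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"

void SketchConfigurePcmu() {
  webrtc::AudioEncoderPcmU::Config config;
  config.frame_size_ms = 20;  // Must be a multiple of 10 ms.
  config.num_channels = 1;
  webrtc::AudioEncoderPcmU encoder(config);
  // G.711 always runs at 8 kHz, so a 20 ms packet is assembled from two
  // 10 ms input blocks:
  //   encoder.sample_rate_hz() == 8000
  //   encoder.num_10ms_frames_per_packet() == 2
}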

@@ -147,7 +147,9 @@ config("g711_config") {
source_set("g711") {
sources = [
"codecs/g711/include/audio_encoder_pcm.h",
"codecs/g711/include/g711_interface.h",
"codecs/g711/audio_encoder_pcm.cc",
"codecs/g711/g711_interface.c",
"codecs/g711/g711.c",
"codecs/g711/g711.h",

@@ -12,7 +12,6 @@
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
#include <algorithm>
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/typedefs.h"
@@ -28,24 +27,27 @@ class AudioEncoder {
// Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *
// num_channels() samples). Multi-channel audio must be sample-interleaved.
// If successful, the encoder produces zero or more bytes of output in
- // |encoded|, and returns the number of bytes. In case of error, -1 is
- // returned. It is an error for the encoder to attempt to produce more than
- // |max_encoded_bytes| bytes of output.
- ssize_t Encode(uint32_t timestamp,
+ // |encoded|, and provides the number of encoded bytes in |encoded_bytes|.
+ // In case of error, false is returned, otherwise true. It is an error for the
+ // encoder to attempt to produce more than |max_encoded_bytes| bytes of
+ // output.
+ bool Encode(uint32_t timestamp,
const int16_t* audio,
size_t num_samples,
size_t max_encoded_bytes,
uint8_t* encoded,
+ size_t* encoded_bytes,
uint32_t* encoded_timestamp) {
CHECK_EQ(num_samples,
static_cast<size_t>(sample_rate_hz() / 100 * num_channels()));
- ssize_t num_bytes =
- Encode(timestamp, audio, max_encoded_bytes, encoded, encoded_timestamp);
- CHECK_LE(num_bytes,
- static_cast<ssize_t>(std::min(
- max_encoded_bytes,
- static_cast<size_t>(std::numeric_limits<ssize_t>::max()))));
- return num_bytes;
+ bool ret = Encode(timestamp,
+ audio,
+ max_encoded_bytes,
+ encoded,
+ encoded_bytes,
+ encoded_timestamp);
+ CHECK_LE(*encoded_bytes, max_encoded_bytes);
+ return ret;
}
// Returns the input sample rate in Hz, the number of input channels, and the
@@ -56,10 +58,11 @@ class AudioEncoder {
virtual int num_10ms_frames_per_packet() const = 0;
protected:
- virtual ssize_t Encode(uint32_t timestamp,
+ virtual bool Encode(uint32_t timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
+ size_t* encoded_bytes,
uint32_t* encoded_timestamp) = 0;
};
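The new Encode() contract is easiest to see from the caller's side. A hedged sketch (the helper name and its parameters are illustrative, not part of this change):

#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"

// Feeds one 10 ms block into any AudioEncoder and reports whether a full
// packet came out. Returns false only on encoder error; *payload_bytes stays
// 0 while the encoder is still buffering towards a complete packet.
bool SketchEncodeOneBlock(webrtc::AudioEncoder* encoder,
                          uint32_t rtp_timestamp,
                          const int16_t* audio_10ms,
                          uint8_t* payload,
                          size_t payload_size,
                          size_t* payload_bytes,
                          uint32_t* payload_timestamp) {
  const size_t samples_per_10ms = static_cast<size_t>(
      encoder->sample_rate_hz() / 100 * encoder->num_channels());
  return encoder->Encode(rtp_timestamp, audio_10ms, samples_per_10ms,
                         payload_size, payload, payload_bytes,
                         payload_timestamp);
}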

@@ -0,0 +1,100 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
#include <limits>
#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
namespace webrtc {
namespace {
int16_t NumSamplesPerFrame(int num_channels,
int frame_size_ms,
int sample_rate_hz) {
int samples_per_frame = num_channels * frame_size_ms * sample_rate_hz / 1000;
CHECK_LE(samples_per_frame, std::numeric_limits<int16_t>::max())
<< "Frame size too large.";
return static_cast<int16_t>(samples_per_frame);
}
} // namespace
AudioEncoderPcm::AudioEncoderPcm(const Config& config)
: num_channels_(config.num_channels),
num_10ms_frames_per_packet_(config.frame_size_ms / 10),
full_frame_samples_(NumSamplesPerFrame(num_channels_,
config.frame_size_ms,
kSampleRateHz)),
first_timestamp_in_buffer_(0) {
CHECK_EQ(config.frame_size_ms % 10, 0)
<< "Frame size must be an integer multiple of 10 ms.";
speech_buffer_.reserve(full_frame_samples_);
}
AudioEncoderPcm::~AudioEncoderPcm() {
}
int AudioEncoderPcm::sample_rate_hz() const {
return kSampleRateHz;
}
int AudioEncoderPcm::num_channels() const {
return num_channels_;
}
int AudioEncoderPcm::num_10ms_frames_per_packet() const {
return num_10ms_frames_per_packet_;
}
bool AudioEncoderPcm::Encode(uint32_t timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
size_t* encoded_bytes,
uint32_t* encoded_timestamp) {
const int num_samples = sample_rate_hz() / 100 * num_channels();
if (speech_buffer_.empty()) {
first_timestamp_in_buffer_ = timestamp;
}
for (int i = 0; i < num_samples; ++i) {
speech_buffer_.push_back(audio[i]);
}
if (speech_buffer_.size() < static_cast<size_t>(full_frame_samples_)) {
*encoded_bytes = 0;
return true;
}
CHECK_EQ(speech_buffer_.size(), static_cast<size_t>(full_frame_samples_));
int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
speech_buffer_.clear();
*encoded_timestamp = first_timestamp_in_buffer_;
if (ret < 0)
return false;
*encoded_bytes = static_cast<size_t>(ret);
return true;
}
int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) {
return WebRtcG711_EncodeA(NULL,
const_cast<int16_t*>(audio),
static_cast<int16_t>(input_len),
reinterpret_cast<int16_t*>(encoded));
}
int16_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) {
return WebRtcG711_EncodeU(NULL,
const_cast<int16_t*>(audio),
static_cast<int16_t>(input_len),
reinterpret_cast<int16_t*>(encoded));
}
} // namespace webrtc
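To illustrate the buffering above: with a 20 ms configuration the first 10 ms call only accumulates samples, and the packet emitted by the second call carries the timestamp of the first block. A sketch, assuming 8 kHz mono G.711 (not part of this change):

#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"

void SketchPcmaBuffering() {
  webrtc::AudioEncoderPcmA::Config config;
  config.frame_size_ms = 20;
  webrtc::AudioEncoderPcmA encoder(config);
  int16_t block[80] = {0};  // One 10 ms block at 8 kHz, mono.
  uint8_t packet[160];
  size_t bytes = 0;
  uint32_t ts = 0;
  encoder.Encode(1000, block, 80, sizeof(packet), packet, &bytes, &ts);
  // bytes == 0: the encoder is still waiting for the second 10 ms block.
  encoder.Encode(1080, block, 80, sizeof(packet), packet, &bytes, &ts);
  // bytes == 160 (G.711 produces one byte per sample) and ts == 1000, the
  // timestamp of the first block in the packet.
}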

@@ -23,9 +23,11 @@
},
'sources': [
'include/g711_interface.h',
+ 'include/audio_encoder_pcm.h',
'g711_interface.c',
'g711.c',
'g711.h',
+ 'audio_encoder_pcm.cc',
],
},
], # targets

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_ENCODER_PCM_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_ENCODER_PCM_H_
#include <vector>
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
namespace webrtc {
class AudioEncoderPcm : public AudioEncoder {
public:
struct Config {
Config() : frame_size_ms(20), num_channels(1) {}
int frame_size_ms;
int num_channels;
};
explicit AudioEncoderPcm(const Config& config);
virtual ~AudioEncoderPcm();
virtual int sample_rate_hz() const OVERRIDE;
virtual int num_channels() const OVERRIDE;
virtual int num_10ms_frames_per_packet() const OVERRIDE;
protected:
virtual bool Encode(uint32_t timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded,
size_t* encoded_bytes,
uint32_t* encoded_timestamp) OVERRIDE;
virtual int16_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) = 0;
private:
static const int kSampleRateHz = 8000;
const int num_channels_;
const int num_10ms_frames_per_packet_;
const int16_t full_frame_samples_;
std::vector<int16_t> speech_buffer_;
uint32_t first_timestamp_in_buffer_;
};
class AudioEncoderPcmA : public AudioEncoderPcm {
public:
explicit AudioEncoderPcmA(const Config& config) : AudioEncoderPcm(config) {}
protected:
virtual int16_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) OVERRIDE;
};
class AudioEncoderPcmU : public AudioEncoderPcm {
public:
explicit AudioEncoderPcmU(const Config& config) : AudioEncoderPcm(config) {}
protected:
virtual int16_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) OVERRIDE;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_ENCODER_PCM_H_
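AudioEncoderPcm handles the 10 ms buffering and packetization, so a subclass only supplies the per-frame companding call. A hypothetical sketch of plugging in another 8 kHz, byte-per-sample codec (WebRtcFooCodec_Encode is an invented placeholder, not a real WebRTC function):

namespace webrtc {

// Invented placeholder, declared only to keep the sketch self-contained.
int16_t WebRtcFooCodec_Encode(const int16_t* speech_in,
                              int16_t len,
                              uint8_t* encoded);

class AudioEncoderFoo : public AudioEncoderPcm {
 public:
  explicit AudioEncoderFoo(const Config& config) : AudioEncoderPcm(config) {}

 protected:
  virtual int16_t EncodeCall(const int16_t* audio,
                             size_t input_len,
                             uint8_t* encoded) OVERRIDE {
    // Like the G.711 wrappers, return the number of bytes written or a
    // negative value on error.
    return WebRtcFooCodec_Encode(audio, static_cast<int16_t>(input_len),
                                 encoded);
  }
};

}  // namespace webrtc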

@@ -21,6 +21,7 @@
#include "webrtc/modules/audio_coding/codecs/celt/include/celt_interface.h"
#endif
#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
@@ -28,6 +29,7 @@
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
#include "webrtc/system_wrappers/interface/data_log.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
@@ -43,6 +45,7 @@ class AudioDecoderTest : public ::testing::Test {
data_length_(0),
encoded_bytes_(0),
channels_(1),
+ output_timestamp_(0),
decoder_(NULL) {
input_file_ = webrtc::test::ProjectRootPath() +
"resources/audio_coding/testfile32kHz.pcm";
@@ -90,9 +93,25 @@ class AudioDecoderTest : public ::testing::Test {
virtual void InitEncoder() { }
// This method must be implemented for all tests derived from this class.
- virtual int EncodeFrame(const int16_t* input, size_t input_len,
- uint8_t* output) = 0;
+ // TODO(henrik.lundin) Change return type to size_t once most/all overriding
+ // implementations are gone.
+ virtual int EncodeFrame(const int16_t* input,
+ size_t input_len_samples,
+ uint8_t* output) {
+ size_t enc_len_bytes = 0;
+ for (int i = 0; i < audio_encoder_->num_10ms_frames_per_packet(); ++i) {
+ EXPECT_EQ(0u, enc_len_bytes);
+ EXPECT_TRUE(audio_encoder_->Encode(0,
+ input,
+ audio_encoder_->sample_rate_hz() / 100,
+ data_length_ * 2,
+ output,
+ &enc_len_bytes,
+ &output_timestamp_));
+ }
+ EXPECT_EQ(input_len_samples, enc_len_bytes);
+ return static_cast<int>(enc_len_bytes);
+ }
// Encodes and decodes audio. The absolute difference between the input and
// output is compared vs |tolerance|, and the mean-squared error is compared
@@ -217,7 +236,9 @@ class AudioDecoderTest : public ::testing::Test {
size_t data_length_;
size_t encoded_bytes_;
size_t channels_;
+ uint32_t output_timestamp_;
AudioDecoder* decoder_;
+ scoped_ptr<AudioEncoder> audio_encoder_;
};
class AudioDecoderPcmUTest : public AudioDecoderTest {
@@ -226,17 +247,9 @@ class AudioDecoderPcmUTest : public AudioDecoderTest {
frame_size_ = 160;
data_length_ = 10 * frame_size_;
decoder_ = new AudioDecoderPcmU;
- assert(decoder_);
- }
- virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
- uint8_t* output) {
- int enc_len_bytes =
- WebRtcG711_EncodeU(NULL, const_cast<int16_t*>(input),
- static_cast<int>(input_len_samples),
- reinterpret_cast<int16_t*>(output));
- EXPECT_EQ(input_len_samples, static_cast<size_t>(enc_len_bytes));
- return enc_len_bytes;
+ AudioEncoderPcmU::Config config;
+ config.frame_size_ms = static_cast<int>(frame_size_ / 8);
+ audio_encoder_.reset(new AudioEncoderPcmU(config));
}
};
@@ -246,17 +259,9 @@ class AudioDecoderPcmATest : public AudioDecoderTest {
frame_size_ = 160;
data_length_ = 10 * frame_size_;
decoder_ = new AudioDecoderPcmA;
- assert(decoder_);
- }
- virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
- uint8_t* output) {
- int enc_len_bytes =
- WebRtcG711_EncodeA(NULL, const_cast<int16_t*>(input),
- static_cast<int>(input_len_samples),
- reinterpret_cast<int16_t*>(output));
- EXPECT_EQ(input_len_samples, static_cast<size_t>(enc_len_bytes));
- return enc_len_bytes;
+ AudioEncoderPcmA::Config config;
+ config.frame_size_ms = static_cast<int>(frame_size_ / 8);
+ audio_encoder_.reset(new AudioEncoderPcmA(config));
}
};
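The frame_size_ / 8 expressions above convert a frame length in samples to milliseconds: G.711 runs at 8 kHz, i.e. 8 samples per millisecond. Sketch of the arithmetic (not part of this change):

const size_t frame_size_samples = 160;   // As in the fixtures above.
const int samples_per_ms = 8000 / 1000;  // 8 kHz mono.
const int frame_size_ms =
    static_cast<int>(frame_size_samples) / samples_per_ms;
// frame_size_ms == 20, matching AudioEncoderPcm*::Config::frame_size_ms.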