Moving encoded_bytes into EncodedInfo

BUG=3926
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/35469004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7883 4adac7df-926f-26a2-2b94-8c16560cd09d
parent c8bc717905
commit 3b79daff14
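
The hunks below touch the AudioEncoder base class, the CNG, PCM, G.722, iLBC, iSAC and Opus encoders and their headers, the mock encoder used in tests, and two unit-test files. In caller-facing terms the change is small: the separate size_t* encoded_bytes out-parameter of AudioEncoder::Encode() and EncodeInternal() is removed, and the number of bytes produced is now reported through EncodedInfo::encoded_bytes, next to the timestamp and payload type that already lived in that struct. A rough call-site sketch of the new shape (the helper name, buffers, and include path are assumptions for illustration, not part of this patch):

    #include <stddef.h>
    #include <stdint.h>
    #include "webrtc/modules/audio_coding/codecs/audio_encoder.h"  // assumed path

    // Hypothetical caller: encodes one 10 ms block and returns the number of
    // payload bytes written to |payload| (0 means the encoder is still
    // buffering input, or the call failed).
    size_t EncodeOneBlock(webrtc::AudioEncoder* encoder,
                          uint32_t timestamp,
                          const int16_t* audio,
                          size_t samples_per_channel,
                          uint8_t* payload,
                          size_t max_payload_bytes) {
      webrtc::AudioEncoder::EncodedInfo info;  // zero-initialized by the new ctor
      // Before this commit the call took an extra &encoded_bytes argument;
      // now the byte count is read from info.encoded_bytes instead.
      if (!encoder->Encode(timestamp, audio, samples_per_channel,
                           max_payload_bytes, payload, &info)) {
        return 0;
      }
      // Whenever info.encoded_bytes > 0, info.encoded_timestamp and
      // info.payload_type describe the packet that was just produced.
      return info.encoded_bytes;
    }
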
@@ -23,6 +23,9 @@ namespace webrtc {
 class AudioEncoder {
  public:
   struct EncodedInfo {
+    EncodedInfo() : encoded_bytes(0), encoded_timestamp(0), payload_type(0) {}
+
+    size_t encoded_bytes;
     uint32_t encoded_timestamp;
     int payload_type;
   };
@@ -41,7 +44,6 @@ class AudioEncoder {
               size_t num_samples_per_channel,
               size_t max_encoded_bytes,
               uint8_t* encoded,
-              size_t* encoded_bytes,
               EncodedInfo* info) {
     CHECK_EQ(num_samples_per_channel,
              static_cast<size_t>(sample_rate_hz() / 100));
@@ -49,9 +51,8 @@ class AudioEncoder {
                               audio,
                               max_encoded_bytes,
                               encoded,
-                              encoded_bytes,
                               info);
-    CHECK_LE(*encoded_bytes, max_encoded_bytes);
+    CHECK_LE(info->encoded_bytes, max_encoded_bytes);
     return ret;
   }
 
@@ -76,7 +77,6 @@ class AudioEncoder {
                               const int16_t* audio,
                               size_t max_encoded_bytes,
                               uint8_t* encoded,
-                              size_t* encoded_bytes,
                               EncodedInfo* info) = 0;
 };
 
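
Every per-codec EncodeInternal() below follows the same reporting pattern against the new contract: write 0 to info->encoded_bytes while input is still being buffered, and fill in all three fields once a packet is complete. A self-contained sketch of that pattern (a toy encoder with made-up names, not code from this patch; the struct merely mirrors the one added above):

    #include <stddef.h>
    #include <stdint.h>

    struct EncodedInfo {  // stand-in for AudioEncoder::EncodedInfo
      EncodedInfo() : encoded_bytes(0), encoded_timestamp(0), payload_type(0) {}
      size_t encoded_bytes;
      uint32_t encoded_timestamp;
      int payload_type;
    };

    // Toy encoder state: buffers four 10 ms blocks, then emits a 1-byte packet.
    struct ToyEncoder {
      uint32_t first_timestamp_in_buffer;
      int blocks_buffered;
    };

    bool ToyEncodeInternal(ToyEncoder* enc, uint32_t timestamp,
                           uint8_t* encoded, size_t max_encoded_bytes,
                           EncodedInfo* info) {
      if (enc->blocks_buffered == 0)
        enc->first_timestamp_in_buffer = timestamp;
      if (++enc->blocks_buffered < 4) {
        info->encoded_bytes = 0;  // no packet yet; keep buffering
        return true;
      }
      enc->blocks_buffered = 0;
      if (max_encoded_bytes < 1)
        return false;             // output buffer too small
      encoded[0] = 0xAB;          // pretend payload
      info->encoded_bytes = 1;
      info->encoded_timestamp = enc->first_timestamp_in_buffer;
      info->payload_type = 96;    // arbitrary dynamic payload type
      return true;
    }
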
@@ -90,13 +90,12 @@ bool AudioEncoderCng::EncodeInternal(uint32_t timestamp,
                                      const int16_t* audio,
                                      size_t max_encoded_bytes,
                                      uint8_t* encoded,
-                                     size_t* encoded_bytes,
                                      EncodedInfo* info) {
   DCHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
   if (max_encoded_bytes < static_cast<size_t>(num_cng_coefficients_ + 1)) {
     return false;
   }
-  *encoded_bytes = 0;
+  info->encoded_bytes = 0;
   const int num_samples = sample_rate_hz() / 100 * num_channels();
   if (speech_buffer_.empty()) {
     CHECK_EQ(frames_in_buffer_, 0);
@@ -143,15 +142,14 @@ bool AudioEncoderCng::EncodeInternal(uint32_t timestamp,
   bool return_val = true;
   switch (activity) {
     case Vad::kPassive: {
-      return_val = EncodePassive(encoded, encoded_bytes);
+      return_val = EncodePassive(encoded, &info->encoded_bytes);
       info->encoded_timestamp = first_timestamp_in_buffer_;
       info->payload_type = cng_payload_type_;
       last_frame_active_ = false;
       break;
     }
     case Vad::kActive: {
-      return_val =
-          EncodeActive(max_encoded_bytes, encoded, encoded_bytes, info);
+      return_val = EncodeActive(max_encoded_bytes, encoded, info);
       last_frame_active_ = true;
       break;
     }
@@ -190,17 +188,16 @@ bool AudioEncoderCng::EncodePassive(uint8_t* encoded, size_t* encoded_bytes) {
 
 bool AudioEncoderCng::EncodeActive(size_t max_encoded_bytes,
                                    uint8_t* encoded,
-                                   size_t* encoded_bytes,
                                    EncodedInfo* info) {
   const size_t samples_per_10ms_frame = 10 * sample_rate_hz_ / 1000;
   for (int i = 0; i < frames_in_buffer_; ++i) {
     if (!speech_encoder_->Encode(first_timestamp_in_buffer_,
                                  &speech_buffer_[i * samples_per_10ms_frame],
                                  samples_per_10ms_frame, max_encoded_bytes,
-                                 encoded, encoded_bytes, info))
+                                 encoded, info))
       return false;
     if (i < frames_in_buffer_ - 1) {
-      CHECK_EQ(*encoded_bytes, 0u) << "Encoder delivered data too early.";
+      CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early.";
     }
   }
   return true;
@@ -18,6 +18,7 @@ using ::testing::Return;
 using ::testing::_;
 using ::testing::SetArgPointee;
 using ::testing::InSequence;
+using ::testing::Invoke;
 
 namespace webrtc {
 
@@ -33,8 +34,7 @@ class AudioEncoderCngTest : public ::testing::Test {
   AudioEncoderCngTest()
       : mock_vad_(new MockVad(Vad::kVadNormal)),
        timestamp_(4711),
-        num_audio_samples_10ms_(0),
-        encoded_bytes_(0) {
+        num_audio_samples_10ms_(0) {
     memset(encoded_, 0, kMaxEncodedBytes);
     memset(audio_, 0, kMaxNumSamples * 2);
     config_.speech_encoder = &mock_encoder_;
@@ -70,11 +70,9 @@ class AudioEncoderCngTest : public ::testing::Test {
 
   void Encode() {
     ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
-    memset(&encoded_info_, 0, sizeof(encoded_info_));
-    encoded_bytes_ = 0;
+    encoded_info_ = AudioEncoder::EncodedInfo();
     ASSERT_TRUE(cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
-                             kMaxEncodedBytes, encoded_, &encoded_bytes_,
-                             &encoded_info_));
+                             kMaxEncodedBytes, encoded_, &encoded_info_));
     timestamp_ += num_audio_samples_10ms_;
   }
 
@@ -89,30 +87,31 @@ class AudioEncoderCngTest : public ::testing::Test {
         .WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
 
     // Don't expect any calls to the encoder yet.
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _)).Times(0);
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
     for (int i = 0; i < blocks_per_frame - 1; ++i) {
       Encode();
-      EXPECT_EQ(0u, encoded_bytes_);
+      EXPECT_EQ(0u, encoded_info_.encoded_bytes);
     }
+    AudioEncoder::EncodedInfo info;
     if (active_speech) {
       // Now expect |blocks_per_frame| calls to the encoder in sequence.
       // Let the speech codec mock return true and set the number of encoded
       // bytes to |kMockReturnEncodedBytes|.
       InSequence s;
       for (int j = 0; j < blocks_per_frame - 1; ++j) {
-        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _))
-            .WillOnce(DoAll(SetArgPointee<4>(0), Return(true)));
+        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+            .WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));
       }
-      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _))
-          .WillOnce(
-              DoAll(SetArgPointee<4>(kMockReturnEncodedBytes), Return(true)));
+      info.encoded_bytes = kMockReturnEncodedBytes;
+      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+          .WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));
     }
     Encode();
     if (active_speech) {
-      EXPECT_EQ(kMockReturnEncodedBytes, encoded_bytes_);
+      EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
     } else {
       EXPECT_EQ(static_cast<size_t>(config_.num_cng_coefficients + 1),
-                encoded_bytes_);
+                encoded_info_.encoded_bytes);
     }
   }
 
@@ -184,7 +183,6 @@ class AudioEncoderCngTest : public ::testing::Test {
   int16_t audio_[kMaxNumSamples];
   size_t num_audio_samples_10ms_;
   uint8_t encoded_[kMaxEncodedBytes];
-  size_t encoded_bytes_;
   AudioEncoder::EncodedInfo encoded_info_;
 };
 
@@ -239,7 +237,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillRepeatedly(Return(Vad::kPassive));
   // Expect no calls at all to the speech encoder mock.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
   uint32_t expected_timestamp = timestamp_;
   for (int i = 0; i < 100; ++i) {
     Encode();
@@ -251,13 +249,13 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
         // If so, verify that we got a CNG encoding.
         EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
         EXPECT_EQ(static_cast<size_t>(config_.num_cng_coefficients) + 1,
-                  encoded_bytes_);
+                  encoded_info_.encoded_bytes);
         EXPECT_EQ(expected_timestamp, encoded_info_.encoded_timestamp);
       }
       expected_timestamp += kBlocksPerFrame * num_audio_samples_10ms_;
     } else {
       // Otherwise, expect no output.
-      EXPECT_EQ(0u, encoded_bytes_);
+      EXPECT_EQ(0u, encoded_info_.encoded_bytes);
     }
   }
 }
@@ -268,19 +266,19 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
   CreateCng();
 
   // All of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
      .Times(6)
      .WillRepeatedly(Return(true));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
 
   // First half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
      .Times(6)
      .WillRepeatedly(Return(true));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
 
   // Second half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
      .Times(6)
      .WillRepeatedly(Return(true));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
@@ -324,7 +322,7 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
 // speech encoder.
 TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
   CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, &encoded_info_))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_))
      .WillOnce(Return(true));
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
@@ -335,7 +333,7 @@ TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
 // Verifies that the correct payload type is set when CNG is encoded.
 TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
   CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
      .WillOnce(Return(Vad::kPassive));
@@ -357,20 +355,22 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
   Encode();
   EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
   EXPECT_EQ(static_cast<size_t>(config_.num_cng_coefficients) + 1,
-            encoded_bytes_);
+            encoded_info_.encoded_bytes);
   // Encode again, and make sure we got no frame at all (since the SID frame
   // period is 100 ms by default).
   Encode();
-  EXPECT_EQ(0u, encoded_bytes_);
+  EXPECT_EQ(0u, encoded_info_.encoded_bytes);
 
   // Now encode active speech.
   encoded_info_.payload_type = 0;
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
      .WillOnce(Return(Vad::kActive));
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _, _))
-      .WillOnce(DoAll(SetArgPointee<4>(kMockReturnEncodedBytes), Return(true)));
+  AudioEncoder::EncodedInfo info;
+  info.encoded_bytes = kMockReturnEncodedBytes;
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));
   Encode();
-  EXPECT_EQ(kMockReturnEncodedBytes, encoded_bytes_);
+  EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
 
   // Go back to noise again, and verify that a SID frame is emitted.
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
@@ -378,7 +378,7 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
   Encode();
   EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
   EXPECT_EQ(static_cast<size_t>(config_.num_cng_coefficients) + 1,
-            encoded_bytes_);
+            encoded_info_.encoded_bytes);
 }
 
 #if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
@@ -57,7 +57,6 @@ class AudioEncoderCng : public AudioEncoder {
                               const int16_t* audio,
                               size_t max_encoded_bytes,
                               uint8_t* encoded,
-                              size_t* encoded_bytes,
                               EncodedInfo* info) OVERRIDE;
 
  private:
@@ -71,7 +70,6 @@ class AudioEncoderCng : public AudioEncoder {
 
   bool EncodeActive(size_t max_encoded_bytes,
                     uint8_t* encoded,
-                    size_t* encoded_bytes,
                     EncodedInfo* info);
 
   AudioEncoder* speech_encoder_;
@@ -63,7 +63,6 @@ bool AudioEncoderPcm::EncodeInternal(uint32_t timestamp,
                                      const int16_t* audio,
                                      size_t max_encoded_bytes,
                                      uint8_t* encoded,
-                                     size_t* encoded_bytes,
                                      EncodedInfo* info) {
   const int num_samples = sample_rate_hz() / 100 * num_channels();
   if (speech_buffer_.empty()) {
@@ -73,7 +72,7 @@ bool AudioEncoderPcm::EncodeInternal(uint32_t timestamp,
     speech_buffer_.push_back(audio[i]);
   }
   if (speech_buffer_.size() < static_cast<size_t>(full_frame_samples_)) {
-    *encoded_bytes = 0;
+    info->encoded_bytes = 0;
     return true;
   }
   CHECK_EQ(speech_buffer_.size(), static_cast<size_t>(full_frame_samples_));
@@ -83,7 +82,7 @@ bool AudioEncoderPcm::EncodeInternal(uint32_t timestamp,
   info->payload_type = payload_type_;
   if (ret < 0)
     return false;
-  *encoded_bytes = static_cast<size_t>(ret);
+  info->encoded_bytes = static_cast<size_t>(ret);
   return true;
 }
 
@@ -44,7 +44,6 @@ class AudioEncoderPcm : public AudioEncoder {
                               const int16_t* audio,
                               size_t max_encoded_bytes,
                               uint8_t* encoded,
-                              size_t* encoded_bytes,
                               EncodedInfo* info) OVERRIDE;
 
   virtual int16_t EncodeCall(const int16_t* audio,
@@ -68,7 +68,6 @@ bool AudioEncoderG722::EncodeInternal(uint32_t timestamp,
                                       const int16_t* audio,
                                       size_t max_encoded_bytes,
                                       uint8_t* encoded,
-                                      size_t* encoded_bytes,
                                       EncodedInfo* info) {
   const int samples_per_channel =
       kSampleRateHz / 100 * num_10ms_frames_per_packet_;
@@ -86,7 +85,7 @@ bool AudioEncoderG722::EncodeInternal(uint32_t timestamp,
 
   // If we don't yet have enough samples for a packet, we're done for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    *encoded_bytes = 0;
+    info->encoded_bytes = 0;
     return true;
   }
 
@@ -115,7 +114,7 @@ bool AudioEncoderG722::EncodeInternal(uint32_t timestamp,
       encoded[i * num_channels_ + j] =
          interleave_buffer_[2 * j] << 4 | interleave_buffer_[2 * j + 1];
     }
-  *encoded_bytes = samples_per_channel / 2 * num_channels_;
+  info->encoded_bytes = samples_per_channel / 2 * num_channels_;
   info->encoded_timestamp = first_timestamp_in_buffer_;
   info->payload_type = payload_type_;
   return true;
@@ -40,7 +40,6 @@ class AudioEncoderG722 : public AudioEncoder {
                               const int16_t* audio,
                               size_t max_encoded_bytes,
                               uint8_t* encoded,
-                              size_t* encoded_bytes,
                               EncodedInfo* info) OVERRIDE;
 
  private:
@@ -56,7 +56,6 @@ bool AudioEncoderIlbc::EncodeInternal(uint32_t timestamp,
                                       const int16_t* audio,
                                       size_t max_encoded_bytes,
                                       uint8_t* encoded,
-                                      size_t* encoded_bytes,
                                       EncodedInfo* info) {
   const size_t expected_output_len =
       num_10ms_frames_per_packet_ == 2 ? 38 : 50;
@@ -74,7 +73,7 @@ bool AudioEncoderIlbc::EncodeInternal(uint32_t timestamp,
   // If we don't yet have enough buffered input for a whole packet, we're done
   // for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    *encoded_bytes = 0;
+    info->encoded_bytes = 0;
     return true;
   }
 
@@ -89,7 +88,7 @@ bool AudioEncoderIlbc::EncodeInternal(uint32_t timestamp,
   if (output_len == -1)
     return false;  // Encoding error.
   DCHECK_EQ(output_len, static_cast<int>(expected_output_len));
-  *encoded_bytes = output_len;
+  info->encoded_bytes = output_len;
   info->encoded_timestamp = first_timestamp_in_buffer_;
   info->payload_type = payload_type_;
   return true;
@@ -39,7 +39,6 @@ class AudioEncoderIlbc : public AudioEncoder {
                               const int16_t* audio,
                               size_t max_encoded_bytes,
                               uint8_t* encoded,
-                              size_t* encoded_bytes,
                               EncodedInfo* info) OVERRIDE;
 
  private:
@@ -87,7 +87,6 @@ class AudioEncoderDecoderIsac : public AudioEncoder, public AudioDecoder {
                               const int16_t* audio,
                               size_t max_encoded_bytes,
                               uint8_t* encoded,
-                              size_t* encoded_bytes,
                               EncodedInfo* info) OVERRIDE;
 
  private:
@@ -111,7 +111,6 @@ bool AudioEncoderDecoderIsac::EncodeInternal(uint32_t timestamp,
                                              const int16_t* audio,
                                              size_t max_encoded_bytes,
                                              uint8_t* encoded,
-                                             size_t* encoded_bytes,
                                              EncodedInfo* info) {
   if (!packet_in_progress_) {
     // Starting a new packet; remember the timestamp for later.
@@ -133,7 +132,7 @@ bool AudioEncoderDecoderIsac::EncodeInternal(uint32_t timestamp,
   // buffer. All we can do is check for an overrun after the fact.
   CHECK(static_cast<size_t>(r) <= max_encoded_bytes);
 
-  *encoded_bytes = r;
+  info->encoded_bytes = r;
   if (r > 0) {
     // Got enough input to produce a packet. Return the saved timestamp from
     // the first chunk of input that went into the packet.
@@ -26,12 +26,11 @@ class MockAudioEncoder : public AudioEncoder {
   MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, int());
   MOCK_CONST_METHOD0(Max10MsFramesInAPacket, int());
   // Note, we explicitly chose not to create a mock for the Encode method.
-  MOCK_METHOD6(EncodeInternal,
+  MOCK_METHOD5(EncodeInternal,
                bool(uint32_t timestamp,
                     const int16_t* audio,
                     size_t max_encoded_bytes,
                     uint8_t* encoded,
-                    size_t* encoded_bytes,
                     EncodedInfo* info));
 };
 
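
On the test side, the mock can no longer inject a bare byte count with SetArgPointee<4>(size); a whole EncodedInfo is injected into the fifth argument instead, which is what the unittest hunks above do. A minimal gmock sketch of that pattern, written against a stripped-down interface rather than the real MockAudioEncoder (all type names here are illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    using ::testing::_;
    using ::testing::DoAll;
    using ::testing::Return;
    using ::testing::SetArgPointee;

    struct EncodedInfo {  // stand-in for AudioEncoder::EncodedInfo
      EncodedInfo() : encoded_bytes(0), encoded_timestamp(0), payload_type(0) {}
      size_t encoded_bytes;
      uint32_t encoded_timestamp;
      int payload_type;
    };

    class Encoder {  // hypothetical interface, not the WebRTC one
     public:
      virtual ~Encoder() {}
      virtual bool EncodeInternal(uint32_t timestamp, const int16_t* audio,
                                  size_t max_encoded_bytes, uint8_t* encoded,
                                  EncodedInfo* info) = 0;
    };

    class MockEncoder : public Encoder {
     public:
      MOCK_METHOD5(EncodeInternal,
                   bool(uint32_t, const int16_t*, size_t, uint8_t*, EncodedInfo*));
    };

    TEST(EncodedInfoInjection, StructComesBackThroughTheFifthArgument) {
      MockEncoder mock;
      EncodedInfo info;
      info.encoded_bytes = 17;  // arbitrary size the mock should "produce"
      EXPECT_CALL(mock, EncodeInternal(_, _, _, _, _))
          .WillOnce(DoAll(SetArgPointee<4>(info), Return(true)));

      EncodedInfo out;
      uint8_t payload[32] = {0};
      EXPECT_TRUE(mock.EncodeInternal(0, NULL, sizeof(payload), payload, &out));
      EXPECT_EQ(17u, out.encoded_bytes);
    }
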
@@ -82,7 +82,6 @@ bool AudioEncoderOpus::EncodeInternal(uint32_t timestamp,
                                       const int16_t* audio,
                                       size_t max_encoded_bytes,
                                       uint8_t* encoded,
-                                      size_t* encoded_bytes,
                                       EncodedInfo* info) {
   if (input_buffer_.empty())
     first_timestamp_in_buffer_ = timestamp;
@@ -90,7 +89,7 @@ bool AudioEncoderOpus::EncodeInternal(uint32_t timestamp,
                        audio + samples_per_10ms_frame_);
   if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
                               samples_per_10ms_frame_)) {
-    *encoded_bytes = 0;
+    info->encoded_bytes = 0;
     return true;
   }
   CHECK_EQ(input_buffer_.size(),
@@ -103,7 +102,7 @@ bool AudioEncoderOpus::EncodeInternal(uint32_t timestamp,
   input_buffer_.clear();
   if (r < 0)
     return false;
-  *encoded_bytes = r;
+  info->encoded_bytes = r;
   info->encoded_timestamp = first_timestamp_in_buffer_;
   info->payload_type = payload_type_;
   return true;
@@ -41,7 +41,6 @@ class AudioEncoderOpus : public AudioEncoder {
                               const int16_t* audio,
                               size_t max_encoded_bytes,
                               uint8_t* encoded,
-                              size_t* encoded_bytes,
                               EncodedInfo* info) OVERRIDE;
 
  private:
@@ -135,14 +135,14 @@ class AudioDecoderTest : public ::testing::Test {
   virtual int EncodeFrame(const int16_t* input,
                           size_t input_len_samples,
                           uint8_t* output) {
-    size_t enc_len_bytes = 0;
+    encoded_info_.encoded_bytes = 0;
     const size_t samples_per_10ms = audio_encoder_->sample_rate_hz() / 100;
     CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
              input_len_samples);
     scoped_ptr<int16_t[]> interleaved_input(
         new int16_t[channels_ * samples_per_10ms]);
     for (int i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
-      EXPECT_EQ(0u, enc_len_bytes);
+      EXPECT_EQ(0u, encoded_info_.encoded_bytes);
 
       // Duplicate the mono input signal to however many channels the test
       // wants.
@@ -152,10 +152,10 @@ class AudioDecoderTest : public ::testing::Test {
 
       EXPECT_TRUE(audio_encoder_->Encode(
          0, interleaved_input.get(), audio_encoder_->sample_rate_hz() / 100,
-          data_length_ * 2, output, &enc_len_bytes, &encoded_info_));
+          data_length_ * 2, output, &encoded_info_));
     }
     EXPECT_EQ(payload_type_, encoded_info_.payload_type);
-    return static_cast<int>(enc_len_bytes);
+    return static_cast<int>(encoded_info_.encoded_bytes);
   }
 
   // Encodes and decodes audio. The absolute difference between the input and