AudioEncoder: change Encode and EncodeInternal return type to void

After code cleanup done on issues:
https://webrtc-codereview.appspot.com/34259004/
https://webrtc-codereview.appspot.com/43409004/
https://webrtc-codereview.appspot.com/34309004/
https://webrtc-codereview.appspot.com/36209004/
https://webrtc-codereview.appspot.com/40899004/
https://webrtc-codereview.appspot.com/39279004/
https://webrtc-codereview.appspot.com/42099005/
and the similar work done for AudioEncoderDecoderIsacT, the methods AudioEncoder::Encode and AudioEncoder::EncodeInternal will always succeed. Therefore, there is no need for them to return a bool value representing success or failure.

R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/38279004

Cr-Commit-Position: refs/heads/master@{#8518}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8518 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
jmarusic@webrtc.org 2015-02-26 15:38:10 +00:00
parent 00b8f6b364
commit b1f0de30be
22 changed files with 66 additions and 90 deletions

View File

@ -19,7 +19,7 @@ AudioEncoder::EncodedInfo::EncodedInfo() : EncodedInfoLeaf() {
AudioEncoder::EncodedInfo::~EncodedInfo() { AudioEncoder::EncodedInfo::~EncodedInfo() {
} }
bool AudioEncoder::Encode(uint32_t rtp_timestamp, void AudioEncoder::Encode(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t num_samples_per_channel, size_t num_samples_per_channel,
size_t max_encoded_bytes, size_t max_encoded_bytes,
@ -27,10 +27,8 @@ bool AudioEncoder::Encode(uint32_t rtp_timestamp,
EncodedInfo* info) { EncodedInfo* info) {
CHECK_EQ(num_samples_per_channel, CHECK_EQ(num_samples_per_channel,
static_cast<size_t>(SampleRateHz() / 100)); static_cast<size_t>(SampleRateHz() / 100));
bool ret = EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded, info);
EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded, info);
CHECK_LE(info->encoded_bytes, max_encoded_bytes); CHECK_LE(info->encoded_bytes, max_encoded_bytes);
return ret;
} }
int AudioEncoder::RtpTimestampRateHz() const { int AudioEncoder::RtpTimestampRateHz() const {

View File

@ -56,12 +56,11 @@ class AudioEncoder {
// Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 * // Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *
// num_channels() samples). Multi-channel audio must be sample-interleaved. // num_channels() samples). Multi-channel audio must be sample-interleaved.
// If successful, the encoder produces zero or more bytes of output in // The encoder produces zero or more bytes of output in |encoded|,
// |encoded|, and provides the number of encoded bytes in |encoded_bytes|. // and provides the number of encoded bytes in |encoded_bytes|.
// In case of error, false is returned, otherwise true. It is an error for the // The caller is responsible for making sure that |max_encoded_bytes| is
// encoder to attempt to produce more than |max_encoded_bytes| bytes of // not smaller than the number of bytes actually produced by the encoder.
// output. void Encode(uint32_t rtp_timestamp,
bool Encode(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t num_samples_per_channel, size_t num_samples_per_channel,
size_t max_encoded_bytes, size_t max_encoded_bytes,
@ -98,7 +97,7 @@ class AudioEncoder {
virtual void SetProjectedPacketLossRate(double fraction) {} virtual void SetProjectedPacketLossRate(double fraction) {}
protected: protected:
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -95,15 +95,12 @@ void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) {
speech_encoder_->SetProjectedPacketLossRate(fraction); speech_encoder_->SetProjectedPacketLossRate(fraction);
} }
bool AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp, void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
EncodedInfo* info) { EncodedInfo* info) {
DCHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1)); CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
if (max_encoded_bytes < static_cast<size_t>(num_cng_coefficients_ + 1)) {
return false;
}
info->encoded_bytes = 0; info->encoded_bytes = 0;
const int num_samples = SampleRateHz() / 100 * NumChannels(); const int num_samples = SampleRateHz() / 100 * NumChannels();
if (speech_buffer_.empty()) { if (speech_buffer_.empty()) {
@ -115,7 +112,7 @@ bool AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
} }
++frames_in_buffer_; ++frames_in_buffer_;
if (frames_in_buffer_ < speech_encoder_->Num10MsFramesInNextPacket()) { if (frames_in_buffer_ < speech_encoder_->Num10MsFramesInNextPacket()) {
return true; return;
} }
CHECK_LE(frames_in_buffer_, 6) CHECK_LE(frames_in_buffer_, 6)
<< "Frame size cannot be larger than 60 ms when using VAD/CNG."; << "Frame size cannot be larger than 60 ms when using VAD/CNG.";
@ -169,7 +166,6 @@ bool AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
speech_buffer_.clear(); speech_buffer_.clear();
frames_in_buffer_ = 0; frames_in_buffer_ = 0;
return true;
} }
void AudioEncoderCng::EncodePassive(uint8_t* encoded, size_t* encoded_bytes) { void AudioEncoderCng::EncodePassive(uint8_t* encoded, size_t* encoded_bytes) {
@ -196,10 +192,10 @@ void AudioEncoderCng::EncodeActive(size_t max_encoded_bytes,
EncodedInfo* info) { EncodedInfo* info) {
const size_t samples_per_10ms_frame = 10 * SampleRateHz() / 1000; const size_t samples_per_10ms_frame = 10 * SampleRateHz() / 1000;
for (int i = 0; i < frames_in_buffer_; ++i) { for (int i = 0; i < frames_in_buffer_; ++i) {
CHECK(speech_encoder_->Encode(first_timestamp_in_buffer_, speech_encoder_->Encode(first_timestamp_in_buffer_,
&speech_buffer_[i * samples_per_10ms_frame], &speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes, samples_per_10ms_frame, max_encoded_bytes,
encoded, info)); encoded, info);
if (i < frames_in_buffer_ - 1) { if (i < frames_in_buffer_ - 1) {
CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early."; CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early.";
} }

View File

@ -72,8 +72,8 @@ class AudioEncoderCngTest : public ::testing::Test {
void Encode() { void Encode() {
ASSERT_TRUE(cng_) << "Must call CreateCng() first."; ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
encoded_info_ = AudioEncoder::EncodedInfo(); encoded_info_ = AudioEncoder::EncodedInfo();
ASSERT_TRUE(cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_, cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
kMaxEncodedBytes, encoded_, &encoded_info_)); kMaxEncodedBytes, encoded_, &encoded_info_);
timestamp_ += num_audio_samples_10ms_; timestamp_ += num_audio_samples_10ms_;
} }
@ -101,11 +101,11 @@ class AudioEncoderCngTest : public ::testing::Test {
InSequence s; InSequence s;
for (int j = 0; j < blocks_per_frame - 1; ++j) { for (int j = 0; j < blocks_per_frame - 1; ++j) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true))); .WillOnce(SetArgPointee<4>(info));
} }
info.encoded_bytes = kMockReturnEncodedBytes; info.encoded_bytes = kMockReturnEncodedBytes;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true))); .WillOnce(SetArgPointee<4>(info));
} }
Encode(); Encode();
if (active_speech) { if (active_speech) {
@ -280,20 +280,17 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
// All of the frame is active speech. // All of the frame is active speech.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.Times(6) .Times(6);
.WillRepeatedly(Return(true));
EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive)); EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
// First half of the frame is active speech. // First half of the frame is active speech.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.Times(6) .Times(6);
.WillRepeatedly(Return(true));
EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive)); EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
// Second half of the frame is active speech. // Second half of the frame is active speech.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.Times(6) .Times(6);
.WillRepeatedly(Return(true));
EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive)); EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
// All of the frame is passive speech. Expect no calls to |mock_encoder_|. // All of the frame is passive speech. Expect no calls to |mock_encoder_|.
@ -335,8 +332,7 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
// speech encoder. // speech encoder.
TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) { TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
CreateCng(); CreateCng();
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_));
.WillOnce(Return(true));
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1)); EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _)) EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kActive)); .WillOnce(Return(Vad::kActive));
@ -381,7 +377,7 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
AudioEncoder::EncodedInfo info; AudioEncoder::EncodedInfo info;
info.encoded_bytes = kMockReturnEncodedBytes; info.encoded_bytes = kMockReturnEncodedBytes;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true))); .WillOnce(SetArgPointee<4>(info));
Encode(); Encode();
EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes); EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);

View File

@ -55,7 +55,7 @@ class AudioEncoderCng final : public AudioEncoder {
void SetProjectedPacketLossRate(double fraction) override; void SetProjectedPacketLossRate(double fraction) override;
protected: protected:
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -60,7 +60,7 @@ int AudioEncoderPcm::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_; return num_10ms_frames_per_packet_;
} }
bool AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp, void AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
@ -74,7 +74,7 @@ bool AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
} }
if (speech_buffer_.size() < static_cast<size_t>(full_frame_samples_)) { if (speech_buffer_.size() < static_cast<size_t>(full_frame_samples_)) {
info->encoded_bytes = 0; info->encoded_bytes = 0;
return true; return;
} }
CHECK_EQ(speech_buffer_.size(), static_cast<size_t>(full_frame_samples_)); CHECK_EQ(speech_buffer_.size(), static_cast<size_t>(full_frame_samples_));
int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded); int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
@ -83,7 +83,6 @@ bool AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
info->encoded_timestamp = first_timestamp_in_buffer_; info->encoded_timestamp = first_timestamp_in_buffer_;
info->payload_type = payload_type_; info->payload_type = payload_type_;
info->encoded_bytes = static_cast<size_t>(ret); info->encoded_bytes = static_cast<size_t>(ret);
return true;
} }
int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio, int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,

View File

@ -40,7 +40,7 @@ class AudioEncoderPcm : public AudioEncoder {
protected: protected:
AudioEncoderPcm(const Config& config, int sample_rate_hz); AudioEncoderPcm(const Config& config, int sample_rate_hz);
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -69,7 +69,7 @@ int AudioEncoderG722::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_; return num_10ms_frames_per_packet_;
} }
bool AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp, void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
@ -91,7 +91,7 @@ bool AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
// If we don't yet have enough samples for a packet, we're done for now. // If we don't yet have enough samples for a packet, we're done for now.
if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) { if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
info->encoded_bytes = 0; info->encoded_bytes = 0;
return true; return;
} }
// Encode each channel separately. // Encode each channel separately.
@ -121,7 +121,6 @@ bool AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
info->encoded_bytes = samples_per_channel / 2 * num_channels_; info->encoded_bytes = samples_per_channel / 2 * num_channels_;
info->encoded_timestamp = first_timestamp_in_buffer_; info->encoded_timestamp = first_timestamp_in_buffer_;
info->payload_type = payload_type_; info->payload_type = payload_type_;
return true;
} }
} // namespace webrtc } // namespace webrtc

View File

@ -37,7 +37,7 @@ class AudioEncoderG722 : public AudioEncoder {
virtual int Max10MsFramesInAPacket() const OVERRIDE; virtual int Max10MsFramesInAPacket() const OVERRIDE;
protected: protected:
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -56,7 +56,7 @@ int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_; return num_10ms_frames_per_packet_;
} }
bool AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp, void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
@ -93,7 +93,7 @@ bool AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
// for now. // for now.
if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) { if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
info->encoded_bytes = 0; info->encoded_bytes = 0;
return true; return;
} }
// Encode buffered input. // Encode buffered input.
@ -109,7 +109,6 @@ bool AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
info->encoded_bytes = output_len; info->encoded_bytes = output_len;
info->encoded_timestamp = first_timestamp_in_buffer_; info->encoded_timestamp = first_timestamp_in_buffer_;
info->payload_type = payload_type_; info->payload_type = payload_type_;
return true;
} }
} // namespace webrtc } // namespace webrtc

View File

@ -37,7 +37,7 @@ class AudioEncoderIlbc : public AudioEncoder {
virtual int Max10MsFramesInAPacket() const OVERRIDE; virtual int Max10MsFramesInAPacket() const OVERRIDE;
protected: protected:
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -95,7 +95,7 @@ class AudioEncoderDecoderIsacT : public AudioEncoder, public AudioDecoder {
protected: protected:
// AudioEncoder protected method. // AudioEncoder protected method.
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -186,7 +186,7 @@ int AudioEncoderDecoderIsacT<T>::Max10MsFramesInAPacket() const {
} }
template <typename T> template <typename T>
bool AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp, void AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
@ -202,11 +202,7 @@ bool AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
CriticalSectionScoped cs(state_lock_.get()); CriticalSectionScoped cs(state_lock_.get());
r = T::Encode(isac_state_, audio, encoded); r = T::Encode(isac_state_, audio, encoded);
} }
if (r < 0) { CHECK_GE(r, 0);
// An error occurred; propagate it to the caller.
packet_in_progress_ = false;
return false;
}
// T::Encode doesn't allow us to tell it the size of the output // T::Encode doesn't allow us to tell it the size of the output
// buffer. All we can do is check for an overrun after the fact. // buffer. All we can do is check for an overrun after the fact.
@ -214,7 +210,7 @@ bool AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
info->encoded_bytes = r; info->encoded_bytes = r;
if (r == 0) if (r == 0)
return true; return;
// Got enough input to produce a packet. Return the saved timestamp from // Got enough input to produce a packet. Return the saved timestamp from
// the first chunk of input that went into the packet. // the first chunk of input that went into the packet.
@ -223,7 +219,7 @@ bool AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
info->payload_type = payload_type_; info->payload_type = payload_type_;
if (!T::has_redundant_encoder) if (!T::has_redundant_encoder)
return true; return;
if (redundant_length_bytes_ == 0) { if (redundant_length_bytes_ == 0) {
// Do not emit the first output frame when using redundant encoding. // Do not emit the first output frame when using redundant encoding.
@ -260,7 +256,6 @@ bool AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
DCHECK_LE(redundant_length_bytes_, sizeof(redundant_payload_)); DCHECK_LE(redundant_length_bytes_, sizeof(redundant_payload_));
DCHECK_GE(redundant_length_bytes_, 0u); DCHECK_GE(redundant_length_bytes_, 0u);
last_encoded_timestamp_ = packet_timestamp_; last_encoded_timestamp_ = packet_timestamp_;
return true;
} }
template <typename T> template <typename T>

View File

@ -51,11 +51,10 @@ TEST(AudioEncoderIsacRedTest, CompareRedAndNoRed) {
EXPECT_EQ(0u, red_info.encoded_bytes); EXPECT_EQ(0u, red_info.encoded_bytes);
EXPECT_EQ(0u, red_info.redundant.size()); EXPECT_EQ(0u, red_info.redundant.size());
const uint32_t timestamp = static_cast<uint32_t>(i); const uint32_t timestamp = static_cast<uint32_t>(i);
EXPECT_TRUE(isac_encoder.Encode(timestamp, input, k10MsSamples, isac_encoder.Encode(timestamp, input, k10MsSamples, kMaxEncodedSizeBytes,
kMaxEncodedSizeBytes, encoded, &info)); encoded, &info);
EXPECT_TRUE(isac_red_encoder.Encode(timestamp, input, k10MsSamples, isac_red_encoder.Encode(timestamp, input, k10MsSamples,
kMaxEncodedSizeBytes, red_encoded, kMaxEncodedSizeBytes, red_encoded, &red_info);
&red_info));
} }
EXPECT_GT(info.encoded_bytes, 0u) EXPECT_GT(info.encoded_bytes, 0u)
<< "Regular codec did not produce any output"; << "Regular codec did not produce any output";

View File

@ -29,7 +29,7 @@ class MockAudioEncoder : public AudioEncoder {
MOCK_METHOD1(SetProjectedPacketLossRate, void(double)); MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
// Note, we explicitly chose not to create a mock for the Encode method. // Note, we explicitly chose not to create a mock for the Encode method.
MOCK_METHOD5(EncodeInternal, MOCK_METHOD5(EncodeInternal,
bool(uint32_t timestamp, void(uint32_t timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -166,7 +166,7 @@ void AudioEncoderOpus::SetProjectedPacketLossRate(double fraction) {
} }
} }
bool AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp, void AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
@ -178,7 +178,7 @@ bool AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) * if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
samples_per_10ms_frame_)) { samples_per_10ms_frame_)) {
info->encoded_bytes = 0; info->encoded_bytes = 0;
return true; return;
} }
CHECK_EQ(input_buffer_.size(), CHECK_EQ(input_buffer_.size(),
static_cast<size_t>(num_10ms_frames_per_packet_) * static_cast<size_t>(num_10ms_frames_per_packet_) *
@ -193,7 +193,6 @@ bool AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
info->encoded_bytes = r; info->encoded_bytes = r;
info->encoded_timestamp = first_timestamp_in_buffer_; info->encoded_timestamp = first_timestamp_in_buffer_;
info->payload_type = payload_type_; info->payload_type = payload_type_;
return true;
} }
} // namespace webrtc } // namespace webrtc

View File

@ -54,7 +54,7 @@ class AudioEncoderOpus final : public AudioEncoder {
ApplicationMode application() const { return application_; } ApplicationMode application() const { return application_; }
protected: protected:
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -56,14 +56,14 @@ void AudioEncoderCopyRed::SetProjectedPacketLossRate(double fraction) {
speech_encoder_->SetProjectedPacketLossRate(fraction); speech_encoder_->SetProjectedPacketLossRate(fraction);
} }
bool AudioEncoderCopyRed::EncodeInternal(uint32_t rtp_timestamp, void AudioEncoderCopyRed::EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
EncodedInfo* info) { EncodedInfo* info) {
CHECK(speech_encoder_->Encode(rtp_timestamp, audio, speech_encoder_->Encode(rtp_timestamp, audio,
static_cast<size_t>(SampleRateHz() / 100), static_cast<size_t>(SampleRateHz() / 100),
max_encoded_bytes, encoded, info)); max_encoded_bytes, encoded, info);
CHECK_GE(max_encoded_bytes, CHECK_GE(max_encoded_bytes,
info->encoded_bytes + secondary_info_.encoded_bytes); info->encoded_bytes + secondary_info_.encoded_bytes);
CHECK(info->redundant.empty()) << "Cannot use nested redundant encoders."; CHECK(info->redundant.empty()) << "Cannot use nested redundant encoders.";
@ -97,7 +97,6 @@ bool AudioEncoderCopyRed::EncodeInternal(uint32_t rtp_timestamp,
it != info->redundant.end(); ++it) { it != info->redundant.end(); ++it) {
info->encoded_bytes += it->encoded_bytes; info->encoded_bytes += it->encoded_bytes;
} }
return true;
} }
} // namespace webrtc } // namespace webrtc

View File

@ -44,7 +44,7 @@ class AudioEncoderCopyRed : public AudioEncoder {
void SetProjectedPacketLossRate(double fraction) override; void SetProjectedPacketLossRate(double fraction) override;
protected: protected:
virtual bool EncodeInternal(uint32_t rtp_timestamp, virtual void EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,

View File

@ -57,8 +57,8 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
void Encode() { void Encode() {
ASSERT_TRUE(red_.get() != NULL); ASSERT_TRUE(red_.get() != NULL);
encoded_info_ = AudioEncoder::EncodedInfo(); encoded_info_ = AudioEncoder::EncodedInfo();
ASSERT_TRUE(red_->Encode(timestamp_, audio_, num_audio_samples_10ms, red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
kMaxEncodedBytes, encoded_, &encoded_info_)); kMaxEncodedBytes, encoded_, &encoded_info_);
timestamp_ += num_audio_samples_10ms; timestamp_ += num_audio_samples_10ms;
} }
@ -79,7 +79,7 @@ class MockEncodeHelper {
memset(&info_, 0, sizeof(info_)); memset(&info_, 0, sizeof(info_));
} }
bool Encode(uint32_t timestamp, void Encode(uint32_t timestamp,
const int16_t* audio, const int16_t* audio,
size_t max_encoded_bytes, size_t max_encoded_bytes,
uint8_t* encoded, uint8_t* encoded,
@ -91,7 +91,6 @@ class MockEncodeHelper {
} }
CHECK(info); CHECK(info);
*info = info_; *info = info_;
return true;
} }
AudioEncoder::EncodedInfo info_; AudioEncoder::EncodedInfo info_;
@ -141,8 +140,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
InSequence s; InSequence s;
MockFunction<void(int check_point_id)> check; MockFunction<void(int check_point_id)> check;
for (int i = 1; i <= 6; ++i) { for (int i = 1; i <= 6; ++i) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _));
.WillOnce(Return(true));
EXPECT_CALL(check, Call(i)); EXPECT_CALL(check, Call(i));
Encode(); Encode();
check.Call(i); check.Call(i);
@ -157,7 +155,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
AudioEncoder::EncodedInfo info; AudioEncoder::EncodedInfo info;
info.encoded_bytes = kEncodedSize; info.encoded_bytes = kEncodedSize;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true))); .WillOnce(SetArgPointee<4>(info));
Encode(); Encode();
// First call is a special case, since it does not include a secondary // First call is a special case, since it does not include a secondary
// payload. // payload.
@ -167,14 +165,14 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
// Next call to the speech encoder will not produce any output. // Next call to the speech encoder will not produce any output.
info.encoded_bytes = 0; info.encoded_bytes = 0;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true))); .WillOnce(SetArgPointee<4>(info));
Encode(); Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes); EXPECT_EQ(0u, encoded_info_.encoded_bytes);
// Final call to the speech encoder will produce output. // Final call to the speech encoder will produce output.
info.encoded_bytes = kEncodedSize; info.encoded_bytes = kEncodedSize;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true))); .WillOnce(SetArgPointee<4>(info));
Encode(); Encode();
EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes); EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
ASSERT_EQ(2u, encoded_info_.redundant.size()); ASSERT_EQ(2u, encoded_info_.redundant.size());
@ -191,7 +189,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
AudioEncoder::EncodedInfo info; AudioEncoder::EncodedInfo info;
info.encoded_bytes = encode_size; info.encoded_bytes = encode_size;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)) EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
.WillOnce(DoAll(SetArgPointee<4>(info), Return(true))); .WillOnce(SetArgPointee<4>(info));
} }
// First call is a special case, since it does not include a secondary // First call is a special case, since it does not include a secondary

View File

@ -236,9 +236,9 @@ int16_t ACMGenericCodec::Encode(uint8_t* bitstream,
AudioEncoder::EncodedInfo* encoded_info) { AudioEncoder::EncodedInfo* encoded_info) {
WriteLockScoped wl(codec_wrapper_lock_); WriteLockScoped wl(codec_wrapper_lock_);
CHECK(!input_.empty()); CHECK(!input_.empty());
CHECK(encoder_->Encode(rtp_timestamp_, &input_[0], encoder_->Encode(rtp_timestamp_, &input_[0],
input_.size() / encoder_->NumChannels(), input_.size() / encoder_->NumChannels(),
2 * MAX_PAYLOAD_SIZE_BYTE, bitstream, encoded_info)); 2 * MAX_PAYLOAD_SIZE_BYTE, bitstream, encoded_info);
input_.clear(); input_.clear();
*bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes); *bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes);
*timestamp = encoded_info->encoded_timestamp; *timestamp = encoded_info->encoded_timestamp;

View File

@ -150,9 +150,9 @@ class AudioDecoderTest : public ::testing::Test {
samples_per_10ms, channels_, samples_per_10ms, channels_,
interleaved_input.get()); interleaved_input.get());
EXPECT_TRUE(audio_encoder_->Encode( audio_encoder_->Encode(0, interleaved_input.get(),
0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100, audio_encoder_->SampleRateHz() / 100,
data_length_ * 2, output, &encoded_info_)); data_length_ * 2, output, &encoded_info_);
} }
EXPECT_EQ(payload_type_, encoded_info_.payload_type); EXPECT_EQ(payload_type_, encoded_info_.payload_type);
return static_cast<int>(encoded_info_.encoded_bytes); return static_cast<int>(encoded_info_.encoded_bytes);