Reland 8749: AudioEncoder: return EncodedInfo from Encode() and EncodeInternal()
Old review at: https://webrtc-codereview.appspot.com/43839004/

R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/45769004

Cr-Commit-Position: refs/heads/master@{#8788}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8788 4adac7df-926f-26a2-2b94-8c16560cd09d
parent: d21406d333
commit: 9afaee74ab
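In short: Encode() and EncodeInternal() no longer fill an EncodedInfo* out-parameter; they return the EncodedInfo by value. A minimal caller-side sketch of the new contract (illustrative only; `encoder`, `audio`, and `encoded` stand for a concrete AudioEncoder subclass and caller-owned buffers set up elsewhere):

  // Old API (before this commit): output parameter.
  //   webrtc::AudioEncoder::EncodedInfo info;
  //   encoder->Encode(rtp_timestamp, audio, num_samples_per_channel,
  //                   max_encoded_bytes, encoded, &info);

  // New API (this commit): the encoding metadata is returned by value.
  webrtc::AudioEncoder::EncodedInfo info = encoder->Encode(
      rtp_timestamp, audio, num_samples_per_channel, max_encoded_bytes, encoded);
  if (info.encoded_bytes > 0 || info.send_even_if_empty) {
    // Packetize the first info.encoded_bytes bytes of |encoded|, stamped with
    // info.encoded_timestamp and info.payload_type.
  }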
@@ -19,16 +19,17 @@ AudioEncoder::EncodedInfo::EncodedInfo() : EncodedInfoLeaf() {
 AudioEncoder::EncodedInfo::~EncodedInfo() {
 }
 
-void AudioEncoder::Encode(uint32_t rtp_timestamp,
-                          const int16_t* audio,
-                          size_t num_samples_per_channel,
-                          size_t max_encoded_bytes,
-                          uint8_t* encoded,
-                          EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
+                                               const int16_t* audio,
+                                               size_t num_samples_per_channel,
+                                               size_t max_encoded_bytes,
+                                               uint8_t* encoded) {
   CHECK_EQ(num_samples_per_channel,
            static_cast<size_t>(SampleRateHz() / 100));
-  EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded, info);
-  CHECK_LE(info->encoded_bytes, max_encoded_bytes);
+  EncodedInfo info =
+      EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
+  CHECK_LE(info.encoded_bytes, max_encoded_bytes);
+  return info;
 }
 
 int AudioEncoder::RtpTimestampRateHz() const {
@@ -58,16 +58,15 @@ class AudioEncoder {
 
   // Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *
   // num_channels() samples). Multi-channel audio must be sample-interleaved.
-  // The encoder produces zero or more bytes of output in |encoded|,
-  // and provides additional encoding information in |info|.
+  // The encoder produces zero or more bytes of output in |encoded| and
+  // returns additional encoding information.
   // The caller is responsible for making sure that |max_encoded_bytes| is
   // not smaller than the number of bytes actually produced by the encoder.
-  void Encode(uint32_t rtp_timestamp,
-              const int16_t* audio,
-              size_t num_samples_per_channel,
-              size_t max_encoded_bytes,
-              uint8_t* encoded,
-              EncodedInfo* info);
+  EncodedInfo Encode(uint32_t rtp_timestamp,
+                     const int16_t* audio,
+                     size_t num_samples_per_channel,
+                     size_t max_encoded_bytes,
+                     uint8_t* encoded);
 
   // Return the input sample rate in Hz and the number of input channels.
   // These are constants set at instantiation time.
@@ -107,11 +106,10 @@ class AudioEncoder {
   virtual void SetProjectedPacketLossRate(double fraction) {}
 
  protected:
-  virtual void EncodeInternal(uint32_t rtp_timestamp,
-                              const int16_t* audio,
-                              size_t max_encoded_bytes,
-                              uint8_t* encoded,
-                              EncodedInfo* info) = 0;
+  virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                                     const int16_t* audio,
+                                     size_t max_encoded_bytes,
+                                     uint8_t* encoded) = 0;
 };
 
 }  // namespace webrtc
@@ -109,13 +109,12 @@ void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) {
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }
 
-void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
-                                     const int16_t* audio,
-                                     size_t max_encoded_bytes,
-                                     uint8_t* encoded,
-                                     EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
-  info->encoded_bytes = 0;
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     CHECK_EQ(frames_in_buffer_, 0);
@@ -126,7 +125,7 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
   }
   ++frames_in_buffer_;
   if (frames_in_buffer_ < speech_encoder_->Num10MsFramesInNextPacket()) {
-    return;
+    return EncodedInfo();
   }
   CHECK_LE(frames_in_buffer_ * 10, kMaxFrameSizeMs)
       << "Frame size cannot be larger than " << kMaxFrameSizeMs
@@ -159,14 +158,15 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
         samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz());
   }
 
+  EncodedInfo info;
   switch (activity) {
     case Vad::kPassive: {
-      EncodePassive(max_encoded_bytes, encoded, info);
+      info = EncodePassive(max_encoded_bytes, encoded);
       last_frame_active_ = false;
       break;
     }
     case Vad::kActive: {
-      EncodeActive(max_encoded_bytes, encoded, info);
+      info = EncodeActive(max_encoded_bytes, encoded);
       last_frame_active_ = true;
       break;
     }
@@ -178,15 +178,17 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
 
   speech_buffer_.clear();
   frames_in_buffer_ = 0;
+  return info;
 }
 
-void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
-                                    uint8_t* encoded,
-                                    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   bool force_sid = last_frame_active_;
   bool output_produced = false;
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
   CHECK_GE(max_encoded_bytes, frames_in_buffer_ * samples_per_10ms_frame);
+  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
     int16_t encoded_bytes_tmp = 0;
     CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
@@ -195,30 +197,32 @@ void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
                               encoded, &encoded_bytes_tmp, force_sid), 0);
     if (encoded_bytes_tmp > 0) {
       CHECK(!output_produced);
-      info->encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
+      info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
       output_produced = true;
       force_sid = false;
     }
   }
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = cng_payload_type_;
-  info->send_even_if_empty = true;
-  info->speech = false;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = cng_payload_type_;
+  info.send_even_if_empty = true;
+  info.speech = false;
+  return info;
 }
 
-void AudioEncoderCng::EncodeActive(size_t max_encoded_bytes,
-                                   uint8_t* encoded,
-                                   EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
-    speech_encoder_->Encode(first_timestamp_in_buffer_,
-                            &speech_buffer_[i * samples_per_10ms_frame],
-                            samples_per_10ms_frame, max_encoded_bytes,
-                            encoded, info);
+    info = speech_encoder_->Encode(
+        first_timestamp_in_buffer_, &speech_buffer_[i * samples_per_10ms_frame],
+        samples_per_10ms_frame, max_encoded_bytes, encoded);
     if (i < frames_in_buffer_ - 1) {
-      CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early.";
+      CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";
     }
   }
+  return info;
 }
 
 size_t AudioEncoderCng::SamplesPer10msFrame() const {
@@ -75,9 +75,8 @@ class AudioEncoderCngTest : public ::testing::Test {
 
   void Encode() {
     ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
-    encoded_info_ = AudioEncoder::EncodedInfo();
-    cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
-                 encoded_.size(), &encoded_[0], &encoded_info_);
+    encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
+                                 encoded_.size(), &encoded_[0]);
     timestamp_ += num_audio_samples_10ms_;
   }
 
@@ -92,24 +91,24 @@ class AudioEncoderCngTest : public ::testing::Test {
         .WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
 
     // Don't expect any calls to the encoder yet.
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
     for (int i = 0; i < blocks_per_frame - 1; ++i) {
       Encode();
       EXPECT_EQ(0u, encoded_info_.encoded_bytes);
     }
+    AudioEncoder::EncodedInfo info;
     if (active_speech) {
       // Now expect |blocks_per_frame| calls to the encoder in sequence.
       // Let the speech codec mock return true and set the number of encoded
       // bytes to |kMockReturnEncodedBytes|.
       InSequence s;
-      AudioEncoder::EncodedInfo info;
       for (int j = 0; j < blocks_per_frame - 1; ++j) {
-        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-            .WillOnce(SetArgPointee<4>(info));
+        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+            .WillOnce(Return(info));
       }
       info.encoded_bytes = kMockReturnEncodedBytes;
-      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-          .WillOnce(SetArgPointee<4>(info));
+      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+          .WillOnce(Return(info));
     }
     Encode();
     if (active_speech) {
@@ -254,7 +253,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillRepeatedly(Return(Vad::kPassive));
   // Expect no calls at all to the speech encoder mock.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
   uint32_t expected_timestamp = timestamp_;
   for (int i = 0; i < 100; ++i) {
     Encode();
@@ -284,20 +283,23 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
   CreateCng();
 
   // All of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::EncodedInfo()));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);
 
   // First half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::EncodedInfo()));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
   EXPECT_TRUE(encoded_info_.speech);
 
   // Second half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::EncodedInfo()));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);
 
@@ -336,22 +338,10 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
   CheckVadInputSize(60, 30, 30);
 }
 
-// Verifies that the EncodedInfo struct pointer passed to
-// AudioEncoderCng::Encode is propagated to the Encode call to the underlying
-// speech encoder.
-TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
-  CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_));
-  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
-  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
-      .WillOnce(Return(Vad::kActive));
-  Encode();
-}
-
 // Verifies that the correct payload type is set when CNG is encoded.
 TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
   CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillOnce(Return(Vad::kPassive));
@@ -385,8 +375,7 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
       .WillOnce(Return(Vad::kActive));
   AudioEncoder::EncodedInfo info;
   info.encoded_bytes = kMockReturnEncodedBytes;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).WillOnce(Return(info));
   Encode();
   EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
 
@@ -56,11 +56,10 @@ class AudioEncoderCng final : public AudioEncoder {
   void SetProjectedPacketLossRate(double fraction) override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   // Deleter for use with scoped_ptr. E.g., use as
@@ -69,12 +68,8 @@ class AudioEncoderCng final : public AudioEncoder {
     inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
   };
 
-  void EncodePassive(size_t max_encoded_bytes,
-                     uint8_t* encoded,
-                     EncodedInfo* info);
-  void EncodeActive(size_t max_encoded_bytes,
-                    uint8_t* encoded,
-                    EncodedInfo* info);
+  EncodedInfo EncodePassive(size_t max_encoded_bytes, uint8_t* encoded);
+  EncodedInfo EncodeActive(size_t max_encoded_bytes, uint8_t* encoded);
   size_t SamplesPer10msFrame() const;
 
   AudioEncoder* speech_encoder_;
@@ -66,11 +66,11 @@ int AudioEncoderPcm::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-void AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
-                                     const int16_t* audio,
-                                     size_t max_encoded_bytes,
-                                     uint8_t* encoded,
-                                     EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     first_timestamp_in_buffer_ = rtp_timestamp;
@@ -79,17 +79,18 @@ void AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
     speech_buffer_.push_back(audio[i]);
   }
   if (speech_buffer_.size() < full_frame_samples_) {
-    info->encoded_bytes = 0;
-    return;
+    return EncodedInfo();
   }
   CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
   CHECK_GE(max_encoded_bytes, full_frame_samples_);
   int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
   CHECK_GE(ret, 0);
   speech_buffer_.clear();
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
-  info->encoded_bytes = static_cast<size_t>(ret);
+  EncodedInfo info;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.encoded_bytes = static_cast<size_t>(ret);
+  return info;
 }
 
 int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
@@ -41,11 +41,10 @@ class AudioEncoderPcm : public AudioEncoder {
  protected:
   AudioEncoderPcm(const Config& config, int sample_rate_hz);
 
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
   virtual int16_t EncodeCall(const int16_t* audio,
                              size_t input_len,
@@ -77,11 +77,11 @@ int AudioEncoderG722::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
-                                      const int16_t* audio,
-                                      size_t max_encoded_bytes,
-                                      uint8_t* encoded,
-                                      EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
 
   if (num_10ms_frames_buffered_ == 0)
@@ -95,8 +95,7 @@ void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
 
   // If we don't yet have enough samples for a packet, we're done for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    info->encoded_bytes = 0;
-    return;
+    return EncodedInfo();
   }
 
   // Encode each channel separately.
@@ -124,9 +123,11 @@ void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
       encoded[i * num_channels_ + j] =
           interleave_buffer_[2 * j] << 4 | interleave_buffer_[2 * j + 1];
   }
-  info->encoded_bytes = samples_per_channel / 2 * num_channels_;
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = samples_per_channel / 2 * num_channels_;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  return info;
 }
 
 int AudioEncoderG722::SamplesPerChannel() const {
@@ -38,11 +38,10 @@ class AudioEncoderG722 : public AudioEncoder {
   int Max10MsFramesInAPacket() const override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   // The encoder state for one channel.
@@ -63,11 +63,11 @@ int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
-                                      const int16_t* audio,
-                                      size_t max_encoded_bytes,
-                                      uint8_t* encoded,
-                                      EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
 
   // Save timestamp if starting a new packet.
@@ -82,8 +82,7 @@ void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
   // If we don't yet have enough buffered input for a whole packet, we're done
   // for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    info->encoded_bytes = 0;
-    return;
+    return EncodedInfo();
   }
 
   // Encode buffered input.
@@ -95,10 +94,12 @@ void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
                                  kSampleRateHz / 100 * num_10ms_frames_per_packet_,
                                  encoded);
   CHECK_GE(output_len, 0);
-  info->encoded_bytes = output_len;
-  DCHECK_EQ(info->encoded_bytes, RequiredOutputSizeBytes());
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = output_len;
+  DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  return info;
 }
 
 size_t AudioEncoderIlbc::RequiredOutputSizeBytes() const {
@@ -38,11 +38,10 @@ class AudioEncoderIlbc : public AudioEncoder {
   int Max10MsFramesInAPacket() const override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   size_t RequiredOutputSizeBytes() const;
@@ -85,11 +85,10 @@ class AudioEncoderDecoderIsacT : public AudioEncoder, public AudioDecoder {
 
  protected:
   // AudioEncoder protected method.
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
   // AudioDecoder protected method.
   int DecodeInternal(const uint8_t* encoded,
@@ -184,11 +184,11 @@ int AudioEncoderDecoderIsacT<T>::Max10MsFramesInAPacket() const {
 }
 
 template <typename T>
-void AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
-                                                 const int16_t* audio,
-                                                 size_t max_encoded_bytes,
-                                                 uint8_t* encoded,
-                                                 EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderDecoderIsacT<T>::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CriticalSectionScoped cs_lock(lock_.get());
   if (!packet_in_progress_) {
     // Starting a new packet; remember the timestamp for later.
@@ -206,15 +206,17 @@ void AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
   // buffer. All we can do is check for an overrun after the fact.
   CHECK(static_cast<size_t>(r) <= max_encoded_bytes);
 
-  info->encoded_bytes = r;
   if (r == 0)
-    return;
+    return EncodedInfo();
 
   // Got enough input to produce a packet. Return the saved timestamp from
   // the first chunk of input that went into the packet.
   packet_in_progress_ = false;
-  info->encoded_timestamp = packet_timestamp_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = r;
+  info.encoded_timestamp = packet_timestamp_;
+  info.payload_type = payload_type_;
+  return info;
 }
 
 template <typename T>
@@ -29,12 +29,11 @@ class MockAudioEncoder : public AudioEncoder {
   MOCK_METHOD1(SetTargetBitrate, void(int));
   MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
   // Note, we explicitly chose not to create a mock for the Encode method.
-  MOCK_METHOD5(EncodeInternal,
-               void(uint32_t timestamp,
-                    const int16_t* audio,
-                    size_t max_encoded_bytes,
-                    uint8_t* encoded,
-                    EncodedInfo* info));
+  MOCK_METHOD4(EncodeInternal,
+               EncodedInfo(uint32_t timestamp,
+                           const int16_t* audio,
+                           size_t max_encoded_bytes,
+                           uint8_t* encoded));
 };
 
 }  // namespace webrtc
@@ -183,19 +183,18 @@ void AudioEncoderOpus::SetProjectedPacketLossRate(double fraction) {
   }
 }
 
-void AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
-                                      const int16_t* audio,
-                                      size_t max_encoded_bytes,
-                                      uint8_t* encoded,
-                                      EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   if (input_buffer_.empty())
     first_timestamp_in_buffer_ = rtp_timestamp;
   input_buffer_.insert(input_buffer_.end(), audio,
                        audio + samples_per_10ms_frame_);
   if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
                               samples_per_10ms_frame_)) {
-    info->encoded_bytes = 0;
-    return;
+    return EncodedInfo();
   }
   CHECK_EQ(input_buffer_.size(),
            static_cast<size_t>(num_10ms_frames_per_packet_) *
@@ -207,12 +206,13 @@ void AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
                               ClampInt16(max_encoded_bytes), encoded);
   CHECK_GE(r, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
-  info->encoded_bytes = r;
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
-  // Allows Opus to send empty packets.
-  info->send_even_if_empty = true;
-  info->speech = r > 0;
+  EncodedInfo info;
+  info.encoded_bytes = r;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.send_even_if_empty = true;  // Allows Opus to send empty packets.
+  info.speech = r > 0;
+  return info;
 }
 
 }  // namespace webrtc
@@ -58,11 +58,10 @@ class AudioEncoderOpus final : public AudioEncoder {
   bool dtx_enabled() const { return dtx_enabled_; }
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   const int num_10ms_frames_per_packet_;
@@ -60,48 +60,48 @@ void AudioEncoderCopyRed::SetProjectedPacketLossRate(double fraction) {
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }
 
-void AudioEncoderCopyRed::EncodeInternal(uint32_t rtp_timestamp,
-                                         const int16_t* audio,
-                                         size_t max_encoded_bytes,
-                                         uint8_t* encoded,
-                                         EncodedInfo* info) {
-  speech_encoder_->Encode(rtp_timestamp, audio,
-                          static_cast<size_t>(SampleRateHz() / 100),
-                          max_encoded_bytes, encoded, info);
+AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
+  EncodedInfo info = speech_encoder_->Encode(
+      rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
+      max_encoded_bytes, encoded);
   CHECK_GE(max_encoded_bytes,
-           info->encoded_bytes + secondary_info_.encoded_bytes);
-  CHECK(info->redundant.empty()) << "Cannot use nested redundant encoders.";
+           info.encoded_bytes + secondary_info_.encoded_bytes);
+  CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
 
-  if (info->encoded_bytes > 0) {
+  if (info.encoded_bytes > 0) {
     // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
     // discarding the (empty) vector of redundant information. This is
     // intentional.
-    info->redundant.push_back(*info);
-    DCHECK_EQ(info->redundant.size(), 1u);
+    info.redundant.push_back(info);
+    DCHECK_EQ(info.redundant.size(), 1u);
     if (secondary_info_.encoded_bytes > 0) {
-      memcpy(&encoded[info->encoded_bytes], secondary_encoded_.get(),
+      memcpy(&encoded[info.encoded_bytes], secondary_encoded_.get(),
             secondary_info_.encoded_bytes);
-      info->redundant.push_back(secondary_info_);
-      DCHECK_EQ(info->redundant.size(), 2u);
+      info.redundant.push_back(secondary_info_);
+      DCHECK_EQ(info.redundant.size(), 2u);
    }
    // Save primary to secondary.
-    if (secondary_allocated_ < info->encoded_bytes) {
-      secondary_encoded_.reset(new uint8_t[info->encoded_bytes]);
-      secondary_allocated_ = info->encoded_bytes;
+    if (secondary_allocated_ < info.encoded_bytes) {
+      secondary_encoded_.reset(new uint8_t[info.encoded_bytes]);
+      secondary_allocated_ = info.encoded_bytes;
    }
    CHECK(secondary_encoded_);
-    memcpy(secondary_encoded_.get(), encoded, info->encoded_bytes);
-    secondary_info_ = *info;
-    DCHECK_EQ(info->speech, info->redundant[0].speech);
+    memcpy(secondary_encoded_.get(), encoded, info.encoded_bytes);
+    secondary_info_ = info;
+    DCHECK_EQ(info.speech, info.redundant[0].speech);
  }
  // Update main EncodedInfo.
-  info->payload_type = red_payload_type_;
-  info->encoded_bytes = 0;
-  for (std::vector<EncodedInfoLeaf>::const_iterator it =
-           info->redundant.begin();
-       it != info->redundant.end(); ++it) {
-    info->encoded_bytes += it->encoded_bytes;
+  info.payload_type = red_payload_type_;
+  info.encoded_bytes = 0;
+  for (std::vector<EncodedInfoLeaf>::const_iterator it = info.redundant.begin();
+       it != info.redundant.end(); ++it) {
+    info.encoded_bytes += it->encoded_bytes;
  }
+  return info;
 }
 
 }  // namespace webrtc
@@ -45,11 +45,10 @@ class AudioEncoderCopyRed : public AudioEncoder {
   void SetProjectedPacketLossRate(double fraction) override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   AudioEncoder* speech_encoder_;
@@ -60,9 +60,8 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
 
   void Encode() {
     ASSERT_TRUE(red_.get() != NULL);
-    encoded_info_ = AudioEncoder::EncodedInfo();
-    red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
-                 encoded_.size(), &encoded_[0], &encoded_info_);
+    encoded_info_ = red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
+                                 encoded_.size(), &encoded_[0]);
     timestamp_ += num_audio_samples_10ms;
   }
 
@@ -83,18 +82,16 @@ class MockEncodeHelper {
     memset(&info_, 0, sizeof(info_));
   }
 
-  void Encode(uint32_t timestamp,
-              const int16_t* audio,
-              size_t max_encoded_bytes,
-              uint8_t* encoded,
-              AudioEncoder::EncodedInfo* info) {
+  AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
+                                   const int16_t* audio,
+                                   size_t max_encoded_bytes,
+                                   uint8_t* encoded) {
     if (write_payload_) {
       CHECK(encoded);
       CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
       memcpy(encoded, payload_, info_.encoded_bytes);
     }
-    CHECK(info);
-    *info = info_;
+    return info_;
   }
 
   AudioEncoder::EncodedInfo info_;
@@ -144,7 +141,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
   InSequence s;
   MockFunction<void(int check_point_id)> check;
   for (int i = 1; i <= 6; ++i) {
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+        .WillRepeatedly(Return(AudioEncoder::EncodedInfo()));
     EXPECT_CALL(check, Call(i));
     Encode();
     check.Call(i);
@@ -153,13 +151,13 @@
 
 // Checks that no output is produced if the underlying codec doesn't emit any
 // new data, even if the RED codec is loaded with a secondary encoding.
-TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
+TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
   // Start with one Encode() call that will produce output.
   static const size_t kEncodedSize = 17;
   AudioEncoder::EncodedInfo info;
   info.encoded_bytes = kEncodedSize;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(info));
   Encode();
   // First call is a special case, since it does not include a secondary
   // payload.
@@ -168,15 +166,15 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
 
   // Next call to the speech encoder will not produce any output.
   info.encoded_bytes = 0;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(info));
   Encode();
   EXPECT_EQ(0u, encoded_info_.encoded_bytes);
 
   // Final call to the speech encoder will produce output.
   info.encoded_bytes = kEncodedSize;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(info));
   Encode();
   EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
   ASSERT_EQ(2u, encoded_info_.redundant.size());
@@ -192,8 +190,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
   for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
     AudioEncoder::EncodedInfo info;
     info.encoded_bytes = encode_size;
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-        .WillOnce(SetArgPointee<4>(info));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+        .WillOnce(Return(info));
   }
 
   // First call is a special case, since it does not include a secondary
@@ -218,7 +216,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
   helper.info_.encoded_bytes = 17;
   helper.info_.encoded_timestamp = timestamp_;
   uint32_t primary_timestamp = timestamp_;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
       .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
   // First call is a special case, since it does not include a secondary
@@ -249,7 +247,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
     payload[i] = i;
   }
   helper.payload_ = payload;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
      .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
   // First call is a special case, since it does not include a secondary
@@ -286,7 +284,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
   helper.info_.encoded_bytes = 17;
   const int primary_payload_type = red_payload_type_ + 1;
   helper.info_.payload_type = primary_payload_type;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
     .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
   // First call is a special case, since it does not include a secondary
@@ -46,8 +46,8 @@ class AcmGenericCodecTest : public ::testing::Test {
                        int expected_send_even_if_empty) {
     uint8_t out[kPacketSizeSamples];
     AudioEncoder::EncodedInfo encoded_info;
-    codec_->GetAudioEncoder()->Encode(timestamp_, kZeroData, kDataLengthSamples,
-                                      kPacketSizeSamples, out, &encoded_info);
+    encoded_info = codec_->GetAudioEncoder()->Encode(
+        timestamp_, kZeroData, kDataLengthSamples, kPacketSizeSamples, out);
     timestamp_ += kDataLengthSamples;
     EXPECT_TRUE(encoded_info.redundant.empty());
     EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
@@ -246,9 +246,9 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
     last_rtp_timestamp_ = rtp_timestamp;
     first_frame_ = false;
 
-  audio_encoder->Encode(rtp_timestamp, input_data.audio,
-                        input_data.length_per_channel, sizeof(stream), stream,
-                        &encoded_info);
+  encoded_info = audio_encoder->Encode(rtp_timestamp, input_data.audio,
+                                       input_data.length_per_channel,
+                                       sizeof(stream), stream);
   if (encoded_info.encoded_bytes == 0 && !encoded_info.send_even_if_empty) {
     // Not enough data.
     return 0;
@@ -150,9 +150,9 @@ class AudioDecoderTest : public ::testing::Test {
                                        samples_per_10ms, channels_,
                                        interleaved_input.get());
 
-      audio_encoder_->Encode(0, interleaved_input.get(),
-                             audio_encoder_->SampleRateHz() / 100,
-                             data_length_ * 2, output, &encoded_info_);
+      encoded_info_ = audio_encoder_->Encode(
+          0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100,
+          data_length_ * 2, output);
     }
     EXPECT_EQ(payload_type_, encoded_info_.payload_type);
     return static_cast<int>(encoded_info_.encoded_bytes);