Revert 8749 "We changed Encode() and EncodeInternal() return typ..."
The reason is that this CL adds a static initializer, so we can't roll WebRTC into Chromium. See audio_encoder.cc and the 'sizes' regression here:
http://build.chromium.org/p/chromium/builders/Linux%20x64/builds/186

> We changed Encode() and EncodeInternal() return type from bool to void in this issue:
> https://webrtc-codereview.appspot.com/38279004/
> Now we don't have to pass EncodedInfo as output parameter, but can return it instead. This also adds the benefit of making clear that EncodeInternal() needs to fill in this info.
>
> R=kwiberg@webrtc.org
>
> Review URL: https://webrtc-codereview.appspot.com/43839004

TBR=jmarusic@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/49449004

Cr-Commit-Position: refs/heads/master@{#8772}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8772 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
parent 3fffd66dfa
commit 019955d770
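For context, the interface change being undone is small: r8749 made Encode() return its EncodedInfo result (which required the static kZeroEncodedBytes constant and hence the static initializer), while the restored interface fills in a caller-provided EncodedInfo*. The following is an illustrative, compilable sketch of the restored shape using simplified stand-in types, not the real webrtc::AudioEncoder:

// Illustrative only: simplified stand-ins, not the real webrtc::AudioEncoder.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

struct EncodedInfo {
  size_t encoded_bytes = 0;
  uint32_t encoded_timestamp = 0;
  int payload_type = 0;
};

// Reverted shape (r8749):  EncodedInfo Encode(..., uint8_t* encoded);
// Restored shape (this CL): void Encode(..., uint8_t* encoded, EncodedInfo* info);
class SketchEncoder {
 public:
  void Encode(uint32_t rtp_timestamp, const int16_t* audio,
              size_t num_samples_per_channel, size_t max_encoded_bytes,
              uint8_t* encoded, EncodedInfo* info) {
    // Toy "codec": copy the samples byte-for-byte, clamped to the buffer size.
    size_t bytes = num_samples_per_channel * sizeof(int16_t);
    if (bytes > max_encoded_bytes) bytes = max_encoded_bytes;
    std::memcpy(encoded, audio, bytes);
    info->encoded_bytes = bytes;
    info->encoded_timestamp = rtp_timestamp;
  }
};

int main() {
  SketchEncoder encoder;
  int16_t audio[80] = {0};  // one 10 ms block at 8 kHz, for example
  uint8_t encoded[160];
  EncodedInfo info;
  encoder.Encode(0, audio, 80, sizeof(encoded), encoded, &info);
  std::cout << "encoded_bytes=" << info.encoded_bytes << "\n";
}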
@@ -19,19 +19,16 @@ AudioEncoder::EncodedInfo::EncodedInfo() : EncodedInfoLeaf() {
 AudioEncoder::EncodedInfo::~EncodedInfo() {
 }
 
-const AudioEncoder::EncodedInfo AudioEncoder::kZeroEncodedBytes;
-
-AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
-                                               const int16_t* audio,
-                                               size_t num_samples_per_channel,
-                                               size_t max_encoded_bytes,
-                                               uint8_t* encoded) {
+void AudioEncoder::Encode(uint32_t rtp_timestamp,
+                          const int16_t* audio,
+                          size_t num_samples_per_channel,
+                          size_t max_encoded_bytes,
+                          uint8_t* encoded,
+                          EncodedInfo* info) {
   CHECK_EQ(num_samples_per_channel,
            static_cast<size_t>(SampleRateHz() / 100));
-  EncodedInfo info =
-      EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
-  CHECK_LE(info.encoded_bytes, max_encoded_bytes);
-  return info;
+  EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded, info);
+  CHECK_LE(info->encoded_bytes, max_encoded_bytes);
 }
 
 int AudioEncoder::RtpTimestampRateHz() const {
@@ -54,21 +54,20 @@ class AudioEncoder {
     std::vector<EncodedInfoLeaf> redundant;
   };
 
-  static const EncodedInfo kZeroEncodedBytes;
-
   virtual ~AudioEncoder() {}
 
   // Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *
   // num_channels() samples). Multi-channel audio must be sample-interleaved.
-  // The encoder produces zero or more bytes of output in |encoded| and
-  // returns additional encoding information.
+  // The encoder produces zero or more bytes of output in |encoded|,
+  // and provides additional encoding information in |info|.
   // The caller is responsible for making sure that |max_encoded_bytes| is
   // not smaller than the number of bytes actually produced by the encoder.
-  EncodedInfo Encode(uint32_t rtp_timestamp,
-                     const int16_t* audio,
-                     size_t num_samples_per_channel,
-                     size_t max_encoded_bytes,
-                     uint8_t* encoded);
+  void Encode(uint32_t rtp_timestamp,
+              const int16_t* audio,
+              size_t num_samples_per_channel,
+              size_t max_encoded_bytes,
+              uint8_t* encoded,
+              EncodedInfo* info);
 
   // Return the input sample rate in Hz and the number of input channels.
   // These are constants set at instantiation time.
@@ -108,10 +107,11 @@ class AudioEncoder {
   virtual void SetProjectedPacketLossRate(double fraction) {}
 
  protected:
-  virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                                     const int16_t* audio,
-                                     size_t max_encoded_bytes,
-                                     uint8_t* encoded) = 0;
+  virtual void EncodeInternal(uint32_t rtp_timestamp,
+                              const int16_t* audio,
+                              size_t max_encoded_bytes,
+                              uint8_t* encoded,
+                              EncodedInfo* info) = 0;
 };
 
 }  // namespace webrtc
@@ -109,12 +109,13 @@ void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) {
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }
 
-AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
-    uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
+                                     const int16_t* audio,
+                                     size_t max_encoded_bytes,
+                                     uint8_t* encoded,
+                                     EncodedInfo* info) {
   CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
+  info->encoded_bytes = 0;
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     CHECK_EQ(frames_in_buffer_, 0);
@@ -125,7 +126,7 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
   }
   ++frames_in_buffer_;
   if (frames_in_buffer_ < speech_encoder_->Num10MsFramesInNextPacket()) {
-    return kZeroEncodedBytes;
+    return;
   }
   CHECK_LE(frames_in_buffer_ * 10, kMaxFrameSizeMs)
       << "Frame size cannot be larger than " << kMaxFrameSizeMs
@@ -158,15 +159,14 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
         samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz());
   }
 
-  EncodedInfo info;
   switch (activity) {
     case Vad::kPassive: {
-      info = EncodePassive(max_encoded_bytes, encoded);
+      EncodePassive(max_encoded_bytes, encoded, info);
      last_frame_active_ = false;
      break;
    }
    case Vad::kActive: {
-      info = EncodeActive(max_encoded_bytes, encoded);
+      EncodeActive(max_encoded_bytes, encoded, info);
      last_frame_active_ = true;
      break;
    }
@@ -178,17 +178,15 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
 
   speech_buffer_.clear();
   frames_in_buffer_ = 0;
-  return info;
 }
 
-AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
+                                    uint8_t* encoded,
+                                    EncodedInfo* info) {
   bool force_sid = last_frame_active_;
   bool output_produced = false;
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
   CHECK_GE(max_encoded_bytes, frames_in_buffer_ * samples_per_10ms_frame);
-  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
     int16_t encoded_bytes_tmp = 0;
     CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
@@ -197,32 +195,30 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
                               encoded, &encoded_bytes_tmp, force_sid), 0);
     if (encoded_bytes_tmp > 0) {
       CHECK(!output_produced);
-      info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
+      info->encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
       output_produced = true;
       force_sid = false;
     }
   }
-  info.encoded_timestamp = first_timestamp_in_buffer_;
-  info.payload_type = cng_payload_type_;
-  info.send_even_if_empty = true;
-  info.speech = false;
-  return info;
+  info->encoded_timestamp = first_timestamp_in_buffer_;
+  info->payload_type = cng_payload_type_;
+  info->send_even_if_empty = true;
+  info->speech = false;
 }
 
-AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderCng::EncodeActive(size_t max_encoded_bytes,
+                                   uint8_t* encoded,
+                                   EncodedInfo* info) {
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
-  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
-    info = speech_encoder_->Encode(
-        first_timestamp_in_buffer_, &speech_buffer_[i * samples_per_10ms_frame],
-        samples_per_10ms_frame, max_encoded_bytes, encoded);
+    speech_encoder_->Encode(first_timestamp_in_buffer_,
+                            &speech_buffer_[i * samples_per_10ms_frame],
+                            samples_per_10ms_frame, max_encoded_bytes,
+                            encoded, info);
    if (i < frames_in_buffer_ - 1) {
-      CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";
+      CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early.";
    }
  }
-  return info;
 }
 
 size_t AudioEncoderCng::SamplesPer10msFrame() const {
@@ -75,8 +75,9 @@ class AudioEncoderCngTest : public ::testing::Test {
 
   void Encode() {
     ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
-    encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
-                                 encoded_.size(), &encoded_[0]);
+    encoded_info_ = AudioEncoder::EncodedInfo();
+    cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
+                 encoded_.size(), &encoded_[0], &encoded_info_);
     timestamp_ += num_audio_samples_10ms_;
   }
 
@@ -91,24 +92,24 @@ class AudioEncoderCngTest : public ::testing::Test {
         .WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
 
     // Don't expect any calls to the encoder yet.
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
     for (int i = 0; i < blocks_per_frame - 1; ++i) {
       Encode();
       EXPECT_EQ(0u, encoded_info_.encoded_bytes);
     }
+    AudioEncoder::EncodedInfo info;
     if (active_speech) {
       // Now expect |blocks_per_frame| calls to the encoder in sequence.
       // Let the speech codec mock return true and set the number of encoded
       // bytes to |kMockReturnEncodedBytes|.
       InSequence s;
       for (int j = 0; j < blocks_per_frame - 1; ++j) {
-        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-            .WillOnce(Return(AudioEncoder::kZeroEncodedBytes));
+        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+            .WillOnce(SetArgPointee<4>(info));
       }
-      AudioEncoder::EncodedInfo info;
       info.encoded_bytes = kMockReturnEncodedBytes;
-      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-          .WillOnce(Return(info));
+      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+          .WillOnce(SetArgPointee<4>(info));
     }
     Encode();
     if (active_speech) {
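The mock expectations above switch from Return(info) to SetArgPointee<4>(info) because the mocked EncodeInternal() now reports through its fifth argument (index 4) instead of a return value. A minimal, self-contained gmock sketch of that pattern follows; the types and names are illustrative stand-ins, not the real MockAudioEncoder:

#include <cstddef>
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::_;
using ::testing::SetArgPointee;

struct Info {
  size_t encoded_bytes = 0;
};

class Codec {
 public:
  virtual ~Codec() {}
  virtual void Encode(const int16_t* audio, size_t max_bytes, uint8_t* out,
                      Info* info) = 0;
};

class MockCodec : public Codec {
 public:
  MOCK_METHOD4(Encode, void(const int16_t* audio, size_t max_bytes,
                            uint8_t* out, Info* info));
};

TEST(SetArgPointeeSketch, FillsOutParameter) {
  MockCodec mock;
  Info canned;
  canned.encoded_bytes = 17;
  // The out-parameter is argument index 3 here; in the diff above it is
  // index 4 because EncodeInternal() takes five arguments.
  EXPECT_CALL(mock, Encode(_, _, _, _)).WillOnce(SetArgPointee<3>(canned));

  int16_t audio[80] = {0};
  uint8_t out[160];
  Info info;
  mock.Encode(audio, sizeof(out), out, &info);
  EXPECT_EQ(17u, info.encoded_bytes);
}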
@@ -253,7 +254,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillRepeatedly(Return(Vad::kPassive));
   // Expect no calls at all to the speech encoder mock.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
   uint32_t expected_timestamp = timestamp_;
   for (int i = 0; i < 100; ++i) {
     Encode();
@@ -283,23 +284,20 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
   CreateCng();
 
   // All of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-      .Times(6)
-      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .Times(6);
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);
 
   // First half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-      .Times(6)
-      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .Times(6);
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
   EXPECT_TRUE(encoded_info_.speech);
 
   // Second half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-      .Times(6)
-      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .Times(6);
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);
 
@@ -338,10 +336,22 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
   CheckVadInputSize(60, 30, 30);
 }
 
+// Verifies that the EncodedInfo struct pointer passed to
+// AudioEncoderCng::Encode is propagated to the Encode call to the underlying
+// speech encoder.
+TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
+  CreateCng();
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_));
+  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
+  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+      .WillOnce(Return(Vad::kActive));
+  Encode();
+}
+
 // Verifies that the correct payload type is set when CNG is encoded.
 TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
   CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillOnce(Return(Vad::kPassive));
@@ -375,7 +385,8 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
       .WillOnce(Return(Vad::kActive));
   AudioEncoder::EncodedInfo info;
   info.encoded_bytes = kMockReturnEncodedBytes;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).WillOnce(Return(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .WillOnce(SetArgPointee<4>(info));
   Encode();
   EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
 
@@ -56,10 +56,11 @@ class AudioEncoderCng final : public AudioEncoder {
   void SetProjectedPacketLossRate(double fraction) override;
 
  protected:
-  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
-                             size_t max_encoded_bytes,
-                             uint8_t* encoded) override;
+  void EncodeInternal(uint32_t rtp_timestamp,
+                      const int16_t* audio,
+                      size_t max_encoded_bytes,
+                      uint8_t* encoded,
+                      EncodedInfo* info) override;
 
  private:
   // Deleter for use with scoped_ptr. E.g., use as
@@ -68,8 +69,12 @@ class AudioEncoderCng final : public AudioEncoder {
     inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
   };
 
-  EncodedInfo EncodePassive(size_t max_encoded_bytes, uint8_t* encoded);
-  EncodedInfo EncodeActive(size_t max_encoded_bytes, uint8_t* encoded);
+  void EncodePassive(size_t max_encoded_bytes,
+                     uint8_t* encoded,
+                     EncodedInfo* info);
+  void EncodeActive(size_t max_encoded_bytes,
+                    uint8_t* encoded,
+                    EncodedInfo* info);
   size_t SamplesPer10msFrame() const;
 
   AudioEncoder* speech_encoder_;
@@ -66,11 +66,11 @@ int AudioEncoderPcm::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
-    uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
+                                     const int16_t* audio,
+                                     size_t max_encoded_bytes,
+                                     uint8_t* encoded,
+                                     EncodedInfo* info) {
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     first_timestamp_in_buffer_ = rtp_timestamp;
@@ -79,18 +79,17 @@ AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
     speech_buffer_.push_back(audio[i]);
   }
   if (speech_buffer_.size() < full_frame_samples_) {
-    return kZeroEncodedBytes;
+    info->encoded_bytes = 0;
+    return;
   }
   CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
   CHECK_GE(max_encoded_bytes, full_frame_samples_);
   int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
   CHECK_GE(ret, 0);
   speech_buffer_.clear();
-  EncodedInfo info;
-  info.encoded_timestamp = first_timestamp_in_buffer_;
-  info.payload_type = payload_type_;
-  info.encoded_bytes = static_cast<size_t>(ret);
-  return info;
+  info->encoded_timestamp = first_timestamp_in_buffer_;
+  info->payload_type = payload_type_;
+  info->encoded_bytes = static_cast<size_t>(ret);
 }
 
 int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
@@ -41,10 +41,11 @@ class AudioEncoderPcm : public AudioEncoder {
  protected:
   AudioEncoderPcm(const Config& config, int sample_rate_hz);
 
-  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
-                             size_t max_encoded_bytes,
-                             uint8_t* encoded) override;
+  void EncodeInternal(uint32_t rtp_timestamp,
+                      const int16_t* audio,
+                      size_t max_encoded_bytes,
+                      uint8_t* encoded,
+                      EncodedInfo* info) override;
 
   virtual int16_t EncodeCall(const int16_t* audio,
                              size_t input_len,
@@ -77,11 +77,11 @@ int AudioEncoderG722::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
-    uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
+                                      const int16_t* audio,
+                                      size_t max_encoded_bytes,
+                                      uint8_t* encoded,
+                                      EncodedInfo* info) {
   CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
 
   if (num_10ms_frames_buffered_ == 0)
@@ -95,7 +95,8 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
 
   // If we don't yet have enough samples for a packet, we're done for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    return kZeroEncodedBytes;
+    info->encoded_bytes = 0;
+    return;
   }
 
   // Encode each channel separately.
@@ -123,11 +124,9 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
     encoded[i * num_channels_ + j] =
         interleave_buffer_[2 * j] << 4 | interleave_buffer_[2 * j + 1];
   }
-  EncodedInfo info;
-  info.encoded_bytes = samples_per_channel / 2 * num_channels_;
-  info.encoded_timestamp = first_timestamp_in_buffer_;
-  info.payload_type = payload_type_;
-  return info;
+  info->encoded_bytes = samples_per_channel / 2 * num_channels_;
+  info->encoded_timestamp = first_timestamp_in_buffer_;
+  info->payload_type = payload_type_;
 }
 
 int AudioEncoderG722::SamplesPerChannel() const {
@@ -38,10 +38,11 @@ class AudioEncoderG722 : public AudioEncoder {
   int Max10MsFramesInAPacket() const override;
 
  protected:
-  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
-                             size_t max_encoded_bytes,
-                             uint8_t* encoded) override;
+  void EncodeInternal(uint32_t rtp_timestamp,
+                      const int16_t* audio,
+                      size_t max_encoded_bytes,
+                      uint8_t* encoded,
+                      EncodedInfo* info) override;
 
  private:
   // The encoder state for one channel.
@@ -63,11 +63,11 @@ int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
-    uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
+                                      const int16_t* audio,
+                                      size_t max_encoded_bytes,
+                                      uint8_t* encoded,
+                                      EncodedInfo* info) {
   DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
 
   // Save timestamp if starting a new packet.
@@ -82,7 +82,8 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
   // If we don't yet have enough buffered input for a whole packet, we're done
   // for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    return kZeroEncodedBytes;
+    info->encoded_bytes = 0;
+    return;
   }
 
   // Encode buffered input.
@@ -94,12 +95,10 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
       kSampleRateHz / 100 * num_10ms_frames_per_packet_,
       encoded);
   CHECK_GE(output_len, 0);
-  EncodedInfo info;
-  info.encoded_bytes = output_len;
-  DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
-  info.encoded_timestamp = first_timestamp_in_buffer_;
-  info.payload_type = payload_type_;
-  return info;
+  info->encoded_bytes = output_len;
+  DCHECK_EQ(info->encoded_bytes, RequiredOutputSizeBytes());
+  info->encoded_timestamp = first_timestamp_in_buffer_;
+  info->payload_type = payload_type_;
 }
 
 size_t AudioEncoderIlbc::RequiredOutputSizeBytes() const {
@@ -38,10 +38,11 @@ class AudioEncoderIlbc : public AudioEncoder {
   int Max10MsFramesInAPacket() const override;
 
  protected:
-  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
-                             size_t max_encoded_bytes,
-                             uint8_t* encoded) override;
+  void EncodeInternal(uint32_t rtp_timestamp,
+                      const int16_t* audio,
+                      size_t max_encoded_bytes,
+                      uint8_t* encoded,
+                      EncodedInfo* info) override;
 
  private:
   size_t RequiredOutputSizeBytes() const;
@@ -84,10 +84,11 @@ class AudioEncoderDecoderIsacT : public AudioEncoder, public AudioDecoder {
 
  protected:
   // AudioEncoder protected method.
-  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
-                             size_t max_encoded_bytes,
-                             uint8_t* encoded) override;
+  void EncodeInternal(uint32_t rtp_timestamp,
+                      const int16_t* audio,
+                      size_t max_encoded_bytes,
+                      uint8_t* encoded,
+                      EncodedInfo* info) override;
 
   // AudioDecoder protected method.
   int DecodeInternal(const uint8_t* encoded,
@@ -184,11 +184,11 @@ int AudioEncoderDecoderIsacT<T>::Max10MsFramesInAPacket() const {
 }
 
 template <typename T>
-AudioEncoder::EncodedInfo AudioEncoderDecoderIsacT<T>::EncodeInternal(
-    uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
+                                                 const int16_t* audio,
+                                                 size_t max_encoded_bytes,
+                                                 uint8_t* encoded,
+                                                 EncodedInfo* info) {
   CriticalSectionScoped cs_lock(lock_.get());
   if (!packet_in_progress_) {
     // Starting a new packet; remember the timestamp for later.
@@ -206,17 +206,15 @@ AudioEncoder::EncodedInfo AudioEncoderDecoderIsacT<T>::EncodeInternal(
   // buffer. All we can do is check for an overrun after the fact.
   CHECK(static_cast<size_t>(r) <= max_encoded_bytes);
 
+  info->encoded_bytes = r;
   if (r == 0)
-    return kZeroEncodedBytes;
+    return;
 
   // Got enough input to produce a packet. Return the saved timestamp from
   // the first chunk of input that went into the packet.
   packet_in_progress_ = false;
-  EncodedInfo info;
-  info.encoded_bytes = r;
-  info.encoded_timestamp = packet_timestamp_;
-  info.payload_type = payload_type_;
-  return info;
+  info->encoded_timestamp = packet_timestamp_;
+  info->payload_type = payload_type_;
 }
 
 template <typename T>
@@ -29,11 +29,12 @@ class MockAudioEncoder : public AudioEncoder {
   MOCK_METHOD1(SetTargetBitrate, void(int));
   MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
   // Note, we explicitly chose not to create a mock for the Encode method.
-  MOCK_METHOD4(EncodeInternal,
-               EncodedInfo(uint32_t timestamp,
-                           const int16_t* audio,
-                           size_t max_encoded_bytes,
-                           uint8_t* encoded));
+  MOCK_METHOD5(EncodeInternal,
+               void(uint32_t timestamp,
+                    const int16_t* audio,
+                    size_t max_encoded_bytes,
+                    uint8_t* encoded,
+                    EncodedInfo* info));
 };
 
 }  // namespace webrtc
@@ -183,18 +183,19 @@ void AudioEncoderOpus::SetProjectedPacketLossRate(double fraction) {
   }
 }
 
-AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
-    uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
+void AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
+                                      const int16_t* audio,
+                                      size_t max_encoded_bytes,
+                                      uint8_t* encoded,
+                                      EncodedInfo* info) {
   if (input_buffer_.empty())
     first_timestamp_in_buffer_ = rtp_timestamp;
   input_buffer_.insert(input_buffer_.end(), audio,
                        audio + samples_per_10ms_frame_);
   if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
                               samples_per_10ms_frame_)) {
-    return kZeroEncodedBytes;
+    info->encoded_bytes = 0;
+    return;
   }
   CHECK_EQ(input_buffer_.size(),
            static_cast<size_t>(num_10ms_frames_per_packet_) *
@@ -206,13 +207,12 @@ AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
                              ClampInt16(max_encoded_bytes), encoded);
   CHECK_GE(r, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
-  EncodedInfo info;
-  info.encoded_bytes = r;
-  info.encoded_timestamp = first_timestamp_in_buffer_;
-  info.payload_type = payload_type_;
-  info.send_even_if_empty = true;  // Allows Opus to send empty packets.
-  info.speech = r > 0;
-  return info;
+  info->encoded_bytes = r;
+  info->encoded_timestamp = first_timestamp_in_buffer_;
+  info->payload_type = payload_type_;
+  // Allows Opus to send empty packets.
+  info->send_even_if_empty = true;
+  info->speech = r > 0;
 }
 
 }  // namespace webrtc
@@ -58,10 +58,11 @@ class AudioEncoderOpus final : public AudioEncoder {
   bool dtx_enabled() const { return dtx_enabled_; }
 
  protected:
-  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
-                             size_t max_encoded_bytes,
-                             uint8_t* encoded) override;
+  void EncodeInternal(uint32_t rtp_timestamp,
+                      const int16_t* audio,
+                      size_t max_encoded_bytes,
+                      uint8_t* encoded,
+                      EncodedInfo* info) override;
 
  private:
   const int num_10ms_frames_per_packet_;
@@ -60,48 +60,48 @@ void AudioEncoderCopyRed::SetProjectedPacketLossRate(double fraction) {
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }
 
-AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
-    uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
-  EncodedInfo info = speech_encoder_->Encode(
-      rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
-      max_encoded_bytes, encoded);
+void AudioEncoderCopyRed::EncodeInternal(uint32_t rtp_timestamp,
+                                         const int16_t* audio,
+                                         size_t max_encoded_bytes,
+                                         uint8_t* encoded,
+                                         EncodedInfo* info) {
+  speech_encoder_->Encode(rtp_timestamp, audio,
+                          static_cast<size_t>(SampleRateHz() / 100),
+                          max_encoded_bytes, encoded, info);
   CHECK_GE(max_encoded_bytes,
-           info.encoded_bytes + secondary_info_.encoded_bytes);
-  CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
+           info->encoded_bytes + secondary_info_.encoded_bytes);
+  CHECK(info->redundant.empty()) << "Cannot use nested redundant encoders.";
 
-  if (info.encoded_bytes > 0) {
+  if (info->encoded_bytes > 0) {
     // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
     // discarding the (empty) vector of redundant information. This is
     // intentional.
-    info.redundant.push_back(info);
-    DCHECK_EQ(info.redundant.size(), 1u);
+    info->redundant.push_back(*info);
+    DCHECK_EQ(info->redundant.size(), 1u);
    if (secondary_info_.encoded_bytes > 0) {
-      memcpy(&encoded[info.encoded_bytes], secondary_encoded_.get(),
+      memcpy(&encoded[info->encoded_bytes], secondary_encoded_.get(),
             secondary_info_.encoded_bytes);
-      info.redundant.push_back(secondary_info_);
-      DCHECK_EQ(info.redundant.size(), 2u);
+      info->redundant.push_back(secondary_info_);
+      DCHECK_EQ(info->redundant.size(), 2u);
    }
    // Save primary to secondary.
-    if (secondary_allocated_ < info.encoded_bytes) {
-      secondary_encoded_.reset(new uint8_t[info.encoded_bytes]);
-      secondary_allocated_ = info.encoded_bytes;
+    if (secondary_allocated_ < info->encoded_bytes) {
+      secondary_encoded_.reset(new uint8_t[info->encoded_bytes]);
+      secondary_allocated_ = info->encoded_bytes;
    }
    CHECK(secondary_encoded_);
-    memcpy(secondary_encoded_.get(), encoded, info.encoded_bytes);
-    secondary_info_ = info;
-    DCHECK_EQ(info.speech, info.redundant[0].speech);
+    memcpy(secondary_encoded_.get(), encoded, info->encoded_bytes);
+    secondary_info_ = *info;
+    DCHECK_EQ(info->speech, info->redundant[0].speech);
  }
  // Update main EncodedInfo.
-  info.payload_type = red_payload_type_;
-  info.encoded_bytes = 0;
-  for (std::vector<EncodedInfoLeaf>::const_iterator it = info.redundant.begin();
-       it != info.redundant.end(); ++it) {
-    info.encoded_bytes += it->encoded_bytes;
+  info->payload_type = red_payload_type_;
+  info->encoded_bytes = 0;
+  for (std::vector<EncodedInfoLeaf>::const_iterator it =
+           info->redundant.begin();
+       it != info->redundant.end(); ++it) {
+    info->encoded_bytes += it->encoded_bytes;
   }
-  return info;
 }
 
 }  // namespace webrtc
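For readers unfamiliar with the RED ("copy redundant") wrapper touched above: each call forwards to the speech encoder, then appends a copy of the previous packet's primary payload as redundancy and records both parts in info->redundant. A rough standalone sketch of that buffering idea, with simplified types rather than the real AudioEncoderCopyRed:

#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-in for one encoded frame.
using Payload = std::vector<uint8_t>;

class CopyRedSketch {
 public:
  // Returns the bytes to send: the new primary payload followed by a copy of
  // the previous primary (the redundant part), then remembers the new primary
  // for the next call.
  Payload Encode(const Payload& primary) {
    Payload out = primary;
    out.insert(out.end(), secondary_.begin(), secondary_.end());
    secondary_ = primary;  // "Save primary to secondary", as in the diff.
    return out;
  }

 private:
  Payload secondary_;
};

int main() {
  CopyRedSketch red;
  std::cout << red.Encode({1, 2}).size() << "\n";     // 2: no redundancy yet.
  std::cout << red.Encode({3, 4, 5}).size() << "\n";  // 5: 3 new + 2 redundant.
}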
@@ -45,10 +45,11 @@ class AudioEncoderCopyRed : public AudioEncoder {
   void SetProjectedPacketLossRate(double fraction) override;
 
  protected:
-  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
-                             size_t max_encoded_bytes,
-                             uint8_t* encoded) override;
+  void EncodeInternal(uint32_t rtp_timestamp,
+                      const int16_t* audio,
+                      size_t max_encoded_bytes,
+                      uint8_t* encoded,
+                      EncodedInfo* info) override;
 
  private:
   AudioEncoder* speech_encoder_;
@@ -60,8 +60,9 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
 
   void Encode() {
     ASSERT_TRUE(red_.get() != NULL);
-    encoded_info_ = red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
-                                 encoded_.size(), &encoded_[0]);
+    encoded_info_ = AudioEncoder::EncodedInfo();
+    red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
+                 encoded_.size(), &encoded_[0], &encoded_info_);
     timestamp_ += num_audio_samples_10ms;
   }
 
@@ -82,16 +83,18 @@ class MockEncodeHelper {
     memset(&info_, 0, sizeof(info_));
   }
 
-  AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
-                                   const int16_t* audio,
-                                   size_t max_encoded_bytes,
-                                   uint8_t* encoded) {
+  void Encode(uint32_t timestamp,
+              const int16_t* audio,
+              size_t max_encoded_bytes,
+              uint8_t* encoded,
+              AudioEncoder::EncodedInfo* info) {
     if (write_payload_) {
       CHECK(encoded);
       CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
       memcpy(encoded, payload_, info_.encoded_bytes);
     }
-    return info_;
+    CHECK(info);
+    *info = info_;
   }
 
   AudioEncoder::EncodedInfo info_;
@@ -141,8 +144,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
   InSequence s;
   MockFunction<void(int check_point_id)> check;
   for (int i = 1; i <= 6; ++i) {
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-        .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _));
    EXPECT_CALL(check, Call(i));
    Encode();
    check.Call(i);
@@ -151,13 +153,13 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
 
 // Checks that no output is produced if the underlying codec doesn't emit any
 // new data, even if the RED codec is loaded with a secondary encoding.
-TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
+TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
   // Start with one Encode() call that will produce output.
   static const size_t kEncodedSize = 17;
-  AudioEncoder::EncodedInfo nonZeroEncodedBytes;
-  nonZeroEncodedBytes.encoded_bytes = kEncodedSize;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-      .WillOnce(Return(nonZeroEncodedBytes));
+  AudioEncoder::EncodedInfo info;
+  info.encoded_bytes = kEncodedSize;
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .WillOnce(SetArgPointee<4>(info));
   Encode();
   // First call is a special case, since it does not include a secondary
   // payload.
@@ -165,14 +167,16 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
   EXPECT_EQ(kEncodedSize, encoded_info_.encoded_bytes);
 
   // Next call to the speech encoder will not produce any output.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-      .WillOnce(Return(AudioEncoder::kZeroEncodedBytes));
+  info.encoded_bytes = 0;
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .WillOnce(SetArgPointee<4>(info));
   Encode();
   EXPECT_EQ(0u, encoded_info_.encoded_bytes);
 
   // Final call to the speech encoder will produce output.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-      .WillOnce(Return(nonZeroEncodedBytes));
+  info.encoded_bytes = kEncodedSize;
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+      .WillOnce(SetArgPointee<4>(info));
   Encode();
   EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
   ASSERT_EQ(2u, encoded_info_.redundant.size());
@@ -188,8 +192,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
   for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
     AudioEncoder::EncodedInfo info;
     info.encoded_bytes = encode_size;
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
-        .WillOnce(Return(info));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+        .WillOnce(SetArgPointee<4>(info));
   }
 
   // First call is a special case, since it does not include a secondary
@@ -214,7 +218,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
   helper.info_.encoded_bytes = 17;
   helper.info_.encoded_timestamp = timestamp_;
   uint32_t primary_timestamp = timestamp_;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
       .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
   // First call is a special case, since it does not include a secondary
@@ -245,7 +249,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
     payload[i] = i;
   }
   helper.payload_ = payload;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
      .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
  // First call is a special case, since it does not include a secondary
@@ -282,7 +286,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
   helper.info_.encoded_bytes = 17;
   const int primary_payload_type = red_payload_type_ + 1;
   helper.info_.payload_type = primary_payload_type;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
      .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
  // First call is a special case, since it does not include a secondary
@@ -234,8 +234,8 @@ void ACMGenericCodec::Encode(uint32_t input_timestamp,
   first_frame_ = false;
   CHECK_EQ(audio_channel, encoder_->NumChannels());
 
-  *encoded_info = encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
-                                   2 * MAX_PAYLOAD_SIZE_BYTE, bitstream);
+  encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
+                   2 * MAX_PAYLOAD_SIZE_BYTE, bitstream, encoded_info);
   *bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes);
 }
 
@@ -150,9 +150,9 @@ class AudioDecoderTest : public ::testing::Test {
                                        samples_per_10ms, channels_,
                                        interleaved_input.get());
 
-      encoded_info_ = audio_encoder_->Encode(
-          0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100,
-          data_length_ * 2, output);
+      audio_encoder_->Encode(0, interleaved_input.get(),
+                             audio_encoder_->SampleRateHz() / 100,
+                             data_length_ * 2, output, &encoded_info_);
     }
     EXPECT_EQ(payload_type_, encoded_info_.payload_type);
     return static_cast<int>(encoded_info_.encoded_bytes);