We changed the return type of Encode() and EncodeInternal() from bool to void in this issue:
https://webrtc-codereview.appspot.com/38279004/

Now we don't have to pass EncodedInfo as an output parameter, but can return it instead. This also has the benefit of making it clear that EncodeInternal() needs to fill in this info.

R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/43839004

Cr-Commit-Position: refs/heads/master@{#8749}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8749 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
parent 73d763e71f
commit 0cb612b43b
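For orientation before the diff: the following is a minimal, self-contained sketch of the calling convention this commit introduces. It is illustrative only; FakeEncoder, main(), and the trimmed-down EncodedInfo struct here are hypothetical stand-ins, and only the general shape of EncodeInternal() mirrors the real webrtc::AudioEncoder interface touched below.

// Illustrative sketch only (FakeEncoder is hypothetical; the real class is
// webrtc::AudioEncoder). Shows the new return-by-value convention.
#include <stddef.h>
#include <stdint.h>

// Stripped-down stand-in for AudioEncoder::EncodedInfo.
struct EncodedInfo {
  size_t encoded_bytes = 0;
  uint32_t encoded_timestamp = 0;
  int payload_type = 0;
};

class FakeEncoder {
 public:
  // New style: the implementation constructs and returns the info instead of
  // filling in an EncodedInfo* output parameter.
  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
                             const int16_t* audio,
                             size_t max_encoded_bytes,
                             uint8_t* encoded) {
    (void)audio;
    (void)max_encoded_bytes;
    (void)encoded;  // A real encoder would write its payload here.
    EncodedInfo info;
    info.encoded_timestamp = rtp_timestamp;
    info.encoded_bytes = 0;  // Nothing buffered yet in this toy example.
    return info;
  }
};

int main() {
  FakeEncoder enc;
  int16_t audio[80] = {0};
  uint8_t out[256];
  // The caller receives the info by value; there is no out-parameter to forget.
  EncodedInfo info = enc.EncodeInternal(0, audio, sizeof(out), out);
  return static_cast<int>(info.encoded_bytes);
}

Returning the struct by value means every EncodeInternal() override is forced by the compiler to produce an EncodedInfo, which is the point made in the commit message above.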
@@ -19,16 +19,19 @@ AudioEncoder::EncodedInfo::EncodedInfo() : EncodedInfoLeaf() {
 AudioEncoder::EncodedInfo::~EncodedInfo() {
 }

-void AudioEncoder::Encode(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t num_samples_per_channel,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+const AudioEncoder::EncodedInfo AudioEncoder::kZeroEncodedBytes;
+
+AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t num_samples_per_channel,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CHECK_EQ(num_samples_per_channel,
            static_cast<size_t>(SampleRateHz() / 100));
-  EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded, info);
-  CHECK_LE(info->encoded_bytes, max_encoded_bytes);
+  EncodedInfo info =
+      EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
+  CHECK_LE(info.encoded_bytes, max_encoded_bytes);
+  return info;
 }

 int AudioEncoder::RtpTimestampRateHz() const {
@@ -54,20 +54,21 @@ class AudioEncoder {
     std::vector<EncodedInfoLeaf> redundant;
   };

+  static const EncodedInfo kZeroEncodedBytes;
+
   virtual ~AudioEncoder() {}

   // Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *
   // num_channels() samples). Multi-channel audio must be sample-interleaved.
-  // The encoder produces zero or more bytes of output in |encoded|,
-  // and provides additional encoding information in |info|.
+  // The encoder produces zero or more bytes of output in |encoded| and
+  // returns additional encoding information.
   // The caller is responsible for making sure that |max_encoded_bytes| is
   // not smaller than the number of bytes actually produced by the encoder.
-  void Encode(uint32_t rtp_timestamp,
+  EncodedInfo Encode(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t num_samples_per_channel,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info);
+      uint8_t* encoded);

   // Return the input sample rate in Hz and the number of input channels.
   // These are constants set at instantiation time.
@@ -107,11 +108,10 @@ class AudioEncoder {
   virtual void SetProjectedPacketLossRate(double fraction) {}

  protected:
-  virtual void EncodeInternal(uint32_t rtp_timestamp,
+  virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) = 0;
+      uint8_t* encoded) = 0;
 };

 }  // namespace webrtc
@@ -109,13 +109,12 @@ void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) {
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }

-void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
-  info->encoded_bytes = 0;
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     CHECK_EQ(frames_in_buffer_, 0);
@@ -126,7 +125,7 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
   }
   ++frames_in_buffer_;
   if (frames_in_buffer_ < speech_encoder_->Num10MsFramesInNextPacket()) {
-    return;
+    return kZeroEncodedBytes;
   }
   CHECK_LE(frames_in_buffer_ * 10, kMaxFrameSizeMs)
       << "Frame size cannot be larger than " << kMaxFrameSizeMs
@@ -159,14 +158,15 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
         samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz());
   }

+  EncodedInfo info;
   switch (activity) {
     case Vad::kPassive: {
-      EncodePassive(max_encoded_bytes, encoded, info);
+      info = EncodePassive(max_encoded_bytes, encoded);
       last_frame_active_ = false;
       break;
     }
     case Vad::kActive: {
-      EncodeActive(max_encoded_bytes, encoded, info);
+      info = EncodeActive(max_encoded_bytes, encoded);
       last_frame_active_ = true;
       break;
     }
@@ -178,15 +178,17 @@ void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,

   speech_buffer_.clear();
   frames_in_buffer_ = 0;
+  return info;
 }

-void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   bool force_sid = last_frame_active_;
   bool output_produced = false;
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
   CHECK_GE(max_encoded_bytes, frames_in_buffer_ * samples_per_10ms_frame);
+  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
     int16_t encoded_bytes_tmp = 0;
     CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
@@ -195,30 +197,32 @@ void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
                               encoded, &encoded_bytes_tmp, force_sid), 0);
     if (encoded_bytes_tmp > 0) {
       CHECK(!output_produced);
-      info->encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
+      info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
       output_produced = true;
       force_sid = false;
     }
   }
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = cng_payload_type_;
-  info->send_even_if_empty = true;
-  info->speech = false;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = cng_payload_type_;
+  info.send_even_if_empty = true;
+  info.speech = false;
+  return info;
 }

-void AudioEncoderCng::EncodeActive(size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
-    speech_encoder_->Encode(first_timestamp_in_buffer_,
-        &speech_buffer_[i * samples_per_10ms_frame],
-        samples_per_10ms_frame, max_encoded_bytes,
-        encoded, info);
+    info = speech_encoder_->Encode(
+        first_timestamp_in_buffer_, &speech_buffer_[i * samples_per_10ms_frame],
+        samples_per_10ms_frame, max_encoded_bytes, encoded);
     if (i < frames_in_buffer_ - 1) {
-      CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early.";
+      CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";
     }
   }
+  return info;
 }

 size_t AudioEncoderCng::SamplesPer10msFrame() const {
@@ -75,9 +75,8 @@ class AudioEncoderCngTest : public ::testing::Test {

   void Encode() {
     ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
-    encoded_info_ = AudioEncoder::EncodedInfo();
-    cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
-        encoded_.size(), &encoded_[0], &encoded_info_);
+    encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
+        encoded_.size(), &encoded_[0]);
     timestamp_ += num_audio_samples_10ms_;
   }

@@ -92,24 +91,24 @@ class AudioEncoderCngTest : public ::testing::Test {
         .WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));

     // Don't expect any calls to the encoder yet.
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
     for (int i = 0; i < blocks_per_frame - 1; ++i) {
       Encode();
       EXPECT_EQ(0u, encoded_info_.encoded_bytes);
     }
-    AudioEncoder::EncodedInfo info;
     if (active_speech) {
       // Now expect |blocks_per_frame| calls to the encoder in sequence.
       // Let the speech codec mock return true and set the number of encoded
       // bytes to |kMockReturnEncodedBytes|.
       InSequence s;
       for (int j = 0; j < blocks_per_frame - 1; ++j) {
-        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-            .WillOnce(SetArgPointee<4>(info));
+        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+            .WillOnce(Return(AudioEncoder::kZeroEncodedBytes));
       }
+      AudioEncoder::EncodedInfo info;
       info.encoded_bytes = kMockReturnEncodedBytes;
-      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-          .WillOnce(SetArgPointee<4>(info));
+      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+          .WillOnce(Return(info));
     }
     Encode();
     if (active_speech) {
@@ -254,7 +253,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillRepeatedly(Return(Vad::kPassive));
   // Expect no calls at all to the speech encoder mock.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
   uint32_t expected_timestamp = timestamp_;
   for (int i = 0; i < 100; ++i) {
     Encode();
@@ -284,20 +283,23 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
   CreateCng();

   // All of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);

   // First half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
   EXPECT_TRUE(encoded_info_.speech);

   // Second half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);

@@ -336,22 +338,10 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
   CheckVadInputSize(60, 30, 30);
 }

-// Verifies that the EncodedInfo struct pointer passed to
-// AudioEncoderCng::Encode is propagated to the Encode call to the underlying
-// speech encoder.
-TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
-  CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_));
-  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
-  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
-      .WillOnce(Return(Vad::kActive));
-  Encode();
-}
-
 // Verifies that the correct payload type is set when CNG is encoded.
 TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
   CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillOnce(Return(Vad::kPassive));
@@ -385,8 +375,7 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
       .WillOnce(Return(Vad::kActive));
   AudioEncoder::EncodedInfo info;
   info.encoded_bytes = kMockReturnEncodedBytes;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).WillOnce(Return(info));
   Encode();
   EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);

@@ -56,11 +56,10 @@ class AudioEncoderCng final : public AudioEncoder {
   void SetProjectedPacketLossRate(double fraction) override;

  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) override;
+      uint8_t* encoded) override;

  private:
   // Deleter for use with scoped_ptr. E.g., use as
@@ -69,12 +68,8 @@ class AudioEncoderCng final : public AudioEncoder {
     inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
   };

-  void EncodePassive(size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info);
-  void EncodeActive(size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info);
+  EncodedInfo EncodePassive(size_t max_encoded_bytes, uint8_t* encoded);
+  EncodedInfo EncodeActive(size_t max_encoded_bytes, uint8_t* encoded);
   size_t SamplesPer10msFrame() const;

   AudioEncoder* speech_encoder_;
@@ -66,11 +66,11 @@ int AudioEncoderPcm::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }

-void AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     first_timestamp_in_buffer_ = rtp_timestamp;
@@ -79,17 +79,18 @@ void AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
     speech_buffer_.push_back(audio[i]);
   }
   if (speech_buffer_.size() < full_frame_samples_) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }
   CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
   CHECK_GE(max_encoded_bytes, full_frame_samples_);
   int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
   CHECK_GE(ret, 0);
   speech_buffer_.clear();
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
-  info->encoded_bytes = static_cast<size_t>(ret);
+  EncodedInfo info;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.encoded_bytes = static_cast<size_t>(ret);
+  return info;
 }

 int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
@@ -41,11 +41,10 @@ class AudioEncoderPcm : public AudioEncoder {
  protected:
   AudioEncoderPcm(const Config& config, int sample_rate_hz);

-  void EncodeInternal(uint32_t rtp_timestamp,
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) override;
+      uint8_t* encoded) override;

   virtual int16_t EncodeCall(const int16_t* audio,
       size_t input_len,
@@ -77,11 +77,11 @@ int AudioEncoderG722::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }

-void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CHECK_GE(max_encoded_bytes, MaxEncodedBytes());

   if (num_10ms_frames_buffered_ == 0)
@@ -95,8 +95,7 @@ void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,

   // If we don't yet have enough samples for a packet, we're done for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }

   // Encode each channel separately.
@@ -124,9 +123,11 @@ void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
     encoded[i * num_channels_ + j] =
         interleave_buffer_[2 * j] << 4 | interleave_buffer_[2 * j + 1];
   }
-  info->encoded_bytes = samples_per_channel / 2 * num_channels_;
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = samples_per_channel / 2 * num_channels_;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  return info;
 }

 int AudioEncoderG722::SamplesPerChannel() const {
@@ -38,11 +38,10 @@ class AudioEncoderG722 : public AudioEncoder {
   int Max10MsFramesInAPacket() const override;

  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) override;
+      uint8_t* encoded) override;

  private:
   // The encoder state for one channel.
@@ -63,11 +63,11 @@ int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }

-void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());

   // Save timestamp if starting a new packet.
@@ -82,8 +82,7 @@ void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
   // If we don't yet have enough buffered input for a whole packet, we're done
   // for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }

   // Encode buffered input.
@@ -95,10 +94,12 @@ void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
       kSampleRateHz / 100 * num_10ms_frames_per_packet_,
       encoded);
   CHECK_GE(output_len, 0);
-  info->encoded_bytes = output_len;
-  DCHECK_EQ(info->encoded_bytes, RequiredOutputSizeBytes());
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = output_len;
+  DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  return info;
 }

 size_t AudioEncoderIlbc::RequiredOutputSizeBytes() const {
@@ -38,11 +38,10 @@ class AudioEncoderIlbc : public AudioEncoder {
   int Max10MsFramesInAPacket() const override;

  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) override;
+      uint8_t* encoded) override;

  private:
   size_t RequiredOutputSizeBytes() const;
@@ -84,11 +84,10 @@ class AudioEncoderDecoderIsacT : public AudioEncoder, public AudioDecoder {

  protected:
   // AudioEncoder protected method.
-  void EncodeInternal(uint32_t rtp_timestamp,
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) override;
+      uint8_t* encoded) override;

   // AudioDecoder protected method.
   int DecodeInternal(const uint8_t* encoded,
@@ -184,11 +184,11 @@ int AudioEncoderDecoderIsacT<T>::Max10MsFramesInAPacket() const {
 }

 template <typename T>
-void AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderDecoderIsacT<T>::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CriticalSectionScoped cs_lock(lock_.get());
   if (!packet_in_progress_) {
     // Starting a new packet; remember the timestamp for later.
@@ -206,15 +206,17 @@ void AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
   // buffer. All we can do is check for an overrun after the fact.
   CHECK(static_cast<size_t>(r) <= max_encoded_bytes);

-  info->encoded_bytes = r;
   if (r == 0)
-    return;
+    return kZeroEncodedBytes;

   // Got enough input to produce a packet. Return the saved timestamp from
   // the first chunk of input that went into the packet.
   packet_in_progress_ = false;
-  info->encoded_timestamp = packet_timestamp_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = r;
+  info.encoded_timestamp = packet_timestamp_;
+  info.payload_type = payload_type_;
+  return info;
 }

 template <typename T>
@@ -29,12 +29,11 @@ class MockAudioEncoder : public AudioEncoder {
   MOCK_METHOD1(SetTargetBitrate, void(int));
   MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
   // Note, we explicitly chose not to create a mock for the Encode method.
-  MOCK_METHOD5(EncodeInternal,
-      void(uint32_t timestamp,
+  MOCK_METHOD4(EncodeInternal,
+      EncodedInfo(uint32_t timestamp,
           const int16_t* audio,
           size_t max_encoded_bytes,
-          uint8_t* encoded,
-          EncodedInfo* info));
+          uint8_t* encoded));
 };

 }  // namespace webrtc
@@ -183,19 +183,18 @@ void AudioEncoderOpus::SetProjectedPacketLossRate(double fraction) {
   }
 }

-void AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   if (input_buffer_.empty())
     first_timestamp_in_buffer_ = rtp_timestamp;
   input_buffer_.insert(input_buffer_.end(), audio,
                        audio + samples_per_10ms_frame_);
   if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
                               samples_per_10ms_frame_)) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }
   CHECK_EQ(input_buffer_.size(),
            static_cast<size_t>(num_10ms_frames_per_packet_) *
@@ -207,12 +206,13 @@ void AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
                              ClampInt16(max_encoded_bytes), encoded);
   CHECK_GE(r, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
-  info->encoded_bytes = r;
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
-  // Allows Opus to send empty packets.
-  info->send_even_if_empty = true;
-  info->speech = r > 0;
+  EncodedInfo info;
+  info.encoded_bytes = r;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.send_even_if_empty = true;  // Allows Opus to send empty packets.
+  info.speech = r > 0;
+  return info;
 }

 }  // namespace webrtc
@@ -58,11 +58,10 @@ class AudioEncoderOpus final : public AudioEncoder {
   bool dtx_enabled() const { return dtx_enabled_; }

  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) override;
+      uint8_t* encoded) override;

  private:
   const int num_10ms_frames_per_packet_;
@@ -60,48 +60,48 @@ void AudioEncoderCopyRed::SetProjectedPacketLossRate(double fraction) {
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }

-void AudioEncoderCopyRed::EncodeInternal(uint32_t rtp_timestamp,
-    const int16_t* audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded,
-    EncodedInfo* info) {
-  speech_encoder_->Encode(rtp_timestamp, audio,
-      static_cast<size_t>(SampleRateHz() / 100),
-      max_encoded_bytes, encoded, info);
+AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
+  EncodedInfo info = speech_encoder_->Encode(
+      rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
+      max_encoded_bytes, encoded);
   CHECK_GE(max_encoded_bytes,
-           info->encoded_bytes + secondary_info_.encoded_bytes);
-  CHECK(info->redundant.empty()) << "Cannot use nested redundant encoders.";
+           info.encoded_bytes + secondary_info_.encoded_bytes);
+  CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";

-  if (info->encoded_bytes > 0) {
+  if (info.encoded_bytes > 0) {
     // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
     // discarding the (empty) vector of redundant information. This is
     // intentional.
-    info->redundant.push_back(*info);
-    DCHECK_EQ(info->redundant.size(), 1u);
+    info.redundant.push_back(info);
+    DCHECK_EQ(info.redundant.size(), 1u);
     if (secondary_info_.encoded_bytes > 0) {
-      memcpy(&encoded[info->encoded_bytes], secondary_encoded_.get(),
+      memcpy(&encoded[info.encoded_bytes], secondary_encoded_.get(),
              secondary_info_.encoded_bytes);
-      info->redundant.push_back(secondary_info_);
-      DCHECK_EQ(info->redundant.size(), 2u);
+      info.redundant.push_back(secondary_info_);
+      DCHECK_EQ(info.redundant.size(), 2u);
     }
     // Save primary to secondary.
-    if (secondary_allocated_ < info->encoded_bytes) {
-      secondary_encoded_.reset(new uint8_t[info->encoded_bytes]);
-      secondary_allocated_ = info->encoded_bytes;
+    if (secondary_allocated_ < info.encoded_bytes) {
+      secondary_encoded_.reset(new uint8_t[info.encoded_bytes]);
+      secondary_allocated_ = info.encoded_bytes;
     }
     CHECK(secondary_encoded_);
-    memcpy(secondary_encoded_.get(), encoded, info->encoded_bytes);
-    secondary_info_ = *info;
-    DCHECK_EQ(info->speech, info->redundant[0].speech);
+    memcpy(secondary_encoded_.get(), encoded, info.encoded_bytes);
+    secondary_info_ = info;
+    DCHECK_EQ(info.speech, info.redundant[0].speech);
   }
   // Update main EncodedInfo.
-  info->payload_type = red_payload_type_;
-  info->encoded_bytes = 0;
-  for (std::vector<EncodedInfoLeaf>::const_iterator it =
-           info->redundant.begin();
-       it != info->redundant.end(); ++it) {
-    info->encoded_bytes += it->encoded_bytes;
+  info.payload_type = red_payload_type_;
+  info.encoded_bytes = 0;
+  for (std::vector<EncodedInfoLeaf>::const_iterator it = info.redundant.begin();
+       it != info.redundant.end(); ++it) {
+    info.encoded_bytes += it->encoded_bytes;
   }
+  return info;
 }

 }  // namespace webrtc
@@ -45,11 +45,10 @@ class AudioEncoderCopyRed : public AudioEncoder {
   void SetProjectedPacketLossRate(double fraction) override;

  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      EncodedInfo* info) override;
+      uint8_t* encoded) override;

  private:
   AudioEncoder* speech_encoder_;
@@ -60,9 +60,8 @@ class AudioEncoderCopyRedTest : public ::testing::Test {

   void Encode() {
     ASSERT_TRUE(red_.get() != NULL);
-    encoded_info_ = AudioEncoder::EncodedInfo();
-    red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
-        encoded_.size(), &encoded_[0], &encoded_info_);
+    encoded_info_ = red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
+        encoded_.size(), &encoded_[0]);
     timestamp_ += num_audio_samples_10ms;
   }

@@ -83,18 +82,16 @@ class MockEncodeHelper {
     memset(&info_, 0, sizeof(info_));
   }

-  void Encode(uint32_t timestamp,
+  AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
       const int16_t* audio,
       size_t max_encoded_bytes,
-      uint8_t* encoded,
-      AudioEncoder::EncodedInfo* info) {
+      uint8_t* encoded) {
     if (write_payload_) {
       CHECK(encoded);
      CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
      memcpy(encoded, payload_, info_.encoded_bytes);
    }
-    CHECK(info);
-    *info = info_;
+    return info_;
   }

   AudioEncoder::EncodedInfo info_;
@@ -144,7 +141,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
   InSequence s;
   MockFunction<void(int check_point_id)> check;
   for (int i = 1; i <= 6; ++i) {
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+        .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
     EXPECT_CALL(check, Call(i));
     Encode();
     check.Call(i);
@@ -153,13 +151,13 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {

 // Checks that no output is produced if the underlying codec doesn't emit any
 // new data, even if the RED codec is loaded with a secondary encoding.
-TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
+TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
   // Start with one Encode() call that will produce output.
   static const size_t kEncodedSize = 17;
-  AudioEncoder::EncodedInfo info;
-  info.encoded_bytes = kEncodedSize;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  AudioEncoder::EncodedInfo nonZeroEncodedBytes;
+  nonZeroEncodedBytes.encoded_bytes = kEncodedSize;
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(nonZeroEncodedBytes));
   Encode();
   // First call is a special case, since it does not include a secondary
   // payload.
@@ -167,16 +165,14 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
   EXPECT_EQ(kEncodedSize, encoded_info_.encoded_bytes);

   // Next call to the speech encoder will not produce any output.
-  info.encoded_bytes = 0;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(AudioEncoder::kZeroEncodedBytes));
   Encode();
   EXPECT_EQ(0u, encoded_info_.encoded_bytes);

   // Final call to the speech encoder will produce output.
-  info.encoded_bytes = kEncodedSize;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(nonZeroEncodedBytes));
   Encode();
   EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
   ASSERT_EQ(2u, encoded_info_.redundant.size());
@@ -192,8 +188,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
   for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
     AudioEncoder::EncodedInfo info;
     info.encoded_bytes = encode_size;
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-        .WillOnce(SetArgPointee<4>(info));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+        .WillOnce(Return(info));
   }

   // First call is a special case, since it does not include a secondary
@@ -218,7 +214,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
   helper.info_.encoded_bytes = 17;
   helper.info_.encoded_timestamp = timestamp_;
   uint32_t primary_timestamp = timestamp_;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
       .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));

   // First call is a special case, since it does not include a secondary
@@ -249,7 +245,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
     payload[i] = i;
   }
   helper.payload_ = payload;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
       .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));

   // First call is a special case, since it does not include a secondary
@@ -286,7 +282,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
   helper.info_.encoded_bytes = 17;
   const int primary_payload_type = red_payload_type_ + 1;
   helper.info_.payload_type = primary_payload_type;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
      .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));

   // First call is a special case, since it does not include a secondary
|
@ -234,8 +234,8 @@ void ACMGenericCodec::Encode(uint32_t input_timestamp,
|
|||||||
first_frame_ = false;
|
first_frame_ = false;
|
||||||
CHECK_EQ(audio_channel, encoder_->NumChannels());
|
CHECK_EQ(audio_channel, encoder_->NumChannels());
|
||||||
|
|
||||||
encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
|
*encoded_info = encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
|
||||||
2 * MAX_PAYLOAD_SIZE_BYTE, bitstream, encoded_info);
|
2 * MAX_PAYLOAD_SIZE_BYTE, bitstream);
|
||||||
*bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes);
|
*bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -150,9 +150,9 @@ class AudioDecoderTest : public ::testing::Test {
                                        samples_per_10ms, channels_,
                                        interleaved_input.get());

-      audio_encoder_->Encode(0, interleaved_input.get(),
-          audio_encoder_->SampleRateHz() / 100,
-          data_length_ * 2, output, &encoded_info_);
+      encoded_info_ = audio_encoder_->Encode(
+          0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100,
+          data_length_ * 2, output);
     }
     EXPECT_EQ(payload_type_, encoded_info_.payload_type);
     return static_cast<int>(encoded_info_.encoded_bytes);