Remove WebRtcACMEncodingType

The parameter was not needed; a bool indicating speech or non-speech
was sufficient. This change propagates to the InFrameType callback
function. Some tests are updated accordingly.

COAUTHOR=kwiberg@webrtc.org
R=minyue@webrtc.org
TBR=henrika@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/42209004

Cr-Commit-Position: refs/heads/master@{#8626}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8626 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
henrik.lundin@webrtc.org 2015-03-06 07:50:34 +00:00
parent 84f5309dbd
commit e9217b4bdb
12 changed files with 92 additions and 235 deletions

View File

@ -49,25 +49,6 @@ const int kIsacPacSize480 = 480;
const int kIsacPacSize960 = 960;
const int kIsacPacSize1440 = 1440;
// An encoded bit-stream is labeled by one of the following enumerators.
//
// kNoEncoding : There has been no encoding.
// kActiveNormalEncoded : Active audio frame coded by the codec.
// kPassiveNormalEncoded : Passive audio frame coded by the codec.
// kPassiveDTXNB : Passive audio frame coded by narrow-band CN.
// kPassiveDTXWB : Passive audio frame coded by wide-band CN.
// kPassiveDTXSWB : Passive audio frame coded by super-wide-band CN.
// kPassiveDTXFB : Passive audio frame coded by full-band CN.
enum WebRtcACMEncodingType {
kNoEncoding,
kActiveNormalEncoded,
kPassiveNormalEncoded,
kPassiveDTXNB,
kPassiveDTXWB,
kPassiveDTXSWB,
kPassiveDTXFB
};
// A structure which contains codec parameters. For instance, used when
// initializing encoder and decoder.
//

View File

@ -36,47 +36,27 @@ namespace webrtc {
namespace {
static const int kInvalidPayloadType = 255;
std::map<int, std::pair<int, WebRtcACMEncodingType>>::iterator
FindSampleRateInMap(
std::map<int, std::pair<int, WebRtcACMEncodingType>>* cng_pt_map,
int sample_rate_hz) {
std::map<int, int>::iterator FindSampleRateInMap(std::map<int, int>* cng_pt_map,
int sample_rate_hz) {
return find_if(cng_pt_map->begin(), cng_pt_map->end(),
[sample_rate_hz](decltype(*cng_pt_map->begin()) p) {
return p.second.first == sample_rate_hz;
});
return p.second == sample_rate_hz;
});
}
void SetCngPtInMap(
std::map<int, std::pair<int, WebRtcACMEncodingType>>* cng_pt_map,
int sample_rate_hz,
int payload_type) {
void SetCngPtInMap(std::map<int, int>* cng_pt_map,
int sample_rate_hz,
int payload_type) {
if (payload_type == kInvalidPayloadType)
return;
CHECK_GE(payload_type, 0);
CHECK_LT(payload_type, 128);
WebRtcACMEncodingType encoding_type;
switch (sample_rate_hz) {
case 8000:
encoding_type = kPassiveDTXNB;
break;
case 16000:
encoding_type = kPassiveDTXWB;
break;
case 32000:
encoding_type = kPassiveDTXSWB;
break;
case 48000:
encoding_type = kPassiveDTXFB;
break;
default:
FATAL() << "Unsupported frequency.";
}
auto pt_iter = FindSampleRateInMap(cng_pt_map, sample_rate_hz);
if (pt_iter != cng_pt_map->end()) {
// Remove item in map with sample_rate_hz.
cng_pt_map->erase(pt_iter);
}
(*cng_pt_map)[payload_type] = std::make_pair(sample_rate_hz, encoding_type);
(*cng_pt_map)[payload_type] = sample_rate_hz;
}
} // namespace
@ -230,14 +210,13 @@ CNG_dec_inst* AudioDecoderProxy::CngDecoderInstance() {
return decoder_->CngDecoderInstance();
}
int16_t ACMGenericCodec::Encode(uint32_t input_timestamp,
const int16_t* audio,
uint16_t length_per_channel,
uint8_t audio_channel,
uint8_t* bitstream,
int16_t* bitstream_len_byte,
WebRtcACMEncodingType* encoding_type,
AudioEncoder::EncodedInfo* encoded_info) {
void ACMGenericCodec::Encode(uint32_t input_timestamp,
const int16_t* audio,
uint16_t length_per_channel,
uint8_t audio_channel,
uint8_t* bitstream,
int16_t* bitstream_len_byte,
AudioEncoder::EncodedInfo* encoded_info) {
WriteLockScoped wl(codec_wrapper_lock_);
CHECK_EQ(length_per_channel, encoder_->SampleRateHz() / 100);
rtp_timestamp_ = first_frame_
@ -256,26 +235,6 @@ int16_t ACMGenericCodec::Encode(uint32_t input_timestamp,
encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
2 * MAX_PAYLOAD_SIZE_BYTE, bitstream, encoded_info);
*bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes);
if (encoded_info->encoded_bytes == 0) {
*encoding_type = kNoEncoding;
if (encoded_info->send_even_if_empty) {
bitstream[0] = 0;
return 1;
}
return 0;
}
int payload_type = encoded_info->payload_type;
if (!encoded_info->redundant.empty())
payload_type = encoded_info->redundant[0].payload_type;
auto cng_iter = cng_pt_.find(payload_type);
if (cng_iter == cng_pt_.end()) {
*encoding_type = kActiveNormalEncoded;
} else {
*encoding_type = cng_iter->second.second;
}
return *bitstream_len_byte;
}
int16_t ACMGenericCodec::EncoderParams(WebRtcACMCodecParams* enc_params) {

View File

@ -122,38 +122,15 @@ class ACMGenericCodec {
// -timestamp : contains the RTP timestamp, this is the
// sampling time of the first sample encoded
// (measured in number of samples).
// -encoding_type : contains the type of encoding applied on the
// audio samples. The alternatives are
// (c.f. acm_common_types.h)
// -kNoEncoding:
// there was not enough data to encode. or
// some error has happened that we could
// not do encoding.
// -kActiveNormalEncoded:
// the audio frame is active and encoded by
// the given codec.
// -kPassiveNormalEncoded:
// the audio frame is passive but coded with
// the given codec (NO DTX).
// -kPassiveDTXWB:
// The audio frame is passive and used
// wide-band CN to encode.
// -kPassiveDTXNB:
// The audio frame is passive and used
// narrow-band CN to encode.
//
// Return value:
// -1 if error is occurred, otherwise the length of the bit-stream in
// bytes.
//
int16_t Encode(uint32_t input_timestamp,
const int16_t* audio,
uint16_t length_per_channel,
uint8_t audio_channel,
uint8_t* bitstream,
int16_t* bitstream_len_byte,
WebRtcACMEncodingType* encoding_type,
AudioEncoder::EncodedInfo* encoded_info);
void Encode(uint32_t input_timestamp,
const int16_t* audio,
uint16_t length_per_channel,
uint8_t audio_channel,
uint8_t* bitstream,
int16_t* bitstream_len_byte,
AudioEncoder::EncodedInfo* encoded_info);
///////////////////////////////////////////////////////////////////////////
// bool EncoderInitialized();
@ -487,9 +464,8 @@ class ACMGenericCodec {
bool first_frame_ GUARDED_BY(codec_wrapper_lock_);
uint32_t rtp_timestamp_ GUARDED_BY(codec_wrapper_lock_);
uint32_t last_rtp_timestamp_ GUARDED_BY(codec_wrapper_lock_);
// Map from payload type to sample rate (Hz) and encoding type.
std::map<int, std::pair<int, WebRtcACMEncodingType>> cng_pt_
GUARDED_BY(codec_wrapper_lock_);
// Map from payload type to CNG sample rate (Hz).
std::map<int, int> cng_pt_ GUARDED_BY(codec_wrapper_lock_);
int red_payload_type_ GUARDED_BY(codec_wrapper_lock_);
OpusApplicationMode opus_application_ GUARDED_BY(codec_wrapper_lock_);
bool opus_application_set_ GUARDED_BY(codec_wrapper_lock_);

View File

@ -39,21 +39,16 @@ class AcmGenericCodecTest : public ::testing::Test {
ASSERT_EQ(0, codec_->InitEncoder(&acm_codec_params_, true));
}
void EncodeAndVerify(int expected_return_val,
size_t expected_out_length,
void EncodeAndVerify(size_t expected_out_length,
uint32_t expected_timestamp,
WebRtcACMEncodingType expected_encoding_type,
int expected_payload_type,
int expected_send_even_if_empty) {
uint8_t out[kDataLengthSamples];
int16_t out_length;
WebRtcACMEncodingType encoding_type;
AudioEncoder::EncodedInfo encoded_info;
EXPECT_EQ(expected_return_val,
codec_->Encode(timestamp_, kZeroData, kDataLengthSamples, 1, out,
&out_length,&encoding_type, &encoded_info));
codec_->Encode(timestamp_, kZeroData, kDataLengthSamples, 1, out,
&out_length, &encoded_info);
timestamp_ += kDataLengthSamples;
EXPECT_EQ(expected_encoding_type, encoding_type);
EXPECT_TRUE(encoded_info.redundant.empty());
EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
EXPECT_EQ(expected_out_length, rtc::checked_cast<size_t>(out_length));
@ -75,35 +70,36 @@ class AcmGenericCodecTest : public ::testing::Test {
// (which is signaled as 0 bytes output of type kNoEncoding). The next encode
// call should produce one SID frame of 9 bytes. The third call should not
// result in any output (just like the first one). The fourth and final encode
// call should produce an "empty frame", which is like no output, but with the
// return value from the encode function being 1. (The reason to produce an
// empty frame is to drive sending of DTMF packets in the RTP/RTCP module.)
// call should produce an "empty frame", which is like no output, but with
// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
// module.)
TEST_F(AcmGenericCodecTest, VerifyCngFrames) {
CreateCodec();
uint32_t expected_timestamp = timestamp_;
// Verify no frame.
{
SCOPED_TRACE("First encoding");
EncodeAndVerify(0, 0, expected_timestamp, kNoEncoding, -1, -1);
EncodeAndVerify(0, expected_timestamp, -1, -1);
}
// Verify SID frame delivered.
{
SCOPED_TRACE("Second encoding");
EncodeAndVerify(9, 9, expected_timestamp, kPassiveDTXNB, kCngPt, 1);
EncodeAndVerify(9, expected_timestamp, kCngPt, 1);
}
// Verify no frame.
{
SCOPED_TRACE("Third encoding");
EncodeAndVerify(0, 0, expected_timestamp, kNoEncoding, -1, -1);
EncodeAndVerify(0, expected_timestamp, -1, -1);
}
// Verify NoEncoding.
expected_timestamp += 2 * kDataLengthSamples;
{
SCOPED_TRACE("Fourth encoding");
EncodeAndVerify(1, 0, expected_timestamp, kNoEncoding, kCngPt, 1);
EncodeAndVerify(0, expected_timestamp, kCngPt, 1);
}
}

View File

@ -223,8 +223,6 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
// TODO(turajs): |length_bytes| & |red_length_bytes| can be of type int if
// ACMGenericCodec::Encode() & ACMGenericCodec::GetRedPayload() allows.
int16_t length_bytes = 2 * MAX_PAYLOAD_SIZE_BYTE;
int status;
WebRtcACMEncodingType encoding_type;
FrameType frame_type = kAudioFrameSpeech;
uint8_t current_payload_type = 0;
bool has_data_to_send = false;
@ -238,52 +236,24 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
if (!HaveValidEncoder("Process")) {
return -1;
}
status = codecs_[current_send_codec_idx_]->Encode(
codecs_[current_send_codec_idx_]->Encode(
input_data.input_timestamp, input_data.audio,
input_data.length_per_channel, input_data.audio_channel, stream,
&length_bytes, &encoding_type, &encoded_info);
if (status < 0) {
// Encode failed.
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Process(): Encoding Failed");
length_bytes = 0;
return -1;
} else if (status == 0) {
&length_bytes, &encoded_info);
if (encoded_info.encoded_bytes == 0 && !encoded_info.send_even_if_empty) {
// Not enough data.
return 0;
} else {
switch (encoding_type) {
case kNoEncoding: {
current_payload_type = previous_pltype_;
frame_type = kFrameEmpty;
length_bytes = 0;
break;
}
case kActiveNormalEncoded:
case kPassiveNormalEncoded: {
frame_type = kAudioFrameSpeech;
break;
}
case kPassiveDTXNB: {
frame_type = kAudioFrameCN;
break;
}
case kPassiveDTXWB: {
frame_type = kAudioFrameCN;
break;
}
case kPassiveDTXSWB: {
frame_type = kAudioFrameCN;
break;
}
case kPassiveDTXFB: {
frame_type = kAudioFrameCN;
break;
}
if (encoded_info.encoded_bytes == 0 && encoded_info.send_even_if_empty) {
frame_type = kFrameEmpty;
current_payload_type = previous_pltype_;
} else {
DCHECK_GT(encoded_info.encoded_bytes, 0u);
frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
current_payload_type = encoded_info.payload_type;
previous_pltype_ = current_payload_type;
}
has_data_to_send = true;
current_payload_type = encoded_info.payload_type;
previous_pltype_ = current_payload_type;
ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
}
@ -308,7 +278,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
if (vad_callback_ != NULL) {
// Callback with VAD decision.
vad_callback_->InFrameType(static_cast<int16_t>(encoding_type));
vad_callback_->InFrameType(frame_type);
}
}
return length_bytes;

View File

@ -58,7 +58,7 @@ class ACMVADCallback {
public:
virtual ~ACMVADCallback() {}
virtual int32_t InFrameType(int16_t frameType) = 0;
virtual int32_t InFrameType(FrameType frame_type) = 0;
};
// Callback class used for reporting receiver statistics

View File

@ -37,21 +37,18 @@ ActivityMonitor::ActivityMonitor() {
ResetStatistics();
}
int32_t ActivityMonitor::InFrameType(int16_t frame_type) {
int32_t ActivityMonitor::InFrameType(FrameType frame_type) {
counter_[frame_type]++;
return 0;
}
void ActivityMonitor::PrintStatistics() {
printf("\n");
printf("kActiveNormalEncoded kPassiveNormalEncoded kPassiveDTXWB ");
printf("kPassiveDTXNB kPassiveDTXSWB kNoEncoding\n");
printf("%19u", counter_[1]);
printf("%22u", counter_[2]);
printf("%14u", counter_[3]);
printf("%14u", counter_[4]);
printf("%14u", counter_[5]);
printf("%11u", counter_[0]);
printf("kFrameEmpty %u\n", counter_[kFrameEmpty]);
printf("kAudioFrameSpeech %u\n", counter_[kAudioFrameSpeech]);
printf("kAudioFrameCN %u\n", counter_[kAudioFrameCN]);
printf("kVideoFrameKey %u\n", counter_[kVideoFrameKey]);
printf("kVideoFrameDelta %u\n", counter_[kVideoFrameDelta]);
printf("\n\n");
}
@ -71,7 +68,6 @@ TestVadDtx::TestVadDtx()
EXPECT_EQ(0, acm_send_->RegisterTransportCallback(channel_.get()));
channel_->RegisterReceiverACM(acm_receive_.get());
EXPECT_EQ(0, acm_send_->RegisterVADCallback(monitor_.get()));
assert(monitor_->kPacketTypes == this->kPacketTypes);
}
void TestVadDtx::RegisterCodec(CodecInst codec_param) {
@ -118,22 +114,19 @@ void TestVadDtx::Run(std::string in_filename, int frequency, int channels,
monitor_->PrintStatistics();
#endif
uint32_t stats[kPacketTypes];
uint32_t stats[5];
monitor_->GetStatistics(stats);
monitor_->ResetStatistics();
for (int i = 0; i < kPacketTypes; i++) {
for (const auto& st : stats) {
int i = &st - stats; // Calculate the current position in stats.
switch (expects[i]) {
case 0: {
EXPECT_EQ(static_cast<uint32_t>(0), stats[i]) << "stats["
<< i
<< "] error.";
EXPECT_EQ(0u, st) << "stats[" << i << "] error.";
break;
}
case 1: {
EXPECT_GT(stats[i], static_cast<uint32_t>(0)) << "stats["
<< i
<< "] error.";
EXPECT_GT(st, 0u) << "stats[" << i << "] error.";
break;
}
}
@ -198,14 +191,7 @@ void TestWebRtcVadDtx::RunTestCases() {
// Set the expectation and run the test.
void TestWebRtcVadDtx::Test(bool new_outfile) {
int expects[kPacketTypes];
int frequency = acm_send_->SendFrequency();
expects[0] = -1; // Do not care.
expects[1] = 1;
expects[2] = vad_enabled_ && !use_webrtc_dtx_;
expects[3] = use_webrtc_dtx_ && (frequency == 8000);
expects[4] = use_webrtc_dtx_ && (frequency == 16000);
expects[5] = use_webrtc_dtx_ && (frequency == 32000);
int expects[] = {-1, 1, use_webrtc_dtx_, 0, 0};
if (new_outfile) {
output_file_num_++;
}
@ -251,7 +237,7 @@ void TestWebRtcVadDtx::SetVAD(bool enable_dtx, bool enable_vad,
// Following is the implementation of TestOpusDtx.
void TestOpusDtx::Perform() {
#ifdef WEBRTC_CODEC_OPUS
int expects[kPacketTypes] = {0, 1, 0, 0, 0, 0};
int expects[] = {0, 1, 0, 0, 0};
// Register Opus as send codec
std::string out_filename = webrtc::test::OutputPath() +
@ -263,7 +249,7 @@ void TestOpusDtx::Perform() {
32000, 1, out_filename, false, expects);
EXPECT_EQ(0, acm_send_->EnableOpusDtx());
expects[0] = 1;
expects[kFrameEmpty] = 1;
Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
32000, 1, out_filename, true, expects);
@ -271,7 +257,7 @@ void TestOpusDtx::Perform() {
out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
RegisterCodec(kOpusStereo);
EXPECT_EQ(0, acm_send_->DisableOpusDtx());
expects[0] = 0;
expects[kFrameEmpty] = 0;
Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
32000, 2, out_filename, false, expects);
@ -279,7 +265,7 @@ void TestOpusDtx::Perform() {
EXPECT_EQ(0, acm_send_->SetOpusApplication(kVoip));
EXPECT_EQ(0, acm_send_->EnableOpusDtx());
expects[0] = 1;
expects[kFrameEmpty] = 1;
Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
32000, 2, out_filename, true, expects);
#endif

View File

@ -23,22 +23,18 @@ namespace webrtc {
class ActivityMonitor : public ACMVADCallback {
public:
static const int kPacketTypes = 6;
ActivityMonitor();
int32_t InFrameType(int16_t frame_type);
int32_t InFrameType(FrameType frame_type);
void PrintStatistics();
void ResetStatistics();
void GetStatistics(uint32_t* stats);
private:
// Counting according to
// counter_[0] - kNoEncoding,
// counter_[1] - kActiveNormalEncoded,
// counter_[2] - kPassiveNormalEncoded,
// counter_[3] - kPassiveDTXNB,
// counter_[4] - kPassiveDTXWB,
// counter_[5] - kPassiveDTXSWB
uint32_t counter_[kPacketTypes];
// 0 - kFrameEmpty
// 1 - kAudioFrameSpeech
// 2 - kAudioFrameCN
// 3 - kVideoFrameKey (not used by audio)
// 4 - kVideoFrameDelta (not used by audio)
uint32_t counter_[5];
};
@ -49,7 +45,6 @@ class ActivityMonitor : public ACMVADCallback {
class TestVadDtx : public ACMTest {
public:
static const int kOutputFreqHz = 16000;
static const int kPacketTypes = 6;
TestVadDtx();
@ -65,12 +60,11 @@ class TestVadDtx : public ACMTest {
// 0 : there have been no packets of type |x|,
// 1 : there have been packets of type |x|,
// with |x| indicates the following packet types
// 0 - kNoEncoding
// 1 - kActiveNormalEncoded
// 2 - kPassiveNormalEncoded
// 3 - kPassiveDTXNB
// 4 - kPassiveDTXWB
// 5 - kPassiveDTXSWB
// 0 - kFrameEmpty
// 1 - kAudioFrameSpeech
// 2 - kAudioFrameCN
// 3 - kVideoFrameKey (not used by audio)
// 4 - kVideoFrameDelta (not used by audio)
void Run(std::string in_filename, int frequency, int channels,
std::string out_filename, bool append, const int* expects);

View File

@ -13,6 +13,7 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common.h"
@ -305,28 +306,23 @@ void DTMFDetector::PrintDetectedDigits() {
}
void VADCallback::Reset() {
for (int n = 0; n < 6; n++) {
_numFrameTypes[n] = 0;
}
memset(_numFrameTypes, 0, sizeof(_numFrameTypes));
}
VADCallback::VADCallback() {
for (int n = 0; n < 6; n++) {
_numFrameTypes[n] = 0;
}
memset(_numFrameTypes, 0, sizeof(_numFrameTypes));
}
void VADCallback::PrintFrameTypes() {
fprintf(stdout, "No encoding.................. %d\n", _numFrameTypes[0]);
fprintf(stdout, "Active normal encoded........ %d\n", _numFrameTypes[1]);
fprintf(stdout, "Passive normal encoded....... %d\n", _numFrameTypes[2]);
fprintf(stdout, "Passive DTX wideband......... %d\n", _numFrameTypes[3]);
fprintf(stdout, "Passive DTX narrowband....... %d\n", _numFrameTypes[4]);
fprintf(stdout, "Passive DTX super-wideband... %d\n", _numFrameTypes[5]);
printf("kFrameEmpty......... %d\n", _numFrameTypes[kFrameEmpty]);
printf("kAudioFrameSpeech... %d\n", _numFrameTypes[kAudioFrameSpeech]);
printf("kAudioFrameCN....... %d\n", _numFrameTypes[kAudioFrameCN]);
printf("kVideoFrameKey...... %d\n", _numFrameTypes[kVideoFrameKey]);
printf("kVideoFrameDelta.... %d\n", _numFrameTypes[kVideoFrameDelta]);
}
int32_t VADCallback::InFrameType(int16_t frameType) {
_numFrameTypes[frameType]++;
int32_t VADCallback::InFrameType(FrameType frame_type) {
_numFrameTypes[frame_type]++;
return 0;
}

View File

@ -134,13 +134,13 @@ class VADCallback : public ACMVADCallback {
~VADCallback() {
}
int32_t InFrameType(int16_t frameType);
int32_t InFrameType(FrameType frame_type);
void PrintFrameTypes();
void Reset();
private:
uint32_t _numFrameTypes[6];
uint32_t _numFrameTypes[5];
};
void UseLegacyAcm(webrtc::Config* config);

View File

@ -195,14 +195,13 @@ Channel::SendData(FrameType frameType,
}
int32_t
Channel::InFrameType(int16_t frameType)
Channel::InFrameType(FrameType frame_type)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::InFrameType(frameType=%d)", frameType);
"Channel::InFrameType(frame_type=%d)", frame_type);
CriticalSectionScoped cs(&_callbackCritSect);
// 1 indicates speech
_sendFrameType = (frameType == 1) ? 1 : 0;
_sendFrameType = (frame_type == kAudioFrameSpeech);
return 0;
}

View File

@ -353,7 +353,7 @@ public:
const RTPFragmentationHeader* fragmentation) override;
// From ACMVADCallback in the ACM
int32_t InFrameType(int16_t frameType) override;
int32_t InFrameType(FrameType frame_type) override;
int32_t OnRxVadDetected(int vadDecision);