Remove WebRtcACMEncodingType

The parameter was not needed; a bool indicating speech or non-speech is
sufficient. This change propagates to the InFrameType callback function. Some
tests are updated too.

COAUTHOR=kwiberg@webrtc.org
R=minyue@webrtc.org
TBR=henrika@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/42209004

Cr-Commit-Position: refs/heads/master@{#8626}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8626 4adac7df-926f-26a2-2b94-8c16560cd09d
henrik.lundin@webrtc.org
2015-03-06 07:50:34 +00:00
parent 84f5309dbd
commit e9217b4bdb
12 changed files with 92 additions and 235 deletions
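In essence, the sender now derives the outgoing RTP frame type directly from AudioEncoder::EncodedInfo instead of mapping a WebRtcACMEncodingType. A minimal sketch of the new decision logic, mirroring what the diffs below add to AudioCodingModuleImpl::Encode() (the helper function itself is hypothetical, and the include paths are assumed for this revision):

// Hypothetical helper; not part of this change. Returns false when there is
// nothing to send at all, and otherwise reports the frame type to use.
#include "webrtc/common_types.h"                               // FrameType
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"  // EncodedInfo

namespace webrtc {

bool DeriveFrameType(const AudioEncoder::EncodedInfo& info,
                     FrameType* frame_type) {
  if (info.encoded_bytes == 0) {
    if (!info.send_even_if_empty)
      return false;             // Not enough data; nothing to send.
    *frame_type = kFrameEmpty;  // Empty frames drive DTMF in RTP/RTCP.
    return true;
  }
  // The old enum distinguished NB/WB/SWB/FB comfort noise; the single bool
  // EncodedInfo::speech is now enough to pick the RTP frame type.
  *frame_type = info.speech ? kAudioFrameSpeech : kAudioFrameCN;
  return true;
}

}  // namespace webrtc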

View File

@@ -49,25 +49,6 @@ const int kIsacPacSize480 = 480;
 const int kIsacPacSize960 = 960;
 const int kIsacPacSize1440 = 1440;
 
-// An encoded bit-stream is labeled by one of the following enumerators.
-//
-//   kNoEncoding              : There has been no encoding.
-//   kActiveNormalEncoded     : Active audio frame coded by the codec.
-//   kPassiveNormalEncoded    : Passive audio frame coded by the codec.
-//   kPassiveDTXNB            : Passive audio frame coded by narrow-band CN.
-//   kPassiveDTXWB            : Passive audio frame coded by wide-band CN.
-//   kPassiveDTXSWB           : Passive audio frame coded by super-wide-band CN.
-//   kPassiveDTXFB            : Passive audio frame coded by full-band CN.
-enum WebRtcACMEncodingType {
-  kNoEncoding,
-  kActiveNormalEncoded,
-  kPassiveNormalEncoded,
-  kPassiveDTXNB,
-  kPassiveDTXWB,
-  kPassiveDTXSWB,
-  kPassiveDTXFB
-};
-
 // A structure which contains codec parameters. For instance, used when
 // initializing encoder and decoder.
 //

View File

@@ -36,47 +36,27 @@ namespace webrtc {
 namespace {
 static const int kInvalidPayloadType = 255;
 
-std::map<int, std::pair<int, WebRtcACMEncodingType>>::iterator
-FindSampleRateInMap(
-    std::map<int, std::pair<int, WebRtcACMEncodingType>>* cng_pt_map,
-    int sample_rate_hz) {
+std::map<int, int>::iterator FindSampleRateInMap(std::map<int, int>* cng_pt_map,
+                                                 int sample_rate_hz) {
   return find_if(cng_pt_map->begin(), cng_pt_map->end(),
                  [sample_rate_hz](decltype(*cng_pt_map->begin()) p) {
-                   return p.second.first == sample_rate_hz;
+                   return p.second == sample_rate_hz;
                  });
 }
 
-void SetCngPtInMap(
-    std::map<int, std::pair<int, WebRtcACMEncodingType>>* cng_pt_map,
-    int sample_rate_hz,
-    int payload_type) {
+void SetCngPtInMap(std::map<int, int>* cng_pt_map,
+                   int sample_rate_hz,
+                   int payload_type) {
   if (payload_type == kInvalidPayloadType)
     return;
   CHECK_GE(payload_type, 0);
   CHECK_LT(payload_type, 128);
-  WebRtcACMEncodingType encoding_type;
-  switch (sample_rate_hz) {
-    case 8000:
-      encoding_type = kPassiveDTXNB;
-      break;
-    case 16000:
-      encoding_type = kPassiveDTXWB;
-      break;
-    case 32000:
-      encoding_type = kPassiveDTXSWB;
-      break;
-    case 48000:
-      encoding_type = kPassiveDTXFB;
-      break;
-    default:
-      FATAL() << "Unsupported frequency.";
-  }
   auto pt_iter = FindSampleRateInMap(cng_pt_map, sample_rate_hz);
   if (pt_iter != cng_pt_map->end()) {
     // Remove item in map with sample_rate_hz.
     cng_pt_map->erase(pt_iter);
   }
-  (*cng_pt_map)[payload_type] = std::make_pair(sample_rate_hz, encoding_type);
+  (*cng_pt_map)[payload_type] = sample_rate_hz;
 }
 
 }  // namespace
@@ -230,14 +210,13 @@ CNG_dec_inst* AudioDecoderProxy::CngDecoderInstance() {
   return decoder_->CngDecoderInstance();
 }
 
-int16_t ACMGenericCodec::Encode(uint32_t input_timestamp,
-                                const int16_t* audio,
-                                uint16_t length_per_channel,
-                                uint8_t audio_channel,
-                                uint8_t* bitstream,
-                                int16_t* bitstream_len_byte,
-                                WebRtcACMEncodingType* encoding_type,
-                                AudioEncoder::EncodedInfo* encoded_info) {
+void ACMGenericCodec::Encode(uint32_t input_timestamp,
+                             const int16_t* audio,
+                             uint16_t length_per_channel,
+                             uint8_t audio_channel,
+                             uint8_t* bitstream,
+                             int16_t* bitstream_len_byte,
+                             AudioEncoder::EncodedInfo* encoded_info) {
   WriteLockScoped wl(codec_wrapper_lock_);
   CHECK_EQ(length_per_channel, encoder_->SampleRateHz() / 100);
   rtp_timestamp_ = first_frame_
@@ -256,26 +235,6 @@ int16_t ACMGenericCodec::Encode(uint32_t input_timestamp,
   encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
                    2 * MAX_PAYLOAD_SIZE_BYTE, bitstream, encoded_info);
   *bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes);
-  if (encoded_info->encoded_bytes == 0) {
-    *encoding_type = kNoEncoding;
-    if (encoded_info->send_even_if_empty) {
-      bitstream[0] = 0;
-      return 1;
-    }
-    return 0;
-  }
-
-  int payload_type = encoded_info->payload_type;
-  if (!encoded_info->redundant.empty())
-    payload_type = encoded_info->redundant[0].payload_type;
-
-  auto cng_iter = cng_pt_.find(payload_type);
-  if (cng_iter == cng_pt_.end()) {
-    *encoding_type = kActiveNormalEncoded;
-  } else {
-    *encoding_type = cng_iter->second.second;
-  }
-  return *bitstream_len_byte;
 }
 
 int16_t ACMGenericCodec::EncoderParams(WebRtcACMCodecParams* enc_params) {
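After this change, cng_pt_ is a plain payload-type-to-sample-rate lookup, and FindSampleRateInMap() searches it by value. A standalone sketch of how the simplified map behaves (the payload-type numbers are illustrative, not taken from the patch):

// Illustration only: the simplified CNG map, RTP payload type -> rate (Hz).
#include <algorithm>
#include <cassert>
#include <map>

int main() {
  std::map<int, int> cng_pt_map;
  cng_pt_map[13] = 8000;   // e.g. static narrow-band CN payload type
  cng_pt_map[98] = 16000;  // e.g. dynamic wide-band CN payload type

  // SetCngPtInMap() first erases any entry with the same sample rate, so
  // re-registering 16000 Hz under payload type 99 leaves one WB entry.
  auto it = std::find_if(cng_pt_map.begin(), cng_pt_map.end(),
                         [](const std::pair<const int, int>& p) {
                           return p.second == 16000;
                         });
  if (it != cng_pt_map.end())
    cng_pt_map.erase(it);
  cng_pt_map[99] = 16000;

  assert(cng_pt_map.size() == 2);
  assert(cng_pt_map.count(98) == 0);
  return 0;
}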

View File

@@ -122,38 +122,15 @@ class ACMGenericCodec {
   //   -timestamp          : contains the RTP timestamp, this is the
   //                         sampling time of the first sample encoded
   //                         (measured in number of samples).
-  //   -encoding_type      : contains the type of encoding applied on the
-  //                         audio samples. The alternatives are
-  //                         (c.f. acm_common_types.h)
-  //                         -kNoEncoding:
-  //                            there was not enough data to encode. or
-  //                            some error has happened that we could
-  //                            not do encoding.
-  //                         -kActiveNormalEncoded:
-  //                            the audio frame is active and encoded by
-  //                            the given codec.
-  //                         -kPassiveNormalEncoded:
-  //                            the audio frame is passive but coded with
-  //                            the given codec (NO DTX).
-  //                         -kPassiveDTXWB:
-  //                            The audio frame is passive and used
-  //                            wide-band CN to encode.
-  //                         -kPassiveDTXNB:
-  //                            The audio frame is passive and used
-  //                            narrow-band CN to encode.
   //
-  // Return value:
-  //   -1 if error is occurred, otherwise the length of the bit-stream in
-  //   bytes.
   //
-  int16_t Encode(uint32_t input_timestamp,
-                 const int16_t* audio,
-                 uint16_t length_per_channel,
-                 uint8_t audio_channel,
-                 uint8_t* bitstream,
-                 int16_t* bitstream_len_byte,
-                 WebRtcACMEncodingType* encoding_type,
-                 AudioEncoder::EncodedInfo* encoded_info);
+  void Encode(uint32_t input_timestamp,
+              const int16_t* audio,
+              uint16_t length_per_channel,
+              uint8_t audio_channel,
+              uint8_t* bitstream,
+              int16_t* bitstream_len_byte,
+              AudioEncoder::EncodedInfo* encoded_info);
 
   ///////////////////////////////////////////////////////////////////////////
   // bool EncoderInitialized();
@@ -487,9 +464,8 @@ class ACMGenericCodec {
   bool first_frame_ GUARDED_BY(codec_wrapper_lock_);
   uint32_t rtp_timestamp_ GUARDED_BY(codec_wrapper_lock_);
   uint32_t last_rtp_timestamp_ GUARDED_BY(codec_wrapper_lock_);
-  // Map from payload type to sample rate (Hz) and encoding type.
-  std::map<int, std::pair<int, WebRtcACMEncodingType>> cng_pt_
-      GUARDED_BY(codec_wrapper_lock_);
+  // Map from payload type to CNG sample rate (Hz).
+  std::map<int, int> cng_pt_ GUARDED_BY(codec_wrapper_lock_);
   int red_payload_type_ GUARDED_BY(codec_wrapper_lock_);
   OpusApplicationMode opus_application_ GUARDED_BY(codec_wrapper_lock_);
   bool opus_application_set_ GUARDED_BY(codec_wrapper_lock_);

View File

@@ -39,21 +39,16 @@ class AcmGenericCodecTest : public ::testing::Test {
     ASSERT_EQ(0, codec_->InitEncoder(&acm_codec_params_, true));
   }
 
-  void EncodeAndVerify(int expected_return_val,
-                       size_t expected_out_length,
+  void EncodeAndVerify(size_t expected_out_length,
                        uint32_t expected_timestamp,
-                       WebRtcACMEncodingType expected_encoding_type,
                        int expected_payload_type,
                        int expected_send_even_if_empty) {
     uint8_t out[kDataLengthSamples];
     int16_t out_length;
-    WebRtcACMEncodingType encoding_type;
     AudioEncoder::EncodedInfo encoded_info;
-    EXPECT_EQ(expected_return_val,
-              codec_->Encode(timestamp_, kZeroData, kDataLengthSamples, 1, out,
-                             &out_length, &encoding_type, &encoded_info));
+    codec_->Encode(timestamp_, kZeroData, kDataLengthSamples, 1, out,
+                   &out_length, &encoded_info);
     timestamp_ += kDataLengthSamples;
-    EXPECT_EQ(expected_encoding_type, encoding_type);
     EXPECT_TRUE(encoded_info.redundant.empty());
     EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
     EXPECT_EQ(expected_out_length, rtc::checked_cast<size_t>(out_length));
@@ -75,35 +70,36 @@ class AcmGenericCodecTest : public ::testing::Test {
 // (which is signaled as 0 bytes output of type kNoEncoding). The next encode
 // call should produce one SID frame of 9 bytes. The third call should not
 // result in any output (just like the first one). The fourth and final encode
-// call should produce an "empty frame", which is like no output, but with the
-// return value from the encode function being 1. (The reason to produce an
-// empty frame is to drive sending of DTMF packets in the RTP/RTCP module.)
+// call should produce an "empty frame", which is like no output, but with
+// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
+// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
+// module.)
 TEST_F(AcmGenericCodecTest, VerifyCngFrames) {
   CreateCodec();
   uint32_t expected_timestamp = timestamp_;
 
   // Verify no frame.
   {
     SCOPED_TRACE("First encoding");
-    EncodeAndVerify(0, 0, expected_timestamp, kNoEncoding, -1, -1);
+    EncodeAndVerify(0, expected_timestamp, -1, -1);
   }
 
   // Verify SID frame delivered.
   {
     SCOPED_TRACE("Second encoding");
-    EncodeAndVerify(9, 9, expected_timestamp, kPassiveDTXNB, kCngPt, 1);
+    EncodeAndVerify(9, expected_timestamp, kCngPt, 1);
   }
 
   // Verify no frame.
   {
     SCOPED_TRACE("Third encoding");
-    EncodeAndVerify(0, 0, expected_timestamp, kNoEncoding, -1, -1);
+    EncodeAndVerify(0, expected_timestamp, -1, -1);
   }
 
   // Verify NoEncoding.
   expected_timestamp += 2 * kDataLengthSamples;
   {
     SCOPED_TRACE("Fourth encoding");
-    EncodeAndVerify(1, 0, expected_timestamp, kNoEncoding, kCngPt, 1);
+    EncodeAndVerify(0, expected_timestamp, kCngPt, 1);
   }
 }

View File

@@ -223,8 +223,6 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   // TODO(turajs): |length_bytes| & |red_length_bytes| can be of type int if
   // ACMGenericCodec::Encode() & ACMGenericCodec::GetRedPayload() allows.
   int16_t length_bytes = 2 * MAX_PAYLOAD_SIZE_BYTE;
-  int status;
-  WebRtcACMEncodingType encoding_type;
   FrameType frame_type = kAudioFrameSpeech;
   uint8_t current_payload_type = 0;
   bool has_data_to_send = false;
@@ -238,52 +236,24 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
     if (!HaveValidEncoder("Process")) {
       return -1;
     }
 
-    status = codecs_[current_send_codec_idx_]->Encode(
+    codecs_[current_send_codec_idx_]->Encode(
         input_data.input_timestamp, input_data.audio,
         input_data.length_per_channel, input_data.audio_channel, stream,
-        &length_bytes, &encoding_type, &encoded_info);
-    if (status < 0) {
-      // Encode failed.
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
-                   "Process(): Encoding Failed");
-      length_bytes = 0;
-      return -1;
-    } else if (status == 0) {
+        &length_bytes, &encoded_info);
+    if (encoded_info.encoded_bytes == 0 && !encoded_info.send_even_if_empty) {
       // Not enough data.
       return 0;
     } else {
-      switch (encoding_type) {
-        case kNoEncoding: {
-          current_payload_type = previous_pltype_;
-          frame_type = kFrameEmpty;
-          length_bytes = 0;
-          break;
-        }
-        case kActiveNormalEncoded:
-        case kPassiveNormalEncoded: {
-          frame_type = kAudioFrameSpeech;
-          break;
-        }
-        case kPassiveDTXNB: {
-          frame_type = kAudioFrameCN;
-          break;
-        }
-        case kPassiveDTXWB: {
-          frame_type = kAudioFrameCN;
-          break;
-        }
-        case kPassiveDTXSWB: {
-          frame_type = kAudioFrameCN;
-          break;
-        }
-        case kPassiveDTXFB: {
-          frame_type = kAudioFrameCN;
-          break;
-        }
+      if (encoded_info.encoded_bytes == 0 && encoded_info.send_even_if_empty) {
+        frame_type = kFrameEmpty;
+        current_payload_type = previous_pltype_;
+      } else {
+        DCHECK_GT(encoded_info.encoded_bytes, 0u);
+        frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
+        current_payload_type = encoded_info.payload_type;
+        previous_pltype_ = current_payload_type;
       }
       has_data_to_send = true;
-      current_payload_type = encoded_info.payload_type;
-      previous_pltype_ = current_payload_type;
       ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
     }
@@ -308,7 +278,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
 
     if (vad_callback_ != NULL) {
       // Callback with VAD decision.
-      vad_callback_->InFrameType(static_cast<int16_t>(encoding_type));
+      vad_callback_->InFrameType(frame_type);
     }
   }
   return length_bytes;

View File

@@ -58,7 +58,7 @@ class ACMVADCallback {
  public:
   virtual ~ACMVADCallback() {}
 
-  virtual int32_t InFrameType(int16_t frameType) = 0;
+  virtual int32_t InFrameType(FrameType frame_type) = 0;
 };
 
 // Callback class used for reporting receiver statistics
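Implementers of ACMVADCallback now receive a typed FrameType rather than a raw int16_t. A hypothetical implementation under the new signature (the class name is invented; FrameType and ACMVADCallback are assumed visible from the usual ACM headers):

// Hypothetical callback; tracks whether the last frame carried speech.
class SpeechActivityTracker : public webrtc::ACMVADCallback {
 public:
  int32_t InFrameType(webrtc::FrameType frame_type) override {
    // Only kAudioFrameSpeech is active speech; kAudioFrameCN and
    // kFrameEmpty are DTX/CN output (cf. Channel::InFrameType() below).
    last_was_speech_ = (frame_type == webrtc::kAudioFrameSpeech);
    return 0;
  }
  bool last_was_speech() const { return last_was_speech_; }

 private:
  bool last_was_speech_ = false;
};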

View File

@@ -37,21 +37,18 @@ ActivityMonitor::ActivityMonitor() {
   ResetStatistics();
 }
 
-int32_t ActivityMonitor::InFrameType(int16_t frame_type) {
+int32_t ActivityMonitor::InFrameType(FrameType frame_type) {
   counter_[frame_type]++;
   return 0;
 }
 
 void ActivityMonitor::PrintStatistics() {
   printf("\n");
-  printf("kActiveNormalEncoded kPassiveNormalEncoded kPassiveDTXWB ");
-  printf("kPassiveDTXNB kPassiveDTXSWB kNoEncoding\n");
-  printf("%19u", counter_[1]);
-  printf("%22u", counter_[2]);
-  printf("%14u", counter_[3]);
-  printf("%14u", counter_[4]);
-  printf("%14u", counter_[5]);
-  printf("%11u", counter_[0]);
+  printf("kFrameEmpty %u\n", counter_[kFrameEmpty]);
+  printf("kAudioFrameSpeech %u\n", counter_[kAudioFrameSpeech]);
+  printf("kAudioFrameCN %u\n", counter_[kAudioFrameCN]);
+  printf("kVideoFrameKey %u\n", counter_[kVideoFrameKey]);
+  printf("kVideoFrameDelta %u\n", counter_[kVideoFrameDelta]);
   printf("\n\n");
 }
@@ -71,7 +68,6 @@ TestVadDtx::TestVadDtx()
   EXPECT_EQ(0, acm_send_->RegisterTransportCallback(channel_.get()));
   channel_->RegisterReceiverACM(acm_receive_.get());
   EXPECT_EQ(0, acm_send_->RegisterVADCallback(monitor_.get()));
-  assert(monitor_->kPacketTypes == this->kPacketTypes);
 }
 
 void TestVadDtx::RegisterCodec(CodecInst codec_param) {
@@ -118,22 +114,19 @@ void TestVadDtx::Run(std::string in_filename, int frequency, int channels,
   monitor_->PrintStatistics();
 #endif
 
-  uint32_t stats[kPacketTypes];
+  uint32_t stats[5];
   monitor_->GetStatistics(stats);
   monitor_->ResetStatistics();
-  for (int i = 0; i < kPacketTypes; i++) {
+  for (const auto& st : stats) {
+    int i = &st - stats;  // Calculate the current position in stats.
     switch (expects[i]) {
       case 0: {
-        EXPECT_EQ(static_cast<uint32_t>(0), stats[i]) << "stats["
-                                                      << i
-                                                      << "] error.";
+        EXPECT_EQ(0u, st) << "stats[" << i << "] error.";
         break;
       }
       case 1: {
-        EXPECT_GT(stats[i], static_cast<uint32_t>(0)) << "stats["
-                                                      << i
-                                                      << "] error.";
+        EXPECT_GT(st, 0u) << "stats[" << i << "] error.";
         break;
       }
     }
@@ -198,14 +191,7 @@ void TestWebRtcVadDtx::RunTestCases() {
 
 // Set the expectation and run the test.
 void TestWebRtcVadDtx::Test(bool new_outfile) {
-  int expects[kPacketTypes];
-  int frequency = acm_send_->SendFrequency();
-  expects[0] = -1;  // Do not care.
-  expects[1] = 1;
-  expects[2] = vad_enabled_ && !use_webrtc_dtx_;
-  expects[3] = use_webrtc_dtx_ && (frequency == 8000);
-  expects[4] = use_webrtc_dtx_ && (frequency == 16000);
-  expects[5] = use_webrtc_dtx_ && (frequency == 32000);
+  int expects[] = {-1, 1, use_webrtc_dtx_, 0, 0};
   if (new_outfile) {
     output_file_num_++;
   }
@@ -251,7 +237,7 @@ void TestWebRtcVadDtx::SetVAD(bool enable_dtx, bool enable_vad,
 // Following is the implementation of TestOpusDtx.
 void TestOpusDtx::Perform() {
 #ifdef WEBRTC_CODEC_OPUS
-  int expects[kPacketTypes] = {0, 1, 0, 0, 0, 0};
+  int expects[] = {0, 1, 0, 0, 0};
 
   // Register Opus as send codec
   std::string out_filename = webrtc::test::OutputPath() +
@@ -263,7 +249,7 @@ void TestOpusDtx::Perform() {
       32000, 1, out_filename, false, expects);
 
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
-  expects[0] = 1;
+  expects[kFrameEmpty] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
       32000, 1, out_filename, true, expects);
@@ -271,7 +257,7 @@ void TestOpusDtx::Perform() {
 
   out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
   RegisterCodec(kOpusStereo);
   EXPECT_EQ(0, acm_send_->DisableOpusDtx());
-  expects[0] = 0;
+  expects[kFrameEmpty] = 0;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, false, expects);
@@ -279,7 +265,7 @@ void TestOpusDtx::Perform() {
 
   EXPECT_EQ(0, acm_send_->SetOpusApplication(kVoip));
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
-  expects[0] = 1;
+  expects[kFrameEmpty] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, true, expects);
 #endif

View File

@@ -23,22 +23,18 @@ namespace webrtc {
 class ActivityMonitor : public ACMVADCallback {
  public:
-  static const int kPacketTypes = 6;
-
   ActivityMonitor();
 
-  int32_t InFrameType(int16_t frame_type);
+  int32_t InFrameType(FrameType frame_type);
 
   void PrintStatistics();
   void ResetStatistics();
   void GetStatistics(uint32_t* stats);
 
  private:
-  // Counting according to
-  // counter_[0] - kNoEncoding,
-  // counter_[1] - kActiveNormalEncoded,
-  // counter_[2] - kPassiveNormalEncoded,
-  // counter_[3] - kPassiveDTXNB,
-  // counter_[4] - kPassiveDTXWB,
-  // counter_[5] - kPassiveDTXSWB
-  uint32_t counter_[kPacketTypes];
+  // 0 - kFrameEmpty
+  // 1 - kAudioFrameSpeech
+  // 2 - kAudioFrameCN
+  // 3 - kVideoFrameKey (not used by audio)
+  // 4 - kVideoFrameDelta (not used by audio)
+  uint32_t counter_[5];
 };
@@ -49,7 +45,6 @@ class ActivityMonitor : public ACMVADCallback {
 class TestVadDtx : public ACMTest {
  public:
   static const int kOutputFreqHz = 16000;
-  static const int kPacketTypes = 6;
 
   TestVadDtx();
@@ -65,12 +60,11 @@ class TestVadDtx : public ACMTest {
   // 0 : there have been no packets of type |x|,
   // 1 : there have been packets of type |x|,
   // with |x| indicates the following packet types
-  // 0 - kNoEncoding
-  // 1 - kActiveNormalEncoded
-  // 2 - kPassiveNormalEncoded
-  // 3 - kPassiveDTXNB
-  // 4 - kPassiveDTXWB
-  // 5 - kPassiveDTXSWB
+  // 0 - kFrameEmpty
+  // 1 - kAudioFrameSpeech
+  // 2 - kAudioFrameCN
+  // 3 - kVideoFrameKey (not used by audio)
+  // 4 - kVideoFrameDelta (not used by audio)
   void Run(std::string in_filename, int frequency, int channels,
            std::string out_filename, bool append, const int* expects);
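The counter and expects arrays index directly with the FrameType value, which leans on the enum being dense and zero-based, exactly as the 0-4 listings above spell out. If one wanted that assumption checked at compile time, a guard along these lines would do (hypothetical; not part of the change):

// Hypothetical compile-time guard for counter_[frame_type] indexing.
static_assert(webrtc::kFrameEmpty == 0 && webrtc::kAudioFrameSpeech == 1 &&
                  webrtc::kAudioFrameCN == 2 && webrtc::kVideoFrameKey == 3 &&
                  webrtc::kVideoFrameDelta == 4,
              "frame-type counters assume FrameType values 0..4");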

View File

@@ -13,6 +13,7 @@
 #include <assert.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/common.h"
@@ -305,28 +306,23 @@ void DTMFDetector::PrintDetectedDigits() {
 }
 
 void VADCallback::Reset() {
-  for (int n = 0; n < 6; n++) {
-    _numFrameTypes[n] = 0;
-  }
+  memset(_numFrameTypes, 0, sizeof(_numFrameTypes));
 }
 
 VADCallback::VADCallback() {
-  for (int n = 0; n < 6; n++) {
-    _numFrameTypes[n] = 0;
-  }
+  memset(_numFrameTypes, 0, sizeof(_numFrameTypes));
 }
 
 void VADCallback::PrintFrameTypes() {
-  fprintf(stdout, "No encoding.................. %d\n", _numFrameTypes[0]);
-  fprintf(stdout, "Active normal encoded........ %d\n", _numFrameTypes[1]);
-  fprintf(stdout, "Passive normal encoded....... %d\n", _numFrameTypes[2]);
-  fprintf(stdout, "Passive DTX wideband......... %d\n", _numFrameTypes[3]);
-  fprintf(stdout, "Passive DTX narrowband....... %d\n", _numFrameTypes[4]);
-  fprintf(stdout, "Passive DTX super-wideband... %d\n", _numFrameTypes[5]);
+  printf("kFrameEmpty......... %d\n", _numFrameTypes[kFrameEmpty]);
+  printf("kAudioFrameSpeech... %d\n", _numFrameTypes[kAudioFrameSpeech]);
+  printf("kAudioFrameCN....... %d\n", _numFrameTypes[kAudioFrameCN]);
+  printf("kVideoFrameKey...... %d\n", _numFrameTypes[kVideoFrameKey]);
+  printf("kVideoFrameDelta.... %d\n", _numFrameTypes[kVideoFrameDelta]);
 }
 
-int32_t VADCallback::InFrameType(int16_t frameType) {
-  _numFrameTypes[frameType]++;
+int32_t VADCallback::InFrameType(FrameType frame_type) {
+  _numFrameTypes[frame_type]++;
   return 0;
 }

View File

@@ -134,13 +134,13 @@ class VADCallback : public ACMVADCallback {
   ~VADCallback() {
   }
 
-  int32_t InFrameType(int16_t frameType);
+  int32_t InFrameType(FrameType frame_type);
 
   void PrintFrameTypes();
   void Reset();
 
  private:
-  uint32_t _numFrameTypes[6];
+  uint32_t _numFrameTypes[5];
 };
 
 void UseLegacyAcm(webrtc::Config* config);

View File

@@ -195,14 +195,13 @@ Channel::SendData(FrameType frameType,
 }
 
 int32_t
-Channel::InFrameType(int16_t frameType)
+Channel::InFrameType(FrameType frame_type)
 {
     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
-                 "Channel::InFrameType(frameType=%d)", frameType);
+                 "Channel::InFrameType(frame_type=%d)", frame_type);
 
     CriticalSectionScoped cs(&_callbackCritSect);
-    // 1 indicates speech
-    _sendFrameType = (frameType == 1) ? 1 : 0;
+    _sendFrameType = (frame_type == kAudioFrameSpeech);
     return 0;
 }

View File

@@ -353,7 +353,7 @@ public:
     const RTPFragmentationHeader* fragmentation) override;
 
     // From ACMVADCallback in the ACM
-    int32_t InFrameType(int16_t frameType) override;
+    int32_t InFrameType(FrameType frame_type) override;
 
     int32_t OnRxVadDetected(int vadDecision);