Moved voe_neteq_stats_unittest to audio_coding_module_unittest

The design of VoENetEqStatsTest in voice_engine_unittests depended on
being able to inject a factory for the audio coding module (ACM) into
the voice engine. That functionality is now likely going away, which
would make the test fail to compile. Further, the functionality under
test is mostly ACM functionality, so it makes more sense to test it at
the ACM level.

BUG=2996
R=henrika@webrtc.org, turaj@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/12029004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5912 4adac7df-926f-26a2-2b94-8c16560cd09d
henrik.lundin@webrtc.org 2014-04-15 17:59:25 +00:00
parent 6c75c98964
commit 7c6e3d188a
4 changed files with 173 additions and 286 deletions
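In short, the relocated test drives the receive side of the ACM directly instead of going through VoENetEqStats. A condensed sketch of that flow, distilled from the new unittest below (error checks omitted; payload and rtp_header here stand in for the zero-filled buffer and the RTP header that the test's RtpUtility helper prepares):

    // Sketch only: same calls as in audio_coding_module_unittest.cc below.
    scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(1 /*id*/));
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, kSampleRateHz, 1);
    codec.pltype = kPayloadType;
    acm->RegisterReceiveCodec(codec);             // Receive-side codec.
    acm->IncomingPacket(payload, kPayloadSizeBytes, rtp_header);  // Push a packet.
    AudioFrame frame;
    acm->PlayoutData10Ms(-1, &frame);             // Pull 10 ms of audio.
    AudioDecodingCallStats stats;
    acm->GetDecodingCallStatistics(&stats);       // Counters under test.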

@@ -0,0 +1,172 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/gtest_disable.h"

namespace webrtc {

const int kSampleRateHz = 16000;
const int kNumSamples10ms = kSampleRateHz / 100;
const int kFrameSizeMs = 10;  // Multiple of 10.
const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
const uint8_t kPayloadType = 111;

class RtpUtility {
 public:
  RtpUtility(int samples_per_packet, uint8_t payload_type)
      : samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}

  virtual ~RtpUtility() {}

  void Populate(WebRtcRTPHeader* rtp_header) {
    rtp_header->header.sequenceNumber = 0xABCD;
    rtp_header->header.timestamp = 0xABCDEF01;
    rtp_header->header.payloadType = payload_type_;
    rtp_header->header.markerBit = false;
    rtp_header->header.ssrc = 0x1234;
    rtp_header->header.numCSRCs = 0;
    rtp_header->frameType = kAudioFrameSpeech;
    rtp_header->header.payload_type_frequency = kSampleRateHz;
    rtp_header->type.Audio.channel = 1;
    rtp_header->type.Audio.isCNG = false;
  }

  void Forward(WebRtcRTPHeader* rtp_header) {
    ++rtp_header->header.sequenceNumber;
    rtp_header->header.timestamp += samples_per_packet_;
  }

 private:
  int samples_per_packet_;
  uint8_t payload_type_;
};

class AudioCodingModuleTest : public ::testing::Test {
 protected:
  AudioCodingModuleTest()
      : rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
        acm2_(AudioCodingModule::Create(1 /*id*/)) {}

  ~AudioCodingModuleTest() {}

  void TearDown() {}

  void SetUp() {
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, kSampleRateHz, 1);
    codec.pltype = kPayloadType;

    // Register L16 codec in ACMs.
    ASSERT_EQ(0, acm2_->RegisterReceiveCodec(codec));

    rtp_utility_->Populate(&rtp_header_);
  }

  void InsertPacketAndPullAudio() {
    AudioFrame audio_frame;
    const uint8_t kPayload[kPayloadSizeBytes] = {0};
    ASSERT_EQ(0,
              acm2_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
    ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
    rtp_utility_->Forward(&rtp_header_);
  }

  void JustPullAudio() {
    AudioFrame audio_frame;
    ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
  }

  scoped_ptr<RtpUtility> rtp_utility_;
  scoped_ptr<AudioCodingModule> acm2_;
  WebRtcRTPHeader rtp_header_;
};

// Check if the statistics are initialized correctly. Before any call to ACM
// all fields have to be zero.
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(InitializedToZero)) {
  AudioDecodingCallStats stats;
  acm2_->GetDecodingCallStatistics(&stats);
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);
}

// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
// should result in generating silence, check the associated field.
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
  AudioDecodingCallStats stats;
  const int kInitialDelay = 100;

  acm2_->SetInitialPlayoutDelay(kInitialDelay);

  AudioFrame audio_frame;
  int num_calls = 0;
  for (int time_ms = 0; time_ms < kInitialDelay;
       time_ms += kFrameSizeMs, ++num_calls) {
    InsertPacketAndPullAudio();
  }
  acm2_->GetDecodingCallStatistics(&stats);
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);
}

// Insert some packets and pull audio. Check statistics are valid. Then,
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
// correctly updated.
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(NetEqCalls)) {
  AudioDecodingCallStats stats;
  const int kNumNormalCalls = 10;

  AudioFrame audio_frame;
  for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
    InsertPacketAndPullAudio();
  }
  acm2_->GetDecodingCallStatistics(&stats);
  EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);

  const int kNumPlc = 3;
  const int kNumPlcCng = 5;

  // Simulate packet-loss. NetEq first performs PLC then PLC fades to CNG.
  for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
    JustPullAudio();
  }
  acm2_->GetDecodingCallStatistics(&stats);
  EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(kNumPlc, stats.decoded_plc);
  EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
}

}  // namespace webrtc

@@ -103,6 +103,7 @@
       ],
       'sources': [
         'audio_coding/main/acm2/acm_receiver_unittest.cc',
+        'audio_coding/main/acm2/audio_coding_module_unittest.cc',
         'audio_coding/main/acm2/call_statistics_unittest.cc',
         'audio_coding/main/acm2/initial_delay_manager_unittest.cc',
         'audio_coding/main/acm2/nack_unittest.cc',

@@ -1,285 +0,0 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/include/voe_neteq_stats.h"

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/source/audio_coding_module_impl.h"
#include "webrtc/modules/audio_device/include/fake_audio_device.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_hardware.h"
#include "webrtc/voice_engine/voice_engine_defines.h"

namespace webrtc {
namespace voe {
namespace {

const int kSampleRateHz = 16000;
const int kNumSamples10ms = kSampleRateHz / 100;
const int kFrameSizeMs = 10;  // Multiple of 10.
const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
const uint8_t kPayloadType = 111;

class RtpUtility {
 public:
  RtpUtility(int samples_per_packet, uint8_t payload_type)
      : samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}

  virtual ~RtpUtility() {}

  void Populate(WebRtcRTPHeader* rtp_header) {
    rtp_header->header.sequenceNumber = 0xABCD;
    rtp_header->header.timestamp = 0xABCDEF01;
    rtp_header->header.payloadType = payload_type_;
    rtp_header->header.markerBit = false;
    rtp_header->header.ssrc = 0x1234;
    rtp_header->header.numCSRCs = 0;
    rtp_header->frameType = kAudioFrameSpeech;
    rtp_header->header.payload_type_frequency = kSampleRateHz;
    rtp_header->type.Audio.channel = 1;
    rtp_header->type.Audio.isCNG = false;
  }

  void Forward(WebRtcRTPHeader* rtp_header) {
    ++rtp_header->header.sequenceNumber;
    rtp_header->header.timestamp += samples_per_packet_;
  }

 private:
  int samples_per_packet_;
  uint8_t payload_type_;
};

// This factory method allows access to ACM of a channel, facilitating insertion
// of packets to and pulling audio of ACM.
struct InsertAcm : AudioCodingModuleFactory {
  explicit InsertAcm(AudioCodingModule* acm) : acm_(acm) {}
  ~InsertAcm() {}
  virtual AudioCodingModule* Create(int /*id*/) const { return acm_; }

  AudioCodingModule* acm_;
};

class VoENetEqStatsTest : public ::testing::Test {
 protected:
  VoENetEqStatsTest()
      : acm1_(new acm1::AudioCodingModuleImpl(1, Clock::GetRealTimeClock())),
        acm2_(new acm2::AudioCodingModuleImpl(2)),
        voe_(VoiceEngine::Create()),
        base_(VoEBase::GetInterface(voe_)),
        voe_neteq_stats_(VoENetEqStats::GetInterface(voe_)),
        channel_acm1_(-1),
        channel_acm2_(-1),
        adm_(new FakeAudioDeviceModule),
        rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)) {}

  ~VoENetEqStatsTest() {}

  void TearDown() {
    voe_neteq_stats_->Release();
    base_->DeleteChannel(channel_acm1_);
    base_->DeleteChannel(channel_acm2_);
    base_->Terminate();
    base_->Release();
    VoiceEngine::Delete(voe_);
  }

  void SetUp() {
    // Check if all components are valid.
    ASSERT_TRUE(voe_ != NULL);
    ASSERT_TRUE(base_ != NULL);
    ASSERT_TRUE(adm_.get() != NULL);
    ASSERT_EQ(0, base_->Init(adm_.get()));

    // Set configs.
    config_acm1_.Set<AudioCodingModuleFactory>(new InsertAcm(acm1_));
    config_acm2_.Set<AudioCodingModuleFactory>(new InsertAcm(acm2_));

    // Create channels.
    channel_acm1_ = base_->CreateChannel(config_acm1_);
    ASSERT_NE(-1, channel_acm1_);
    channel_acm2_ = base_->CreateChannel(config_acm2_);
    ASSERT_NE(-1, channel_acm2_);

    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, kSampleRateHz, 1);
    codec.pltype = kPayloadType;

    // Register L16 codec in ACMs.
    ASSERT_EQ(0, acm1_->RegisterReceiveCodec(codec));
    ASSERT_EQ(0, acm2_->RegisterReceiveCodec(codec));

    rtp_utility_->Populate(&rtp_header_);
  }

  void InsertPacketAndPullAudio() {
    AudioFrame audio_frame;
    const uint8_t kPayload[kPayloadSizeBytes] = {0};
    ASSERT_EQ(0,
              acm1_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
    ASSERT_EQ(0,
              acm2_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
    ASSERT_EQ(0, acm1_->PlayoutData10Ms(-1, &audio_frame));
    ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
    rtp_utility_->Forward(&rtp_header_);
  }

  void JustPullAudio() {
    AudioFrame audio_frame;
    ASSERT_EQ(0, acm1_->PlayoutData10Ms(-1, &audio_frame));
    ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
  }

  Config config_acm1_;
  Config config_acm2_;

  // ACMs are inserted into VoE channels, and this class is not the owner of
  // them. Therefore, they should not be deleted, not even in the destructor.
  AudioCodingModule* acm1_;
  AudioCodingModule* acm2_;

  VoiceEngine* voe_;
  VoEBase* base_;
  VoENetEqStats* voe_neteq_stats_;
  int channel_acm1_;
  int channel_acm2_;
  scoped_ptr<FakeAudioDeviceModule> adm_;
  scoped_ptr<RtpUtility> rtp_utility_;

  WebRtcRTPHeader rtp_header_;
};

// Check if the statistics are initialized correctly. Before any call to ACM
// all fields have to be zero.
TEST_F(VoENetEqStatsTest, DISABLED_ON_ANDROID(InitializedToZero)) {
  AudioDecodingCallStats stats;
  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);

  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);
}

// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
// should result in generating silence, check the associated field.
TEST_F(VoENetEqStatsTest, DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
  AudioDecodingCallStats stats;
  const int kInitialDelay = 100;

  acm1_->SetInitialPlayoutDelay(kInitialDelay);
  acm2_->SetInitialPlayoutDelay(kInitialDelay);

  AudioFrame audio_frame;
  int num_calls = 0;
  for (int time_ms = 0; time_ms < kInitialDelay;
       time_ms += kFrameSizeMs, ++num_calls) {
    InsertPacketAndPullAudio();
  }
  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);

  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);
}

// Insert some packets and pull audio. Check statistics are valid. Then,
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
// correctly updated.
TEST_F(VoENetEqStatsTest, DISABLED_ON_ANDROID(NetEqCalls)) {
  AudioDecodingCallStats stats;
  const int kNumNormalCalls = 10;

  AudioFrame audio_frame;
  for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
    InsertPacketAndPullAudio();
  }
  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
  EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);

  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
  EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);

  const int kNumPlc = 3;
  const int kNumPlcCng = 5;

  // Simulate packet-loss. NetEq first performs PLC then PLC fades to CNG.
  for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
    JustPullAudio();
  }
  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
  EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(kNumPlc, stats.decoded_plc);
  EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);

  ASSERT_EQ(0,
            voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
  EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(kNumPlc, stats.decoded_plc);
  EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
}

}  // namespace
}  // namespace voe
}  // namespace webrtc

@@ -126,7 +126,6 @@
         'voe_audio_processing_unittest.cc',
         'voe_base_unittest.cc',
         'voe_codec_unittest.cc',
-        'voe_neteq_stats_unittest.cc',
       ],
       'conditions': [
         # TODO(henrike): remove build_with_chromium==1 when the bots are