Update talk to 58174641 together with http://review.webrtc.org/4319005/.

R=turaj@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/5809004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5287 4adac7df-926f-26a2-2b94-8c16560cd09d

Commit: 24301a67c6
Parent: 92c2793154
@@ -339,12 +339,14 @@ class FakeWebRtcVideoEngine
  };

  class Capturer : public webrtc::ViEExternalCapture {
   public:
    Capturer() : channel_id_(-1), denoising_(false), last_capture_time_(0) { }
    Capturer() : channel_id_(-1), denoising_(false),
        last_capture_time_(0), incoming_frame_num_(0) { }
    int channel_id() const { return channel_id_; }
    void set_channel_id(int channel_id) { channel_id_ = channel_id; }
    bool denoising() const { return denoising_; }
    void set_denoising(bool denoising) { denoising_ = denoising; }
    int64 last_capture_time() { return last_capture_time_; }
    int64 last_capture_time() const { return last_capture_time_; }
    int incoming_frame_num() const { return incoming_frame_num_; }

    // From ViEExternalCapture
    virtual int IncomingFrame(unsigned char* videoFrame,

@@ -359,6 +361,7 @@ class FakeWebRtcVideoEngine
                              const webrtc::ViEVideoFrameI420& video_frame,
                              unsigned long long captureTime) {
      last_capture_time_ = captureTime;
      ++incoming_frame_num_;
      return 0;
    }

@@ -366,6 +369,7 @@ class FakeWebRtcVideoEngine
    int channel_id_;
    bool denoising_;
    int64 last_capture_time_;
    int incoming_frame_num_;
  };

  FakeWebRtcVideoEngine(const cricket::VideoCodec* const* codecs,

@@ -408,6 +412,16 @@ class FakeWebRtcVideoEngine

  int GetLastCapturer() const { return last_capturer_; }
  int GetNumCapturers() const { return static_cast<int>(capturers_.size()); }
  int GetIncomingFrameNum(int channel_id) const {
    for (std::map<int, Capturer*>::const_iterator iter = capturers_.begin();
         iter != capturers_.end(); ++iter) {
      Capturer* capturer = iter->second;
      if (capturer->channel_id() == channel_id) {
        return capturer->incoming_frame_num();
      }
    }
    return -1;
  }
  void set_fail_alloc_capturer(bool fail_alloc_capturer) {
    fail_alloc_capturer_ = fail_alloc_capturer;
  }
@@ -631,6 +631,13 @@ class FakeWebRtcVoiceEngine

  // webrtc::VoENetEqStats
  WEBRTC_STUB(GetNetworkStatistics, (int, webrtc::NetworkStatistics&));
#ifdef USE_WEBRTC_DEV_BRANCH
  WEBRTC_FUNC_CONST(GetDecodingCallStatistics, (int channel,
      webrtc::AudioDecodingCallStats*)) {
    WEBRTC_CHECK_CHANNEL(channel);
    return 0;
  }
#endif

  // webrtc::VoENetwork
  WEBRTC_FUNC(RegisterExternalTransport, (int channel,
@@ -145,6 +145,9 @@ class WebRtcMediaEngine : public cricket::MediaEngineInterface {
  virtual void SetVideoLogging(int min_sev, const char* filter) OVERRIDE {
    delegate_->SetVideoLogging(min_sev, filter);
  }
  virtual bool StartAecDump(FILE* file) OVERRIDE {
    return delegate_->StartAecDump(file);
  }
  virtual bool RegisterVoiceProcessor(
      uint32 ssrc, VoiceProcessor* video_processor,
      MediaProcessorDirection direction) OVERRIDE {
@ -2118,18 +2118,6 @@ bool WebRtcVideoMediaChannel::GetSendChannelKey(uint32 local_ssrc,
|
||||
return true;
|
||||
}
|
||||
|
||||
WebRtcVideoChannelSendInfo* WebRtcVideoMediaChannel::GetSendChannel(
|
||||
VideoCapturer* video_capturer) {
|
||||
for (SendChannelMap::iterator iter = send_channels_.begin();
|
||||
iter != send_channels_.end(); ++iter) {
|
||||
WebRtcVideoChannelSendInfo* send_channel = iter->second;
|
||||
if (send_channel->video_capturer() == video_capturer) {
|
||||
return send_channel;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
WebRtcVideoChannelSendInfo* WebRtcVideoMediaChannel::GetSendChannel(
|
||||
uint32 local_ssrc) {
|
||||
uint32 key;
|
||||
@ -2159,6 +2147,18 @@ bool WebRtcVideoMediaChannel::CreateSendChannelKey(uint32 local_ssrc,
|
||||
return true;
|
||||
}
|
||||
|
||||
int WebRtcVideoMediaChannel::GetSendChannelNum(VideoCapturer* capturer) {
|
||||
int num = 0;
|
||||
for (SendChannelMap::iterator iter = send_channels_.begin();
|
||||
iter != send_channels_.end(); ++iter) {
|
||||
WebRtcVideoChannelSendInfo* send_channel = iter->second;
|
||||
if (send_channel->video_capturer() == capturer) {
|
||||
++num;
|
||||
}
|
||||
}
|
||||
return num;
|
||||
}
|
||||
|
||||
uint32 WebRtcVideoMediaChannel::GetDefaultChannelSsrc() {
|
||||
WebRtcVideoChannelSendInfo* send_channel = send_channels_[0];
|
||||
const StreamParams* sp = send_channel->stream_params();
|
||||
@ -2174,11 +2174,8 @@ bool WebRtcVideoMediaChannel::DeleteSendChannel(uint32 ssrc_key) {
|
||||
return false;
|
||||
}
|
||||
WebRtcVideoChannelSendInfo* send_channel = send_channels_[ssrc_key];
|
||||
VideoCapturer* capturer = send_channel->video_capturer();
|
||||
if (capturer != NULL) {
|
||||
capturer->SignalVideoFrame.disconnect(this);
|
||||
send_channel->set_video_capturer(NULL);
|
||||
}
|
||||
MaybeDisconnectCapturer(send_channel->video_capturer());
|
||||
send_channel->set_video_capturer(NULL);
|
||||
|
||||
int channel_id = send_channel->channel_id();
|
||||
int capture_id = send_channel->capture_id();
|
||||
@ -2217,7 +2214,7 @@ bool WebRtcVideoMediaChannel::RemoveCapturer(uint32 ssrc) {
|
||||
if (capturer == NULL) {
|
||||
return false;
|
||||
}
|
||||
capturer->SignalVideoFrame.disconnect(this);
|
||||
MaybeDisconnectCapturer(capturer);
|
||||
send_channel->set_video_capturer(NULL);
|
||||
const int64 timestamp = send_channel->local_stream_info()->time_stamp();
|
||||
if (send_codec_) {
|
||||
@ -2468,14 +2465,10 @@ bool WebRtcVideoMediaChannel::SetCapturer(uint32 ssrc,
|
||||
return false;
|
||||
}
|
||||
VideoCapturer* old_capturer = send_channel->video_capturer();
|
||||
if (old_capturer) {
|
||||
old_capturer->SignalVideoFrame.disconnect(this);
|
||||
}
|
||||
MaybeDisconnectCapturer(old_capturer);
|
||||
|
||||
send_channel->set_video_capturer(capturer);
|
||||
capturer->SignalVideoFrame.connect(
|
||||
this,
|
||||
&WebRtcVideoMediaChannel::SendFrame);
|
||||
MaybeConnectCapturer(capturer);
|
||||
if (!capturer->IsScreencast() && ratio_w_ != 0 && ratio_h_ != 0) {
|
||||
capturer->UpdateAspectRatio(ratio_w_, ratio_h_);
|
||||
}
|
||||
@@ -2865,20 +2858,23 @@ bool WebRtcVideoMediaChannel::GetRenderer(uint32 ssrc,
  return true;
}

// TODO(zhurunz): Add unittests to test this function.
// TODO(thorcarpenter): This is broken. One capturer registered on two ssrc
// will not send any video to the second ssrc send channel. We should remove
// GetSendChannel(capturer) and pass in an ssrc here.
void WebRtcVideoMediaChannel::SendFrame(VideoCapturer* capturer,
                                        const VideoFrame* frame) {
  // If there's send channel registers to the |capturer|, then only send the
  // frame to that channel and return. Otherwise send the frame to the default
  // channel, which currently taking frames from the engine.
  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(capturer);
  if (send_channel) {
    SendFrame(send_channel, frame, capturer->IsScreencast());
  // If the |capturer| is registered to any send channel, then send the frame
  // to those send channels.
  bool capturer_is_channel_owned = false;
  for (SendChannelMap::iterator iter = send_channels_.begin();
       iter != send_channels_.end(); ++iter) {
    WebRtcVideoChannelSendInfo* send_channel = iter->second;
    if (send_channel->video_capturer() == capturer) {
      SendFrame(send_channel, frame, capturer->IsScreencast());
      capturer_is_channel_owned = true;
    }
  }
  if (capturer_is_channel_owned) {
    return;
  }

  // TODO(hellner): Remove below for loop once the captured frame no longer
  // come from the engine, i.e. the engine no longer owns a capturer.
  for (SendChannelMap::iterator iter = send_channels_.begin();

@@ -3754,6 +3750,19 @@ bool WebRtcVideoMediaChannel::SetLocalRtxSsrc(int channel_id,
  return true;
}

void WebRtcVideoMediaChannel::MaybeConnectCapturer(VideoCapturer* capturer) {
  if (capturer != NULL && GetSendChannelNum(capturer) == 1) {
    capturer->SignalVideoFrame.connect(this,
                                       &WebRtcVideoMediaChannel::SendFrame);
  }
}

void WebRtcVideoMediaChannel::MaybeDisconnectCapturer(VideoCapturer* capturer) {
  if (capturer != NULL && GetSendChannelNum(capturer) == 1) {
    capturer->SignalVideoFrame.disconnect(this);
  }
}

}  // namespace cricket

#endif  // HAVE_WEBRTC_VIDEO
@@ -366,11 +366,12 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
  // If the local ssrc correspond to that of the default channel the key is 0.
  // For all other channels the returned key will be the same as the local ssrc.
  bool GetSendChannelKey(uint32 local_ssrc, uint32* key);
  WebRtcVideoChannelSendInfo* GetSendChannel(VideoCapturer* video_capturer);
  WebRtcVideoChannelSendInfo* GetSendChannel(uint32 local_ssrc);
  // Creates a new unique key that can be used for inserting a new send channel
  // into |send_channels_|
  bool CreateSendChannelKey(uint32 local_ssrc, uint32* key);
  // Get the number of the send channels |capturer| registered with.
  int GetSendChannelNum(VideoCapturer* capturer);

  bool IsDefaultChannel(int channel_id) const {
    return channel_id == vie_channel_;

@@ -404,6 +405,13 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
  bool SetLocalRtxSsrc(int channel_id, const StreamParams& send_params,
                       uint32 primary_ssrc, int stream_idx);

  // Connect |capturer| to WebRtcVideoMediaChannel if it is only registered
  // to one send channel, i.e. the first send channel.
  void MaybeConnectCapturer(VideoCapturer* capturer);
  // Disconnect |capturer| from WebRtcVideoMediaChannel if it is only registered
  // to one send channel, i.e. the last send channel.
  void MaybeDisconnectCapturer(VideoCapturer* capturer);

  // Global state.
  WebRtcVideoEngine* engine_;
  VoiceMediaChannel* voice_channel_;
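Taken together, GetSendChannelNum() and the Maybe{Connect,Disconnect}Capturer pair behave like a reference count on the capturer's frame signal: the channel connects to a capturer only when the first send channel adopts it and disconnects only when the last send channel lets go. A minimal illustrative sketch of that pattern follows; the Capturer/ChannelSet types and the "connected" flag are stand-ins invented for this example, not the actual talk_base/sigslot code above.

// --- Illustrative sketch only; not part of this commit ----------------------
#include <cstdio>
#include <map>

struct Capturer { bool connected; Capturer() : connected(false) {} };

class ChannelSet {
 public:
  // Attach |capturer| to send channel |ssrc|; connect on first registration,
  // mirroring MaybeConnectCapturer().
  void SetCapturer(int ssrc, Capturer* capturer) {
    channels_[ssrc] = capturer;
    if (capturer != NULL && CountChannelsUsing(capturer) == 1)
      capturer->connected = true;
  }
  // Detach the capturer from |ssrc|; disconnect only when this is the last
  // channel using it, mirroring MaybeDisconnectCapturer().
  void RemoveCapturer(int ssrc) {
    Capturer* capturer = channels_[ssrc];
    if (capturer != NULL && CountChannelsUsing(capturer) == 1)
      capturer->connected = false;
    channels_.erase(ssrc);
  }

 private:
  // Equivalent of GetSendChannelNum(): how many channels use |capturer|.
  int CountChannelsUsing(const Capturer* capturer) const {
    int num = 0;
    for (std::map<int, Capturer*>::const_iterator it = channels_.begin();
         it != channels_.end(); ++it) {
      if (it->second == capturer) ++num;
    }
    return num;
  }
  std::map<int, Capturer*> channels_;
};

int main() {
  ChannelSet channels;
  Capturer capturer;
  channels.SetCapturer(1, &capturer);  // first user: connects
  channels.SetCapturer(2, &capturer);  // already connected, stays connected
  channels.RemoveCapturer(1);          // one user still left: stays connected
  channels.RemoveCapturer(2);          // last user gone: disconnects
  std::printf("connected=%d\n", capturer.connected ? 1 : 0);  // connected=0
  return 0;
}
// --- End of illustrative sketch ----------------------------------------------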
@ -1216,6 +1216,53 @@ TEST_F(WebRtcVideoEngineTestFake, SetOptionsWithDenoising) {
|
||||
EXPECT_FALSE(vie_.GetCaptureDenoising(capture_id));
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoEngineTestFake, MultipleSendStreamsWithOneCapturer) {
|
||||
EXPECT_TRUE(SetupEngine());
|
||||
|
||||
// Start the capturer
|
||||
cricket::FakeVideoCapturer capturer;
|
||||
cricket::VideoFormat capture_format_vga = cricket::VideoFormat(640, 480,
|
||||
cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420);
|
||||
EXPECT_EQ(cricket::CS_RUNNING, capturer.Start(capture_format_vga));
|
||||
|
||||
// Add send streams and connect the capturer
|
||||
for (unsigned int i = 0; i < sizeof(kSsrcs2)/sizeof(kSsrcs2[0]); ++i) {
|
||||
EXPECT_TRUE(channel_->AddSendStream(
|
||||
cricket::StreamParams::CreateLegacy(kSsrcs2[i])));
|
||||
// Register the capturer to the ssrc.
|
||||
EXPECT_TRUE(channel_->SetCapturer(kSsrcs2[i], &capturer));
|
||||
}
|
||||
|
||||
const int channel0 = vie_.GetChannelFromLocalSsrc(kSsrcs2[0]);
|
||||
ASSERT_NE(-1, channel0);
|
||||
const int channel1 = vie_.GetChannelFromLocalSsrc(kSsrcs2[1]);
|
||||
ASSERT_NE(-1, channel1);
|
||||
ASSERT_NE(channel0, channel1);
|
||||
|
||||
// Set send codec.
|
||||
std::vector<cricket::VideoCodec> codecs;
|
||||
cricket::VideoCodec send_codec(100, "VP8", 640, 480, 30, 0);
|
||||
codecs.push_back(send_codec);
|
||||
EXPECT_TRUE(channel_->SetSendCodecs(codecs));
|
||||
|
||||
EXPECT_TRUE(capturer.CaptureFrame());
|
||||
EXPECT_EQ(1, vie_.GetIncomingFrameNum(channel0));
|
||||
EXPECT_EQ(1, vie_.GetIncomingFrameNum(channel1));
|
||||
|
||||
EXPECT_TRUE(channel_->RemoveSendStream(kSsrcs2[0]));
|
||||
EXPECT_TRUE(capturer.CaptureFrame());
|
||||
// channel0 is the default channel, so it won't be deleted.
|
||||
// But it should be disconnected from the capturer.
|
||||
EXPECT_EQ(1, vie_.GetIncomingFrameNum(channel0));
|
||||
EXPECT_EQ(2, vie_.GetIncomingFrameNum(channel1));
|
||||
|
||||
EXPECT_TRUE(channel_->RemoveSendStream(kSsrcs2[1]));
|
||||
EXPECT_TRUE(capturer.CaptureFrame());
|
||||
EXPECT_EQ(1, vie_.GetIncomingFrameNum(channel0));
|
||||
// channel1 has already been deleted.
|
||||
EXPECT_EQ(-1, vie_.GetIncomingFrameNum(channel1));
|
||||
}
|
||||
|
||||
|
||||
// Disabled since its flaky: b/11288120
|
||||
TEST_F(WebRtcVideoEngineTestFake, DISABLED_SendReceiveBitratesStats) {
|
||||
|
@@ -382,6 +382,25 @@ struct NetworkStatistics // NETEQ statistics
  int addedSamples;
};

// Statistics for calls to AudioCodingModule::PlayoutData10Ms().
struct AudioDecodingCallStats {
  AudioDecodingCallStats()
      : calls_to_silence_generator(0),
        calls_to_neteq(0),
        decoded_normal(0),
        decoded_plc(0),
        decoded_cng(0),
        decoded_plc_cng(0) {}

  int calls_to_silence_generator;  // Number of calls where silence generated,
                                   // and NetEq was disengaged from decoding.
  int calls_to_neteq;  // Number of calls to NetEq.
  int decoded_normal;  // Number of calls where audio RTP packet decoded.
  int decoded_plc;  // Number of calls resulted in PLC.
  int decoded_cng;  // Number of calls where comfort noise generated due to DTX.
  int decoded_plc_cng;  // Number of calls resulted where PLC faded to CNG.
};

typedef struct
{
  int min;  // minumum
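All of the counters above track cumulative 10 ms playout calls, so simple quality ratios can be derived from a snapshot. A hedged sketch follows; ConcealmentRatio() is a hypothetical helper written for this example, not a WebRTC API, and the struct is re-declared locally only to keep the sketch self-contained (real code would include webrtc/common_types.h instead).

// --- Illustrative sketch only; not part of this commit ----------------------
#include <cstdio>

// Local stand-in mirroring the AudioDecodingCallStats struct added above.
struct AudioDecodingCallStats {
  AudioDecodingCallStats()
      : calls_to_silence_generator(0), calls_to_neteq(0), decoded_normal(0),
        decoded_plc(0), decoded_cng(0), decoded_plc_cng(0) {}
  int calls_to_silence_generator;
  int calls_to_neteq;
  int decoded_normal;
  int decoded_plc;
  int decoded_cng;
  int decoded_plc_cng;
};

// Hypothetical helper: fraction of NetEq decode calls that concealed a loss
// (plain PLC plus PLC that faded to CNG).
double ConcealmentRatio(const AudioDecodingCallStats& stats) {
  if (stats.calls_to_neteq == 0) return 0.0;
  return static_cast<double>(stats.decoded_plc + stats.decoded_plc_cng) /
         stats.calls_to_neteq;
}

int main() {
  AudioDecodingCallStats stats;
  stats.calls_to_neteq = 100;  // one second worth of 10 ms pulls
  stats.decoded_normal = 92;
  stats.decoded_plc = 3;
  stats.decoded_plc_cng = 5;
  std::printf("concealed %.0f%% of decode calls\n",
              100.0 * ConcealmentRatio(stats));  // prints 8%
  return 0;
}
// --- End of illustrative sketch ----------------------------------------------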
@ -19,6 +19,7 @@
|
||||
#include "webrtc/common_types.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/nack.h"
|
||||
#include "webrtc/modules/audio_coding/neteq4/interface/audio_decoder.h"
|
||||
#include "webrtc/modules/audio_coding/neteq4/interface/neteq.h"
|
||||
@ -461,6 +462,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
|
||||
audio_frame->vad_activity_ = previous_audio_activity_;
|
||||
SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame);
|
||||
previous_audio_activity_ = audio_frame->vad_activity_;
|
||||
call_stats_.DecodedByNetEq(audio_frame->speech_type_);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -761,6 +763,9 @@ bool AcmReceiver::GetSilence(int desired_sample_rate_hz, AudioFrame* frame) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
call_stats_.DecodedBySilenceGenerator();
|
||||
|
||||
// Set the values if already got a packet, otherwise set to default values.
|
||||
if (last_audio_decoder_ >= 0) {
|
||||
current_sample_rate_hz_ = ACMCodecDB::database_[last_audio_decoder_].plfreq;
|
||||
@ -832,6 +837,12 @@ void AcmReceiver::InsertStreamOfSyncPackets(
|
||||
}
|
||||
}
|
||||
|
||||
void AcmReceiver::GetDecodingCallStatistics(
|
||||
AudioDecodingCallStats* stats) const {
|
||||
CriticalSectionScoped lock(neteq_crit_sect_);
|
||||
*stats = call_stats_.GetDecodingStatistics();
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h"
|
||||
#include "webrtc/modules/audio_coding/neteq4/interface/neteq.h"
|
||||
#include "webrtc/modules/interface/module_common_types.h"
|
||||
@ -320,6 +321,10 @@ class AcmReceiver {
|
||||
//
|
||||
NetEqBackgroundNoiseMode BackgroundNoiseModeForTest() const;
|
||||
|
||||
//
|
||||
// Get statistics of calls to GetAudio().
|
||||
void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
|
||||
|
||||
private:
|
||||
int PayloadType2CodecIndex(uint8_t payload_type) const;
|
||||
|
||||
@ -361,6 +366,8 @@ class AcmReceiver {
|
||||
// initial delay is set.
|
||||
scoped_ptr<InitialDelayManager::SyncStream> missing_packets_sync_stream_;
|
||||
scoped_ptr<InitialDelayManager::SyncStream> late_packets_sync_stream_;
|
||||
|
||||
CallStatistics call_stats_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
@@ -84,6 +84,8 @@
        'audio_coding_module.cc',
        'audio_coding_module_impl.cc',
        'audio_coding_module_impl.h',
        'call_statistics.cc',
        'call_statistics.h',
        'initial_delay_manager.cc',
        'initial_delay_manager.h',
        'nack.cc',
@ -20,6 +20,7 @@
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
@ -1979,6 +1980,11 @@ const char* AudioCodingModuleImpl::Version() const {
|
||||
return kExperimentalAcmVersion;
|
||||
}
|
||||
|
||||
void AudioCodingModuleImpl::GetDecodingCallStatistics(
|
||||
AudioDecodingCallStats* call_stats) const {
|
||||
receiver_.GetDecodingCallStatistics(call_stats);
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
@@ -228,6 +228,8 @@ class AudioCodingModuleImpl : public AudioCodingModule {

  std::vector<uint16_t> GetNackList(int round_trip_time_ms) const;

  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;

 private:
  int UnregisterReceiveCodecSafe(int payload_type);
webrtc/modules/audio_coding/main/acm2/call_statistics.cc (new file, 55 lines)
@@ -0,0 +1,55 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"

#include <cassert>

namespace webrtc {

namespace acm2 {

void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type) {
  ++decoding_stat_.calls_to_neteq;
  switch (speech_type) {
    case AudioFrame::kNormalSpeech: {
      ++decoding_stat_.decoded_normal;
      break;
    }
    case AudioFrame::kPLC: {
      ++decoding_stat_.decoded_plc;
      break;
    }
    case AudioFrame::kCNG: {
      ++decoding_stat_.decoded_cng;
      break;
    }
    case AudioFrame::kPLCCNG: {
      ++decoding_stat_.decoded_plc_cng;
      break;
    }
    case AudioFrame::kUndefined: {
      // If the audio is decoded by NetEq, |kUndefined| is not an option.
      assert(false);
    }
  }
}

void CallStatistics::DecodedBySilenceGenerator() {
  ++decoding_stat_.calls_to_silence_generator;
}

const AudioDecodingCallStats& CallStatistics::GetDecodingStatistics() const {
  return decoding_stat_;
}

}  // namespace acm2

}  // namespace webrtc
webrtc/modules/audio_coding/main/acm2/call_statistics.h (new file, 63 lines)
@@ -0,0 +1,63 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_

#include "webrtc/common_types.h"
#include "webrtc/modules/interface/module_common_types.h"

//
// This class is for book keeping of calls to ACM. It is not useful to log API
// calls which are supposed to be called every 10ms, e.g. PlayoutData10Ms(),
// however, it is useful to know the number of such calls in a given time
// interval. The current implementation covers calls to PlayoutData10Ms() with
// detailed accounting of the decoded speech type.
//
// Thread Safety
// =============
// Please note that this class in not thread safe. The class must be protected
// if different APIs are called from different threads.
//

namespace webrtc {

namespace acm2 {

class CallStatistics {
 public:
  CallStatistics() {}
  ~CallStatistics() {}

  // Call this method to indicate that NetEq engaged in decoding. |speech_type|
  // is the audio-type according to NetEq.
  void DecodedByNetEq(AudioFrame::SpeechType speech_type);

  // Call this method to indicate that a decoding call resulted in generating
  // silence, i.e. call to NetEq is bypassed and the output audio is zero.
  void DecodedBySilenceGenerator();

  // Get statistics for decoding. The statistics include the number of calls to
  // NetEq and silence generator, as well as the type of speech pulled of off
  // NetEq, c.f. declaration of AudioDecodingCallStats for detailed description.
  const AudioDecodingCallStats& GetDecodingStatistics() const;

 private:
  // Reset the decoding statistics.
  void ResetDecodingStatistics();

  AudioDecodingCallStats decoding_stat_;
};

}  // namespace acm2

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
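As the thread-safety note above says, CallStatistics itself is unprotected; in this commit AcmReceiver and the legacy AudioCodingModuleImpl guard it with their existing critical sections. A hedged sketch of that ownership pattern follows; the StatsOwner class and stats_lock_ member are invented for this example, and std::mutex stands in for the CriticalSectionScoped used in the actual code.

// --- Illustrative sketch only; not part of this commit ----------------------
#include <mutex>

#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
#include "webrtc/modules/interface/module_common_types.h"

// Hypothetical owner that serializes access to CallStatistics, analogous to
// how AcmReceiver wraps it in its NetEq critical section.
class StatsOwner {
 public:
  // Called on the decode path for every 10 ms pull that reached NetEq.
  void OnNetEqDecode(webrtc::AudioFrame::SpeechType speech_type) {
    std::lock_guard<std::mutex> lock(stats_lock_);
    call_stats_.DecodedByNetEq(speech_type);
  }

  // Called from any thread that wants a snapshot; copy out under the lock.
  webrtc::AudioDecodingCallStats GetStats() const {
    std::lock_guard<std::mutex> lock(stats_lock_);
    return call_stats_.GetDecodingStatistics();
  }

 private:
  mutable std::mutex stats_lock_;
  webrtc::acm2::CallStatistics call_stats_;
};
// --- End of illustrative sketch ----------------------------------------------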
webrtc/modules/audio_coding/main/acm2/call_statistics_unittest.cc (new file, 55 lines)
@@ -0,0 +1,55 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "gtest/gtest.h"
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"

namespace webrtc {

namespace acm2 {

TEST(CallStatisticsTest, InitializedZero) {
  CallStatistics call_stats;
  AudioDecodingCallStats stats;

  stats = call_stats.GetDecodingStatistics();
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);
}

TEST(CallStatisticsTest, AllCalls) {
  CallStatistics call_stats;
  AudioDecodingCallStats stats;

  call_stats.DecodedBySilenceGenerator();
  call_stats.DecodedByNetEq(AudioFrame::kNormalSpeech);
  call_stats.DecodedByNetEq(AudioFrame::kPLC);
  call_stats.DecodedByNetEq(AudioFrame::kPLCCNG);
  call_stats.DecodedByNetEq(AudioFrame::kCNG);

  stats = call_stats.GetDecodingStatistics();
  EXPECT_EQ(4, stats.calls_to_neteq);
  EXPECT_EQ(1, stats.calls_to_silence_generator);
  EXPECT_EQ(1, stats.decoded_normal);
  EXPECT_EQ(1, stats.decoded_cng);
  EXPECT_EQ(1, stats.decoded_plc);
  EXPECT_EQ(1, stats.decoded_plc_cng);
}

}  // namespace acm2

}  // namespace webrtc
@@ -931,6 +931,9 @@ class AudioCodingModule: public Module {
  // is returned.
  //
  virtual std::vector<uint16_t> GetNackList(int round_trip_time_ms) const = 0;

  virtual void GetDecodingCallStatistics(
      AudioDecodingCallStats* call_stats) const = 0;
};

struct AudioCodingModuleFactory {
@ -18,6 +18,7 @@
|
||||
#include "webrtc/engine_configurations.h"
|
||||
#include "webrtc/modules/audio_coding/main/source/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
#include "webrtc/modules/audio_coding/main/source/acm_dtmf_detection.h"
|
||||
#include "webrtc/modules/audio_coding/main/source/acm_generic_codec.h"
|
||||
#include "webrtc/modules/audio_coding/main/source/acm_resampler.h"
|
||||
@ -2273,6 +2274,9 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
|
||||
{
|
||||
CriticalSectionScoped lock(acm_crit_sect_);
|
||||
|
||||
// Update call statistics.
|
||||
call_stats_.DecodedByNetEq(audio_frame->speech_type_);
|
||||
|
||||
if (update_nack) {
|
||||
assert(nack_.get());
|
||||
nack_->UpdateLastDecodedPacket(decoded_seq_num, decoded_timestamp);
|
||||
@ -2879,6 +2883,9 @@ bool AudioCodingModuleImpl::GetSilence(int desired_sample_rate_hz,
|
||||
return false;
|
||||
}
|
||||
|
||||
// Record call to silence generator.
|
||||
call_stats_.DecodedBySilenceGenerator();
|
||||
|
||||
// We stop accumulating packets, if the number of packets or the total size
|
||||
// exceeds a threshold.
|
||||
int max_num_packets;
|
||||
@ -3030,6 +3037,12 @@ const char* AudioCodingModuleImpl::Version() const {
|
||||
return kLegacyAcmVersion;
|
||||
}
|
||||
|
||||
void AudioCodingModuleImpl::GetDecodingCallStatistics(
|
||||
AudioDecodingCallStats* call_stats) const {
|
||||
CriticalSectionScoped lock(acm_crit_sect_);
|
||||
*call_stats = call_stats_.GetDecodingStatistics();
|
||||
}
|
||||
|
||||
} // namespace acm1
|
||||
|
||||
} // namespace webrtc
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include "webrtc/modules/audio_coding/main/source/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/source/acm_neteq.h"
|
||||
#include "webrtc/modules/audio_coding/main/source/acm_resampler.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
@ -303,6 +304,8 @@ class AudioCodingModuleImpl : public AudioCodingModule {
|
||||
// Disable NACK.
|
||||
void DisableNack();
|
||||
|
||||
void GetDecodingCallStatistics(AudioDecodingCallStats* call_stats) const;
|
||||
|
||||
private:
|
||||
// Change required states after starting to receive the codec corresponding
|
||||
// to |index|.
|
||||
@ -441,6 +444,8 @@ class AudioCodingModuleImpl : public AudioCodingModule {
|
||||
Clock* clock_;
|
||||
scoped_ptr<acm2::Nack> nack_;
|
||||
bool nack_enabled_;
|
||||
|
||||
acm2::CallStatistics call_stats_;
|
||||
};
|
||||
|
||||
} // namespace acm1
|
||||
|
@ -1104,14 +1104,6 @@ int WebRtcNetEQ_GetSpeechOutputType(void *inst, enum WebRtcNetEQOutputType *outp
|
||||
/* If CN or internal CNG */
|
||||
*outputType = kOutputCNG;
|
||||
|
||||
#ifdef NETEQ_VAD
|
||||
}
|
||||
else if ( NetEqMainInst->DSPinst.VADInst.VADDecision == 0 )
|
||||
{
|
||||
/* post-decode VAD says passive speaker */
|
||||
*outputType = kOutputVADPassive;
|
||||
#endif /* NETEQ_VAD */
|
||||
|
||||
}
|
||||
else if ((NetEqMainInst->DSPinst.w16_mode == MODE_EXPAND)
|
||||
&& (NetEqMainInst->DSPinst.ExpandInst.w16_expandMuteFactor == 0))
|
||||
@ -1125,6 +1117,14 @@ int WebRtcNetEQ_GetSpeechOutputType(void *inst, enum WebRtcNetEQOutputType *outp
|
||||
/* PLC mode */
|
||||
*outputType = kOutputPLC;
|
||||
|
||||
#ifdef NETEQ_VAD
|
||||
}
|
||||
else if ( NetEqMainInst->DSPinst.VADInst.VADDecision == 0 )
|
||||
{
|
||||
/* post-decode VAD says passive speaker */
|
||||
*outputType = kOutputVADPassive;
|
||||
#endif /* NETEQ_VAD */
|
||||
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@@ -1887,13 +1887,13 @@ NetEqOutputType NetEqImpl::LastOutputType() {
  assert(expand_.get());
  if (last_mode_ == kModeCodecInternalCng || last_mode_ == kModeRfc3389Cng) {
    return kOutputCNG;
  } else if (vad_->running() && !vad_->active_speech()) {
    return kOutputVADPassive;
  } else if (last_mode_ == kModeExpand && expand_->MuteFactor(0) == 0) {
    // Expand mode has faded down to background noise only (very long expand).
    return kOutputPLCtoCNG;
  } else if (last_mode_ == kModeExpand) {
    return kOutputPLC;
  } else if (vad_->running() && !vad_->active_speech()) {
    return kOutputVADPassive;
  } else {
    return kOutputNormal;
  }
@@ -102,6 +102,7 @@
      ],
      'sources': [
        'audio_coding/main/acm2/acm_receiver_unittest.cc',
        'audio_coding/main/acm2/call_statistics_unittest.cc',
        'audio_coding/main/acm2/initial_delay_manager_unittest.cc',
        'audio_coding/main/acm2/nack_unittest.cc',
        'audio_coding/main/source/acm_neteq_unittest.cc',
@@ -4619,6 +4619,10 @@ Channel::GetNetworkStatistics(NetworkStatistics& stats)
    return return_value;
}

void Channel::GetDecodingCallStatistics(AudioDecodingCallStats* stats) const {
  audio_coding_->GetDecodingCallStatistics(stats);
}

bool Channel::GetDelayEstimate(int* jitter_buffer_delay_ms,
                               int* playout_buffer_delay_ms) const {
  if (_average_jitter_buffer_delay_us == 0) {
@@ -201,6 +201,7 @@ public:

    // VoENetEqStats
    int GetNetworkStatistics(NetworkStatistics& stats);
    void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;

    // VoEVideoSync
    bool GetDelayEstimate(int* jitter_buffer_delay_ms,
@@ -35,6 +35,10 @@ public:
    // The statistics are reset after the query.
    virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0;

    // Get statistics of calls to AudioCodingModule::PlayoutData10Ms().
    virtual int GetDecodingCallStatistics(
        int channel, AudioDecodingCallStats* stats) const = 0;

protected:
    VoENetEqStats() {}
    virtual ~VoENetEqStats() {}
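For completeness, this is roughly how an application would reach the new counters through the VoE sub-API declared above. A hedged sketch only; engine and channel setup are assumed to happen elsewhere, and LogDecodingStats() is a name invented for this example.

// --- Illustrative sketch only; not part of this commit ----------------------
#include "webrtc/common_types.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_neteq_stats.h"

// Query the cumulative decoding-call statistics for one VoE channel.
void LogDecodingStats(webrtc::VoiceEngine* voe, int channel) {
  webrtc::VoENetEqStats* neteq_stats = webrtc::VoENetEqStats::GetInterface(voe);
  if (neteq_stats == NULL)
    return;

  webrtc::AudioDecodingCallStats stats;
  if (neteq_stats->GetDecodingCallStatistics(channel, &stats) == 0) {
    // stats.calls_to_neteq, stats.decoded_plc, stats.decoded_plc_cng, ... are
    // cumulative 10 ms playout-call counters (see common_types.h above).
  }
  neteq_stats->Release();  // balance the GetInterface() reference
}
// --- End of illustrative sketch ----------------------------------------------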
@ -17,7 +17,6 @@
|
||||
#include "webrtc/voice_engine/include/voe_errors.h"
|
||||
#include "webrtc/voice_engine/voice_engine_impl.h"
|
||||
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
VoENetEqStats* VoENetEqStats::GetInterface(VoiceEngine* voiceEngine)
|
||||
@ -73,6 +72,27 @@ int VoENetEqStatsImpl::GetNetworkStatistics(int channel,
|
||||
return channelPtr->GetNetworkStatistics(stats);
|
||||
}
|
||||
|
||||
int VoENetEqStatsImpl::GetDecodingCallStatistics(
|
||||
int channel, AudioDecodingCallStats* stats) const {
|
||||
ANDROID_NOT_SUPPORTED(_shared->statistics());
|
||||
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
|
||||
voe::Channel* channelPtr = ch.channel();
|
||||
if (channelPtr == NULL) {
|
||||
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
|
||||
"GetDecodingCallStatistics() failed to locate "
|
||||
"channel");
|
||||
return -1;
|
||||
}
|
||||
|
||||
channelPtr->GetDecodingCallStatistics(stats);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif // #ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
|
||||
|
||||
} // namespace webrtc
|
||||
|
@@ -13,6 +13,7 @@

#include "webrtc/voice_engine/include/voe_neteq_stats.h"

#include "webrtc/common_types.h"
#include "webrtc/voice_engine/shared_data.h"

namespace webrtc {

@@ -23,6 +24,9 @@ public:
    virtual int GetNetworkStatistics(int channel,
                                     NetworkStatistics& stats);

    virtual int GetDecodingCallStatistics(
        int channel, AudioDecodingCallStats* stats) const;

protected:
    VoENetEqStatsImpl(voe::SharedData* shared);
    virtual ~VoENetEqStatsImpl();
webrtc/voice_engine/voe_neteq_stats_unittest.cc (new file, 285 lines)
@@ -0,0 +1,285 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/voice_engine/include/voe_neteq_stats.h"
|
||||
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
#include "webrtc/modules/audio_device/include/fake_audio_device.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
#include "webrtc/system_wrappers/interface/clock.h"
|
||||
#include "webrtc/test/testsupport/gtest_disable.h"
|
||||
#include "webrtc/voice_engine/include/voe_base.h"
|
||||
#include "webrtc/voice_engine/include/voe_hardware.h"
|
||||
#include "webrtc/voice_engine/voice_engine_defines.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
|
||||
#include "webrtc/modules/audio_coding/main/source/audio_coding_module_impl.h"
|
||||
|
||||
namespace webrtc {
|
||||
namespace voe {
|
||||
namespace {
|
||||
|
||||
const int kSampleRateHz = 16000;
|
||||
const int kNumSamples10ms = kSampleRateHz / 100;
|
||||
const int kFrameSizeMs = 10; // Multiple of 10.
|
||||
const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
|
||||
const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
|
||||
const uint8_t kPayloadType = 111;
|
||||
|
||||
class RtpUtility {
|
||||
public:
|
||||
RtpUtility(int samples_per_packet, uint8_t payload_type)
|
||||
: samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}
|
||||
|
||||
virtual ~RtpUtility() {}
|
||||
|
||||
void Populate(WebRtcRTPHeader* rtp_header) {
|
||||
rtp_header->header.sequenceNumber = 0xABCD;
|
||||
rtp_header->header.timestamp = 0xABCDEF01;
|
||||
rtp_header->header.payloadType = payload_type_;
|
||||
rtp_header->header.markerBit = false;
|
||||
rtp_header->header.ssrc = 0x1234;
|
||||
rtp_header->header.numCSRCs = 0;
|
||||
rtp_header->frameType = kAudioFrameSpeech;
|
||||
|
||||
rtp_header->header.payload_type_frequency = kSampleRateHz;
|
||||
rtp_header->type.Audio.channel = 1;
|
||||
rtp_header->type.Audio.isCNG = false;
|
||||
}
|
||||
|
||||
void Forward(WebRtcRTPHeader* rtp_header) {
|
||||
++rtp_header->header.sequenceNumber;
|
||||
rtp_header->header.timestamp += samples_per_packet_;
|
||||
}
|
||||
|
||||
private:
|
||||
int samples_per_packet_;
|
||||
uint8_t payload_type_;
|
||||
};
|
||||
|
||||
// This factory method allows access to ACM of a channel, facilitating insertion
|
||||
// of packets to and pulling audio of ACM.
|
||||
struct InsertAcm : AudioCodingModuleFactory {
|
||||
explicit InsertAcm(AudioCodingModule* acm) : acm_(acm) {}
|
||||
~InsertAcm() {}
|
||||
virtual AudioCodingModule* Create(int /*id*/) const { return acm_; }
|
||||
|
||||
AudioCodingModule* acm_;
|
||||
};
|
||||
|
||||
class VoENetEqStatsTest : public ::testing::Test {
|
||||
protected:
|
||||
VoENetEqStatsTest()
|
||||
: acm1_(new acm1::AudioCodingModuleImpl(1, Clock::GetRealTimeClock())),
|
||||
acm2_(new acm2::AudioCodingModuleImpl(2)),
|
||||
voe_(VoiceEngine::Create()),
|
||||
base_(VoEBase::GetInterface(voe_)),
|
||||
voe_neteq_stats_(VoENetEqStats::GetInterface(voe_)),
|
||||
channel_acm1_(-1),
|
||||
channel_acm2_(-1),
|
||||
adm_(new FakeAudioDeviceModule),
|
||||
rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)) {}
|
||||
|
||||
~VoENetEqStatsTest() {}
|
||||
|
||||
void TearDown() {
|
||||
voe_neteq_stats_->Release();
|
||||
base_->DeleteChannel(channel_acm1_);
|
||||
base_->DeleteChannel(channel_acm2_);
|
||||
base_->Terminate();
|
||||
base_->Release();
|
||||
VoiceEngine::Delete(voe_);
|
||||
}
|
||||
|
||||
void SetUp() {
|
||||
// Check if all components are valid.
|
||||
ASSERT_TRUE(voe_ != NULL);
|
||||
ASSERT_TRUE(base_ != NULL);
|
||||
ASSERT_TRUE(adm_.get() != NULL);
|
||||
ASSERT_EQ(0, base_->Init(adm_.get()));
|
||||
|
||||
// Set configs.
|
||||
config_acm1_.Set<AudioCodingModuleFactory>(new InsertAcm(acm1_));
|
||||
config_acm2_.Set<AudioCodingModuleFactory>(new InsertAcm(acm2_));
|
||||
|
||||
// Create channe1s;
|
||||
channel_acm1_ = base_->CreateChannel(config_acm1_);
|
||||
ASSERT_NE(-1, channel_acm1_);
|
||||
|
||||
channel_acm2_ = base_->CreateChannel(config_acm2_);
|
||||
ASSERT_NE(-1, channel_acm2_);
|
||||
|
||||
CodecInst codec;
|
||||
AudioCodingModule::Codec("L16", &codec, kSampleRateHz, 1);
|
||||
codec.pltype = kPayloadType;
|
||||
|
||||
// Register L16 codec in ACMs.
|
||||
ASSERT_EQ(0, acm1_->RegisterReceiveCodec(codec));
|
||||
ASSERT_EQ(0, acm2_->RegisterReceiveCodec(codec));
|
||||
|
||||
rtp_utility_->Populate(&rtp_header_);
|
||||
}
|
||||
|
||||
void InsertPacketAndPullAudio() {
|
||||
AudioFrame audio_frame;
|
||||
const uint8_t kPayload[kPayloadSizeBytes] = {0};
|
||||
|
||||
ASSERT_EQ(0,
|
||||
acm1_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
|
||||
ASSERT_EQ(0,
|
||||
acm2_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
|
||||
|
||||
ASSERT_EQ(0, acm1_->PlayoutData10Ms(-1, &audio_frame));
|
||||
ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
|
||||
rtp_utility_->Forward(&rtp_header_);
|
||||
}
|
||||
|
||||
void JustPullAudio() {
|
||||
AudioFrame audio_frame;
|
||||
ASSERT_EQ(0, acm1_->PlayoutData10Ms(-1, &audio_frame));
|
||||
ASSERT_EQ(0, acm2_->PlayoutData10Ms(-1, &audio_frame));
|
||||
}
|
||||
|
||||
Config config_acm1_;
|
||||
Config config_acm2_;
|
||||
|
||||
// ACMs are inserted into VoE channels, and this class is not the owner of
|
||||
// them. Therefore, they should not be deleted, not even in destructor.
|
||||
AudioCodingModule* acm1_;
|
||||
AudioCodingModule* acm2_;
|
||||
|
||||
VoiceEngine* voe_;
|
||||
VoEBase* base_;
|
||||
VoENetEqStats* voe_neteq_stats_;
|
||||
int channel_acm1_;
|
||||
int channel_acm2_;
|
||||
scoped_ptr<FakeAudioDeviceModule> adm_;
|
||||
scoped_ptr<RtpUtility> rtp_utility_;
|
||||
WebRtcRTPHeader rtp_header_;
|
||||
};
|
||||
|
||||
// Check if the statistics are initialized correctly. Before any call to ACM
|
||||
// all fields have to be zero.
|
||||
TEST_F(VoENetEqStatsTest, InitializedToZero) {
|
||||
AudioDecodingCallStats stats;
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
|
||||
EXPECT_EQ(0, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(0, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
|
||||
EXPECT_EQ(0, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(0, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
}
|
||||
|
||||
// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
|
||||
// should result in generating silence, check the associated field.
|
||||
TEST_F(VoENetEqStatsTest, SilenceGeneratorCalled) {
|
||||
AudioDecodingCallStats stats;
|
||||
const int kInitialDelay = 100;
|
||||
|
||||
acm1_->SetInitialPlayoutDelay(kInitialDelay);
|
||||
acm2_->SetInitialPlayoutDelay(kInitialDelay);
|
||||
|
||||
AudioFrame audio_frame;
|
||||
int num_calls = 0;
|
||||
for (int time_ms = 0; time_ms < kInitialDelay;
|
||||
time_ms += kFrameSizeMs, ++num_calls) {
|
||||
InsertPacketAndPullAudio();
|
||||
}
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
|
||||
EXPECT_EQ(0, stats.calls_to_neteq);
|
||||
EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(0, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
|
||||
EXPECT_EQ(0, stats.calls_to_neteq);
|
||||
EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(0, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
}
|
||||
|
||||
// Insert some packets and pull audio. Check statistics are valid. Then,
|
||||
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
|
||||
// correctly updated.
|
||||
TEST_F(VoENetEqStatsTest, NetEqCalls) {
|
||||
AudioDecodingCallStats stats;
|
||||
const int kNumNormalCalls = 10;
|
||||
|
||||
AudioFrame audio_frame;
|
||||
for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
|
||||
InsertPacketAndPullAudio();
|
||||
}
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
|
||||
EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
|
||||
EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
|
||||
const int kNumPlc = 3;
|
||||
const int kNumPlcCng = 5;
|
||||
|
||||
// Simulate packet-loss. NetEq first performs PLC then PLC fades to CNG.
|
||||
for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
|
||||
JustPullAudio();
|
||||
}
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
|
||||
EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(kNumPlc, stats.decoded_plc);
|
||||
EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
|
||||
|
||||
ASSERT_EQ(0,
|
||||
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm2_, &stats));
|
||||
EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(kNumPlc, stats.decoded_plc);
|
||||
EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace voe
|
||||
|
||||
} // namespace webrtc
|
@@ -134,6 +134,7 @@
        'voe_audio_processing_unittest.cc',
        'voe_base_unittest.cc',
        'voe_codec_unittest.cc',
        'voe_neteq_stats_unittest.cc',
      ],
      'conditions': [
        # TODO(henrike): remove build_with_chromium==1 when the bots are