Cleanup WebRTC tracing

The goal of this change is to:
1. Remove unused tracing events.
2. Organize tracing events to facilitate measurement of end-to-end latency.

The major change in this CL is to use ASYNC_STEP so that the flow of
operations can be traced for the same frame.

R=marpan@webrtc.org, pwestin@webrtc.org, turaj@webrtc.org, wu@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1761004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4308 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
hclam@chromium.org
2013-07-08 21:31:18 +00:00
parent e80a934b36
commit 1a7b9b94be
15 changed files with 56 additions and 119 deletions

View File

@@ -677,19 +677,19 @@ int32_t ACMNetEQ::RecOut(AudioFrame& audio_frame) {
WebRtcNetEQ_ProcessingActivity processing_stats;
WebRtcNetEQ_GetProcessingActivity(inst_[0], &processing_stats);
TRACE_EVENT2("webrtc", "ACM::RecOut",
"accelerate bgn", processing_stats.accelerate_bgn_samples,
"accelerate normal", processing_stats.accelerate_normal_samples);
TRACE_EVENT2("webrtc", "ACM::RecOut",
"expand bgn", processing_stats.expand_bgn_sampels,
"expand normal", processing_stats.expand_normal_samples);
TRACE_EVENT2("webrtc", "ACM::RecOut",
"preemptive bgn", processing_stats.preemptive_expand_bgn_samples,
"preemptive normal",
processing_stats.preemptive_expand_normal_samples);
TRACE_EVENT2("webrtc", "ACM::RecOut",
"merge bgn", processing_stats.merge_expand_bgn_samples,
"merge normal", processing_stats.merge_expand_normal_samples);
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_,
"ACM::RecOut accelerate_bgn=%d accelerate_normal=%d"
" expand_bgn=%d expand_normal=%d"
" preemptive_bgn=%d preemptive_normal=%d"
" merge_bgn=%d merge_normal=%d",
processing_stats.accelerate_bgn_samples,
processing_stats.accelerate_normal_samples,
processing_stats.expand_bgn_sampels,
processing_stats.expand_normal_samples,
processing_stats.preemptive_expand_bgn_samples,
processing_stats.preemptive_expand_normal_samples,
processing_stats.merge_expand_bgn_samples,
processing_stats.merge_expand_normal_samples);
return 0;
}

View File

@@ -1326,10 +1326,6 @@ int32_t AudioCodingModuleImpl::RegisterIncomingMessagesCallback(
// Add 10MS of raw (PCM) audio data to the encoder.
int32_t AudioCodingModuleImpl::Add10MsData(
const AudioFrame& audio_frame) {
TRACE_EVENT2("webrtc", "ACM::Add10MsData",
"timestamp", audio_frame.timestamp_,
"samples_per_channel", audio_frame.samples_per_channel_);
if (audio_frame.samples_per_channel_ <= 0) {
assert(false);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
@@ -1378,6 +1374,8 @@ int32_t AudioCodingModuleImpl::Add10MsData(
if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
return -1;
}
TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Audio", ptr_frame->timestamp_,
"now", clock_->TimeInMilliseconds());
// Check whether we need an up-mix or down-mix?
bool remix = ptr_frame->num_channels_ != send_codec_inst_.channels;
@@ -2306,11 +2304,11 @@ AudioPlayoutMode AudioCodingModuleImpl::PlayoutMode() const {
// Automatic resample to the requested frequency.
int32_t AudioCodingModuleImpl::PlayoutData10Ms(
int32_t desired_freq_hz, AudioFrame* audio_frame) {
TRACE_EVENT_ASYNC_BEGIN0("webrtc", "ACM::PlayoutData10Ms", 0);
TRACE_EVENT_ASYNC_BEGIN0("webrtc", "ACM::PlayoutData10Ms", this);
bool stereo_mode;
if (GetSilence(desired_freq_hz, audio_frame)) {
TRACE_EVENT_ASYNC_END1("webrtc", "ACM::PlayoutData10Ms", 0,
TRACE_EVENT_ASYNC_END1("webrtc", "ACM::PlayoutData10Ms", this,
"silence", true);
return 0; // Silence is generated, return.
}
@@ -2321,11 +2319,11 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
"PlayoutData failed, RecOut Failed");
return -1;
}
int seq_num;
uint32_t timestamp;
bool update_nack = nack_enabled_ && // Update NACK only if it is enabled.
neteq_.DecodedRtpInfo(&seq_num, &timestamp);
int decoded_seq_num;
uint32_t decoded_timestamp;
bool update_nack =
neteq_.DecodedRtpInfo(&decoded_seq_num, &decoded_timestamp) &&
nack_enabled_; // Update NACK only if it is enabled.
audio_frame->num_channels_ = audio_frame_.num_channels_;
audio_frame->vad_activity_ = audio_frame_.vad_activity_;
audio_frame->speech_type_ = audio_frame_.speech_type_;
@@ -2346,7 +2344,7 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
if (update_nack) {
assert(nack_.get());
nack_->UpdateLastDecodedPacket(seq_num, timestamp);
nack_->UpdateLastDecodedPacket(decoded_seq_num, decoded_timestamp);
}
// If we are in AV-sync and have already received an audio packet, but the
@@ -2368,8 +2366,9 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
}
if ((receive_freq != desired_freq_hz) && (desired_freq_hz != -1)) {
TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", 0,
"stereo", stereo_mode, "resample", true);
TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", this,
"seqnum", decoded_seq_num,
"now", clock_->TimeInMilliseconds());
// Resample payload_data.
int16_t temp_len = output_resampler_.Resample10Msec(
audio_frame_.data_, receive_freq, audio_frame->data_,
@@ -2386,8 +2385,9 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
// Set the sampling frequency.
audio_frame->sample_rate_hz_ = desired_freq_hz;
} else {
TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", 0,
"stereo", stereo_mode, "resample", false);
TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", this,
"seqnum", decoded_seq_num,
"now", clock_->TimeInMilliseconds());
memcpy(audio_frame->data_, audio_frame_.data_,
audio_frame_.samples_per_channel_ * audio_frame->num_channels_
* sizeof(int16_t));