Clean up WebRTC tracing

The goals of this change are to:
1. Remove unused tracing events.
2. Organize tracing events to facilitate measurement of end-to-end latency.

The major change in this CL is to use the TRACE_EVENT_ASYNC_STEP macros so
that the flow of operations can be traced for a single frame.
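
The async trace macros (TRACE_EVENT_ASYNC_BEGIN/STEP/END, declared in
webrtc/system_wrappers/interface/trace_event.h) pair up events that share
the same category, name and id, so keying them on a per-frame id makes
every stage of one frame land on a single async track in the trace viewer,
where end-to-end latency can be read off directly. A minimal sketch of the
pattern, using the frame timestamp as the id (the helper below is
illustrative only and not part of this CL; in the CL the send side keys on
capture/render time and the receive side on the RTP timestamp):

  #include "webrtc/system_wrappers/interface/trace_event.h"

  // Illustrative sketch: trace one video frame through the pipeline,
  // keyed by its timestamp so all events join the same async track.
  void TraceOneVideoFrame(uint32_t timestamp, int64_t render_time_ms) {
    // Frame enters the pipeline.
    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", timestamp,
                             "timestamp", timestamp);
    // Each stage shows up as a named step on the same track.
    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", timestamp, "Encode");
    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", timestamp, "Extract");
    TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", timestamp,
                            "SetRenderTS", "render_time", render_time_ms);
    // Frame leaves the pipeline; BEGIN to END spans the frame's
    // end-to-end latency.
    TRACE_EVENT_ASYNC_END0("webrtc", "Video", timestamp);
  }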

R=marpan@webrtc.org, pwestin@webrtc.org, turaj@webrtc.org, wu@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1761004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4308 4adac7df-926f-26a2-2b94-8c16560cd09d
Author: hclam@chromium.org
Date:   2013-07-08 21:31:18 +0000
Parent: e80a934b36
Commit: 1a7b9b94be

15 changed files with 56 additions and 119 deletions


@@ -677,19 +677,19 @@ int32_t ACMNetEQ::RecOut(AudioFrame& audio_frame) {
   WebRtcNetEQ_ProcessingActivity processing_stats;
   WebRtcNetEQ_GetProcessingActivity(inst_[0], &processing_stats);
-  TRACE_EVENT2("webrtc", "ACM::RecOut",
-               "accelerate bgn", processing_stats.accelerate_bgn_samples,
-               "accelerate normal", processing_stats.accelerate_normal_samples);
-  TRACE_EVENT2("webrtc", "ACM::RecOut",
-               "expand bgn", processing_stats.expand_bgn_sampels,
-               "expand normal", processing_stats.expand_normal_samples);
-  TRACE_EVENT2("webrtc", "ACM::RecOut",
-               "preemptive bgn", processing_stats.preemptive_expand_bgn_samples,
-               "preemptive normal",
-               processing_stats.preemptive_expand_normal_samples);
-  TRACE_EVENT2("webrtc", "ACM::RecOut",
-               "merge bgn", processing_stats.merge_expand_bgn_samples,
-               "merge normal", processing_stats.merge_expand_normal_samples);
+  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_,
+               "ACM::RecOut accelerate_bgn=%d accelerate_normal=%d"
+               " expand_bgn=%d expand_normal=%d"
+               " preemptive_bgn=%d preemptive_normal=%d"
+               " merge_bgn=%d merge_normal=%d",
+               processing_stats.accelerate_bgn_samples,
+               processing_stats.accelerate_normal_samples,
+               processing_stats.expand_bgn_sampels,
+               processing_stats.expand_normal_samples,
+               processing_stats.preemptive_expand_bgn_samples,
+               processing_stats.preemptive_expand_normal_samples,
+               processing_stats.merge_expand_bgn_samples,
+               processing_stats.merge_expand_normal_samples);
   return 0;
 }


@@ -1326,10 +1326,6 @@ int32_t AudioCodingModuleImpl::RegisterIncomingMessagesCallback(
 // Add 10MS of raw (PCM) audio data to the encoder.
 int32_t AudioCodingModuleImpl::Add10MsData(
     const AudioFrame& audio_frame) {
-  TRACE_EVENT2("webrtc", "ACM::Add10MsData",
-               "timestamp", audio_frame.timestamp_,
-               "samples_per_channel", audio_frame.samples_per_channel_);
   if (audio_frame.samples_per_channel_ <= 0) {
     assert(false);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
@@ -1378,6 +1374,8 @@ int32_t AudioCodingModuleImpl::Add10MsData(
   if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
     return -1;
   }
+  TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Audio", ptr_frame->timestamp_,
+                           "now", clock_->TimeInMilliseconds());
   // Check whether we need an up-mix or down-mix?
   bool remix = ptr_frame->num_channels_ != send_codec_inst_.channels;
@@ -2306,11 +2304,11 @@ AudioPlayoutMode AudioCodingModuleImpl::PlayoutMode() const {
 // Automatic resample to the requested frequency.
 int32_t AudioCodingModuleImpl::PlayoutData10Ms(
     int32_t desired_freq_hz, AudioFrame* audio_frame) {
-  TRACE_EVENT_ASYNC_BEGIN0("webrtc", "ACM::PlayoutData10Ms", 0);
+  TRACE_EVENT_ASYNC_BEGIN0("webrtc", "ACM::PlayoutData10Ms", this);
   bool stereo_mode;
   if (GetSilence(desired_freq_hz, audio_frame)) {
-    TRACE_EVENT_ASYNC_END1("webrtc", "ACM::PlayoutData10Ms", 0,
+    TRACE_EVENT_ASYNC_END1("webrtc", "ACM::PlayoutData10Ms", this,
                            "silence", true);
     return 0;  // Silence is generated, return.
   }
@@ -2321,11 +2319,11 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
                  "PlayoutData failed, RecOut Failed");
     return -1;
   }
-  int seq_num;
-  uint32_t timestamp;
-  bool update_nack = nack_enabled_ &&  // Update NACK only if it is enabled.
-      neteq_.DecodedRtpInfo(&seq_num, &timestamp);
+  int decoded_seq_num;
+  uint32_t decoded_timestamp;
+  bool update_nack =
+      neteq_.DecodedRtpInfo(&decoded_seq_num, &decoded_timestamp) &&
+      nack_enabled_;  // Update NACK only if it is enabled.
   audio_frame->num_channels_ = audio_frame_.num_channels_;
   audio_frame->vad_activity_ = audio_frame_.vad_activity_;
   audio_frame->speech_type_ = audio_frame_.speech_type_;
@@ -2346,7 +2344,7 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
   if (update_nack) {
     assert(nack_.get());
-    nack_->UpdateLastDecodedPacket(seq_num, timestamp);
+    nack_->UpdateLastDecodedPacket(decoded_seq_num, decoded_timestamp);
   }
   // If we are in AV-sync and have already received an audio packet, but the
@@ -2368,8 +2366,9 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
   }
   if ((receive_freq != desired_freq_hz) && (desired_freq_hz != -1)) {
-    TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", 0,
-                           "stereo", stereo_mode, "resample", true);
+    TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", this,
+                           "seqnum", decoded_seq_num,
+                           "now", clock_->TimeInMilliseconds());
     // Resample payload_data.
     int16_t temp_len = output_resampler_.Resample10Msec(
         audio_frame_.data_, receive_freq, audio_frame->data_,
@@ -2386,8 +2385,9 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
     // Set the sampling frequency.
     audio_frame->sample_rate_hz_ = desired_freq_hz;
   } else {
-    TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", 0,
-                           "stereo", stereo_mode, "resample", false);
+    TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", this,
+                           "seqnum", decoded_seq_num,
+                           "now", clock_->TimeInMilliseconds());
     memcpy(audio_frame->data_, audio_frame_.data_,
            audio_frame_.samples_per_channel_ * audio_frame->num_channels_
            * sizeof(int16_t));


@@ -14,7 +14,6 @@
 #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/trace_event.h"
 namespace webrtc {
@@ -284,10 +283,6 @@ int32_t ReceiveStatisticsImpl::TimeUntilNextProcess() {
 int32_t ReceiveStatisticsImpl::Process() {
   incoming_bitrate_.Process();
-  TRACE_COUNTER_ID1("webrtc_rtp", "RTPReceiverBitrate", ssrc_,
-                    incoming_bitrate_.BitrateLast());
-  TRACE_COUNTER_ID1("webrtc_rtp", "RTPReceiverPacketRate", ssrc_,
-                    incoming_bitrate_.PacketRate());
   return 0;
 }


@@ -489,10 +489,6 @@ RTCPReceiver::HandleReportBlock(const RTCPUtility::RTCPPacket& rtcpPacket,
   _lastReceivedRrMs = _clock->TimeInMilliseconds();
   const RTCPPacketReportBlockItem& rb = rtcpPacket.ReportBlockItem;
-  TRACE_COUNTER_ID1("webrtc_rtp", "RRFractionLost", rb.SSRC, rb.FractionLost);
-  TRACE_COUNTER_ID1("webrtc_rtp", "RRCumulativeNumOfPacketLost",
-                    rb.SSRC, rb.CumulativeNumOfPacketsLost);
-  TRACE_COUNTER_ID1("webrtc_rtp", "RRJitter", rb.SSRC, rb.Jitter);
   reportBlock->remoteReceiveBlock.remoteSSRC = remoteSSRC;
   reportBlock->remoteReceiveBlock.sourceSSRC = rb.SSRC;
   reportBlock->remoteReceiveBlock.fractionLost = rb.FractionLost;


@@ -1116,7 +1116,6 @@ RTCPSender::BuildREMB(uint8_t* rtcpbuffer, uint32_t& pos)
         ModuleRTPUtility::AssignUWord32ToBuffer(rtcpbuffer+pos, _rembSSRC[i]);
         pos += 4;
     }
-    TRACE_COUNTER_ID1("webrtc_rtp", "RTCPRembBitrate", _SSRC, _rembBitrate);
     return 0;
 }


@@ -19,7 +19,6 @@
 #include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
 #include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
 #include "webrtc/system_wrappers/interface/trace.h"
-#include "webrtc/system_wrappers/interface/trace_event.h"
 namespace webrtc {
@@ -211,7 +210,6 @@ bool RtpReceiverImpl::IncomingRtpPacket(
     int packet_length,
     PayloadUnion payload_specific,
     bool in_order) {
-  TRACE_EVENT0("webrtc_rtp", "RTPRecv::Packet");
   // The rtp_header argument contains the parsed RTP header.
   int length = packet_length - rtp_header->paddingLength;


@@ -358,22 +358,17 @@ int32_t RTPSender::SendOutgoingData(
     return -1;
   }
-  if (frame_type == kVideoFrameKey) {
-    TRACE_EVENT_INSTANT1("webrtc_rtp", "SendKeyFrame",
-                         "timestamp", capture_timestamp);
-  } else {
-    TRACE_EVENT_INSTANT2("webrtc_rtp", "SendFrame",
-                         "timestamp", capture_timestamp,
-                         "frame_type", FrameTypeToString(frame_type));
-  }
   if (audio_configured_) {
+    TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp,
+                            "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
            frame_type == kFrameEmpty);
     return audio_->SendAudio(frame_type, payload_type, capture_timestamp,
                              payload_data, payload_size, fragmentation);
   } else {
+    TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms,
+                            "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
     if (frame_type == kFrameEmpty) {


@@ -475,9 +475,9 @@ int32_t RTPSenderAudio::SendAudio(
         }
         _lastPayloadType = payloadType;
     }  // end critical section
-    TRACE_EVENT_INSTANT2("webrtc_rtp", "Audio::Send",
-                         "timestamp", captureTimeStamp,
-                         "seqnum", _rtpSender->SequenceNumber());
+    TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp,
+                           "timestamp", _rtpSender->Timestamp(),
+                           "seqnum", _rtpSender->SequenceNumber());
     return _rtpSender->SendToNetwork(dataBuffer,
                                      payloadSize,
                                      static_cast<uint16_t>(rtpHeaderLength),


@@ -476,18 +476,14 @@ RTPSenderVideo::SendVP8(const FrameType frameType,
                          " %d", _rtpSender.SequenceNumber());
         }
     }
+    TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms,
+                           "timestamp", _rtpSender.Timestamp());
     return 0;
 }
 void RTPSenderVideo::ProcessBitrate() {
     _videoBitrate.Process();
     _fecOverheadRate.Process();
-    TRACE_COUNTER_ID1("webrtc_rtp", "VideoSendBitrate",
-                      _rtpSender.SSRC(),
-                      _videoBitrate.BitrateLast());
-    TRACE_COUNTER_ID1("webrtc_rtp", "VideoFecOverheadRate",
-                      _rtpSender.SSRC(),
-                      _fecOverheadRate.BitrateLast());
 }
 uint32_t RTPSenderVideo::VideoBitrateSent() const {


@@ -213,9 +213,6 @@ int32_t VideoCaptureImpl::DeliverCapturedFrame(I420VideoFrame& captureFrame,
     captureFrame.set_render_time_ms(TickTime::MillisecondTimestamp());
   }
-  TRACE_EVENT1("webrtc", "VC::DeliverCapturedFrame",
-               "capture_time", capture_time);
   if (captureFrame.render_time_ms() == last_capture_time_) {
     // We don't allow the same capture time for two frames, drop this one.
     return -1;


@@ -114,10 +114,6 @@ int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
                          oldest_frame->TimeStamp());
     erase(begin());
   }
-  if (empty()) {
-    TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied",
-                         "type", "CleanUpOldOrEmptyFrames");
-  }
   return drop_count;
 }
@@ -301,7 +297,6 @@ void VCMJitterBuffer::Stop() {
       free_frames_.push_back(frame_buffers_[i]);
     }
   }
-  TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied", "type", "Stop");
   crit_sect_->Leave();
   // Make sure we wake up any threads waiting on these events.
   frame_event_->Set();
@@ -320,8 +315,6 @@ void VCMJitterBuffer::Flush() {
   CriticalSectionScoped cs(crit_sect_);
   decodable_frames_.Reset(&free_frames_);
   incomplete_frames_.Reset(&free_frames_);
-  TRACE_EVENT_INSTANT2("webrtc", "JB::FrameListEmptied", "type", "Flush",
-                       "frames", max_number_of_frames_);
   last_decoded_state_.Reset();  // TODO(mikhal): sync reset.
   num_not_decodable_packets_ = 0;
   frame_event_->Reset();
@@ -418,8 +411,6 @@ void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate,
     incoming_frame_rate_ = 0;
     incoming_bit_rate_ = 0;
   }
-  TRACE_COUNTER1("webrtc", "JBIncomingFramerate", incoming_frame_rate_);
-  TRACE_COUNTER1("webrtc", "JBIncomingBitrate", incoming_bit_rate_);
 }
 // Answers the question:
@@ -444,7 +435,6 @@ bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
 // complete frame, |max_wait_time_ms| decided by caller.
 bool VCMJitterBuffer::NextCompleteTimestamp(
     uint32_t max_wait_time_ms, uint32_t* timestamp) {
-  TRACE_EVENT0("webrtc", "JB::NextCompleteTimestamp");
   crit_sect_->Enter();
   if (!running_) {
     crit_sect_->Leave();
@@ -493,7 +483,6 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
 }
 bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
-  TRACE_EVENT0("webrtc", "JB::NextMaybeIncompleteTimestamp");
   CriticalSectionScoped cs(crit_sect_);
   if (!running_) {
     return false;
@@ -526,7 +515,6 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
 }
 VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
-  TRACE_EVENT0("webrtc", "JB::ExtractAndSetDecode");
   CriticalSectionScoped cs(crit_sect_);
   if (!running_) {
@@ -542,10 +530,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
     else
       return NULL;
   }
-  if (!NextFrame()) {
-    TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied",
-                         "type", "ExtractAndSetDecode");
-  }
+  TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", timestamp, "Extract");
   // Frame pulled out from jitter buffer, update the jitter estimate.
   const bool retransmitted = (frame->GetNackCount() > 0);
   if (retransmitted) {
@@ -598,10 +583,6 @@ VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
     if (packet.sizeBytes > 0) {
       num_discarded_packets_++;
       num_consecutive_old_packets_++;
-      TRACE_EVENT_INSTANT2("webrtc", "JB::OldPacketDropped",
-                           "seqnum", packet.seqNum,
-                           "timestamp", packet.timestamp);
-      TRACE_COUNTER1("webrtc", "JBDroppedOldPackets", num_discarded_packets_);
     }
     // Update last decoded sequence number if the packet arrived late and
     // belongs to a frame with a timestamp equal to the last decoded
@@ -674,8 +655,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
     // This packet belongs to an old, already decoded frame, we want to update
     // the last decoded sequence number.
     last_decoded_state_.UpdateOldPacket(&packet);
-    TRACE_EVENT_INSTANT1("webrtc", "JB::DropLateFrame",
-                         "timestamp", frame->TimeStamp());
     drop_count_++;
     // Flush() if this happens consistently.
     num_consecutive_old_frames_++;
@@ -717,6 +696,10 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
   buffer_return = frame->InsertPacket(packet, now_ms,
                                       decode_with_errors_,
                                       rtt_ms_);
+  if (!frame->GetCountedFrame()) {
+    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
+                             "timestamp", frame->TimeStamp());
+  }
   ret = buffer_return;
   if (buffer_return > 0) {
     incoming_bit_count_ += packet.sizeBytes << 3;
@@ -981,16 +964,12 @@ uint16_t* VCMJitterBuffer::GetNackList(uint16_t* nack_list_size,
     }
   }
   if (TooLargeNackList()) {
-    TRACE_EVENT_INSTANT1("webrtc", "JB::NackListTooLarge",
-                         "size", missing_sequence_numbers_.size());
     *request_key_frame = !HandleTooLargeNackList();
   }
   if (max_incomplete_time_ms_ > 0) {
     int non_continuous_incomplete_duration =
         NonContinuousOrIncompleteDuration();
     if (non_continuous_incomplete_duration > 90 * max_incomplete_time_ms_) {
-      TRACE_EVENT_INSTANT1("webrtc", "JB::NonContinuousOrIncompleteDuration",
-                           "duration", non_continuous_incomplete_duration);
       LOG_F(LS_INFO) << "Too long non-decodable duration: " <<
           non_continuous_incomplete_duration << " > " <<
           90 * max_incomplete_time_ms_;
@@ -1102,8 +1081,6 @@ bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
 void VCMJitterBuffer::DropPacketsFromNackList(
     uint16_t last_decoded_sequence_number) {
-  TRACE_EVENT_INSTANT1("webrtc", "JB::DropPacketsFromNackList",
-                       "seqnum", last_decoded_sequence_number);
   // Erase all sequence numbers from the NACK list which we won't need any
   // longer.
   missing_sequence_numbers_.erase(missing_sequence_numbers_.begin(),
@@ -1170,10 +1147,6 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
     dropped_frames += decodable_frames_.RecycleFramesUntilKeyFrame(
         &key_frame_it, &free_frames_);
     key_frame_found = key_frame_it != decodable_frames_.end();
-    if (!key_frame_found) {
-      TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied", "type",
-                           "RecycleFramesUntilKeyFrame");
-    }
   }
   drop_count_ += dropped_frames;
   if (dropped_frames) {
@@ -1198,21 +1171,17 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
 // Must be called under the critical section |crit_sect_|.
 void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
-  bool frame_counted = false;
   if (!frame.GetCountedFrame()) {
     // Ignore ACK frames.
     incoming_frame_count_++;
-    frame_counted = true;
   }
   if (frame.FrameType() == kVideoFrameKey) {
-    TRACE_EVENT_INSTANT2("webrtc", "JB::AddKeyFrame",
-                         "timestamp", frame.TimeStamp(),
-                         "retransmit", !frame_counted);
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
+                            frame.TimeStamp(), "KeyComplete");
   } else {
-    TRACE_EVENT_INSTANT2("webrtc", "JB::AddFrame",
-                         "timestamp", frame.TimeStamp(),
-                         "retransmit", !frame_counted);
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
+                            frame.TimeStamp(), "DeltaComplete");
   }
   // Update receive statistics. We count all layers, thus when you use layers
@@ -1249,7 +1218,6 @@ void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
   drop_count_ +=
       incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
                                                  &free_frames_);
-  TRACE_COUNTER1("webrtc", "JBDroppedLateFrames", drop_count_);
   if (!last_decoded_state_.in_initial_state()) {
     DropPacketsFromNackList(last_decoded_state_.sequence_num());
   }


@@ -123,7 +123,6 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(
     int64_t& next_render_time_ms,
     bool render_timing,
     VCMReceiver* dual_receiver) {
-  TRACE_EVENT0("webrtc", "Recv::FrameForDecoding");
   const int64_t start_time_ms = clock_->TimeInMilliseconds();
   uint32_t frame_timestamp = 0;
   // Exhaust wait time to get a complete frame for decoding.
@@ -183,7 +182,6 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(
   if (!render_timing) {
     // Decode frame as close as possible to the render timestamp.
-    TRACE_EVENT0("webrtc", "FrameForRendering");
     const int32_t available_wait_time = max_wait_time_ms -
         static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
     uint16_t new_max_wait_time = static_cast<uint16_t>(
@@ -207,6 +205,8 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(
     return NULL;
   }
   frame->SetRenderTime(next_render_time_ms);
+  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
+                          "SetRenderTS", "render_time", next_render_time_ms);
   if (dual_receiver != NULL) {
     dual_receiver->UpdateState(*frame);
   }


@@ -890,7 +890,6 @@ int VideoCodingModuleImpl::RegisterRenderBufferSizeCallback(
 int32_t
 VideoCodingModuleImpl::Decode(uint16_t maxWaitTimeMs)
 {
-    TRACE_EVENT1("webrtc", "VCM::Decode", "max_wait", maxWaitTimeMs);
     int64_t nextRenderTimeMs;
     {
         CriticalSectionScoped cs(_receiveCritSect);
@@ -1096,9 +1095,8 @@ VideoCodingModuleImpl::DecodeDualFrame(uint16_t maxWaitTimeMs)
 int32_t
 VideoCodingModuleImpl::Decode(const VCMEncodedFrame& frame)
 {
-    TRACE_EVENT2("webrtc", "Decode",
-                 "timestamp", frame.TimeStamp(),
-                 "type", frame.FrameType());
+    TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame.TimeStamp(),
+                            "Decode", "type", frame.FrameType());
     // Change decoder if payload type has changed
     const bool renderTimingBefore = _codecDataBase.SupportsRenderScheduling();
     _decoder = _codecDataBase.GetDecoder(frame.PayloadType(),
@@ -1161,6 +1159,7 @@ VideoCodingModuleImpl::Decode(const VCMEncodedFrame& frame)
             break;
         }
     }
+    TRACE_EVENT_ASYNC_END0("webrtc", "Video", frame.TimeStamp());
     return ret;
 }
@@ -1248,10 +1247,6 @@ VideoCodingModuleImpl::IncomingPacket(const uint8_t* incomingPayload,
     if (rtpInfo.frameType == kVideoFrameKey) {
         TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame",
                      "seqnum", rtpInfo.header.sequenceNumber);
-    } else {
-        TRACE_EVENT2("webrtc", "VCM::Packet",
-                     "seqnum", rtpInfo.header.sequenceNumber,
-                     "type", rtpInfo.frameType);
     }
     if (incomingPayload == NULL) {
         // The jitter buffer doesn't handle non-zero payload lengths for packets


@@ -335,8 +335,8 @@ void ViECapturer::OnIncomingCapturedFrame(const int32_t capture_id,
   // the camera, and not when the camera actually captured the frame.
   video_frame.set_render_time_ms(video_frame.render_time_ms() - FrameDelay());
-  TRACE_EVENT_INSTANT1("webrtc", "VC::OnIncomingCapturedFrame",
-                       "render_time", video_frame.render_time_ms());
+  TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", video_frame.render_time_ms(),
+                           "render_time", video_frame.render_time_ms());
   captured_frame_.SwapFrame(&video_frame);
   capture_event_.Set();


@@ -566,10 +566,8 @@ void ViEEncoder::DeliverFrame(int id,
       kMsToRtpTimestamp *
       static_cast<uint32_t>(video_frame->render_time_ms());
-  TRACE_EVENT2("webrtc", "VE::DeliverFrame",
-               "timestamp", time_stamp,
-               "render_time", video_frame->render_time_ms());
+  TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", video_frame->render_time_ms(),
+                          "Encode");
   video_frame->set_timestamp(time_stamp);
   {
     CriticalSectionScoped cs(callback_cs_.get());