Refactored ViESyncModule.

Only style changes, will follow up with references/ptrs.

Review URL: http://webrtc-codereview.appspot.com/291007

git-svn-id: http://webrtc.googlecode.com/svn/trunk@1068 4adac7df-926f-26a2-2b94-8c16560cd09d
mflodman@webrtc.org 2011-11-30 18:31:36 +00:00
parent 68f2168978
commit 511f82eee9
2 changed files with 302 additions and 344 deletions

vie_sync_module.cc

@@ -9,324 +9,291 @@
 */
#include "vie_sync_module.h"
#include "critical_section_wrapper.h"
-#include "voe_video_sync.h"
#include "rtp_rtcp.h"
#include "trace.h"
#include "video_coding.h"
+#include "voe_video_sync.h"
namespace webrtc {
+enum { kSyncInterval = 1000};
+enum { kMaxVideoDiffMs = 80 };
+enum { kMaxAudioDiffMs = 80 };
+enum { kMaxDelay = 1500 };
ViESyncModule::ViESyncModule(int id, VideoCodingModule& vcm,
-                             RtpRtcp& rtcpModule)
-    : _dataCritsect(*CriticalSectionWrapper::CreateCriticalSection()), _id(id),
-      _vcm(vcm), _rtcpModule(rtcpModule), _voiceChannelId(-1),
-      _voiceSyncInterface(NULL), _lastSyncTime(TickTime::Now())
-{
+                             RtpRtcp& rtcp_module)
+    : data_critsect_(*CriticalSectionWrapper::CreateCriticalSection()),
+      id_(id),
+      vcm_(vcm),
+      rtcp_module_(rtcp_module),
+      voe_channel_id_(-1),
+      voe_sync_interface_(NULL),
+      last_sync_time_(TickTime::Now()) {
}
-ViESyncModule::~ViESyncModule()
-{
-    delete &_dataCritsect;
+ViESyncModule::~ViESyncModule() {
+  delete &data_critsect_;
}
-int ViESyncModule::SetVoiceChannel(int voiceChannelId,
-                                   VoEVideoSync* veSyncInterface)
-{
-    CriticalSectionScoped cs(_dataCritsect);
-    _voiceChannelId = voiceChannelId;
-    _voiceSyncInterface = veSyncInterface;
-    _rtcpModule.DeRegisterSyncModule();
+int ViESyncModule::SetVoiceChannel(int voe_channel_id,
+                                   VoEVideoSync* voe_sync_interface) {
+  CriticalSectionScoped cs(data_critsect_);
+  voe_channel_id_ = voe_channel_id;
+  voe_sync_interface_ = voe_sync_interface;
+  rtcp_module_.DeRegisterSyncModule();
-    if (!veSyncInterface)
-    {
-        _voiceChannelId = -1;
-        if (voiceChannelId >= 0) // trying to set a voice channel but no interface exist
-        {
+  if (!voe_sync_interface) {
+    voe_channel_id_ = -1;
+    if (voe_channel_id >= 0) {
+      // Trying to set a voice channel but no interface exist.
      return -1;
    }
    return 0;
  }
-    RtpRtcp* voiceRTPRTCP = NULL;
-    veSyncInterface->GetRtpRtcp(_voiceChannelId, voiceRTPRTCP);
-    return _rtcpModule.RegisterSyncModule(voiceRTPRTCP);
+  RtpRtcp* voe_rtp_rtcp = NULL;
+  voe_sync_interface->GetRtpRtcp(voe_channel_id_, voe_rtp_rtcp);
+  return rtcp_module_.RegisterSyncModule(voe_rtp_rtcp);
}
-int ViESyncModule::VoiceChannel()
-{
-    return _voiceChannelId;
+int ViESyncModule::VoiceChannel() {
+  return voe_channel_id_;
}
-// ----------------------------------------------------------------------------
-// SetNetworkDelay
-//
-// Set how long time in ms voice is ahead of video when received on the network.
-// Positive means audio is ahead of video.
-// ----------------------------------------------------------------------------
-void ViESyncModule::SetNetworkDelay(int networkDelay)
-{
-    _channelDelay.networkDelay = networkDelay;
+void ViESyncModule::SetNetworkDelay(int network_delay) {
+  channel_delay_.network_delay = network_delay;
}
// Implements Module
WebRtc_Word32 ViESyncModule::Version(WebRtc_Word8* version,
-                                     WebRtc_UWord32& remainingBufferInBytes,
-                                     WebRtc_UWord32& position) const
-{
-    if (version == NULL)
-    {
+                                     WebRtc_UWord32& remaining_buffer_in_bytes,
+                                     WebRtc_UWord32& position) const {
+  if (version == NULL) {
    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideo, -1,
                 "Invalid in argument to ViESyncModule Version()");
    return -1;
  }
-    WebRtc_Word8 ourVersion[] = "ViESyncModule 1.1.0";
-    WebRtc_UWord32 ourLength = (WebRtc_UWord32) strlen(ourVersion);
-    if (remainingBufferInBytes < ourLength + 1)
-    {
+  WebRtc_Word8 our_version[] = "ViESyncModule 1.1.0";
+  WebRtc_UWord32 our_length = (WebRtc_UWord32) strlen(our_version);
+  if (remaining_buffer_in_bytes < our_length + 1) {
    return -1;
  }
-    memcpy(version, ourVersion, ourLength);
-    version[ourLength] = '\0'; // null terminaion
-    remainingBufferInBytes -= (ourLength + 1);
-    position += (ourLength + 1);
+  memcpy(version, our_version, our_length);
+  version[our_length] = '\0';
+  remaining_buffer_in_bytes -= (our_length + 1);
+  position += (our_length + 1);
  return 0;
}
-WebRtc_Word32 ViESyncModule::ChangeUniqueId(const WebRtc_Word32 id)
-{
-    _id = id;
+WebRtc_Word32 ViESyncModule::ChangeUniqueId(const WebRtc_Word32 id) {
+  id_ = id;
  return 0;
}
-WebRtc_Word32 ViESyncModule::TimeUntilNextProcess()
-{
-    return (WebRtc_Word32) (kSyncInterval - (TickTime::Now()
-        - _lastSyncTime).Milliseconds());
+WebRtc_Word32 ViESyncModule::TimeUntilNextProcess() {
+  return (WebRtc_Word32)(kSyncInterval -
+                         (TickTime::Now() - last_sync_time_).Milliseconds());
}
// Do the lip sync.
-WebRtc_Word32 ViESyncModule::Process()
-{
-    CriticalSectionScoped cs(_dataCritsect);
-    _lastSyncTime = TickTime::Now();
+WebRtc_Word32 ViESyncModule::Process() {
+  CriticalSectionScoped cs(data_critsect_);
+  last_sync_time_ = TickTime::Now();
-    int totalVideoDelayTargetMS = _vcm.Delay();
-    WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, _id,
-                 "Video delay (JB + decoder) is %d ms", totalVideoDelayTargetMS);
+  int total_video_delay_target_ms = vcm_.Delay();
+  WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, id_,
+               "Video delay (JB + decoder) is %d ms",
+               total_video_delay_target_ms);
-    if (_voiceChannelId != -1)
-    {
-        // Get //Sync start
-        int currentAudioDelayMS = 0;
-        if (_voiceSyncInterface->GetDelayEstimate(_voiceChannelId,
-                                                  currentAudioDelayMS) != 0)
-        {
+  if (voe_channel_id_ == -1) {
+    return 0;
+  }
+  int current_audio_delay_ms = 0;
+  if (voe_sync_interface_->GetDelayEstimate(voe_channel_id_,
+                                            current_audio_delay_ms) != 0) {
    // Could not get VoE delay value, probably not a valid channel Id.
-        WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceVideo, _id,
-                     "%s: VE_GetDelayEstimate error for voiceChannel %d",
-                     __FUNCTION__, totalVideoDelayTargetMS, _voiceChannelId);
+    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceVideo, id_,
+                 "%s: VE_GetDelayEstimate error for voice_channel %d",
+                 __FUNCTION__, total_video_delay_target_ms, voe_channel_id_);
    return 0;
  }
-        int currentDiffMS = 0;
-        int videoDelayMS = 0; // Total video delay
-        if (currentAudioDelayMS > 40) // Voice Engine report delay estimates even when not started. Ignore
-        {
+  int current_diff_ms = 0;
+  // Total video delay.
+  int video_delay_ms = 0;
+  // VoiceEngine report delay estimates even when not started, ignore if the
+  // reported value is lower than 40 ms.
+  if (current_audio_delay_ms < 40) {
+    WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, id_,
+                 "A/V Sync: Audio delay < 40, skipping.");
+    return 0;
+  }
-            WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, _id,
-                         "Audio delay is: %d for voice channel: %d",
-                         currentAudioDelayMS, _voiceChannelId);
+  WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, id_,
+               "Audio delay is: %d for voice channel: %d",
+               current_audio_delay_ms, voe_channel_id_);
-            WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, _id,
-                         "Network delay diff is: %d for voice channel: %d",
-                         _channelDelay.networkDelay, _voiceChannelId);
+  WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, id_,
+               "Network delay diff is: %d for voice channel: %d",
+               channel_delay_.network_delay, voe_channel_id_);
-            // Calculate the diff between the lowest possible
-            // video delay and the current audio delay
-            currentDiffMS = totalVideoDelayTargetMS - currentAudioDelayMS
-                + _channelDelay.networkDelay;
+  // Calculate the difference between the lowest possible video delay and
+  // the current audio delay.
+  current_diff_ms = total_video_delay_target_ms - current_audio_delay_ms +
+      channel_delay_.network_delay;
-            WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, _id,
-                         "Current diff is: %d for audio channel: %d",
-                         currentDiffMS, _voiceChannelId);
+  WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, id_,
+               "Current diff is: %d for audio channel: %d",
+               current_diff_ms, voe_channel_id_);
-            if (currentDiffMS > 0)
-            {
+  if (current_diff_ms > 0) {
    // The minimum video delay is longer than the current audio delay.
    // We need to decrease extra video delay, if we have added extra delay
    // earlier, or add extra audio delay.
-                if (_channelDelay.extraVideoDelayMS > 0)
-                {
-                    // We have extra delay added to ViE.
-                    // Reduce this delay before adding delay to VE.
+    if (channel_delay_.extra_video_delay_ms > 0) {
+      // We have extra delay added to ViE. Reduce this delay before adding
+      // extra delay to VoE.
      // This is the desired delay, we can't reduce more than this.
-                    videoDelayMS = totalVideoDelayTargetMS;
+      video_delay_ms = total_video_delay_target_ms;
-                    // Check we don't reduce the delay too much
-                    if (videoDelayMS < _channelDelay.lastVideoDelayMS
-                        - kMaxVideoDiffMS)
-                    {
-                        // Too large step...
-                        videoDelayMS = _channelDelay.lastVideoDelayMS
-                            - kMaxVideoDiffMS;
-                        _channelDelay.extraVideoDelayMS = videoDelayMS
-                            - totalVideoDelayTargetMS;
-                    } else
-                    {
-                        _channelDelay.extraVideoDelayMS = 0;
+      // Check that we don't reduce the delay more than what is allowed.
+      if (video_delay_ms <
+          channel_delay_.last_video_delay_ms - kMaxVideoDiffMs) {
+        video_delay_ms =
+            channel_delay_.last_video_delay_ms - kMaxVideoDiffMs;
+        channel_delay_.extra_video_delay_ms =
+            video_delay_ms - total_video_delay_target_ms;
+      } else {
+        channel_delay_.extra_video_delay_ms = 0;
      }
-                    _channelDelay.lastVideoDelayMS = videoDelayMS;
-                    _channelDelay.lastSyncDelay = -1;
-                    _channelDelay.extraAudioDelayMS = 0;
-                } else
-                {
-                    // We have no extra video delay to remove.
-                    // Increase the audio delay
-                    if (_channelDelay.lastSyncDelay >= 0)
-                    {
-                        // We have increased the audio delay earlier,
-                        // increase it even more.
-                        int audioDiffMS = currentDiffMS / 2;
-                        if (audioDiffMS > kMaxAudioDiffMS)
-                        {
+      channel_delay_.last_video_delay_ms = video_delay_ms;
+      channel_delay_.last_sync_delay = -1;
+      channel_delay_.extra_audio_delay_ms = 0;
+    } else {  // channel_delay_.extra_video_delay_ms > 0
+      // We have no extra video delay to remove, increase the audio delay.
+      if (channel_delay_.last_sync_delay >= 0) {
+        // We have increased the audio delay earlier, increase it even more.
+        int audio_diff_ms = current_diff_ms / 2;
+        if (audio_diff_ms > kMaxAudioDiffMs) {
          // We only allow a maximum change of KMaxAudioDiffMS for audio
          // due to NetEQ maximum changes.
-                            audioDiffMS = kMaxAudioDiffMS;
+          audio_diff_ms = kMaxAudioDiffMs;
        }
-                        // Increase the audio delay
-                        _channelDelay.extraAudioDelayMS += audioDiffMS;
+        channel_delay_.extra_audio_delay_ms += audio_diff_ms;
        // Don't set a too high delay.
-                        if (_channelDelay.extraAudioDelayMS > kMaxDelay)
-                        {
-                            _channelDelay.extraAudioDelayMS = kMaxDelay;
+        if (channel_delay_.extra_audio_delay_ms > kMaxDelay) {
+          channel_delay_.extra_audio_delay_ms = kMaxDelay;
        }
        // Don't add any extra video delay.
-                        videoDelayMS = totalVideoDelayTargetMS;
-                        _channelDelay.extraVideoDelayMS = 0;
-                        _channelDelay.lastVideoDelayMS = videoDelayMS;
+        video_delay_ms = total_video_delay_target_ms;
+        channel_delay_.extra_video_delay_ms = 0;
+        channel_delay_.last_video_delay_ms = video_delay_ms;
-                        _channelDelay.lastSyncDelay = 1;
-                    } else // lastSyncDelay < 0
-                    {
+        channel_delay_.last_sync_delay = 1;
+      } else {  // channel_delay_.last_sync_delay >= 0
        // First time after a delay change, don't add any extra delay.
        // This is to not toggle back and forth too much.
-                        _channelDelay.extraAudioDelayMS = 0;
+        channel_delay_.extra_audio_delay_ms = 0;
        // Set minimum video delay
-                        videoDelayMS = totalVideoDelayTargetMS;
-                        _channelDelay.extraVideoDelayMS = 0;
-                        _channelDelay.lastVideoDelayMS = videoDelayMS;
-                        _channelDelay.lastSyncDelay = 0;
+        video_delay_ms = total_video_delay_target_ms;
+        channel_delay_.extra_video_delay_ms = 0;
+        channel_delay_.last_video_delay_ms = video_delay_ms;
+        channel_delay_.last_sync_delay = 0;
      }
    }
-            } else // if (currentDiffMS > 0)
-            {
+  } else {  // if (current_diff_ms > 0)
    // The minimum video delay is lower than the current audio delay.
    // We need to decrease possible extra audio delay, or
    // add extra video delay.
-                if (_channelDelay.extraAudioDelayMS > 0)
-                {
+    if (channel_delay_.extra_audio_delay_ms > 0) {
      // We have extra delay in VoiceEngine
      // Start with decreasing the voice delay
-                    int audioDiffMS = currentDiffMS / 2; // This is a negative value
-                    if (audioDiffMS < -1 * kMaxAudioDiffMS)
-                    {
+      int audio_diff_ms = current_diff_ms / 2;
+      if (audio_diff_ms < -1 * kMaxAudioDiffMs) {
        // Don't change the delay too much at once.
-                        audioDiffMS = -1 * kMaxAudioDiffMS;
+        audio_diff_ms = -1 * kMaxAudioDiffMs;
      }
-                    _channelDelay.extraAudioDelayMS += audioDiffMS; // Add the negative change...
+      // Add the negative difference.
+      channel_delay_.extra_audio_delay_ms += audio_diff_ms;
-                    if (_channelDelay.extraAudioDelayMS < 0)
-                    {
-                        // Negative values not allowed
-                        _channelDelay.extraAudioDelayMS = 0;
-                        _channelDelay.lastSyncDelay = 0;
-                    } else
-                    {
+      if (channel_delay_.extra_audio_delay_ms < 0) {
+        // Negative values not allowed.
+        channel_delay_.extra_audio_delay_ms = 0;
+        channel_delay_.last_sync_delay = 0;
+      } else {
        // There is more audio delay to use for the next round.
-                        _channelDelay.lastSyncDelay = 1;
+        channel_delay_.last_sync_delay = 1;
      }
      // Keep the video delay at the minimum values.
-                    videoDelayMS = totalVideoDelayTargetMS;
-                    _channelDelay.extraVideoDelayMS = 0;
-                    _channelDelay.lastVideoDelayMS = videoDelayMS;
-                } else
-                {
-                    // We have no extra delay in VoiceEngine
-                    // Increase the video delay
-                    _channelDelay.extraAudioDelayMS = 0;
+      video_delay_ms = total_video_delay_target_ms;
+      channel_delay_.extra_video_delay_ms = 0;
+      channel_delay_.last_video_delay_ms = video_delay_ms;
+    } else {  // channel_delay_.extra_audio_delay_ms > 0
+      // We have no extra delay in VoiceEngine, increase the video delay.
+      channel_delay_.extra_audio_delay_ms = 0;
-                    // Make the diff positive
-                    int videoDiffMS = -1 * currentDiffMS;
+      // Make the difference positive.
+      int video_diff_ms = -1 * current_diff_ms;
-                    // This is the desired delay we want
-                    videoDelayMS = totalVideoDelayTargetMS + videoDiffMS;
-                    if (videoDelayMS > _channelDelay.lastVideoDelayMS)
-                    {
-                        if (videoDelayMS > _channelDelay.lastVideoDelayMS
-                            + kMaxVideoDiffMS)
-                        {
+      // This is the desired delay.
+      video_delay_ms = total_video_delay_target_ms + video_diff_ms;
+      if (video_delay_ms > channel_delay_.last_video_delay_ms) {
+        if (video_delay_ms >
+            channel_delay_.last_video_delay_ms + kMaxVideoDiffMs) {
          // Don't increase the delay too much at once
-                            videoDelayMS = _channelDelay.lastVideoDelayMS
-                                + kMaxVideoDiffMS;
+          video_delay_ms =
+              channel_delay_.last_video_delay_ms + kMaxVideoDiffMs;
        }
        // Verify we don't go above the maximum allowed delay
-                        if (videoDelayMS > kMaxDelay)
-                        {
-                            videoDelayMS = kMaxDelay;
+        if (video_delay_ms > kMaxDelay) {
+          video_delay_ms = kMaxDelay;
        }
-                    } else
-                    {
-                        if (videoDelayMS < _channelDelay.lastVideoDelayMS
-                            - kMaxVideoDiffMS)
-                        {
+      } else {
+        if (video_delay_ms <
+            channel_delay_.last_video_delay_ms - kMaxVideoDiffMs) {
          // Don't decrease the delay too much at once
-                            videoDelayMS = _channelDelay.lastVideoDelayMS
-                                - kMaxVideoDiffMS;
+          video_delay_ms =
+              channel_delay_.last_video_delay_ms - kMaxVideoDiffMs;
        }
        // Verify we don't go below the minimum delay
-                        if (videoDelayMS < totalVideoDelayTargetMS)
-                        {
-                            videoDelayMS = totalVideoDelayTargetMS;
+        if (video_delay_ms < total_video_delay_target_ms) {
+          video_delay_ms = total_video_delay_target_ms;
        }
      }
      // Store the values
-                    _channelDelay.extraVideoDelayMS = videoDelayMS
-                        - totalVideoDelayTargetMS;
-                    _channelDelay.lastVideoDelayMS = videoDelayMS;
-                    _channelDelay.lastSyncDelay = -1;
-                }
+      channel_delay_.extra_video_delay_ms =
+          video_delay_ms - total_video_delay_target_ms;
+      channel_delay_.last_video_delay_ms = video_delay_ms;
+      channel_delay_.last_sync_delay = -1;
    }
  }
-        WEBRTC_TRACE(
-            webrtc::kTraceInfo,
-            webrtc::kTraceVideo,
-            _id,
-            "Sync video delay %d ms for video channel and audio delay %d for audio channel %d",
-            videoDelayMS, _channelDelay.extraAudioDelayMS,
-            _voiceChannelId);
+  WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, id_,
+    "Sync video delay %d ms for video channel and audio delay %d for audio "
+    "channel %d",
+    video_delay_ms, channel_delay_.extra_audio_delay_ms, voe_channel_id_);
-        // Set the extra audio delay
-        if (_voiceSyncInterface->SetMinimumPlayoutDelay(_voiceChannelId,
-            _channelDelay.extraAudioDelayMS) == -1)
-        {
-            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideo, _id,
+  // Set the extra audio delay.
+  if (voe_sync_interface_->SetMinimumPlayoutDelay(
+      voe_channel_id_, channel_delay_.extra_audio_delay_ms) == -1) {
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideo, id_,
                 "Error setting voice delay");
  }
-        // sanity
-        // negative not valid
-        if (videoDelayMS < 0)
-        {
-            videoDelayMS = 0;
-        }
-        totalVideoDelayTargetMS = (totalVideoDelayTargetMS > videoDelayMS) ?
-            totalVideoDelayTargetMS : videoDelayMS;
-        _vcm.SetMinimumPlayoutDelay(totalVideoDelayTargetMS);
-        WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, _id,
-                     "New Video delay target is: %d", totalVideoDelayTargetMS);
-    }
+  if (video_delay_ms < 0) {
+    video_delay_ms = 0;
+  }
+  total_video_delay_target_ms =
+      (total_video_delay_target_ms > video_delay_ms) ?
+          total_video_delay_target_ms : video_delay_ms;
+  vcm_.SetMinimumPlayoutDelay(total_video_delay_target_ms);
+  WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, id_,
+               "New Video delay target is: %d", total_video_delay_target_ms);
  return 0;
}
} // namespace webrtc
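
For readers skimming the diff, Process() above is essentially a clamped feedback rule: compare the video target delay (jitter buffer plus decoder) against the current audio delay plus the configured network offset, then move whichever stream is early toward the other by at most kMaxAudioDiffMs or kMaxVideoDiffMs per round, never past kMaxDelay. The following is a hypothetical, self-contained sketch of that rule only, not code from this commit; the names SyncState and BalanceOnce are invented, and the module's extra_video_delay_ms / last_sync_delay bookkeeping is collapsed into a single step.

// Hypothetical sketch of the delay-balancing rule; not part of WebRTC.
#include <algorithm>
#include <cstdio>

namespace {

const int kMaxVideoDiffMs = 80;   // Largest video delay change per round.
const int kMaxAudioDiffMs = 80;   // Largest audio delay change per round.
const int kMaxDelay = 1500;       // Upper bound for any requested delay.

struct SyncState {
  int extra_audio_delay_ms;
  int last_video_delay_ms;
  SyncState() : extra_audio_delay_ms(0), last_video_delay_ms(0) {}
};

// One balancing round: decides how much playout delay to request for video
// and how much extra delay to request for audio, moving in bounded steps.
int BalanceOnce(int video_target_ms, int audio_delay_ms, int network_delay_ms,
                SyncState* state) {
  const int diff_ms = video_target_ms - audio_delay_ms + network_delay_ms;
  int video_delay_ms = video_target_ms;
  if (diff_ms > 0) {
    // Video needs more time than audio: ask for extra audio delay, but move
    // at most kMaxAudioDiffMs per round and never beyond kMaxDelay.
    const int step = std::min(diff_ms / 2, kMaxAudioDiffMs);
    state->extra_audio_delay_ms =
        std::min(state->extra_audio_delay_ms + step, kMaxDelay);
  } else {
    // Audio needs more time than video: drop any extra audio delay first,
    // then request additional video delay, again in bounded steps.
    const int step = std::min(-diff_ms / 2, kMaxAudioDiffMs);
    state->extra_audio_delay_ms =
        std::max(state->extra_audio_delay_ms - step, 0);
    if (state->extra_audio_delay_ms == 0) {
      video_delay_ms = std::min(video_target_ms - diff_ms, kMaxDelay);
      video_delay_ms = std::min(
          video_delay_ms, state->last_video_delay_ms + kMaxVideoDiffMs);
      video_delay_ms = std::max(video_delay_ms, video_target_ms);
    }
  }
  state->last_video_delay_ms = video_delay_ms;
  return video_delay_ms;
}

}  // namespace

int main() {
  SyncState state;
  // Example: the VCM needs 60 ms, audio is played out after 250 ms, and the
  // audio stream arrived 120 ms ahead of the video stream on the network.
  for (int round = 0; round < 5; ++round) {
    const int video_delay_ms = BalanceOnce(60, 250, 120, &state);
    std::printf("round %d: video delay %d ms, extra audio delay %d ms\n",
                round, video_delay_ms, state.extra_audio_delay_ms);
  }
  return 0;
}

The example converges in bounded steps: the requested video delay climbs from 80 ms to the 130 ms target over two rounds instead of jumping there at once, which is the point of the per-round clamps.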

vie_sync_module.h

@@ -8,77 +8,68 @@
 * be found in the AUTHORS file in the root of the source tree.
 */
-/*
- * vie_sync_module.h
- * Responsible for doing Audio/Video sync
- */
+// ViESyncModule is responsible for synchronization audio and video for a given
+// VoE and ViE channel couple.
-#ifndef WEBRTC_VIDEO_ENGINE_MAIN_SOURCE_VIE_SYNC_MODULE_H_
-#define WEBRTC_VIDEO_ENGINE_MAIN_SOURCE_VIE_SYNC_MODULE_H_
+#ifndef WEBRTC_VIDEO_ENGINE_VIE_SYNC_MODULE_H_
+#define WEBRTC_VIDEO_ENGINE_VIE_SYNC_MODULE_H_
#include "module.h"
#include "tick_util.h"
-namespace webrtc
-{
+namespace webrtc {
class CriticalSectionWrapper;
class RtpRtcp;
class VideoCodingModule;
class VoEVideoSync;
-class ViESyncModule : public Module
-{
+class ViESyncModule : public Module {
 public:
-    enum { kSyncInterval = 1000};
-    enum { kMaxVideoDiffMS = 80 }; // Video sync
-    enum { kMaxAudioDiffMS = 80 }; // Video sync
-    enum { kMaxDelay = 1500 }; // Video sync
-    ViESyncModule(int id, VideoCodingModule& vcm,
-                  RtpRtcp& rtcpModule);
+  ViESyncModule(int id, VideoCodingModule& vcm, RtpRtcp& rtcp_module);
  ~ViESyncModule();
-    int SetVoiceChannel(int voiceChannelId, VoEVideoSync* voiceSyncInterface);
+  int SetVoiceChannel(int voe_channel_id, VoEVideoSync* voe_sync_interface);
  int VoiceChannel();
-    void SetNetworkDelay(int networkDelay);
-    // Implements Module
+  // Set how long time, in ms, voice is ahead of video when received on the
+  // network. Positive value means audio is ahead of video.
+  void SetNetworkDelay(int network_delay);
+  // Implements Module.
  virtual WebRtc_Word32 Version(WebRtc_Word8* version,
-                                  WebRtc_UWord32& remainingBufferInBytes,
+                                WebRtc_UWord32& remaining_buffer_in_bytes,
                                WebRtc_UWord32& position) const;
  virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
  virtual WebRtc_Word32 TimeUntilNextProcess();
  virtual WebRtc_Word32 Process();
 private:
-    // Critical sections
-    CriticalSectionWrapper& _dataCritsect;
-    int _id;
-    VideoCodingModule& _vcm;
-    RtpRtcp& _rtcpModule;
-    int _voiceChannelId;
-    VoEVideoSync* _voiceSyncInterface;
-    TickTime _lastSyncTime;
+  CriticalSectionWrapper& data_critsect_;
+  int id_;
+  VideoCodingModule& vcm_;
+  RtpRtcp& rtcp_module_;
+  int voe_channel_id_;
+  VoEVideoSync* voe_sync_interface_;
+  TickTime last_sync_time_;
-    struct ViESyncDelay
-    {
-        ViESyncDelay()
-        {
-            extraVideoDelayMS = 0;
-            lastVideoDelayMS = 0;
-            extraAudioDelayMS = 0;
-            lastSyncDelay = 0;
-            networkDelay = 120;
+  struct ViESyncDelay {
+    ViESyncDelay() {
+      extra_video_delay_ms = 0;
+      last_video_delay_ms = 0;
+      extra_audio_delay_ms = 0;
+      last_sync_delay = 0;
+      network_delay = 120;
    }
-        int extraVideoDelayMS;
-        int lastVideoDelayMS;
-        int extraAudioDelayMS; //audioDelayMS;
-        int lastSyncDelay;
-        int networkDelay;
+    int extra_video_delay_ms;
+    int last_video_delay_ms;
+    int extra_audio_delay_ms;
+    int last_sync_delay;
+    int network_delay;
  };
-    ViESyncDelay _channelDelay;
+  ViESyncDelay channel_delay_;
};
} // namespace webrtc
-#endif // WEBRTC_VIDEO_ENGINE_MAIN_SOURCE_VIE_SYNC_MODULE_H_
+#endif // WEBRTC_VIDEO_ENGINE_VIE_SYNC_MODULE_H_
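
For completeness, the header above is the module's whole public surface: construct it with the ViE channel's VideoCodingModule and RtpRtcp module, couple a VoE channel with SetVoiceChannel(), feed the measured audio lead via SetNetworkDelay(), and let a process thread drive TimeUntilNextProcess()/Process(). The sketch below only illustrates that call order; it assumes an existing WebRTC build in which the VideoCodingModule, RtpRtcp and VoEVideoSync objects for the channel pair already exist, and the helper name RunSyncExample is invented for illustration.

// Hypothetical wiring sketch; assumes the surrounding WebRTC objects exist.
#include <cstddef>
#include "vie_sync_module.h"

namespace webrtc {

// Couples one ViE channel with one VoE channel and runs one sync round.
int RunSyncExample(int vie_channel_id, int voe_channel_id,
                   VideoCodingModule& vcm, RtpRtcp& rtcp_module,
                   VoEVideoSync* voe_sync) {
  ViESyncModule sync(vie_channel_id, vcm, rtcp_module);
  if (sync.SetVoiceChannel(voe_channel_id, voe_sync) != 0) {
    return -1;  // No usable VoE sync interface for this channel.
  }
  // Assume RTP/RTCP timing showed audio arriving 120 ms ahead of video.
  sync.SetNetworkDelay(120);
  // Normally a process thread drives the Module interface; done inline here.
  if (sync.TimeUntilNextProcess() <= 0) {
    sync.Process();
  }
  // Detach the voice channel before the VoE interface goes away.
  sync.SetVoiceChannel(-1, NULL);
  return 0;
}

}  // namespace webrtc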