Adding a receive-side API for buffering mode.

At the same time, renaming the send-side API.

Review URL: https://webrtc-codereview.appspot.com/1104004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3525 4adac7df-926f-26a2-2b94-8c16560cd09d
mikhal@webrtc.org 2013-02-15 23:22:18 +00:00
parent 47fe5736c1
commit ef9f76a59d
28 changed files with 447 additions and 202 deletions

View File

@ -553,6 +553,10 @@ public:
virtual void SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack) = 0;
// Sets a minimum desired delay for the VCM receiver. Video rendering will be
// delayed by at least desired_delay_ms.
virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
// Enables recording of debugging information.
virtual int StartDebugRecording(const char* file_name_utf8) = 0;
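
As a usage sketch (the helper name and value here are illustrative, not part of this change), an application wanting two seconds of receive-side buffering could call the new method on its VCM instance:

// Request at least 2000 ms of buffering before rendering. Returns 0 on
// success, -1 if the delay is negative or above the receiver's maximum.
int ConfigureBufferedPlayback(webrtc::VideoCodingModule* vcm) {
  return vcm->SetMinReceiverDelay(2000);
}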

View File

@ -772,10 +772,9 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(VCMEncodedFrame* encoded_frame,
return ret;
}
void VCMJitterBuffer::EnableMaxJitterEstimate(bool enable,
uint32_t initial_delay_ms) {
void VCMJitterBuffer::SetMaxJitterEstimate(uint32_t initial_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
jitter_estimate_.EnableMaxJitterEstimate(enable, initial_delay_ms);
jitter_estimate_.SetMaxJitterEstimate(initial_delay_ms);
}
uint32_t VCMJitterBuffer::EstimatedJitterMs() {

View File

@ -127,10 +127,10 @@ class VCMJitterBuffer {
VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame,
const VCMPacket& packet);
// Enable a max filter on the jitter estimate, and setting of the initial
// delay (only when in max mode). When disabled (default), the last jitter
// Enable a max filter on the jitter estimate by setting an initial
// non-zero delay. When set to zero (default), the last jitter
// estimate will be used.
void EnableMaxJitterEstimate(bool enable, uint32_t initial_delay_ms);
void SetMaxJitterEstimate(uint32_t initial_delay_ms);
// Returns the estimated jitter in milliseconds.
uint32_t EstimatedJitterMs();
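
As the unit test further down exercises: after SetMaxJitterEstimate(2000u), EstimatedJitterMs() reports 2000 ms and, while in max mode, never decreases; passing zero keeps the default last-estimate behavior.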

View File

@ -15,7 +15,7 @@
namespace webrtc {
enum { kMaxNumberOfFrames = 100 };
enum { kMaxNumberOfFrames = 300 };
enum { kStartNumberOfFrames = 6 };
enum { kMaxVideoDelayMs = 2000 };

View File

@ -277,25 +277,15 @@ TEST_F(TestRunningJitterBuffer, JitterEstimateMode) {
InsertFrame(kVideoFrameDelta);
EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
// Set kMaxEstimate with a 2-second initial delay.
jitter_buffer_->EnableMaxJitterEstimate(true, 2000u);
jitter_buffer_->SetMaxJitterEstimate(2000u);
EXPECT_EQ(2000u, jitter_buffer_->EstimatedJitterMs());
InsertFrame(kVideoFrameDelta);
EXPECT_EQ(2000u, jitter_buffer_->EstimatedJitterMs());
// Set kMaxEstimate with a 0 ms initial delay.
jitter_buffer_->EnableMaxJitterEstimate(true, 0u);
EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
// Jitter cannot decrease.
InsertFrames(2, kVideoFrameDelta);
uint32_t je1 = jitter_buffer_->EstimatedJitterMs();
InsertFrames(2, kVideoFrameDelta);
EXPECT_GE(je1, jitter_buffer_->EstimatedJitterMs());
// Set kLastEstimate mode (initial delay is arbitrary in this case and will
// be ignored).
jitter_buffer_->EnableMaxJitterEstimate(false, 2000u);
EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
InsertFrames(10, kVideoFrameDelta);
EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
}
TEST_F(TestJitterBufferNack, TestEmptyPackets) {

View File

@ -409,10 +409,9 @@ VCMJitterEstimator::UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes)
}
}
void VCMJitterEstimator::EnableMaxJitterEstimate(bool enable,
uint32_t initial_delay_ms)
void VCMJitterEstimator::SetMaxJitterEstimate(uint32_t initial_delay_ms)
{
if (enable) {
if (initial_delay_ms > 0) {
_maxJitterEstimateMs = initial_delay_ms;
_jitterEstimateMode = kMaxEstimate;
} else {

View File

@ -64,10 +64,10 @@ public:
void UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes);
// Enable a max filter on the jitter estimate, and setting of the initial
// delay (only when in max mode). When disabled (default), the last jitter
// Set a max filter on the jitter estimate by specifying an initial
// non-zero delay. When set to zero (default), the last jitter
// estimate will be used.
void EnableMaxJitterEstimate(bool enable, uint32_t initial_delay_ms);
void SetMaxJitterEstimate(uint32_t initial_delay_ms);
// A constant describing the delay from the jitter buffer
// to the delay on the receiving side which is not accounted

View File

@ -21,6 +21,8 @@
namespace webrtc {
enum { kMaxReceiverDelayMs = 10000 };
VCMReceiver::VCMReceiver(VCMTiming* timing,
Clock* clock,
int32_t vcm_id,
@ -34,7 +36,8 @@ VCMReceiver::VCMReceiver(VCMTiming* timing,
jitter_buffer_(clock_, vcm_id, receiver_id, master),
timing_(timing),
render_wait_event_(),
state_(kPassive) {}
state_(kPassive),
max_video_delay_ms_(kMaxVideoDelayMs) {}
VCMReceiver::~VCMReceiver() {
render_wait_event_.Set();
@ -108,20 +111,21 @@ int32_t VCMReceiver::InsertPacket(const VCMPacket& packet, uint16_t frame_width,
jitter_buffer_.Flush();
timing_->Reset(clock_->TimeInMilliseconds());
return VCM_FLUSH_INDICATOR;
} else if (render_time_ms < now_ms - kMaxVideoDelayMs) {
} else if (render_time_ms < now_ms - max_video_delay_ms_) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"This frame should have been rendered more than %u ms ago."
"Flushing jitter buffer and resetting timing.",
kMaxVideoDelayMs);
max_video_delay_ms_);
jitter_buffer_.Flush();
timing_->Reset(clock_->TimeInMilliseconds());
return VCM_FLUSH_INDICATOR;
} else if (timing_->TargetVideoDelay() > kMaxVideoDelayMs) {
} else if (static_cast<int>(timing_->TargetVideoDelay()) >
max_video_delay_ms_) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"More than %u ms target delay. Flushing jitter buffer and"
"resetting timing.", kMaxVideoDelayMs);
"resetting timing.", max_video_delay_ms_);
jitter_buffer_.Flush();
timing_->Reset(clock_->TimeInMilliseconds());
return VCM_FLUSH_INDICATOR;
@ -402,6 +406,17 @@ VCMReceiverState VCMReceiver::State() const {
return state_;
}
int VCMReceiver::SetMinReceiverDelay(int desired_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
if (desired_delay_ms < 0 || desired_delay_ms > kMaxReceiverDelayMs) {
return -1;
}
jitter_buffer_.SetMaxJitterEstimate(desired_delay_ms);
max_video_delay_ms_ = desired_delay_ms + kMaxVideoDelayMs;
timing_->SetMaxVideoDelay(max_video_delay_ms_);
return 0;
}
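// To make the arithmetic concrete: with kMaxReceiverDelayMs = 10000 and
// kMaxVideoDelayMs = 2000, a desired_delay_ms of 3000 passes the bounds
// check, pins the jitter estimate at 3000 ms, and raises max_video_delay_ms_
// to 3000 + 2000 = 5000 ms, so the flush paths above only trigger once
// frames run more than five seconds late.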
void VCMReceiver::UpdateState(VCMReceiverState new_state) {
CriticalSectionScoped cs(crit_sect_);
assert(!(state_ == kPassive && new_state == kWaitForPrimaryDecode));

View File

@ -69,6 +69,9 @@ class VCMReceiver {
VCMReceiver& dual_receiver) const;
VCMReceiverState State() const;
// Receiver video delay.
int SetMinReceiverDelay(int desired_delay_ms);
private:
VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
int64_t nextrender_time_ms,
@ -90,6 +93,7 @@ class VCMReceiver {
VCMTiming* timing_;
VCMEvent render_wait_event_;
VCMReceiverState state_;
int max_video_delay_ms_;
static int32_t receiver_id_counter_;
};

View File

@ -34,7 +34,8 @@ _renderDelayMs(kDefaultRenderDelayMs),
_minTotalDelayMs(0),
_requiredDelayMs(0),
_currentDelayMs(0),
_prevFrameTimestamp(0)
_prevFrameTimestamp(0),
_maxVideoDelayMs(kMaxVideoDelayMs)
{
if (masterTiming == NULL)
{
@ -131,7 +132,7 @@ void VCMTiming::UpdateCurrentDelay(WebRtc_UWord32 frameTimestamp)
WebRtc_Word64 delayDiffMs = static_cast<WebRtc_Word64>(targetDelayMs) -
_currentDelayMs;
// Never change the delay with more than 100 ms every second. If we're changing the
// delay in too large steps we will get noticable freezes. By limiting the change we
// delay in too large steps we will get noticeable freezes. By limiting the change we
// can increase the delay in smaller steps, which will be experienced as the video is
// played in slow motion. When lowering the delay the video will be played at a faster
// pace.
@ -249,7 +250,7 @@ VCMTiming::RenderTimeMsInternal(WebRtc_UWord32 frameTimestamp, WebRtc_Word64 now
{
WebRtc_Word64 estimatedCompleteTimeMs =
_tsExtrapolator->ExtrapolateLocalTime(frameTimestamp);
if (estimatedCompleteTimeMs - nowMs > kMaxVideoDelayMs)
if (estimatedCompleteTimeMs - nowMs > _maxVideoDelayMs)
{
if (_master)
{
@ -323,6 +324,12 @@ VCMTiming::EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const
return static_cast<WebRtc_Word32>(availableProcessingTimeMs) - maxDecodeTimeMs > 0;
}
void VCMTiming::SetMaxVideoDelay(int maxVideoDelayMs)
{
CriticalSectionScoped cs(_critSect);
_maxVideoDelayMs = maxVideoDelayMs;
}
WebRtc_UWord32
VCMTiming::TargetVideoDelay() const
{

View File

@ -82,6 +82,9 @@ public:
// certain amount of processing time.
bool EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const;
// Set the max allowed video delay.
void SetMaxVideoDelay(int maxVideoDelayMs);
enum { kDefaultRenderDelayMs = 10 };
enum { kDelayMaxChangeMsPerS = 100 };
@ -104,6 +107,7 @@ private:
WebRtc_UWord32 _requiredDelayMs;
WebRtc_UWord32 _currentDelayMs;
WebRtc_UWord32 _prevFrameTimestamp;
int _maxVideoDelayMs;
};
} // namespace webrtc

View File

@ -1389,6 +1389,10 @@ void VideoCodingModuleImpl::SetNackSettings(
max_packet_age_to_nack);
}
int VideoCodingModuleImpl::SetMinReceiverDelay(int desired_delay_ms) {
return _receiver.SetMinReceiverDelay(desired_delay_ms);
}
int VideoCodingModuleImpl::StartDebugRecording(const char* file_name_utf8) {
CriticalSectionScoped cs(_sendCritSect);
_encoderInputFile = fopen(file_name_utf8, "wb");

View File

@ -262,6 +262,9 @@ public:
virtual void SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack);
// Set the video delay for the receiver (default = 0).
virtual int SetMinReceiverDelay(int desired_delay_ms);
// Enables recording of debugging information.
virtual int StartDebugRecording(const char* file_name_utf8);

View File

@ -287,4 +287,11 @@ TEST_F(TestVideoCodingModule, PaddingOnlyAndVideo) {
}
}
TEST_F(TestVideoCodingModule, ReceiverDelay) {
EXPECT_EQ(0, vcm_->SetMinReceiverDelay(0));
EXPECT_EQ(0, vcm_->SetMinReceiverDelay(5000));
EXPECT_EQ(-1, vcm_->SetMinReceiverDelay(-100));
EXPECT_EQ(-1, vcm_->SetMinReceiverDelay(10010));
}
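The rejected values follow directly from the receiver bound: kMaxReceiverDelayMs is 10000 ms, so 10010 fails, and negative delays are always refused.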
} // namespace webrtc

View File

@ -199,11 +199,15 @@ class WEBRTC_DLLEXPORT ViERTP_RTCP {
const unsigned char payload_typeRED,
const unsigned char payload_typeFEC) = 0;
// Enables send side support for delayed video streaming (actual delay will
// Sets send side support for delayed video buffering (actual delay will
// be exhibited on the receiver side).
// Target delay should be set to zero for real-time mode.
virtual int EnableSenderStreamingMode(int video_channel,
int target_delay_ms) = 0;
virtual int SetSenderBufferingMode(int video_channel,
int target_delay_ms) = 0;
// Sets receive side support for delayed video buffering. Target delay should
// be set to zero for real-time mode.
virtual int SetReceiverBufferingMode(int video_channel,
int target_delay_ms) = 0;
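
A minimal sketch of the renamed pair in use (the wrapper function and error handling are illustrative; channel setup is omitted):

// Put both directions of a channel into buffering mode with a 2 s target.
// Passing 0 instead selects real-time mode on each side.
void EnableBufferedStreaming(webrtc::ViERTP_RTCP* rtp_rtcp, int video_channel) {
  const int target_delay_ms = 2000;
  if (rtp_rtcp->SetSenderBufferingMode(video_channel, target_delay_ms) != 0 ||
      rtp_rtcp->SetReceiverBufferingMode(video_channel, target_delay_ms) != 0) {
    // Out-of-range delay or invalid channel id.
  }
}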
// This function enables RTCP key frame requests.
virtual int SetKeyFrameRequestMethod(

View File

@ -20,7 +20,7 @@ namespace webrtc {
const int kMaxVideoDiffMs = 80;
const int kMaxAudioDiffMs = 80;
const int kMaxDelay = 1500;
const int kMaxDeltaDelayMs = 1500;
struct ViESyncDelay {
ViESyncDelay() {
@ -42,7 +42,8 @@ StreamSynchronization::StreamSynchronization(int audio_channel_id,
int video_channel_id)
: channel_delay_(new ViESyncDelay),
audio_channel_id_(audio_channel_id),
video_channel_id_(video_channel_id) {}
video_channel_id_(video_channel_id),
base_target_delay_ms_(0) {}
StreamSynchronization::~StreamSynchronization() {
delete channel_delay_;
@ -76,7 +77,8 @@ bool StreamSynchronization::ComputeRelativeDelay(
*relative_delay_ms = video_measurement.latest_receive_time_ms -
audio_measurement.latest_receive_time_ms -
(video_last_capture_time_ms - audio_last_capture_time_ms);
if (*relative_delay_ms > 1000 || *relative_delay_ms < -1000) {
if (*relative_delay_ms > kMaxDeltaDelayMs ||
*relative_delay_ms < -kMaxDeltaDelayMs) {
return false;
}
return true;
@ -98,11 +100,10 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, video_channel_id_,
"Current diff is: %d for audio channel: %d",
relative_delay_ms, audio_channel_id_);
int current_diff_ms = *total_video_delay_target_ms - current_audio_delay_ms +
relative_delay_ms;
int video_delay_ms = 0;
int video_delay_ms = base_target_delay_ms_;
if (current_diff_ms > 0) {
// The minimum video delay is longer than the current audio delay.
// We need to decrease extra video delay, if we have added extra delay
@ -126,7 +127,7 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
}
channel_delay_->last_video_delay_ms = video_delay_ms;
channel_delay_->last_sync_delay = -1;
channel_delay_->extra_audio_delay_ms = 0;
channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
} else { // channel_delay_->extra_video_delay_ms > 0
// We have no extra video delay to remove, increase the audio delay.
if (channel_delay_->last_sync_delay >= 0) {
@ -137,12 +138,14 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
// due to NetEQ maximum changes.
audio_diff_ms = kMaxAudioDiffMs;
}
// Increase the audio delay
// Increase the audio delay.
channel_delay_->extra_audio_delay_ms += audio_diff_ms;
// Don't set a too high delay.
if (channel_delay_->extra_audio_delay_ms > kMaxDelay) {
channel_delay_->extra_audio_delay_ms = kMaxDelay;
if (channel_delay_->extra_audio_delay_ms >
base_target_delay_ms_ + kMaxDeltaDelayMs) {
channel_delay_->extra_audio_delay_ms =
base_target_delay_ms_ + kMaxDeltaDelayMs;
}
// Don't add any extra video delay.
@ -153,7 +156,7 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
} else { // channel_delay_->last_sync_delay >= 0
// First time after a delay change, don't add any extra delay.
// This is to not toggle back and forth too much.
channel_delay_->extra_audio_delay_ms = 0;
channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
// Set minimum video delay
video_delay_ms = *total_video_delay_target_ms;
channel_delay_->extra_video_delay_ms = 0;
@ -161,14 +164,13 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
channel_delay_->last_sync_delay = 0;
}
}
} else { // if (current_diffMS > 0)
} else { // if (current_diff_ms > 0)
// The minimum video delay is lower than the current audio delay.
// We need to decrease possible extra audio delay, or
// add extra video delay.
if (channel_delay_->extra_audio_delay_ms > 0) {
// We have extra delay in VoiceEngine
// Start with decreasing the voice delay
if (channel_delay_->extra_audio_delay_ms > base_target_delay_ms_) {
// We have extra delay in VoiceEngine.
// Start with decreasing the voice delay.
int audio_diff_ms = current_diff_ms / 2;
if (audio_diff_ms < -1 * kMaxAudioDiffMs) {
// Don't change the delay too much at once.
@ -179,7 +181,7 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
if (channel_delay_->extra_audio_delay_ms < 0) {
// Negative values not allowed.
channel_delay_->extra_audio_delay_ms = 0;
channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
channel_delay_->last_sync_delay = 0;
} else {
// There is more audio delay to use for the next round.
@ -192,7 +194,7 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
channel_delay_->last_video_delay_ms = video_delay_ms;
} else { // channel_delay_->extra_audio_delay_ms <= base_target_delay_ms_
// We have no extra delay in VoiceEngine, increase the video delay.
channel_delay_->extra_audio_delay_ms = 0;
channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
// Make the difference positive.
int video_diff_ms = -1 * current_diff_ms;
@ -202,27 +204,27 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
if (video_delay_ms > channel_delay_->last_video_delay_ms) {
if (video_delay_ms >
channel_delay_->last_video_delay_ms + kMaxVideoDiffMs) {
// Don't increase the delay too much at once
// Don't increase the delay too much at once.
video_delay_ms =
channel_delay_->last_video_delay_ms + kMaxVideoDiffMs;
}
// Verify we don't go above the maximum allowed delay
if (video_delay_ms > kMaxDelay) {
video_delay_ms = kMaxDelay;
// Verify we don't go above the maximum allowed delay.
if (video_delay_ms > base_target_delay_ms_ + kMaxDeltaDelayMs) {
video_delay_ms = base_target_delay_ms_ + kMaxDeltaDelayMs;
}
} else {
if (video_delay_ms <
channel_delay_->last_video_delay_ms - kMaxVideoDiffMs) {
// Don't decrease the delay too much at once
// Don't decrease the delay too much at once.
video_delay_ms =
channel_delay_->last_video_delay_ms - kMaxVideoDiffMs;
}
// Verify we don't go below the minimum delay
// Verify we don't go below the minimum delay.
if (video_delay_ms < *total_video_delay_target_ms) {
video_delay_ms = *total_video_delay_target_ms;
}
}
// Store the values
// Store the values.
channel_delay_->extra_video_delay_ms =
video_delay_ms - *total_video_delay_target_ms;
channel_delay_->last_video_delay_ms = video_delay_ms;
@ -245,4 +247,15 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
*total_video_delay_target_ms : video_delay_ms;
return true;
}
void StreamSynchronization::SetTargetBufferingDelay(int target_delay_ms) {
// Video is already delayed by the desired amount.
base_target_delay_ms_ = target_delay_ms;
// Set the initial extra delay for audio.
channel_delay_->extra_audio_delay_ms += target_delay_ms;
// The video delay is compared to the last value (and how much we can update
// it is limited by that as well).
channel_delay_->last_video_delay_ms += target_delay_ms;
}
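With a non-zero base_target_delay_ms_, ComputeDelays() above now floors both streams at the buffering delay: video_delay_ms starts from it, extra_audio_delay_ms resets to it instead of to zero, and both are capped at base_target_delay_ms_ + kMaxDeltaDelayMs rather than the old fixed kMaxDelay.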
} // namespace webrtc

View File

@ -43,11 +43,15 @@ class StreamSynchronization {
static bool ComputeRelativeDelay(const Measurements& audio_measurement,
const Measurements& video_measurement,
int* relative_delay_ms);
// Set the target buffering delay - all audio and video will be delayed by at
// least target_delay_ms.
void SetTargetBufferingDelay(int target_delay_ms);
private:
ViESyncDelay* channel_delay_;
int audio_channel_id_;
int video_channel_id_;
int base_target_delay_ms_;
};
} // namespace webrtc

View File

@ -120,9 +120,9 @@ class StreamSynchronizationTest : public ::testing::Test {
// Capture an audio and a video frame at the same time.
audio.latest_timestamp = send_time_->NowRtp(audio_frequency,
audio_offset);
video.latest_timestamp = send_time_->NowRtp(video_frequency,
video_offset);
if (audio_delay_ms > video_delay_ms) {
// Audio later than video.
@ -154,56 +154,57 @@ class StreamSynchronizationTest : public ::testing::Test {
// TODO(holmer): This is currently wrong! We should simply change
// audio_delay_ms or video_delay_ms since those now include VCM and NetEQ
// delays.
void BothDelayedAudioLaterTest() {
int current_audio_delay_ms = 0;
int audio_delay_ms = 300;
int video_delay_ms = 100;
void BothDelayedAudioLaterTest(int base_target_delay) {
int current_audio_delay_ms = base_target_delay;
int audio_delay_ms = base_target_delay + 300;
int video_delay_ms = base_target_delay + 100;
int extra_audio_delay_ms = 0;
int total_video_delay_ms = 0;
int total_video_delay_ms = base_target_delay;
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(kMaxVideoDiffMs, total_video_delay_ms);
EXPECT_EQ(0, extra_audio_delay_ms);
EXPECT_EQ(base_target_delay + kMaxVideoDiffMs, total_video_delay_ms);
EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
current_audio_delay_ms = extra_audio_delay_ms;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
video_delay_ms));
// Simulate 0 minimum delay in the VCM.
total_video_delay_ms = 0;
// Simulate base_target_delay minimum delay in the VCM.
total_video_delay_ms = base_target_delay;
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(2 * kMaxVideoDiffMs, total_video_delay_ms);
EXPECT_EQ(0, extra_audio_delay_ms);
EXPECT_EQ(base_target_delay + 2 * kMaxVideoDiffMs, total_video_delay_ms);
EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
current_audio_delay_ms = extra_audio_delay_ms;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
video_delay_ms));
// Simulate 0 minimum delay in the VCM.
total_video_delay_ms = 0;
// Simulate base_target_delay minimum delay in the VCM.
total_video_delay_ms = base_target_delay;
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(audio_delay_ms - video_delay_ms, total_video_delay_ms);
EXPECT_EQ(0, extra_audio_delay_ms);
EXPECT_EQ(base_target_delay + audio_delay_ms - video_delay_ms,
total_video_delay_ms);
EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
// Simulate that NetEQ introduces some audio delay.
current_audio_delay_ms = 50;
current_audio_delay_ms = base_target_delay + 50;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
video_delay_ms));
// Simulate 0 minimum delay in the VCM.
total_video_delay_ms = 0;
// Simulate base_target_delay minimum delay in the VCM.
total_video_delay_ms = base_target_delay;
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
@ -211,15 +212,15 @@ class StreamSynchronizationTest : public ::testing::Test {
&total_video_delay_ms));
EXPECT_EQ(audio_delay_ms - video_delay_ms + current_audio_delay_ms,
total_video_delay_ms);
EXPECT_EQ(0, extra_audio_delay_ms);
EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
// Simulate that NetEQ reduces its delay.
current_audio_delay_ms = 10;
current_audio_delay_ms = base_target_delay + 10;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
video_delay_ms));
// Simulate 0 minimum delay in the VCM.
total_video_delay_ms = 0;
// Simulate base_target_delay minimum delay in the VCM.
total_video_delay_ms = base_target_delay;
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
@ -227,12 +228,100 @@ class StreamSynchronizationTest : public ::testing::Test {
&total_video_delay_ms));
EXPECT_EQ(audio_delay_ms - video_delay_ms + current_audio_delay_ms,
total_video_delay_ms);
EXPECT_EQ(0, extra_audio_delay_ms);
EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
}
void BothDelayedVideoLaterTest(int base_target_delay) {
int current_audio_delay_ms = base_target_delay;
int audio_delay_ms = base_target_delay + 100;
int video_delay_ms = base_target_delay + 300;
int extra_audio_delay_ms = 0;
int total_video_delay_ms = base_target_delay;
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(base_target_delay, total_video_delay_ms);
// The audio delay is not allowed to change more than this in 1 second.
EXPECT_EQ(base_target_delay + kMaxAudioDiffMs, extra_audio_delay_ms);
current_audio_delay_ms = extra_audio_delay_ms;
int current_extra_delay_ms = extra_audio_delay_ms;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(base_target_delay, total_video_delay_ms);
// The audio delay is not allowed to change by more than half of the
// required change in delay.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms,
base_target_delay + video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
current_audio_delay_ms = extra_audio_delay_ms;
current_extra_delay_ms = extra_audio_delay_ms;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(base_target_delay, total_video_delay_ms);
// The audio delay is not allowed to change by more than half of the
// required change in delay.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms,
base_target_delay + video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
current_extra_delay_ms = extra_audio_delay_ms;
// Simulate that NetEQ for some reason reduced the delay.
current_audio_delay_ms = base_target_delay + 170;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(base_target_delay, total_video_delay_ms);
// Since we can only ask NetEQ for a certain amount of extra delay, and
// we only measure the total NetEQ delay, we will ask for additional delay
// here to try to stay in sync.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms,
base_target_delay + video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
current_extra_delay_ms = extra_audio_delay_ms;
// Simulate that NetEQ for some reason significantly increased the delay.
current_audio_delay_ms = base_target_delay + 250;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(base_target_delay, total_video_delay_ms);
// The audio delay is not allowed to change by more than half of the
// required change in delay.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms,
base_target_delay + video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
}
int MaxAudioDelayIncrease(int current_audio_delay_ms, int delay_ms) {
return std::min((delay_ms - current_audio_delay_ms) / 2,
static_cast<int>(kMaxAudioDiffMs));
}
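// For example, with kMaxAudioDiffMs = 80: a gap of 200 ms between the target
// and the current audio delay yields min(200 / 2, 80) = min(100, 80) = 80 ms,
// which matches the first-iteration expectation in BothDelayedVideoLaterTest.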
int MaxAudioDelayDecrease(int current_audio_delay_ms, int delay_ms) {
@ -363,100 +452,86 @@ TEST_F(StreamSynchronizationTest, AudioDelay) {
}
TEST_F(StreamSynchronizationTest, BothDelayedVideoLater) {
int current_audio_delay_ms = 0;
int audio_delay_ms = 100;
int video_delay_ms = 300;
int extra_audio_delay_ms = 0;
int total_video_delay_ms = 0;
BothDelayedVideoLaterTest(0);
}
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
// The audio delay is not allowed to change more than this in 1 second.
EXPECT_EQ(kMaxAudioDiffMs, extra_audio_delay_ms);
current_audio_delay_ms = extra_audio_delay_ms;
int current_extra_delay_ms = extra_audio_delay_ms;
TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterAudioClockDrift) {
audio_clock_drift_ = 1.05;
BothDelayedVideoLaterTest(0);
}
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
// The audio delay is not allowed to change by more than half of the required
// change in delay.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms, video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
current_audio_delay_ms = extra_audio_delay_ms;
current_extra_delay_ms = extra_audio_delay_ms;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
// The audio delay is not allowed to change by more than half of the required
// change in delay.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms, video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
current_extra_delay_ms = extra_audio_delay_ms;
// Simulate that NetEQ for some reason reduced the delay.
current_audio_delay_ms = 170;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
// Since we can only ask NetEQ for a certain amount of extra delay, and
// we only measure the total NetEQ delay, we will ask for additional delay
// here to try to stay in sync.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms, video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
current_extra_delay_ms = extra_audio_delay_ms;
// Simulate that NetEQ for some reason significantly increased the delay.
current_audio_delay_ms = 250;
send_time_->IncreaseTimeMs(1000);
receive_time_->IncreaseTimeMs(800);
EXPECT_TRUE(DelayedStreams(audio_delay_ms,
video_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms,
&total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
// The audio delay is not allowed to change by more than half of the required
// change in delay.
EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
current_audio_delay_ms, video_delay_ms - audio_delay_ms),
extra_audio_delay_ms);
TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterVideoClockDrift) {
video_clock_drift_ = 1.05;
BothDelayedVideoLaterTest(0);
}
TEST_F(StreamSynchronizationTest, BothDelayedAudioLater) {
BothDelayedAudioLaterTest();
BothDelayedAudioLaterTest(0);
}
TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDrift) {
audio_clock_drift_ = 1.05;
BothDelayedAudioLaterTest();
BothDelayedAudioLaterTest(0);
}
TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDrift) {
video_clock_drift_ = 1.05;
BothDelayedAudioLaterTest();
BothDelayedAudioLaterTest(0);
}
TEST_F(StreamSynchronizationTest, BaseDelay) {
int base_target_delay_ms = 2000;
int current_audio_delay_ms = 2000;
int extra_audio_delay_ms = 0;
int total_video_delay_ms = base_target_delay_ms;
sync_->SetTargetBufferingDelay(base_target_delay_ms);
EXPECT_TRUE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
current_audio_delay_ms,
&extra_audio_delay_ms, &total_video_delay_ms));
EXPECT_EQ(base_target_delay_ms, extra_audio_delay_ms);
EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
}
TEST_F(StreamSynchronizationTest, BothDelayedAudioLaterWithBaseDelay) {
int base_target_delay_ms = 3000;
sync_->SetTargetBufferingDelay(base_target_delay_ms);
BothDelayedAudioLaterTest(base_target_delay_ms);
}
TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDriftWithBaseDelay) {
int base_target_delay_ms = 3000;
sync_->SetTargetBufferingDelay(base_target_delay_ms);
audio_clock_drift_ = 1.05;
BothDelayedAudioLaterTest(base_target_delay_ms);
}
TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDriftWithBaseDelay) {
int base_target_delay_ms = 3000;
sync_->SetTargetBufferingDelay(base_target_delay_ms);
video_clock_drift_ = 1.05;
BothDelayedAudioLaterTest(base_target_delay_ms);
}
TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterWithBaseDelay) {
int base_target_delay_ms = 2000;
sync_->SetTargetBufferingDelay(base_target_delay_ms);
BothDelayedVideoLaterTest(base_target_delay_ms);
}
TEST_F(StreamSynchronizationTest,
BothDelayedVideoLaterAudioClockDriftWithBaseDelay) {
int base_target_delay_ms = 2000;
audio_clock_drift_ = 1.05;
sync_->SetTargetBufferingDelay(base_target_delay_ms);
BothDelayedVideoLaterTest(base_target_delay_ms);
}
TEST_F(StreamSynchronizationTest,
BothDelayedVideoLaterVideoClockDriftWithBaseDelay) {
int base_target_delay_ms = 2000;
video_clock_drift_ = 1.05;
sync_->SetTargetBufferingDelay(base_target_delay_ms);
BothDelayedVideoLaterTest(base_target_delay_ms);
}
} // namespace webrtc

View File

@ -39,6 +39,7 @@
#define DEFAULT_VIDEO_CODEC_MAX_FRAMERATE "30"
#define DEFAULT_VIDEO_PROTECTION_METHOD "None"
#define DEFAULT_TEMPORAL_LAYER "0"
#define DEFAULT_BUFFERING_DELAY_MS "0"
DEFINE_string(render_custom_call_remote_to, "", "Specify to render the remote "
"stream of a custom call to the provided filename instead of showing it in "
@ -153,6 +154,7 @@ bool SetVideoProtection(webrtc::ViECodec* vie_codec,
int video_channel,
VideoProtectionMethod protection_method);
bool GetBitrateSignaling();
int GetBufferingDelay();
// The following are audio helper functions.
bool GetAudioDevices(webrtc::VoEBase* voe_base,
@ -265,6 +267,7 @@ int ViEAutoTest::ViECustomCall() {
webrtc::CodecInst audio_codec;
int audio_channel = -1;
VideoProtectionMethod protection_method = kProtectionMethodNone;
int buffer_delay_ms = 0;
bool is_image_scale_enabled = false;
bool remb = true;
@ -297,6 +300,9 @@ int ViEAutoTest::ViECustomCall() {
// Get the video protection method for the call.
protection_method = GetVideoProtection();
// Get the call mode (Real-Time/Buffered).
buffer_delay_ms = GetBufferingDelay();
// Get the audio device for the call.
memset(audio_capture_device_name, 0, KMaxUniqueIdLength);
memset(audio_playbackDeviceName, 0, KMaxUniqueIdLength);
@ -486,6 +492,16 @@ int ViEAutoTest::ViECustomCall() {
number_of_errors += ViETest::TestError(error == 0,
"ERROR: %s at line %d",
__FUNCTION__, __LINE__);
// Set the call mode (real-time/buffered).
error = vie_rtp_rtcp->SetSenderBufferingMode(video_channel,
buffer_delay_ms);
number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
__FUNCTION__, __LINE__);
error = vie_rtp_rtcp->SetReceiverBufferingMode(video_channel,
buffer_delay_ms);
number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
__FUNCTION__, __LINE__);
// Set the Video Protection before start send and receive.
SetVideoProtection(vie_codec, vie_rtp_rtcp,
video_channel, protection_method);
@ -1555,6 +1571,15 @@ bool GetBitrateSignaling() {
return choice == 1;
}
int GetBufferingDelay() {
std::string input = TypedInput("Choose buffering delay (ms).")
.WithDefault(DEFAULT_BUFFERING_DELAY_MS)
.WithInputValidator(new webrtc::IntegerWithinRangeValidator(0, 10000))
.AskForInput();
return atoi(input.c_str());
}
void PrintRTCCPStatistics(webrtc::ViERTP_RTCP* vie_rtp_rtcp,
int video_channel,
StatisticsType stat_type) {

View File

@ -685,19 +685,32 @@ void ViEAutoTest::ViERtpRtcpAPITest()
EXPECT_EQ(0, ViE.rtp_rtcp->SetTransmissionSmoothingStatus(
tbChannel.videoChannel, false));
// Streaming Mode.
EXPECT_EQ(-1, ViE.rtp_rtcp->EnableSenderStreamingMode(
// Buffering mode - sender side.
EXPECT_EQ(-1, ViE.rtp_rtcp->SetSenderBufferingMode(
invalid_channel_id, 0));
int invalid_delay = -1;
EXPECT_EQ(-1, ViE.rtp_rtcp->EnableSenderStreamingMode(
EXPECT_EQ(-1, ViE.rtp_rtcp->SetSenderBufferingMode(
tbChannel.videoChannel, invalid_delay));
invalid_delay = 15000;
EXPECT_EQ(-1, ViE.rtp_rtcp->EnableSenderStreamingMode(
EXPECT_EQ(-1, ViE.rtp_rtcp->SetSenderBufferingMode(
tbChannel.videoChannel, invalid_delay));
EXPECT_EQ(0, ViE.rtp_rtcp->EnableSenderStreamingMode(
EXPECT_EQ(0, ViE.rtp_rtcp->SetSenderBufferingMode(
tbChannel.videoChannel, 5000));
// Real-time mode.
EXPECT_EQ(0, ViE.rtp_rtcp->EnableSenderStreamingMode(
// Buffering mode - receiver side.
EXPECT_EQ(-1, ViE.rtp_rtcp->SetReceiverBufferingMode(
invalid_channel_id, 0));
invalid_delay = -1;
EXPECT_EQ(-1, ViE.rtp_rtcp->SetReceiverBufferingMode(
tbChannel.videoChannel, invalid_delay));
invalid_delay = 15000;
EXPECT_EQ(-1, ViE.rtp_rtcp->SetReceiverBufferingMode(
tbChannel.videoChannel, invalid_delay));
EXPECT_EQ(0, ViE.rtp_rtcp->SetReceiverBufferingMode(
tbChannel.videoChannel, 5000));
// Real-time mode - sender side.
EXPECT_EQ(0, ViE.rtp_rtcp->SetSenderBufferingMode(
tbChannel.videoChannel, 0));
// Real-time mode - receiver side.
EXPECT_EQ(0, ViE.rtp_rtcp->SetReceiverBufferingMode(
tbChannel.videoChannel, 0));
//***************************************************************

View File

@ -104,7 +104,8 @@ ViEChannel::ViEChannel(WebRtc_Word32 channel_id,
file_recorder_(channel_id),
mtu_(0),
sender_(sender),
nack_history_size_sender_(kSendSidePacketHistorySize) {
nack_history_size_sender_(kSendSidePacketHistorySize),
max_nack_reordering_threshold_(kMaxPacketAgeToNack) {
WEBRTC_TRACE(kTraceMemory, kTraceVideo, ViEId(engine_id, channel_id),
"ViEChannel::ViEChannel(channel_id: %d, engine_id: %d)",
channel_id, engine_id);
@ -125,7 +126,7 @@ ViEChannel::ViEChannel(WebRtc_Word32 channel_id,
rtp_rtcp_.reset(RtpRtcp::CreateRtpRtcp(configuration));
vie_receiver_.SetRtpRtcpModule(rtp_rtcp_.get());
vcm_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack);
vcm_.SetNackSettings(kMaxNackListSize, max_nack_reordering_threshold_);
}
WebRtc_Word32 ViEChannel::Init() {
@ -298,7 +299,7 @@ WebRtc_Word32 ViEChannel::SetSendCodec(const VideoCodec& video_codec,
}
if (nack_method != kNackOff) {
rtp_rtcp->SetStorePacketsStatus(true, nack_history_size_sender_);
rtp_rtcp->SetNACKStatus(nack_method, kMaxPacketAgeToNack);
rtp_rtcp->SetNACKStatus(nack_method, max_nack_reordering_threshold_);
} else if (paced_sender_) {
rtp_rtcp->SetStorePacketsStatus(true, nack_history_size_sender_);
}
@ -622,7 +623,8 @@ WebRtc_Word32 ViEChannel::ProcessNACKRequest(const bool enable) {
"%s: Could not enable NACK, RTPC not on ", __FUNCTION__);
return -1;
}
if (rtp_rtcp_->SetNACKStatus(nackMethod, kMaxPacketAgeToNack) != 0) {
if (rtp_rtcp_->SetNACKStatus(nackMethod,
max_nack_reordering_threshold_) != 0) {
WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
"%s: Could not set NACK method %d", __FUNCTION__,
nackMethod);
@ -640,7 +642,7 @@ WebRtc_Word32 ViEChannel::ProcessNACKRequest(const bool enable) {
it != simulcast_rtp_rtcp_.end();
it++) {
RtpRtcp* rtp_rtcp = *it;
rtp_rtcp->SetNACKStatus(nackMethod, kMaxPacketAgeToNack);
rtp_rtcp->SetNACKStatus(nackMethod, max_nack_reordering_threshold_);
rtp_rtcp->SetStorePacketsStatus(true, nack_history_size_sender_);
}
} else {
@ -652,13 +654,14 @@ WebRtc_Word32 ViEChannel::ProcessNACKRequest(const bool enable) {
if (paced_sender_ == NULL) {
rtp_rtcp->SetStorePacketsStatus(false, 0);
}
rtp_rtcp->SetNACKStatus(kNackOff, kMaxPacketAgeToNack);
rtp_rtcp->SetNACKStatus(kNackOff, max_nack_reordering_threshold_);
}
vcm_.RegisterPacketRequestCallback(NULL);
if (paced_sender_ == NULL) {
rtp_rtcp_->SetStorePacketsStatus(false, 0);
}
if (rtp_rtcp_->SetNACKStatus(kNackOff, kMaxPacketAgeToNack) != 0) {
if (rtp_rtcp_->SetNACKStatus(kNackOff,
max_nack_reordering_threshold_) != 0) {
WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
"%s: Could not turn off NACK", __FUNCTION__);
return -1;
@ -723,21 +726,18 @@ WebRtc_Word32 ViEChannel::SetHybridNACKFECStatus(
return ProcessFECRequest(enable, payload_typeRED, payload_typeFEC);
}
int ViEChannel::EnableSenderStreamingMode(int target_delay_ms) {
int ViEChannel::SetSenderBufferingMode(int target_delay_ms) {
if ((target_delay_ms < 0) || (target_delay_ms > kMaxTargetDelayMs)) {
WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
"%s: Target streaming delay out of bounds: %d", __FUNCTION__,
target_delay_ms);
"%s: Target sender buffering delay out of bounds: %d",
__FUNCTION__, target_delay_ms);
return -1;
}
if (target_delay_ms == 0) {
// Real-time mode.
nack_history_size_sender_ = kSendSidePacketHistorySize;
} else {
// The max size of the nack list should be large enough to accommodate
// the number of packets (frames) resulting from the increased delay.
// Roughly estimating for ~20 packets per frame @ 30fps.
nack_history_size_sender_ = target_delay_ms * 20 * 30 / 1000;
nack_history_size_sender_ = GetRequiredNackListSize(target_delay_ms);
// Don't allow a number lower than the default value.
if (nack_history_size_sender_ < kSendSidePacketHistorySize) {
nack_history_size_sender_ = kSendSidePacketHistorySize;
@ -758,6 +758,35 @@ int ViEChannel::EnableSenderStreamingMode(int target_delay_ms) {
return 0;
}
int ViEChannel::SetReceiverBufferingMode(int target_delay_ms) {
if ((target_delay_ms < 0) || (target_delay_ms > kMaxTargetDelayMs)) {
WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
"%s: Target receiver buffering delay out of bounds: %d",
__FUNCTION__, target_delay_ms);
return -1;
}
int max_nack_list_size;
if (target_delay_ms == 0) {
// Real-time mode - restore default settings.
max_nack_reordering_threshold_ = kMaxPacketAgeToNack;
max_nack_list_size = kMaxNackListSize;
} else {
max_nack_list_size = GetRequiredNackListSize(target_delay_ms) * 3 / 4;
max_nack_reordering_threshold_ = max_nack_list_size;
}
vcm_.SetNackSettings(max_nack_list_size, max_nack_reordering_threshold_);
vcm_.SetMinReceiverDelay(target_delay_ms);
vie_sync_.SetTargetBufferingDelay(target_delay_ms);
return 0;
}
int ViEChannel::GetRequiredNackListSize(int target_delay_ms) {
// The max size of the nack list should be large enough to accommodate
// the number of packets (frames) resulting from the increased delay.
// Roughly estimating for ~20 packets per frame @ 30fps.
return target_delay_ms * 20 * 30 / 1000;
}
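// Worked example for the sizing above: a 5000 ms target gives
// 5000 * 20 * 30 / 1000 = 3000 packets of sender-side history, and the
// receiver's NACK list (three quarters of that, per SetReceiverBufferingMode)
// comes to 2250 packets.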
WebRtc_Word32 ViEChannel::SetKeyFrameRequestMethod(
const KeyFrameRequestMethod method) {
WEBRTC_TRACE(kTraceInfo, kTraceVideo, ViEId(engine_id_, channel_id_),

View File

@ -116,7 +116,8 @@ class ViEChannel
WebRtc_Word32 SetHybridNACKFECStatus(const bool enable,
const unsigned char payload_typeRED,
const unsigned char payload_typeFEC);
int EnableSenderStreamingMode(int target_delay_ms);
int SetSenderBufferingMode(int target_delay_ms);
int SetReceiverBufferingMode(int target_delay_ms);
WebRtc_Word32 SetKeyFrameRequestMethod(const KeyFrameRequestMethod method);
bool EnableRemb(bool enable);
int SetSendTimestampOffsetStatus(bool enable, int id);
@ -365,6 +366,8 @@ class ViEChannel
WebRtc_Word32 ProcessFECRequest(const bool enable,
const unsigned char payload_typeRED,
const unsigned char payload_typeFEC);
// Compute NACK list parameters for the buffering mode.
int GetRequiredNackListSize(int target_delay_ms);
WebRtc_Word32 channel_id_;
WebRtc_Word32 engine_id_;
@ -425,6 +428,7 @@ class ViEChannel
const bool sender_;
int nack_history_size_sender_;
int max_nack_reordering_threshold_;
};
} // namespace webrtc

View File

@ -702,13 +702,13 @@ WebRtc_Word32 ViEEncoder::UpdateProtectionMethod() {
return 0;
}
void ViEEncoder::EnableSenderStreamingMode(int target_delay_ms) {
void ViEEncoder::SetSenderBufferingMode(int target_delay_ms) {
if (target_delay_ms > 0) {
// Disable external frame-droppers.
vcm_.EnableFrameDropper(false);
vpm_.EnableTemporalDecimation(false);
} else {
// Real-time mode - enabling frame droppers.
// Real-time mode - enable frame droppers.
vpm_.EnableTemporalDecimation(true);
vcm_.EnableFrameDropper(true);
}

View File

@ -113,8 +113,8 @@ class ViEEncoder
// Loss protection.
WebRtc_Word32 UpdateProtectionMethod();
// Streaming mode.
void EnableSenderStreamingMode(int target_delay_ms);
// Buffering mode.
void SetSenderBufferingMode(int target_delay_ms);
// Implements VCMPacketizationCallback.
virtual WebRtc_Word32 SendData(

View File

@ -553,11 +553,11 @@ int ViERTP_RTCPImpl::SetHybridNACKFECStatus(
return 0;
}
int ViERTP_RTCPImpl::EnableSenderStreamingMode(int video_channel,
int ViERTP_RTCPImpl::SetSenderBufferingMode(int video_channel,
int target_delay_ms) {
WEBRTC_TRACE(kTraceApiCall, kTraceVideo,
ViEId(shared_data_->instance_id(), video_channel),
"%s(channel: %d, target_delay: %d)",
"%s(channel: %d, sender target_delay: %d)",
__FUNCTION__, video_channel, target_delay_ms);
ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
ViEChannel* vie_channel = cs.Channel(video_channel);
@ -578,8 +578,8 @@ int ViERTP_RTCPImpl::EnableSenderStreamingMode(int video_channel,
return -1;
}
// Update the channel's streaming mode settings.
if (vie_channel->EnableSenderStreamingMode(target_delay_ms) != 0) {
// Update the channel with buffering mode settings.
if (vie_channel->SetSenderBufferingMode(target_delay_ms) != 0) {
WEBRTC_TRACE(kTraceError, kTraceVideo,
ViEId(shared_data_->instance_id(), video_channel),
"%s: failed for channel %d", __FUNCTION__, video_channel);
@ -587,8 +587,35 @@ int ViERTP_RTCPImpl::EnableSenderStreamingMode(int video_channel,
return -1;
}
// Update the encoder's streaming mode settings.
vie_encoder->EnableSenderStreamingMode(target_delay_ms);
// Update the encoder's buffering mode settings.
vie_encoder->SetSenderBufferingMode(target_delay_ms);
return 0;
}
int ViERTP_RTCPImpl::SetReceiverBufferingMode(int video_channel,
int target_delay_ms) {
WEBRTC_TRACE(kTraceApiCall, kTraceVideo,
ViEId(shared_data_->instance_id(), video_channel),
"%s(channel: %d, receiver target_delay: %d)",
__FUNCTION__, video_channel, target_delay_ms);
ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
ViEChannel* vie_channel = cs.Channel(video_channel);
if (!vie_channel) {
WEBRTC_TRACE(kTraceError, kTraceVideo,
ViEId(shared_data_->instance_id(), video_channel),
"%s: Channel %d doesn't exist", __FUNCTION__, video_channel);
shared_data_->SetLastError(kViERtpRtcpInvalidChannelId);
return -1;
}
// Update the channel with buffering mode settings.
if (vie_channel->SetReceiverBufferingMode(target_delay_ms) != 0) {
WEBRTC_TRACE(kTraceError, kTraceVideo,
ViEId(shared_data_->instance_id(), video_channel),
"%s: failed for channel %d", __FUNCTION__, video_channel);
shared_data_->SetLastError(kViERtpRtcpUnknownError);
return -1;
}
return 0;
}

View File

@ -64,8 +64,10 @@ class ViERTP_RTCPImpl
virtual int SetHybridNACKFECStatus(const int video_channel, const bool enable,
const unsigned char payload_typeRED,
const unsigned char payload_typeFEC);
virtual int EnableSenderStreamingMode(int video_channel,
int target_delay_ms);
virtual int SetSenderBufferingMode(int video_channel,
int target_delay_ms);
virtual int SetReceiverBufferingMode(int video_channel,
int target_delay_ms);
virtual int SetKeyFrameRequestMethod(const int video_channel,
const ViEKeyFrameRequestMethod method);
virtual int SetTMMBRStatus(const int video_channel, const bool enable);

View File

@ -172,4 +172,14 @@ WebRtc_Word32 ViESyncModule::Process() {
return 0;
}
void ViESyncModule::SetTargetBufferingDelay(int target_delay_ms) {
CriticalSectionScoped cs(data_cs_.get());
sync_->SetTargetBufferingDelay(target_delay_ms);
// Set the initial playout delay in the voice engine (the video engine is
// updated via the VCM interface).
assert(voe_sync_interface_ != NULL);
voe_sync_interface_->SetInitialPlayoutDelay(voe_channel_id_,
target_delay_ms);
}
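Taken together, a receiver-side buffering request fans out three ways: the VCM gets the minimum receiver delay and resized NACK settings, StreamSynchronization gets the base target delay for A/V sync, and VoiceEngine receives the same value as its initial playout delay through SetInitialPlayoutDelay().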
} // namespace webrtc

View File

@ -40,6 +40,9 @@ class ViESyncModule : public Module {
int VoiceChannel();
// Set target delay for buffering mode (0 = real-time mode).
void SetTargetBufferingDelay(int target_delay_ms);
// Implements Module.
virtual WebRtc_Word32 TimeUntilNextProcess();
virtual WebRtc_Word32 Process();