Refactor receiver.h/.cc.
TEST=video_coding_unittests, vie_auto_test --automated
Review URL: https://webrtc-codereview.appspot.com/994008
git-svn-id: http://webrtc.googlecode.com/svn/trunk@3336 4adac7df-926f-26a2-2b94-8c16560cd09d
commit 1ea4b502ef
parent 1926d33344

webrtc/modules/video_coding/main/source/receiver.cc
@@ -8,488 +8,416 @@
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/main/source/receiver.h"
#include "webrtc/modules/video_coding/main/source/receiver.h"

#include <assert.h>

#include "modules/video_coding/main/interface/video_coding.h"
#include "modules/video_coding/main/source/encoded_frame.h"
#include "modules/video_coding/main/source/internal_defines.h"
#include "modules/video_coding/main/source/media_opt_util.h"
#include "modules/video_coding/main/source/tick_time_base.h"
#include "system_wrappers/interface/trace.h"
#include "webrtc/modules/video_coding/main/interface/video_coding.h"
#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
#include "webrtc/modules/video_coding/main/source/tick_time_base.h"
#include "webrtc/system_wrappers/interface/trace.h"

namespace webrtc {

VCMReceiver::VCMReceiver(VCMTiming& timing,
VCMReceiver::VCMReceiver(VCMTiming* timing,
TickTimeBase* clock,
WebRtc_Word32 vcmId,
WebRtc_Word32 receiverId,
int32_t vcm_id,
int32_t receiver_id,
bool master)
: _critSect(CriticalSectionWrapper::CreateCriticalSection()),
_vcmId(vcmId),
_clock(clock),
_receiverId(receiverId),
_master(master),
_jitterBuffer(_clock, vcmId, receiverId, master),
_timing(timing),
_renderWaitEvent(*new VCMEvent()),
_state(kPassive) {}
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
vcm_id_(vcm_id),
clock_(clock),
receiver_id_(receiver_id),
master_(master),
jitter_buffer_(clock_, vcm_id, receiver_id, master),
timing_(timing),
render_wait_event_(),
state_(kPassive) {}

VCMReceiver::~VCMReceiver()
{
_renderWaitEvent.Set();
delete &_renderWaitEvent;
delete _critSect;
VCMReceiver::~VCMReceiver() {
render_wait_event_.Set();
delete crit_sect_;
}

void
VCMReceiver::Reset()
{
CriticalSectionScoped cs(_critSect);
if (!_jitterBuffer.Running())
{
_jitterBuffer.Start();
}
else
{
_jitterBuffer.Flush();
}
_renderWaitEvent.Reset();
if (_master)
{
_state = kReceiving;
}
else
{
_state = kPassive;
}
void VCMReceiver::Reset() {
CriticalSectionScoped cs(crit_sect_);
if (!jitter_buffer_.Running()) {
jitter_buffer_.Start();
} else {
jitter_buffer_.Flush();
}
render_wait_event_.Reset();
if (master_) {
state_ = kReceiving;
} else {
state_ = kPassive;
}
}

WebRtc_Word32
VCMReceiver::Initialize()
{
CriticalSectionScoped cs(_critSect);
Reset();
if (!_master)
{
SetNackMode(kNoNack);
}
int32_t VCMReceiver::Initialize() {
CriticalSectionScoped cs(crit_sect_);
Reset();
if (!master_) {
SetNackMode(kNoNack);
}
return VCM_OK;
}

void VCMReceiver::UpdateRtt(uint32_t rtt) {
jitter_buffer_.UpdateRtt(rtt);
}

int32_t VCMReceiver::InsertPacket(const VCMPacket& packet, uint16_t frame_width,
uint16_t frame_height) {
// Find an empty frame.
VCMEncodedFrame* buffer = NULL;
const int32_t error = jitter_buffer_.GetFrame(packet, buffer);
if (error == VCM_OLD_PACKET_ERROR) {
return VCM_OK;
} else if (error != VCM_OK) {
return error;
}
assert(buffer);
{
CriticalSectionScoped cs(crit_sect_);

if (frame_width && frame_height) {
buffer->SetEncodedSize(static_cast<uint32_t>(frame_width),
static_cast<uint32_t>(frame_height));
}

if (master_) {
// Only trace the primary receiver to make it possible to parse and plot
// the trace file.
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"Packet seq_no %u of frame %u at %u",
packet.seqNum, packet.timestamp,
MaskWord64ToUWord32(clock_->MillisecondTimestamp()));
}

const int64_t now_ms = clock_->MillisecondTimestamp();

int64_t render_time_ms = timing_->RenderTimeMs(packet.timestamp, now_ms);

if (render_time_ms < 0) {
// Render time error. Assume that this is due to some change in the
// incoming video stream and reset the JB and the timing.
jitter_buffer_.Flush();
timing_->Reset(clock_->MillisecondTimestamp());
return VCM_FLUSH_INDICATOR;
} else if (render_time_ms < now_ms - kMaxVideoDelayMs) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"This frame should have been rendered more than %u ms ago."
"Flushing jitter buffer and resetting timing.",
kMaxVideoDelayMs);
jitter_buffer_.Flush();
timing_->Reset(clock_->MillisecondTimestamp());
return VCM_FLUSH_INDICATOR;
} else if (timing_->TargetVideoDelay() > kMaxVideoDelayMs) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"More than %u ms target delay. Flushing jitter buffer and"
"resetting timing.", kMaxVideoDelayMs);
jitter_buffer_.Flush();
timing_->Reset(clock_->MillisecondTimestamp());
return VCM_FLUSH_INDICATOR;
}

// First packet received belonging to this frame.
if (buffer->Length() == 0) {
const int64_t now_ms = clock_->MillisecondTimestamp();
if (master_) {
// Only trace the primary receiver to make it possible to parse and plot
// the trace file.
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"First packet of frame %u at %u", packet.timestamp,
MaskWord64ToUWord32(now_ms));
}
render_time_ms = timing_->RenderTimeMs(packet.timestamp, now_ms);
if (render_time_ms >= 0) {
buffer->SetRenderTime(render_time_ms);
} else {
buffer->SetRenderTime(now_ms);
}
}

// Insert packet into the jitter buffer both media and empty packets.
const VCMFrameBufferEnum
ret = jitter_buffer_.InsertPacket(buffer, packet);
if (ret == kFlushIndicator) {
return VCM_FLUSH_INDICATOR;
} else if (ret < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"Error inserting packet seq_no=%u, time_stamp=%u",
packet.seqNum, packet.timestamp);
return VCM_JITTER_BUFFER_ERROR;
}
}
return VCM_OK;
}

void VCMReceiver::UpdateRtt(WebRtc_UWord32 rtt)
{
_jitterBuffer.UpdateRtt(rtt);
VCMEncodedFrame* VCMReceiver::FrameForDecoding(
uint16_t max_wait_time_ms,
int64_t& next_render_time_ms,
bool render_timing,
VCMReceiver* dual_receiver) {
// No need to enter the critical section here since the jitter buffer
// is thread-safe.
FrameType incoming_frame_type = kVideoFrameDelta;
next_render_time_ms = -1;
const int64_t start_time_ms = clock_->MillisecondTimestamp();
int64_t ret = jitter_buffer_.NextTimestamp(max_wait_time_ms,
&incoming_frame_type,
&next_render_time_ms);
if (ret < 0) {
// No timestamp in jitter buffer at the moment.
return NULL;
}
const uint32_t time_stamp = static_cast<uint32_t>(ret);

// Update the timing.
timing_->SetRequiredDelay(jitter_buffer_.EstimatedJitterMs());
timing_->UpdateCurrentDelay(time_stamp);

const int32_t temp_wait_time = max_wait_time_ms -
static_cast<int32_t>(clock_->MillisecondTimestamp() - start_time_ms);
uint16_t new_max_wait_time = static_cast<uint16_t>(VCM_MAX(temp_wait_time,
0));

VCMEncodedFrame* frame = NULL;

if (render_timing) {
frame = FrameForDecoding(new_max_wait_time, next_render_time_ms,
dual_receiver);
} else {
frame = FrameForRendering(new_max_wait_time, next_render_time_ms,
dual_receiver);
}

if (frame != NULL) {
bool retransmitted = false;
const int64_t last_packet_time_ms =
jitter_buffer_.LastPacketTime(frame, &retransmitted);
if (last_packet_time_ms >= 0 && !retransmitted) {
// We don't want to include timestamps which have suffered from
// retransmission here, since we compensate with extra retransmission
// delay within the jitter estimate.
timing_->IncomingTimestamp(time_stamp, last_packet_time_ms);
}
if (dual_receiver != NULL) {
dual_receiver->UpdateState(*frame);
}
}
return frame;
}

WebRtc_Word32
VCMReceiver::InsertPacket(const VCMPacket& packet,
WebRtc_UWord16 frameWidth,
WebRtc_UWord16 frameHeight)
{
// Find an empty frame
VCMEncodedFrame *buffer = NULL;
const WebRtc_Word32 error = _jitterBuffer.GetFrame(packet, buffer);
if (error == VCM_OLD_PACKET_ERROR)
{
return VCM_OK;
VCMEncodedFrame* VCMReceiver::FrameForDecoding(
uint16_t max_wait_time_ms,
int64_t next_render_time_ms,
VCMReceiver* dual_receiver) {
// How long can we wait until we must decode the next frame.
uint32_t wait_time_ms = timing_->MaxWaitingTime(
next_render_time_ms, clock_->MillisecondTimestamp());

// Try to get a complete frame from the jitter buffer.
VCMEncodedFrame* frame = jitter_buffer_.GetCompleteFrameForDecoding(0);

if (frame == NULL && max_wait_time_ms == 0 && wait_time_ms > 0) {
// If we're not allowed to wait for frames to get complete we must
// calculate if it's time to decode, and if it's not we will just return
// for now.
return NULL;
}

if (frame == NULL && VCM_MIN(wait_time_ms, max_wait_time_ms) == 0) {
// No time to wait for a complete frame, check if we have an incomplete.
const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
dual_receiver->State() == kPassive &&
dual_receiver->NackMode() == kNackInfinite);
if (dual_receiver_enabled_and_passive &&
!jitter_buffer_.CompleteSequenceWithNextFrame()) {
// Jitter buffer state might get corrupt with this frame.
dual_receiver->CopyJitterBufferStateFromReceiver(*this);
frame = jitter_buffer_.GetFrameForDecoding();
assert(frame);
} else {
frame = jitter_buffer_.GetFrameForDecoding();
}
else if (error != VCM_OK)
{
return error;
}
if (frame == NULL) {
// Wait for a complete frame.
frame = jitter_buffer_.GetCompleteFrameForDecoding(max_wait_time_ms);
}
if (frame == NULL) {
// Get an incomplete frame.
if (timing_->MaxWaitingTime(next_render_time_ms,
clock_->MillisecondTimestamp()) > 0) {
// Still time to wait for a complete frame.
return NULL;
}
assert(buffer);
{
CriticalSectionScoped cs(_critSect);

if (frameWidth && frameHeight)
{
buffer->SetEncodedSize(static_cast<WebRtc_UWord32>(frameWidth),
static_cast<WebRtc_UWord32>(frameHeight));
}

if (_master)
{
// Only trace the primary receiver to make it possible
// to parse and plot the trace file.
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
VCMId(_vcmId, _receiverId),
"Packet seqNo %u of frame %u at %u",
packet.seqNum, packet.timestamp,
MaskWord64ToUWord32(_clock->MillisecondTimestamp()));
}

const WebRtc_Word64 nowMs = _clock->MillisecondTimestamp();

WebRtc_Word64 renderTimeMs = _timing.RenderTimeMs(packet.timestamp, nowMs);

if (renderTimeMs < 0)
{
// Render time error. Assume that this is due to some change in
// the incoming video stream and reset the JB and the timing.
_jitterBuffer.Flush();
_timing.Reset(_clock->MillisecondTimestamp());
return VCM_FLUSH_INDICATOR;
}
else if (renderTimeMs < nowMs - kMaxVideoDelayMs)
{
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
"This frame should have been rendered more than %u ms ago."
"Flushing jitter buffer and resetting timing.", kMaxVideoDelayMs);
_jitterBuffer.Flush();
_timing.Reset(_clock->MillisecondTimestamp());
return VCM_FLUSH_INDICATOR;
}
else if (_timing.TargetVideoDelay() > kMaxVideoDelayMs)
{
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
"More than %u ms target delay. Flushing jitter buffer and resetting timing.",
kMaxVideoDelayMs);
_jitterBuffer.Flush();
_timing.Reset(_clock->MillisecondTimestamp());
return VCM_FLUSH_INDICATOR;
}

// First packet received belonging to this frame.
if (buffer->Length() == 0)
{
const WebRtc_Word64 nowMs = _clock->MillisecondTimestamp();
if (_master)
{
// Only trace the primary receiver to make it possible to parse and plot the trace file.
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _receiverId),
"First packet of frame %u at %u", packet.timestamp,
MaskWord64ToUWord32(nowMs));
}
renderTimeMs = _timing.RenderTimeMs(packet.timestamp, nowMs);
if (renderTimeMs >= 0)
{
buffer->SetRenderTime(renderTimeMs);
}
else
{
buffer->SetRenderTime(nowMs);
}
}

// Insert packet into the jitter buffer
// both media and empty packets
const VCMFrameBufferEnum
ret = _jitterBuffer.InsertPacket(buffer, packet);
if (ret == kFlushIndicator) {
return VCM_FLUSH_INDICATOR;
} else if (ret < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding,
VCMId(_vcmId, _receiverId),
"Error inserting packet seqNo=%u, timeStamp=%u",
packet.seqNum, packet.timestamp);
return VCM_JITTER_BUFFER_ERROR;
}
// No time left to wait, we must decode this frame now.
const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
dual_receiver->State() == kPassive &&
dual_receiver->NackMode() == kNackInfinite);
if (dual_receiver_enabled_and_passive &&
!jitter_buffer_.CompleteSequenceWithNextFrame()) {
// Jitter buffer state might get corrupt with this frame.
dual_receiver->CopyJitterBufferStateFromReceiver(*this);
}
return VCM_OK;

frame = jitter_buffer_.GetFrameForDecoding();
}
return frame;
}

VCMEncodedFrame* VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64& nextRenderTimeMs,
bool renderTiming,
VCMReceiver* dualReceiver)
{
// No need to enter the critical section here since the jitter buffer
// is thread-safe.
FrameType incomingFrameType = kVideoFrameDelta;
nextRenderTimeMs = -1;
const WebRtc_Word64 startTimeMs = _clock->MillisecondTimestamp();
WebRtc_Word64 ret = _jitterBuffer.NextTimestamp(maxWaitTimeMs,
&incomingFrameType,
&nextRenderTimeMs);
if (ret < 0)
{
// No timestamp in jitter buffer at the moment
return NULL;
}
const WebRtc_UWord32 timeStamp = static_cast<WebRtc_UWord32>(ret);
VCMEncodedFrame* VCMReceiver::FrameForRendering(uint16_t max_wait_time_ms,
int64_t next_render_time_ms,
VCMReceiver* dual_receiver) {
// How long MUST we wait until we must decode the next frame. This is
// different for the case where we have a renderer which can render at a
// specified time. Here we must wait as long as possible before giving the
// frame to the decoder, which will render the frame as soon as it has been
// decoded.
uint32_t wait_time_ms = timing_->MaxWaitingTime(
next_render_time_ms, clock_->MillisecondTimestamp());
if (max_wait_time_ms < wait_time_ms) {
// If we're not allowed to wait until the frame is supposed to be rendered
// we will have to return NULL for now.
return NULL;
}
// Wait until it's time to render.
render_wait_event_.Wait(wait_time_ms);

// Update the timing
_timing.SetRequiredDelay(_jitterBuffer.EstimatedJitterMs());
_timing.UpdateCurrentDelay(timeStamp);
// Get a complete frame if possible.
VCMEncodedFrame* frame = jitter_buffer_.GetCompleteFrameForDecoding(0);

const WebRtc_Word32 tempWaitTime = maxWaitTimeMs -
static_cast<WebRtc_Word32>(_clock->MillisecondTimestamp() - startTimeMs);
WebRtc_UWord16 newMaxWaitTime = static_cast<WebRtc_UWord16>(VCM_MAX(tempWaitTime, 0));

VCMEncodedFrame* frame = NULL;

if (renderTiming)
{
frame = FrameForDecoding(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
}
else
{
frame = FrameForRendering(newMaxWaitTime, nextRenderTimeMs, dualReceiver);
if (frame == NULL) {
// Get an incomplete frame.
const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
dual_receiver->State() == kPassive &&
dual_receiver->NackMode() == kNackInfinite);
if (dual_receiver_enabled_and_passive &&
!jitter_buffer_.CompleteSequenceWithNextFrame()) {
// Jitter buffer state might get corrupt with this frame.
dual_receiver->CopyJitterBufferStateFromReceiver(*this);
}

if (frame != NULL)
{
bool retransmitted = false;
const WebRtc_Word64 lastPacketTimeMs =
_jitterBuffer.LastPacketTime(frame, &retransmitted);
if (lastPacketTimeMs >= 0 && !retransmitted)
{
// We don't want to include timestamps which have suffered from retransmission
// here, since we compensate with extra retransmission delay within
// the jitter estimate.
_timing.IncomingTimestamp(timeStamp, lastPacketTimeMs);
}
if (dualReceiver != NULL)
{
dualReceiver->UpdateState(*frame);
}
}
return frame;
frame = jitter_buffer_.GetFrameForDecoding();
}
return frame;
}

VCMEncodedFrame*
VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextRenderTimeMs,
VCMReceiver* dualReceiver)
{
// How long can we wait until we must decode the next frame
WebRtc_UWord32 waitTimeMs = _timing.MaxWaitingTime(nextRenderTimeMs,
_clock->MillisecondTimestamp());

// Try to get a complete frame from the jitter buffer
VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);

if (frame == NULL && maxWaitTimeMs == 0 && waitTimeMs > 0)
{
// If we're not allowed to wait for frames to get complete we must
// calculate if it's time to decode, and if it's not we will just return
// for now.
return NULL;
}

if (frame == NULL && VCM_MIN(waitTimeMs, maxWaitTimeMs) == 0)
{
// No time to wait for a complete frame,
// check if we have an incomplete
const bool dualReceiverEnabledAndPassive = (dualReceiver != NULL &&
dualReceiver->State() == kPassive &&
dualReceiver->NackMode() == kNackInfinite);
if (dualReceiverEnabledAndPassive &&
!_jitterBuffer.CompleteSequenceWithNextFrame())
{
// Jitter buffer state might get corrupt with this frame.
dualReceiver->CopyJitterBufferStateFromReceiver(*this);
frame = _jitterBuffer.GetFrameForDecoding();
assert(frame);
} else {
frame = _jitterBuffer.GetFrameForDecoding();
}
}
if (frame == NULL)
{
// Wait for a complete frame
frame = _jitterBuffer.GetCompleteFrameForDecoding(maxWaitTimeMs);
}
if (frame == NULL)
{
// Get an incomplete frame
if (_timing.MaxWaitingTime(nextRenderTimeMs,
_clock->MillisecondTimestamp()) > 0)
{
// Still time to wait for a complete frame
return NULL;
}

// No time left to wait, we must decode this frame now.
const bool dualReceiverEnabledAndPassive = (dualReceiver != NULL &&
dualReceiver->State() == kPassive &&
dualReceiver->NackMode() == kNackInfinite);
if (dualReceiverEnabledAndPassive &&
!_jitterBuffer.CompleteSequenceWithNextFrame())
{
// Jitter buffer state might get corrupt with this frame.
dualReceiver->CopyJitterBufferStateFromReceiver(*this);
}

frame = _jitterBuffer.GetFrameForDecoding();
}
return frame;
void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
jitter_buffer_.ReleaseFrame(frame);
}

VCMEncodedFrame*
VCMReceiver::FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextRenderTimeMs,
VCMReceiver* dualReceiver)
{
// How long MUST we wait until we must decode the next frame. This is different for the case
// where we have a renderer which can render at a specified time. Here we must wait as long
// as possible before giving the frame to the decoder, which will render the frame as soon
// as it has been decoded.
WebRtc_UWord32 waitTimeMs = _timing.MaxWaitingTime(nextRenderTimeMs,
_clock->MillisecondTimestamp());
if (maxWaitTimeMs < waitTimeMs)
{
// If we're not allowed to wait until the frame is supposed to be rendered
// we will have to return NULL for now.
return NULL;
}
// Wait until it's time to render
_renderWaitEvent.Wait(waitTimeMs);

// Get a complete frame if possible
VCMEncodedFrame* frame = _jitterBuffer.GetCompleteFrameForDecoding(0);

if (frame == NULL)
{
// Get an incomplete frame
const bool dualReceiverEnabledAndPassive = dualReceiver != NULL &&
dualReceiver->State() == kPassive &&
dualReceiver->NackMode() == kNackInfinite;
if (dualReceiverEnabledAndPassive && !_jitterBuffer.CompleteSequenceWithNextFrame())
{
// Jitter buffer state might get corrupt with this frame.
dualReceiver->CopyJitterBufferStateFromReceiver(*this);
}

frame = _jitterBuffer.GetFrameForDecoding();
}
return frame;
void VCMReceiver::ReceiveStatistics(uint32_t* bitrate,
uint32_t* framerate) {
assert(bitrate);
assert(framerate);
jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
*bitrate /= 1000;  // Should be in kbps.
}

void
VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame)
{
_jitterBuffer.ReleaseFrame(frame);
void VCMReceiver::ReceivedFrameCount(VCMFrameCount* frame_count) const {
assert(frame_count);
jitter_buffer_.FrameStatistics(&frame_count->numDeltaFrames,
&frame_count->numKeyFrames);
}

WebRtc_Word32
VCMReceiver::ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate)
{
_jitterBuffer.IncomingRateStatistics(&frameRate, &bitRate);
bitRate /= 1000; // Should be in kbps
return 0;
uint32_t VCMReceiver::DiscardedPackets() const {
return jitter_buffer_.num_discarded_packets();
}

WebRtc_Word32
VCMReceiver::ReceivedFrameCount(VCMFrameCount& frameCount) const
{
_jitterBuffer.FrameStatistics(&frameCount.numDeltaFrames,
&frameCount.numKeyFrames);
return 0;
void VCMReceiver::SetNackMode(VCMNackMode nackMode) {
CriticalSectionScoped cs(crit_sect_);
// Default to always having NACK enabled in hybrid mode.
jitter_buffer_.SetNackMode(nackMode, kLowRttNackMs, -1);
if (!master_) {
state_ = kPassive;  // The dual decoder defaults to passive.
}
}

WebRtc_UWord32 VCMReceiver::DiscardedPackets() const {
return _jitterBuffer.num_discarded_packets();
VCMNackMode VCMReceiver::NackMode() const {
CriticalSectionScoped cs(crit_sect_);
return jitter_buffer_.nack_mode();
}

void
VCMReceiver::SetNackMode(VCMNackMode nackMode)
{
CriticalSectionScoped cs(_critSect);
// Default to always having NACK enabled in hybrid mode.
_jitterBuffer.SetNackMode(nackMode, kLowRttNackMs, -1);
if (!_master)
{
_state = kPassive; // The dual decoder defaults to passive
}
VCMNackStatus VCMReceiver::NackList(uint16_t* nack_list,
uint16_t* size) {
bool extended = false;
uint16_t nack_list_size = 0;
uint16_t* internal_nack_list = jitter_buffer_.CreateNackList(&nack_list_size,
&extended);
if (internal_nack_list == NULL && nack_list_size == 0xffff) {
// This combination is used to trigger key frame requests.
*size = 0;
return kNackKeyFrameRequest;
}
if (nack_list_size > *size) {
*size = nack_list_size;
return kNackNeedMoreMemory;
}
if (internal_nack_list != NULL && nack_list_size > 0) {
memcpy(nack_list, internal_nack_list, nack_list_size * sizeof(uint16_t));
}
*size = nack_list_size;
return kNackOk;
}

VCMNackMode
VCMReceiver::NackMode() const
{
CriticalSectionScoped cs(_critSect);
return _jitterBuffer.nack_mode();
}

VCMNackStatus
VCMReceiver::NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size)
{
bool extended = false;
WebRtc_UWord16 nackListSize = 0;
WebRtc_UWord16* internalNackList = _jitterBuffer.CreateNackList(
&nackListSize, &extended);
if (internalNackList == NULL && nackListSize == 0xffff)
{
// This combination is used to trigger key frame requests.
size = 0;
return kNackKeyFrameRequest;
}
if (nackListSize > size)
{
size = nackListSize;
return kNackNeedMoreMemory;
}
if (internalNackList != NULL && nackListSize > 0) {
memcpy(nackList, internalNackList, nackListSize * sizeof(WebRtc_UWord16));
}
size = nackListSize;
return kNackOk;
}

// Decide whether we should change decoder state. This should be done if the dual decoder
// has caught up with the decoder decoding with packet losses.
bool
VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const
{
if (dualFrame == NULL)
{
return false;
}
if (_jitterBuffer.LastDecodedTimestamp() == dualFrame->TimeStamp())
{
dualReceiver.UpdateState(kWaitForPrimaryDecode);
return true;
}
// Decide whether we should change decoder state. This should be done if the
// dual decoder has caught up with the decoder decoding with packet losses.
bool VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dual_frame,
VCMReceiver& dual_receiver) const {
if (dual_frame == NULL) {
return false;
}
if (jitter_buffer_.LastDecodedTimestamp() == dual_frame->TimeStamp()) {
dual_receiver.UpdateState(kWaitForPrimaryDecode);
return true;
}
return false;
}

void
VCMReceiver::CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver)
{
_jitterBuffer.CopyFrom(receiver._jitterBuffer);
void VCMReceiver::CopyJitterBufferStateFromReceiver(
const VCMReceiver& receiver) {
jitter_buffer_.CopyFrom(receiver.jitter_buffer_);
}

VCMReceiverState
VCMReceiver::State() const
{
CriticalSectionScoped cs(_critSect);
return _state;
VCMReceiverState VCMReceiver::State() const {
CriticalSectionScoped cs(crit_sect_);
return state_;
}

void
VCMReceiver::UpdateState(VCMReceiverState newState)
{
CriticalSectionScoped cs(_critSect);
assert(!(_state == kPassive && newState == kWaitForPrimaryDecode));
// assert(!(_state == kReceiving && newState == kPassive));
_state = newState;
}

void
VCMReceiver::UpdateState(VCMEncodedFrame& frame)
{
if (_jitterBuffer.nack_mode() == kNoNack)
{
// Dual decoder mode has not been enabled.
return;
}
// Update the dual receiver state
if (frame.Complete() && frame.FrameType() == kVideoFrameKey)
{
UpdateState(kPassive);
}
if (State() == kWaitForPrimaryDecode &&
frame.Complete() && !frame.MissingFrame())
{
UpdateState(kPassive);
}
if (frame.MissingFrame() || !frame.Complete())
{
// State was corrupted, enable dual receiver.
UpdateState(kReceiving);
}
void VCMReceiver::UpdateState(VCMReceiverState new_state) {
CriticalSectionScoped cs(crit_sect_);
assert(!(state_ == kPassive && new_state == kWaitForPrimaryDecode));
state_ = new_state;
}

void VCMReceiver::UpdateState(const VCMEncodedFrame& frame) {
if (jitter_buffer_.nack_mode() == kNoNack) {
// Dual decoder mode has not been enabled.
return;
}
// Update the dual receiver state.
if (frame.Complete() && frame.FrameType() == kVideoFrameKey) {
UpdateState(kPassive);
}
if (State() == kWaitForPrimaryDecode &&
frame.Complete() && !frame.MissingFrame()) {
UpdateState(kPassive);
}
if (frame.MissingFrame() || !frame.Complete()) {
// State was corrupted, enable dual receiver.
UpdateState(kReceiving);
}
}

} // namespace webrtc
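The refactored NackList() now reports its result through the size argument, a pointer that carries the buffer capacity on input and the number of entries written on output, instead of a WebRtc_UWord16 reference. A minimal caller sketch follows; the capacity constant and helper function are illustrative assumptions, not part of this change.

// Sketch only (illustrative, not part of this commit): handling the three
// statuses returned by the refactored VCMReceiver::NackList().
#include "webrtc/modules/video_coding/main/source/receiver.h"

namespace {
const uint16_t kMaxNackListSize = 250;  // Illustrative capacity.
}  // namespace

webrtc::VCMNackStatus RequestNacks(webrtc::VCMReceiver* receiver) {
  uint16_t nack_list[kMaxNackListSize];
  uint16_t size = kMaxNackListSize;  // In: capacity. Out: entries written.
  webrtc::VCMNackStatus status = receiver->NackList(nack_list, &size);
  if (status == webrtc::kNackNeedMoreMemory) {
    // |size| now holds the required length; a real caller would grow its
    // buffer and call NackList() again.
    return status;
  }
  if (status == webrtc::kNackKeyFrameRequest) {
    // |size| is 0; the caller should request a key frame instead of NACKing.
    return status;
  }
  // kNackOk: the first |size| entries of nack_list are the sequence numbers
  // to retransmit.
  return status;
}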

webrtc/modules/video_coding/main/source/receiver.h
@@ -8,94 +8,90 @@
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
#define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_

#include "critical_section_wrapper.h"
#include "jitter_buffer.h"
#include "modules/video_coding/main/source/tick_time_base.h"
#include "timing.h"
#include "packet.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
#include "webrtc/modules/video_coding/main/source/packet.h"
#include "webrtc/modules/video_coding/main/source/tick_time_base.h"
#include "webrtc/modules/video_coding/main/source/timing.h"

namespace webrtc
{
namespace webrtc {

class VCMEncodedFrame;

enum VCMNackStatus
{
kNackOk,
kNackNeedMoreMemory,
kNackKeyFrameRequest
enum VCMNackStatus {
kNackOk,
kNackNeedMoreMemory,
kNackKeyFrameRequest
};

enum VCMReceiverState
{
kReceiving,
kPassive,
kWaitForPrimaryDecode
enum VCMReceiverState {
kReceiving,
kPassive,
kWaitForPrimaryDecode
};

class VCMReceiver
{
public:
VCMReceiver(VCMTiming& timing,
TickTimeBase* clock,
WebRtc_Word32 vcmId = -1,
WebRtc_Word32 receiverId = -1,
bool master = true);
~VCMReceiver();
class VCMReceiver {
public:
VCMReceiver(VCMTiming* timing,
TickTimeBase* clock,
int32_t vcm_id = -1,
int32_t receiver_id = -1,
bool master = true);
~VCMReceiver();

void Reset();
WebRtc_Word32 Initialize();
void UpdateRtt(WebRtc_UWord32 rtt);
WebRtc_Word32 InsertPacket(const VCMPacket& packet,
WebRtc_UWord16 frameWidth,
WebRtc_UWord16 frameHeight);
VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64& nextRenderTimeMs,
bool renderTiming = true,
VCMReceiver* dualReceiver = NULL);
void ReleaseFrame(VCMEncodedFrame* frame);
WebRtc_Word32 ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate);
WebRtc_Word32 ReceivedFrameCount(VCMFrameCount& frameCount) const;
WebRtc_UWord32 DiscardedPackets() const;
void Reset();
int32_t Initialize();
void UpdateRtt(uint32_t rtt);
int32_t InsertPacket(const VCMPacket& packet,
uint16_t frame_width,
uint16_t frame_height);
VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
int64_t& next_render_time_ms,
bool render_timing = true,
VCMReceiver* dual_receiver = NULL);
void ReleaseFrame(VCMEncodedFrame* frame);
void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate);
void ReceivedFrameCount(VCMFrameCount* frame_count) const;
uint32_t DiscardedPackets() const;

// NACK
void SetNackMode(VCMNackMode nackMode);
VCMNackMode NackMode() const;
VCMNackStatus NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size);
// NACK.
void SetNackMode(VCMNackMode nackMode);
VCMNackMode NackMode() const;
VCMNackStatus NackList(uint16_t* nackList, uint16_t* size);

// Dual decoder
bool DualDecoderCaughtUp(VCMEncodedFrame* dualFrame, VCMReceiver& dualReceiver) const;
VCMReceiverState State() const;
// Dual decoder.
bool DualDecoderCaughtUp(VCMEncodedFrame* dual_frame,
VCMReceiver& dual_receiver) const;
VCMReceiverState State() const;

private:
VCMEncodedFrame* FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextrenderTimeMs,
VCMReceiver* dualReceiver);
VCMEncodedFrame* FrameForRendering(WebRtc_UWord16 maxWaitTimeMs,
WebRtc_Word64 nextrenderTimeMs,
VCMReceiver* dualReceiver);
void CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver);
void UpdateState(VCMReceiverState newState);
void UpdateState(VCMEncodedFrame& frame);
static WebRtc_Word32 GenerateReceiverId();
private:
VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
int64_t nextrender_time_ms,
VCMReceiver* dual_receiver);
VCMEncodedFrame* FrameForRendering(uint16_t max_wait_time_ms,
int64_t nextrender_time_ms,
VCMReceiver* dual_receiver);
void CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver);
void UpdateState(VCMReceiverState new_state);
void UpdateState(const VCMEncodedFrame& frame);
static int32_t GenerateReceiverId();

CriticalSectionWrapper* _critSect;
WebRtc_Word32 _vcmId;
TickTimeBase* _clock;
WebRtc_Word32 _receiverId;
bool _master;
VCMJitterBuffer _jitterBuffer;
VCMTiming& _timing;
VCMEvent& _renderWaitEvent;
VCMReceiverState _state;
CriticalSectionWrapper* crit_sect_;
int32_t vcm_id_;
TickTimeBase* clock_;
int32_t receiver_id_;
bool master_;
VCMJitterBuffer jitter_buffer_;
VCMTiming* timing_;
VCMEvent render_wait_event_;
VCMReceiverState state_;

static WebRtc_Word32 _receiverIdCounter;
static int32_t receiver_id_counter_;
};

} // namespace webrtc
} // namespace webrtc

#endif // WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
@@ -54,8 +54,8 @@ _receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_receiverInited(false),
_timing(clock_, id, 1),
_dualTiming(clock_, id, 2, &_timing),
_receiver(_timing, clock_, id, 1),
_dualReceiver(_dualTiming, clock_, id, 2, false),
_receiver(&_timing, clock_, id, 1),
_dualReceiver(&_dualTiming, clock_, id, 2, false),
_decodedFrameCallback(_timing, clock_),
_dualDecodedFrameCallback(_dualTiming, clock_),
_frameTypeCallback(NULL),
@@ -144,16 +144,8 @@ VideoCodingModuleImpl::Process()
{
WebRtc_UWord32 bitRate;
WebRtc_UWord32 frameRate;
const WebRtc_Word32 ret = _receiver.ReceiveStatistics(bitRate,
frameRate);
if (ret == 0)
{
_receiveStatsCallback->ReceiveStatistics(bitRate, frameRate);
}
else if (returnValue == VCM_OK)
{
returnValue = ret;
}
_receiver.ReceiveStatistics(&bitRate, &frameRate);
_receiveStatsCallback->ReceiveStatistics(bitRate, frameRate);
}
}

@@ -1255,11 +1247,11 @@ VideoCodingModuleImpl::NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size)
// the dual receiver if the dual receiver is receiving.
if (_receiver.NackMode() != kNoNack)
{
nackStatus = _receiver.NackList(nackList, size);
nackStatus = _receiver.NackList(nackList, &size);
}
else if (_dualReceiver.State() != kPassive)
{
nackStatus = _dualReceiver.NackList(nackList, size);
nackStatus = _dualReceiver.NackList(nackList, &size);
}
else
{
@@ -1294,7 +1286,8 @@ VideoCodingModuleImpl::NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size)
WebRtc_Word32
VideoCodingModuleImpl::ReceivedFrameCount(VCMFrameCount& frameCount) const
{
return _receiver.ReceivedFrameCount(frameCount);
_receiver.ReceivedFrameCount(&frameCount);
return VCM_OK;
}

WebRtc_UWord32 VideoCodingModuleImpl::DiscardedPackets() const {
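Taken together with the call-site updates above, wiring up the refactored receiver now looks roughly like the sketch below: the timing object is passed by pointer and statistics come back through out-pointers. The ids, helper function, and clock parameter are illustrative assumptions mirroring the updated call sites, not code from this commit.

// Illustrative wiring only; mirrors the updated call sites above.
#include "webrtc/modules/video_coding/main/source/receiver.h"
#include "webrtc/modules/video_coding/main/source/tick_time_base.h"
#include "webrtc/modules/video_coding/main/source/timing.h"

void ExampleReceiverSetup(webrtc::TickTimeBase* clock) {
  const int32_t kId = 1;  // Illustrative module id.
  webrtc::VCMTiming timing(clock, kId, 1);
  // The receiver now takes VCMTiming by pointer instead of by reference.
  webrtc::VCMReceiver receiver(&timing, clock, kId, 1);
  receiver.Initialize();

  uint32_t bitrate = 0;   // Returned in kbps.
  uint32_t framerate = 0;
  // Statistics are returned through out-pointers instead of references.
  receiver.ReceiveStatistics(&bitrate, &framerate);
}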