Simplifying VideoReceiver and JitterBuffer.

Removing frame_buffers_ array and dual-receiver mechanism. Also adding some
thread annotations to VCMJitterBuffer.

R=stefan@webrtc.org
BUG=4014

Review URL: https://webrtc-codereview.appspot.com/27239004
git-svn-id: http://webrtc.googlecode.com/svn/trunk@7735 4adac7df-926f-26a2-2b94-8c16560cd09d

parent 9334ac2d78
commit 4f16c874c6
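For reference, the "thread annotations" mentioned above are the Clang thread-safety-analysis macros that WebRTC wraps in system_wrappers (thread_annotations.h); the hunks below apply GUARDED_BY to the frame lists and EXCLUSIVE_LOCKS_REQUIRED to the private helpers. A minimal sketch of what the two macros express, assuming those WebRTC headers and the CriticalSectionWrapper/CriticalSectionScoped types already used in this code (the class and method names here are illustrative only, not part of the patch):

    class AnnotatedExample {
     public:
      // Caller must already hold |crit_sect_| when calling this; where
      // -Wthread-safety is enabled, clang can flag call sites that do not.
      void CleanUp() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);

      void Insert(VCMFrameBuffer* frame) {
        CriticalSectionScoped cs(crit_sect_);  // holds |crit_sect_| for the scope
        free_frames_.push_back(frame);         // OK: the lock is held here
      }

     private:
      CriticalSectionWrapper* crit_sect_;
      // Only legal to read or write while |crit_sect_| is held.
      UnorderedFrameList free_frames_ GUARDED_BY(crit_sect_);
    };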
@@ -68,7 +68,6 @@ public:
kNone,
kHardNack,
kSoftNack,
kDualDecoder,
kReferenceSelection
};
@@ -423,17 +422,6 @@ public:
virtual int RegisterRenderBufferSizeCallback(
VCMRenderBufferSizeCallback* callback) = 0;

// Waits for the next frame in the dual jitter buffer to become complete
// (waits no longer than maxWaitTimeMs), then passes it to the dual decoder
// for decoding. This will never trigger a render callback. Should be
// called frequently, and as long as it returns 1 it should be called again
// as soon as possible.
//
// Return value : 1, if a frame was decoded
// 0, if no frame was decoded
// < 0, on error.
virtual int32_t DecodeDualFrame(uint16_t maxWaitTimeMs = 200) = 0;

// Reset the decoder state to the initial state.
//
// Return value : VCM_OK, on success.
@@ -49,7 +49,6 @@ enum VCMVideoProtection {
kProtectionNack, // Both send-side and receive-side
kProtectionNackSender, // Send-side only
kProtectionNackReceiver, // Receive-side only
kProtectionDualDecoder,
kProtectionFEC,
kProtectionNackFEC,
kProtectionKeyOnLoss,
@@ -196,7 +196,7 @@ bool VCMDecodingState::ContinuousPictureId(int picture_id) const {
}

bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
return (seq_num == static_cast<uint16_t>(sequence_num_ + 1));
return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
}

bool VCMDecodingState::ContinuousLayer(int temporal_id,
@@ -21,7 +21,6 @@ namespace webrtc {
VCMFrameBuffer::VCMFrameBuffer()
:
_state(kStateEmpty),
_frameCounted(false),
_nackCount(0),
_latestPacketTimeMs(-1) {
}
@@ -33,7 +32,6 @@ VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
:
VCMEncodedFrame(rhs),
_state(rhs._state),
_frameCounted(rhs._frameCounted),
_sessionInfo(),
_nackCount(rhs._nackCount),
_latestPacketTimeMs(rhs._latestPacketTimeMs) {
@@ -191,7 +189,6 @@ VCMFrameBuffer::Reset() {
_length = 0;
_timeStamp = 0;
_sessionInfo.Reset();
_frameCounted = false;
_payloadType = 0;
_nackCount = 0;
_latestPacketTimeMs = -1;
@@ -233,15 +230,6 @@ VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
_state = state;
}

// Set counted status (as counted by JB or not)
void VCMFrameBuffer::SetCountedFrame(bool frameCounted) {
_frameCounted = frameCounted;
}

bool VCMFrameBuffer::GetCountedFrame() const {
return _frameCounted;
}

// Get current state of frame
VCMFrameBufferStateEnum
VCMFrameBuffer::GetState() const {
@@ -61,10 +61,6 @@ class VCMFrameBuffer : public VCMEncodedFrame {
int Tl0PicId() const;
bool NonReference() const;

// Set counted status (as counted by JB or not)
void SetCountedFrame(bool frameCounted);
bool GetCountedFrame() const;

// Increments a counter to keep track of the number of packets of this frame
// which were NACKed before they arrived.
void IncrementNackCount();
@@ -85,7 +81,6 @@ class VCMFrameBuffer : public VCMEncodedFrame {
void SetState(VCMFrameBufferStateEnum state); // Set state of frame

VCMFrameBufferStateEnum _state; // Current state of the frame
bool _frameCounted; // Was this frame counted by JB?
VCMSessionInfo _sessionInfo;
uint16_t _nackCount;
int64_t _latestPacketTimeMs;
@@ -47,13 +47,6 @@ void FrameList::InsertFrame(VCMFrameBuffer* frame) {
insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
}

VCMFrameBuffer* FrameList::FindFrame(uint32_t timestamp) const {
FrameList::const_iterator it = find(timestamp);
if (it == end())
return NULL;
return it->second;
}

VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
FrameList::iterator it = find(timestamp);
if (it == end())
@@ -90,9 +83,8 @@ int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
return drop_count;
}

int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
UnorderedFrameList* free_frames) {
int drop_count = 0;
while (!empty()) {
VCMFrameBuffer* oldest_frame = Front();
bool remove_frame = false;
@@ -107,12 +99,10 @@ int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
break;
}
free_frames->push_back(oldest_frame);
++drop_count;
TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
oldest_frame->TimeStamp());
erase(begin());
}
return drop_count;
}

void FrameList::Reset(UnorderedFrameList* free_frames) {
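The two lookup helpers above differ only in ownership: FindFrame leaves the entry in the map, PopFrame erases it and hands the buffer to the caller. A hypothetical caller-side sketch (variable names are illustrative):

    // FrameList is a std::map keyed on the frame's RTP timestamp.
    VCMFrameBuffer* peeked = incomplete_frames_.FindFrame(packet.timestamp);
    // |peeked| may be NULL; if not, the frame is still owned by the list.
    VCMFrameBuffer* taken = incomplete_frames_.PopFrame(packet.timestamp);
    // |taken| is the same buffer, but it has now been erased from the map and
    // the caller is responsible for putting it back into one of the lists.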
@@ -128,9 +118,7 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock, EventFactory* event_factory)
running_(false),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
frame_event_(event_factory->CreateEvent()),
packet_event_(event_factory->CreateEvent()),
max_number_of_frames_(kStartNumberOfFrames),
frame_buffers_(),
free_frames_(),
decodable_frames_(),
incomplete_frames_(),
@@ -141,8 +129,6 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock, EventFactory* event_factory)
time_last_incoming_frame_count_(0),
incoming_bit_count_(0),
incoming_bit_rate_(0),
drop_count_(0),
num_consecutive_old_frames_(0),
num_consecutive_old_packets_(0),
num_packets_(0),
num_duplicated_packets_(0),
@@ -161,88 +147,27 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock, EventFactory* event_factory)
decode_error_mode_(kNoErrors),
average_packets_per_frame_(0.0f),
frame_counter_(0) {
memset(frame_buffers_, 0, sizeof(frame_buffers_));

for (int i = 0; i < kStartNumberOfFrames; i++) {
frame_buffers_[i] = new VCMFrameBuffer();
free_frames_.push_back(frame_buffers_[i]);
}
for (int i = 0; i < kStartNumberOfFrames; i++)
free_frames_.push_back(new VCMFrameBuffer());
}

VCMJitterBuffer::~VCMJitterBuffer() {
Stop();
for (int i = 0; i < kMaxNumberOfFrames; i++) {
if (frame_buffers_[i]) {
delete frame_buffers_[i];
}
for (UnorderedFrameList::iterator it = free_frames_.begin();
it != free_frames_.end(); ++it) {
delete *it;
}
for (FrameList::iterator it = incomplete_frames_.begin();
it != incomplete_frames_.end(); ++it) {
delete it->second;
}
for (FrameList::iterator it = decodable_frames_.begin();
it != decodable_frames_.end(); ++it) {
delete it->second;
}
delete crit_sect_;
}
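The constructor and destructor above are where the removal of frame_buffers_ shows most clearly: after this change every allocated VCMFrameBuffer is owned by exactly one of the three containers, so teardown simply walks them. A condensed statement of the invariant (a sketch of the intent, not literal code from the patch):

    // Ownership invariant after this commit (sketch):
    //   free_frames_        - empty buffers waiting to be reused,
    //   incomplete_frames_  - frames still missing packets,
    //   decodable_frames_   - continuous/complete frames awaiting decode.
    // Every VCMFrameBuffer lives in exactly one of these at any time, which is
    // why ~VCMJitterBuffer() can delete through the three containers instead of
    // scanning a fixed frame_buffers_[kMaxNumberOfFrames] array.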
void VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs) {
|
||||
if (this != &rhs) {
|
||||
crit_sect_->Enter();
|
||||
rhs.crit_sect_->Enter();
|
||||
running_ = rhs.running_;
|
||||
max_number_of_frames_ = rhs.max_number_of_frames_;
|
||||
incoming_frame_rate_ = rhs.incoming_frame_rate_;
|
||||
incoming_frame_count_ = rhs.incoming_frame_count_;
|
||||
time_last_incoming_frame_count_ = rhs.time_last_incoming_frame_count_;
|
||||
incoming_bit_count_ = rhs.incoming_bit_count_;
|
||||
incoming_bit_rate_ = rhs.incoming_bit_rate_;
|
||||
drop_count_ = rhs.drop_count_;
|
||||
num_consecutive_old_frames_ = rhs.num_consecutive_old_frames_;
|
||||
num_consecutive_old_packets_ = rhs.num_consecutive_old_packets_;
|
||||
num_packets_ = rhs.num_packets_;
|
||||
num_duplicated_packets_ = rhs.num_duplicated_packets_;
|
||||
num_discarded_packets_ = rhs.num_discarded_packets_;
|
||||
jitter_estimate_ = rhs.jitter_estimate_;
|
||||
inter_frame_delay_ = rhs.inter_frame_delay_;
|
||||
waiting_for_completion_ = rhs.waiting_for_completion_;
|
||||
rtt_ms_ = rhs.rtt_ms_;
|
||||
first_packet_since_reset_ = rhs.first_packet_since_reset_;
|
||||
last_decoded_state_ = rhs.last_decoded_state_;
|
||||
decode_error_mode_ = rhs.decode_error_mode_;
|
||||
assert(max_nack_list_size_ == rhs.max_nack_list_size_);
|
||||
assert(max_packet_age_to_nack_ == rhs.max_packet_age_to_nack_);
|
||||
assert(max_incomplete_time_ms_ == rhs.max_incomplete_time_ms_);
|
||||
receive_statistics_ = rhs.receive_statistics_;
|
||||
nack_seq_nums_.resize(rhs.nack_seq_nums_.size());
|
||||
missing_sequence_numbers_ = rhs.missing_sequence_numbers_;
|
||||
latest_received_sequence_number_ = rhs.latest_received_sequence_number_;
|
||||
average_packets_per_frame_ = rhs.average_packets_per_frame_;
|
||||
for (int i = 0; i < kMaxNumberOfFrames; i++) {
|
||||
if (frame_buffers_[i] != NULL) {
|
||||
delete frame_buffers_[i];
|
||||
frame_buffers_[i] = NULL;
|
||||
}
|
||||
}
|
||||
free_frames_.clear();
|
||||
decodable_frames_.clear();
|
||||
incomplete_frames_.clear();
|
||||
int i = 0;
|
||||
for (UnorderedFrameList::const_iterator it = rhs.free_frames_.begin();
|
||||
it != rhs.free_frames_.end(); ++it, ++i) {
|
||||
frame_buffers_[i] = new VCMFrameBuffer;
|
||||
free_frames_.push_back(frame_buffers_[i]);
|
||||
}
|
||||
CopyFrames(&decodable_frames_, rhs.decodable_frames_, &i);
|
||||
CopyFrames(&incomplete_frames_, rhs.incomplete_frames_, &i);
|
||||
rhs.crit_sect_->Leave();
|
||||
crit_sect_->Leave();
|
||||
}
|
||||
}
|
||||
|
||||
void VCMJitterBuffer::CopyFrames(FrameList* to_list,
|
||||
const FrameList& from_list, int* index) {
|
||||
to_list->clear();
|
||||
for (FrameList::const_iterator it = from_list.begin();
|
||||
it != from_list.end(); ++it, ++*index) {
|
||||
frame_buffers_[*index] = new VCMFrameBuffer(*it->second);
|
||||
to_list->InsertFrame(frame_buffers_[*index]);
|
||||
}
|
||||
}
|
||||
|
||||
void VCMJitterBuffer::UpdateHistograms() {
|
||||
if (num_packets_ > 0) {
|
||||
RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DiscardedPacketsInPercent",
|
||||
@ -262,7 +187,6 @@ void VCMJitterBuffer::Start() {
|
||||
time_last_incoming_frame_count_ = clock_->TimeInMilliseconds();
|
||||
receive_statistics_.clear();
|
||||
|
||||
num_consecutive_old_frames_ = 0;
|
||||
num_consecutive_old_packets_ = 0;
|
||||
num_packets_ = 0;
|
||||
num_duplicated_packets_ = 0;
|
||||
@ -270,7 +194,6 @@ void VCMJitterBuffer::Start() {
|
||||
|
||||
// Start in a non-signaled state.
|
||||
frame_event_->Reset();
|
||||
packet_event_->Reset();
|
||||
waiting_for_completion_.frame_size = 0;
|
||||
waiting_for_completion_.timestamp = 0;
|
||||
waiting_for_completion_.latest_packet_time = -1;
|
||||
@ -284,20 +207,24 @@ void VCMJitterBuffer::Stop() {
|
||||
UpdateHistograms();
|
||||
running_ = false;
|
||||
last_decoded_state_.Reset();
|
||||
free_frames_.clear();
|
||||
// Make sure all frames are free and reset.
|
||||
for (FrameList::iterator it = decodable_frames_.begin();
|
||||
it != decodable_frames_.end(); ++it) {
|
||||
free_frames_.push_back(it->second);
|
||||
}
|
||||
for (FrameList::iterator it = incomplete_frames_.begin();
|
||||
it != incomplete_frames_.end(); ++it) {
|
||||
free_frames_.push_back(it->second);
|
||||
}
|
||||
for (UnorderedFrameList::iterator it = free_frames_.begin();
|
||||
it != free_frames_.end(); ++it) {
|
||||
(*it)->Reset();
|
||||
}
|
||||
decodable_frames_.clear();
|
||||
incomplete_frames_.clear();
|
||||
// Make sure all frames are reset and free.
|
||||
for (int i = 0; i < kMaxNumberOfFrames; i++) {
|
||||
if (frame_buffers_[i] != NULL) {
|
||||
static_cast<VCMFrameBuffer*>(frame_buffers_[i])->Reset();
|
||||
free_frames_.push_back(frame_buffers_[i]);
|
||||
}
|
||||
}
|
||||
crit_sect_->Leave();
|
||||
// Make sure we wake up any threads waiting on these events.
|
||||
frame_event_->Set();
|
||||
packet_event_->Set();
|
||||
}
|
||||
|
||||
bool VCMJitterBuffer::Running() const {
|
||||
@ -311,8 +238,6 @@ void VCMJitterBuffer::Flush() {
|
||||
incomplete_frames_.Reset(&free_frames_);
|
||||
last_decoded_state_.Reset(); // TODO(mikhal): sync reset.
|
||||
frame_event_->Reset();
|
||||
packet_event_->Reset();
|
||||
num_consecutive_old_frames_ = 0;
|
||||
num_consecutive_old_packets_ = 0;
|
||||
// Also reset the jitter and delay estimates
|
||||
jitter_estimate_.Reset();
|
||||
@@ -569,53 +494,35 @@ void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) {

// Gets frame to use for this timestamp. If no match, get empty frame.
VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
VCMFrameBuffer** frame) {
++num_packets_;
// Does this packet belong to an old frame?
if (last_decoded_state_.IsOldPacket(&packet)) {
// Account only for media packets.
if (packet.sizeBytes > 0) {
num_discarded_packets_++;
num_consecutive_old_packets_++;
}
// Update last decoded sequence number if the packet arrived late and
// belongs to a frame with a timestamp equal to the last decoded
// timestamp.
last_decoded_state_.UpdateOldPacket(&packet);
DropPacketsFromNackList(last_decoded_state_.sequence_num());

if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
LOG(LS_WARNING) << num_consecutive_old_packets_ << " consecutive old "
"packets received. Flushing the jitter buffer.";
Flush();
return kFlushIndicator;
}
return kOldPacket;
VCMFrameBuffer** frame,
FrameList** frame_list) {
*frame = incomplete_frames_.PopFrame(packet.timestamp);
if (*frame != NULL) {
*frame_list = &incomplete_frames_;
return kNoError;
}
num_consecutive_old_packets_ = 0;

*frame = incomplete_frames_.FindFrame(packet.timestamp);
if (*frame)
return kNoError;
*frame = decodable_frames_.FindFrame(packet.timestamp);
if (*frame)
*frame = decodable_frames_.PopFrame(packet.timestamp);
if (*frame != NULL) {
*frame_list = &decodable_frames_;
return kNoError;
}

*frame_list = NULL;
// No match, return empty frame.
*frame = GetEmptyFrame();
VCMFrameBufferEnum ret = kNoError;
if (!*frame) {
if (*frame == NULL) {
// No free frame! Try to reclaim some...
LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
bool found_key_frame = RecycleFramesUntilKeyFrame();
*frame = GetEmptyFrame();
assert(*frame);
if (!found_key_frame) {
ret = kFlushIndicator;
free_frames_.push_back(*frame);
return kFlushIndicator;
}
}
(*frame)->Reset();
return ret;
return kNoError;
}
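To make the new GetFrame() contract explicit: the frame is popped out of whichever list held it, and |frame_list| reports where it came from so that InsertPacket() (further down in this diff) can put it back if the packet turns out to be a duplicate or otherwise unused. A condensed, hypothetical caller-side sketch:

    VCMFrameBuffer* frame;
    FrameList* frame_list;  // &incomplete_frames_, &decodable_frames_, or NULL.
    const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
    if (error != kNoError)
      return error;  // e.g. kFlushIndicator when no free frame could be made.
    // ... insert the packet into |frame| ...
    // If the packet did not advance the frame (kNoError, kDuplicatePacket), the
    // frame goes back to the list it was taken from, or to |free_frames_| if it
    // was a newly fetched empty frame:
    if (frame_list != NULL)
      frame_list->InsertFrame(frame);
    else
      free_frames_.push_back(frame);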
int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame,
|
||||
@ -632,11 +539,38 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
|
||||
bool* retransmitted) {
|
||||
CriticalSectionScoped cs(crit_sect_);
|
||||
|
||||
VCMFrameBuffer* frame = NULL;
|
||||
const VCMFrameBufferEnum error = GetFrame(packet, &frame);
|
||||
if (error != kNoError && frame == NULL) {
|
||||
return error;
|
||||
++num_packets_;
|
||||
// Does this packet belong to an old frame?
|
||||
if (last_decoded_state_.IsOldPacket(&packet)) {
|
||||
// Account only for media packets.
|
||||
if (packet.sizeBytes > 0) {
|
||||
num_discarded_packets_++;
|
||||
num_consecutive_old_packets_++;
|
||||
}
|
||||
// Update last decoded sequence number if the packet arrived late and
|
||||
// belongs to a frame with a timestamp equal to the last decoded
|
||||
// timestamp.
|
||||
last_decoded_state_.UpdateOldPacket(&packet);
|
||||
DropPacketsFromNackList(last_decoded_state_.sequence_num());
|
||||
|
||||
if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
|
||||
LOG(LS_WARNING)
|
||||
<< num_consecutive_old_packets_
|
||||
<< " consecutive old packets received. Flushing the jitter buffer.";
|
||||
Flush();
|
||||
return kFlushIndicator;
|
||||
}
|
||||
return kOldPacket;
|
||||
}
|
||||
|
||||
num_consecutive_old_packets_ = 0;
|
||||
|
||||
VCMFrameBuffer* frame;
|
||||
FrameList* frame_list;
|
||||
const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
|
||||
if (error != kNoError)
|
||||
return error;
|
||||
|
||||
int64_t now_ms = clock_->TimeInMilliseconds();
|
||||
// We are keeping track of the first and latest seq numbers, and
|
||||
// the number of wraps to be able to calculate how many packets we expect.
|
||||
@ -645,23 +579,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
|
||||
// reset the delay estimate.
|
||||
inter_frame_delay_.Reset(now_ms);
|
||||
}
|
||||
if (last_decoded_state_.IsOldPacket(&packet)) {
|
||||
// This packet belongs to an old, already decoded frame, we want to update
|
||||
// the last decoded sequence number.
|
||||
last_decoded_state_.UpdateOldPacket(&packet);
|
||||
drop_count_++;
|
||||
// Flush if this happens consistently.
|
||||
num_consecutive_old_frames_++;
|
||||
if (num_consecutive_old_frames_ > kMaxConsecutiveOldFrames) {
|
||||
LOG(LS_WARNING) << num_consecutive_old_packets_ << " consecutive old "
|
||||
"frames received. Flushing the jitter buffer.";
|
||||
Flush();
|
||||
return kFlushIndicator;
|
||||
}
|
||||
return kNoError;
|
||||
}
|
||||
|
||||
num_consecutive_old_frames_ = 0;
|
||||
|
||||
// Empty packets may bias the jitter estimate (lacking size component),
|
||||
// therefore don't let empty packet trigger the following updates:
|
||||
@ -683,22 +600,18 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
|
||||
|
||||
VCMFrameBufferStateEnum previous_state = frame->GetState();
|
||||
// Insert packet.
|
||||
// Check for first packet. High sequence number will be -1 if neither an empty
|
||||
// packet nor a media packet has been inserted.
|
||||
bool first = (frame->GetHighSeqNum() == -1);
|
||||
FrameData frame_data;
|
||||
frame_data.rtt_ms = rtt_ms_;
|
||||
frame_data.rolling_average_packets_per_frame = average_packets_per_frame_;
|
||||
VCMFrameBufferEnum buffer_return = frame->InsertPacket(packet,
|
||||
now_ms,
|
||||
decode_error_mode_,
|
||||
frame_data);
|
||||
if (!frame->GetCountedFrame()) {
|
||||
VCMFrameBufferEnum buffer_state =
|
||||
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
|
||||
|
||||
if (previous_state != kStateComplete) {
|
||||
TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
|
||||
"timestamp", frame->TimeStamp());
|
||||
}
|
||||
|
||||
if (buffer_return > 0) {
|
||||
if (buffer_state > 0) {
|
||||
incoming_bit_count_ += packet.sizeBytes << 3;
|
||||
if (first_packet_since_reset_) {
|
||||
latest_received_sequence_number_ = packet.seqNum;
|
||||
@ -709,29 +622,27 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
|
||||
}
|
||||
if (!UpdateNackList(packet.seqNum) &&
|
||||
packet.frameType != kVideoFrameKey) {
|
||||
buffer_return = kFlushIndicator;
|
||||
buffer_state = kFlushIndicator;
|
||||
}
|
||||
|
||||
latest_received_sequence_number_ = LatestSequenceNumber(
|
||||
latest_received_sequence_number_, packet.seqNum);
|
||||
}
|
||||
}
|
||||
|
||||
// Is the frame already in the decodable list?
|
||||
bool update_decodable_list = (previous_state != kStateDecodable &&
|
||||
previous_state != kStateComplete);
|
||||
bool continuous = IsContinuous(*frame);
|
||||
switch (buffer_return) {
|
||||
switch (buffer_state) {
|
||||
case kGeneralError:
|
||||
case kTimeStampError:
|
||||
case kSizeError: {
|
||||
// This frame will be cleaned up later from the frame list.
|
||||
frame->Reset();
|
||||
free_frames_.push_back(frame);
|
||||
break;
|
||||
}
|
||||
case kCompleteSession: {
|
||||
if (update_decodable_list) {
|
||||
if (previous_state != kStateDecodable &&
|
||||
previous_state != kStateComplete) {
|
||||
CountFrame(*frame);
|
||||
frame->SetCountedFrame(true);
|
||||
if (continuous) {
|
||||
// Signal that we have a complete session.
|
||||
frame_event_->Set();
|
||||
@ -741,50 +652,42 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
|
||||
// Note: There is no break here - continuing to kDecodableSession.
|
||||
case kDecodableSession: {
|
||||
*retransmitted = (frame->GetNackCount() > 0);
|
||||
// Signal that we have a received packet.
|
||||
packet_event_->Set();
|
||||
if (!update_decodable_list) {
|
||||
break;
|
||||
}
|
||||
if (continuous) {
|
||||
if (!first) {
|
||||
incomplete_frames_.PopFrame(packet.timestamp);
|
||||
}
|
||||
decodable_frames_.InsertFrame(frame);
|
||||
FindAndInsertContinuousFrames(*frame);
|
||||
} else if (first) {
|
||||
} else {
|
||||
incomplete_frames_.InsertFrame(frame);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case kIncomplete: {
|
||||
// No point in storing empty continuous frames.
|
||||
if (frame->GetState() == kStateEmpty &&
|
||||
last_decoded_state_.UpdateEmptyFrame(frame)) {
|
||||
free_frames_.push_back(frame);
|
||||
frame->Reset();
|
||||
frame = NULL;
|
||||
return kNoError;
|
||||
} else if (first) {
|
||||
} else {
|
||||
incomplete_frames_.InsertFrame(frame);
|
||||
}
|
||||
// Signal that we have received a packet.
|
||||
packet_event_->Set();
|
||||
break;
|
||||
}
|
||||
case kNoError:
|
||||
case kOutOfBoundsPacket:
|
||||
case kDuplicatePacket: {
|
||||
// Put back the frame where it came from.
|
||||
if (frame_list != NULL) {
|
||||
frame_list->InsertFrame(frame);
|
||||
} else {
|
||||
free_frames_.push_back(frame);
|
||||
}
|
||||
++num_duplicated_packets_;
|
||||
break;
|
||||
}
|
||||
case kFlushIndicator:
|
||||
free_frames_.push_back(frame);
|
||||
return kFlushIndicator;
|
||||
default: {
|
||||
assert(false && "JitterBuffer::InsertPacket: Undefined value");
|
||||
}
|
||||
default: assert(false);
|
||||
}
|
||||
return buffer_return;
|
||||
return buffer_state;
|
||||
}
|
||||
|
||||
bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame,
|
||||
@ -795,13 +698,9 @@ bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame,
|
||||
// kStateDecodable will never be set when decode_error_mode_ is false
|
||||
// as SessionInfo determines this state based on the error mode (and frame
|
||||
// completeness).
|
||||
if ((frame.GetState() == kStateComplete ||
|
||||
frame.GetState() == kStateDecodable) &&
|
||||
decoding_state.ContinuousFrame(&frame)) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
return (frame.GetState() == kStateComplete ||
|
||||
frame.GetState() == kStateDecodable) &&
|
||||
decoding_state.ContinuousFrame(&frame);
|
||||
}
|
||||
|
||||
bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
|
||||
@ -944,7 +843,7 @@ uint16_t* VCMJitterBuffer::GetNackList(uint16_t* nack_list_size,
|
||||
return NULL;
|
||||
}
|
||||
if (last_decoded_state_.in_initial_state()) {
|
||||
VCMFrameBuffer* next_frame = NextFrame();
|
||||
VCMFrameBuffer* next_frame = NextFrame();
|
||||
const bool first_frame_is_key = next_frame &&
|
||||
next_frame->FrameType() == kVideoFrameKey &&
|
||||
next_frame->HaveFirstPacket();
|
||||
@ -1131,9 +1030,7 @@ VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() {
|
||||
bool VCMJitterBuffer::TryToIncreaseJitterBufferSize() {
|
||||
if (max_number_of_frames_ >= kMaxNumberOfFrames)
|
||||
return false;
|
||||
VCMFrameBuffer* new_frame = new VCMFrameBuffer();
|
||||
frame_buffers_[max_number_of_frames_] = new_frame;
|
||||
free_frames_.push_back(new_frame);
|
||||
free_frames_.push_back(new VCMFrameBuffer());
|
||||
++max_number_of_frames_;
|
||||
TRACE_COUNTER1("webrtc", "JBMaxFrames", max_number_of_frames_);
|
||||
return true;
|
||||
@ -1155,7 +1052,6 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
|
||||
&key_frame_it, &free_frames_);
|
||||
key_frame_found = key_frame_it != decodable_frames_.end();
|
||||
}
|
||||
drop_count_ += dropped_frames;
|
||||
TRACE_EVENT_INSTANT0("webrtc", "JB::RecycleFramesUntilKeyFrame");
|
||||
if (key_frame_found) {
|
||||
LOG(LS_INFO) << "Found key frame while dropping frames.";
|
||||
@ -1174,10 +1070,7 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
|
||||
|
||||
// Must be called under the critical section |crit_sect_|.
|
||||
void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
|
||||
if (!frame.GetCountedFrame()) {
|
||||
// Ignore ACK frames.
|
||||
incoming_frame_count_++;
|
||||
}
|
||||
incoming_frame_count_++;
|
||||
|
||||
if (frame.FrameType() == kVideoFrameKey) {
|
||||
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
|
||||
@ -1212,12 +1105,10 @@ void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
|
||||
|
||||
// Must be called under the critical section |crit_sect_|.
|
||||
void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
|
||||
drop_count_ +=
|
||||
decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
|
||||
&free_frames_);
|
||||
drop_count_ +=
|
||||
incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
|
||||
&free_frames_);
|
||||
decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
|
||||
&free_frames_);
|
||||
incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
|
||||
&free_frames_);
|
||||
if (!last_decoded_state_.in_initial_state()) {
|
||||
DropPacketsFromNackList(last_decoded_state_.sequence_num());
|
||||
}
|
||||
|
@ -63,14 +63,13 @@ class FrameList
|
||||
: public std::map<uint32_t, VCMFrameBuffer*, TimestampLessThan> {
|
||||
public:
|
||||
void InsertFrame(VCMFrameBuffer* frame);
|
||||
VCMFrameBuffer* FindFrame(uint32_t timestamp) const;
|
||||
VCMFrameBuffer* PopFrame(uint32_t timestamp);
|
||||
VCMFrameBuffer* Front() const;
|
||||
VCMFrameBuffer* Back() const;
|
||||
int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
|
||||
UnorderedFrameList* free_frames);
|
||||
int CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
|
||||
UnorderedFrameList* free_frames);
|
||||
void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
|
||||
UnorderedFrameList* free_frames);
|
||||
void Reset(UnorderedFrameList* free_frames);
|
||||
};
|
||||
|
||||
@ -80,9 +79,6 @@ class VCMJitterBuffer {
|
||||
EventFactory* event_factory);
|
||||
virtual ~VCMJitterBuffer();
|
||||
|
||||
// Makes |this| a deep copy of |rhs|.
|
||||
void CopyFrom(const VCMJitterBuffer& rhs);
|
||||
|
||||
// Initializes and starts jitter buffer.
|
||||
void Start();
|
||||
|
||||
@ -199,35 +195,43 @@ class VCMJitterBuffer {
|
||||
|
||||
// Gets the frame assigned to the timestamp of the packet. May recycle
|
||||
// existing frames if no free frames are available. Returns an error code if
|
||||
// failing, or kNoError on success.
|
||||
VCMFrameBufferEnum GetFrame(const VCMPacket& packet, VCMFrameBuffer** frame);
|
||||
void CopyFrames(FrameList* to_list, const FrameList& from_list);
|
||||
void CopyFrames(FrameList* to_list, const FrameList& from_list, int* index);
|
||||
// failing, or kNoError on success. |frame_list| contains which list the
|
||||
// packet was in, or NULL if it was not in a FrameList (a new frame).
|
||||
VCMFrameBufferEnum GetFrame(const VCMPacket& packet,
|
||||
VCMFrameBuffer** frame,
|
||||
FrameList** frame_list)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
// Returns true if |frame| is continuous in |decoding_state|, not taking
|
||||
// decodable frames into account.
|
||||
bool IsContinuousInState(const VCMFrameBuffer& frame,
|
||||
const VCMDecodingState& decoding_state) const;
|
||||
const VCMDecodingState& decoding_state) const
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
// Returns true if |frame| is continuous in the |last_decoded_state_|, taking
|
||||
// all decodable frames into account.
|
||||
bool IsContinuous(const VCMFrameBuffer& frame) const;
|
||||
bool IsContinuous(const VCMFrameBuffer& frame) const
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
// Looks for frames in |incomplete_frames_| which are continuous in
|
||||
// |last_decoded_state_| taking all decodable frames into account. Starts
|
||||
// the search from |new_frame|.
|
||||
void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame);
|
||||
VCMFrameBuffer* NextFrame() const;
|
||||
void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
VCMFrameBuffer* NextFrame() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
// Returns true if the NACK list was updated to cover sequence numbers up to
|
||||
// |sequence_number|. If false a key frame is needed to get into a state where
|
||||
// we can continue decoding.
|
||||
bool UpdateNackList(uint16_t sequence_number);
|
||||
bool UpdateNackList(uint16_t sequence_number)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
bool TooLargeNackList() const;
|
||||
// Returns true if the NACK list was reduced without problem. If false a key
|
||||
// frame is needed to get into a state where we can continue decoding.
|
||||
bool HandleTooLargeNackList();
|
||||
bool MissingTooOldPacket(uint16_t latest_sequence_number) const;
|
||||
bool HandleTooLargeNackList() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
bool MissingTooOldPacket(uint16_t latest_sequence_number) const
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
// Returns true if the too old packets was successfully removed from the NACK
|
||||
// list. If false, a key frame is needed to get into a state where we can
|
||||
// continue decoding.
|
||||
bool HandleTooOldPackets(uint16_t latest_sequence_number);
|
||||
bool HandleTooOldPackets(uint16_t latest_sequence_number)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
// Drops all packets in the NACK list up until |last_decoded_sequence_number|.
|
||||
void DropPacketsFromNackList(uint16_t last_decoded_sequence_number);
|
||||
|
||||
@ -235,15 +239,15 @@ class VCMJitterBuffer {
|
||||
|
||||
// Gets an empty frame, creating a new frame if necessary (i.e. increases
|
||||
// jitter buffer size).
|
||||
VCMFrameBuffer* GetEmptyFrame();
|
||||
VCMFrameBuffer* GetEmptyFrame() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
|
||||
// Attempts to increase the size of the jitter buffer. Returns true on
|
||||
// success, false otherwise.
|
||||
bool TryToIncreaseJitterBufferSize();
|
||||
bool TryToIncreaseJitterBufferSize() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
|
||||
// Recycles oldest frames until a key frame is found. Used if jitter buffer is
|
||||
// completely full. Returns true if a key frame was found.
|
||||
bool RecycleFramesUntilKeyFrame();
|
||||
bool RecycleFramesUntilKeyFrame() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
|
||||
// Updates the frame statistics.
|
||||
// Counts only complete frames, so decodable incomplete frames will not be
|
||||
@ -255,7 +259,7 @@ class VCMJitterBuffer {
|
||||
|
||||
// Cleans the frame list in the JB from old/empty frames.
|
||||
// Should only be called prior to actual use.
|
||||
void CleanUpOldOrEmptyFrames();
|
||||
void CleanUpOldOrEmptyFrames() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
|
||||
// Returns true if |packet| is likely to have been retransmitted.
|
||||
bool IsPacketRetransmitted(const VCMPacket& packet) const;
|
||||
@ -273,7 +277,7 @@ class VCMJitterBuffer {
|
||||
// Returns true if we should wait for retransmissions, false otherwise.
|
||||
bool WaitForRetransmissions();
|
||||
|
||||
int NonContinuousOrIncompleteDuration();
|
||||
int NonContinuousOrIncompleteDuration() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
|
||||
uint16_t EstimatedLowSequenceNumber(const VCMFrameBuffer& frame) const;
|
||||
|
||||
@@ -285,16 +289,12 @@ class VCMJitterBuffer {
CriticalSectionWrapper* crit_sect_;
// Event to signal when we have a frame ready for decoder.
scoped_ptr<EventWrapper> frame_event_;
// Event to signal when we have received a packet.
scoped_ptr<EventWrapper> packet_event_;
// Number of allocated frames.
int max_number_of_frames_;
// Array of pointers to the frames in jitter buffer.
VCMFrameBuffer* frame_buffers_[kMaxNumberOfFrames];
UnorderedFrameList free_frames_;
FrameList decodable_frames_;
FrameList incomplete_frames_;
VCMDecodingState last_decoded_state_;
UnorderedFrameList free_frames_ GUARDED_BY(crit_sect_);
FrameList decodable_frames_ GUARDED_BY(crit_sect_);
FrameList incomplete_frames_ GUARDED_BY(crit_sect_);
VCMDecodingState last_decoded_state_ GUARDED_BY(crit_sect_);
bool first_packet_since_reset_;

// Statistics.
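With the lists annotated as above, clang's thread-safety analysis (where -Wthread-safety is enabled and CriticalSectionWrapper carries the matching lock annotations, which is an assumption here) can reject unguarded access. A hypothetical violation it would flag, not part of this patch:

    int VCMJitterBuffer::NumIncompleteFrames() const {
      // Would trigger a thread-safety warning: |incomplete_frames_| is
      // GUARDED_BY(crit_sect_) but |crit_sect_| is not held here.
      return static_cast<int>(incomplete_frames_.size());
    }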
@ -306,7 +306,6 @@ class VCMJitterBuffer {
|
||||
int64_t time_last_incoming_frame_count_;
|
||||
unsigned int incoming_bit_count_;
|
||||
unsigned int incoming_bit_rate_;
|
||||
unsigned int drop_count_; // Frame drop counter.
|
||||
// Number of frames in a row that have been too old.
|
||||
int num_consecutive_old_frames_;
|
||||
// Number of packets in a row that have been too old.
|
||||
|
@ -269,9 +269,10 @@ TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
|
||||
bool retransmitted = false;
|
||||
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
|
||||
&retransmitted));
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
|
||||
@ -298,6 +299,7 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
|
||||
@ -340,6 +342,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
|
||||
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
|
||||
@ -353,6 +356,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
|
||||
&retransmitted));
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
EXPECT_FALSE(frame_out == NULL);
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
++seq_num_;
|
||||
packet_->seqNum = seq_num_;
|
||||
@ -394,6 +398,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
|
||||
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
|
||||
@ -440,6 +445,7 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
|
||||
@ -492,18 +498,14 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
|
||||
&retransmitted));
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
|
||||
@ -525,9 +527,6 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
|
||||
EXPECT_EQ(1, jitter_buffer_->num_packets());
|
||||
EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
|
||||
|
||||
packet_->isFirstPacket = false;
|
||||
packet_->markerBit = true;
|
||||
|
||||
// Insert a packet into a frame.
|
||||
EXPECT_EQ(kDuplicatePacket, jitter_buffer_->InsertPacket(*packet_,
|
||||
&retransmitted));
|
||||
@ -536,17 +535,72 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
|
||||
|
||||
seq_num_++;
|
||||
packet_->seqNum = seq_num_;
|
||||
packet_->markerBit = true;
|
||||
packet_->isFirstPacket = false;
|
||||
|
||||
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
|
||||
&retransmitted));
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
ASSERT_TRUE(frame_out != NULL);
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(3, jitter_buffer_->num_packets());
|
||||
EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->isFirstPacket = true;
|
||||
packet_->markerBit = true;
|
||||
packet_->seqNum = seq_num_;
|
||||
packet_->timestamp = timestamp_;
|
||||
jitter_buffer_->SetDecodeErrorMode(kNoErrors);
|
||||
EXPECT_EQ(0, jitter_buffer_->num_packets());
|
||||
EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
|
||||
|
||||
bool retransmitted = false;
|
||||
// Insert first complete frame.
|
||||
EXPECT_EQ(kCompleteSession,
|
||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
ASSERT_TRUE(frame_out != NULL);
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
// Insert 3 delta frames.
|
||||
for (uint16_t i = 1; i <= 3; ++i) {
|
||||
packet_->seqNum = seq_num_ + i;
|
||||
packet_->timestamp = timestamp_ + (i * 33) * 90;
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
EXPECT_EQ(kCompleteSession,
|
||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||
EXPECT_EQ(i + 1, jitter_buffer_->num_packets());
|
||||
EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
|
||||
}
|
||||
|
||||
// Retransmit second delta frame.
|
||||
packet_->seqNum = seq_num_ + 2;
|
||||
packet_->timestamp = timestamp_ + 66 * 90;
|
||||
|
||||
EXPECT_EQ(kDuplicatePacket,
|
||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||
|
||||
EXPECT_EQ(5, jitter_buffer_->num_packets());
|
||||
EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
|
||||
|
||||
// Should be able to decode 3 delta frames, key frame already decoded.
|
||||
for (size_t i = 0; i < 3; ++i) {
|
||||
frame_out = DecodeCompleteFrame();
|
||||
ASSERT_TRUE(frame_out != NULL);
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
|
||||
@ -577,6 +631,7 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
|
||||
frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
// Test threshold conditions of decodable state.
|
||||
@ -615,6 +670,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, 10 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
// An incomplete frame can only be decoded once a subsequent frame has begun
|
||||
// to arrive. Insert packet in distant frame for this purpose.
|
||||
@ -657,6 +713,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
|
||||
ASSERT_FALSE(NULL == frame_out);
|
||||
CheckOutFrame(frame_out, 9 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
packet_->markerBit = true;
|
||||
packet_->seqNum++;
|
||||
@ -680,6 +737,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
// An incomplete frame can only be decoded once a subsequent frame has begun
|
||||
// to arrive. Insert packet in distant frame for this purpose.
|
||||
@ -724,6 +782,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
|
||||
frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, 6 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
// Make sure first packet is present before a frame can be decoded.
|
||||
@ -742,6 +801,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
// An incomplete frame can only be decoded once a subsequent frame has begun
|
||||
// to arrive. Insert packet in distant frame for this purpose.
|
||||
@ -785,6 +845,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
|
||||
frame_out = DecodeIncompleteFrame();
|
||||
CheckOutFrame(frame_out, 7 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
|
||||
@ -980,6 +1041,7 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
|
||||
@ -1028,6 +1090,7 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
|
||||
frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
|
||||
@ -1049,11 +1112,8 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
|
||||
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
EXPECT_EQ(3000u, frame_out->TimeStamp());
|
||||
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
seq_num_--;
|
||||
@ -1127,7 +1187,6 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
|
||||
&retransmitted));
|
||||
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
|
||||
EXPECT_TRUE(frame_out == NULL);
|
||||
|
||||
seq_num_++;
|
||||
@ -1139,9 +1198,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
|
||||
&retransmitted));
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
seq_num_++;
|
||||
@ -1156,7 +1213,6 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
|
||||
&retransmitted));
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
EXPECT_TRUE(frame_out == NULL);
|
||||
|
||||
seq_num_++;
|
||||
@ -1168,10 +1224,9 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
|
||||
&retransmitted));
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
|
||||
@ -1205,17 +1260,15 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
|
||||
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
|
||||
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
|
||||
EXPECT_EQ(2700u, frame_out2->TimeStamp());
|
||||
|
||||
CheckOutFrame(frame_out2, size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out2);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
|
||||
@ -1250,17 +1303,15 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
|
||||
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
|
||||
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
|
||||
EXPECT_EQ(2700u, frame_out2->TimeStamp());
|
||||
|
||||
CheckOutFrame(frame_out2, size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out2);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
|
||||
@ -1345,15 +1396,14 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
|
||||
packet_->timestamp = timestamp_;
|
||||
|
||||
// Now, no free frame - frames will be recycled until first key frame.
|
||||
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
|
||||
&retransmitted));
|
||||
EXPECT_EQ(kFlushIndicator,
|
||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
EXPECT_EQ(first_key_frame_timestamp, frame_out->TimeStamp());
|
||||
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
|
||||
@ -1378,6 +1428,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
|
||||
// Timestamp should never be the last TS inserted.
|
||||
if (testFrame != NULL) {
|
||||
EXPECT_TRUE(testFrame->TimeStamp() < timestamp_);
|
||||
jitter_buffer_->ReleaseFrame(testFrame);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1530,7 +1581,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
|
||||
// Will be sent to the decoder, as a packet belonging to a subsequent frame
|
||||
// has arrived.
|
||||
frame_out = DecodeIncompleteFrame();
|
||||
|
||||
EXPECT_TRUE(frame_out != NULL);
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
// Test that a frame can include an empty packet.
|
||||
seq_num_++;
|
||||
@ -1559,6 +1611,7 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
|
||||
frame_out = DecodeCompleteFrame();
|
||||
// Only last NALU is complete
|
||||
CheckOutFrame(frame_out, packet_->sizeBytes, false);
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
|
||||
@ -1576,6 +1629,7 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
|
||||
&retransmitted));
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
EXPECT_TRUE(frame_out != NULL);
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
packet_->seqNum += 2;
|
||||
packet_->timestamp += 33 * 90;
|
||||
@ -1598,8 +1652,8 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
|
||||
&retransmitted));
|
||||
|
||||
frame_out = DecodeIncompleteFrame();
|
||||
|
||||
CheckOutFrame(frame_out, packet_->sizeBytes, false);
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestRunningJitterBuffer, Full) {
|
||||
@ -1614,7 +1668,7 @@ TEST_F(TestRunningJitterBuffer, Full) {
|
||||
// This frame will make the jitter buffer recycle frames until a key frame.
|
||||
// Since none is found it will have to wait until the next key frame before
|
||||
// decoding.
|
||||
EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
|
||||
EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
|
||||
EXPECT_FALSE(DecodeCompleteFrame());
|
||||
}
|
||||
|
||||
|
@ -31,7 +31,6 @@ VCMReceiver::VCMReceiver(VCMTiming* timing,
|
||||
bool master)
|
||||
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
|
||||
clock_(clock),
|
||||
master_(master),
|
||||
jitter_buffer_(clock_, event_factory),
|
||||
timing_(timing),
|
||||
render_wait_event_(event_factory->CreateEvent()),
|
||||
@ -51,19 +50,12 @@ void VCMReceiver::Reset() {
|
||||
jitter_buffer_.Flush();
|
||||
}
|
||||
render_wait_event_->Reset();
|
||||
if (master_) {
|
||||
state_ = kReceiving;
|
||||
} else {
|
||||
state_ = kPassive;
|
||||
}
|
||||
state_ = kReceiving;
|
||||
}
|
||||
|
||||
int32_t VCMReceiver::Initialize() {
|
||||
Reset();
|
||||
CriticalSectionScoped cs(crit_sect_);
|
||||
if (!master_) {
|
||||
SetNackMode(kNoNack, -1, -1);
|
||||
}
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
@ -95,34 +87,20 @@ int32_t VCMReceiver::InsertPacket(const VCMPacket& packet,
|
||||
return VCM_OK;
|
||||
}
|
||||
|
||||
VCMEncodedFrame* VCMReceiver::FrameForDecoding(
|
||||
uint16_t max_wait_time_ms,
|
||||
int64_t& next_render_time_ms,
|
||||
bool render_timing,
|
||||
VCMReceiver* dual_receiver) {
|
||||
VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
|
||||
int64_t& next_render_time_ms,
|
||||
bool render_timing) {
|
||||
const int64_t start_time_ms = clock_->TimeInMilliseconds();
|
||||
uint32_t frame_timestamp = 0;
|
||||
// Exhaust wait time to get a complete frame for decoding.
|
||||
bool found_frame = jitter_buffer_.NextCompleteTimestamp(
|
||||
max_wait_time_ms, &frame_timestamp);
|
||||
|
||||
if (!found_frame) {
|
||||
// Get an incomplete frame when enabled.
|
||||
const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
|
||||
dual_receiver->State() == kPassive &&
|
||||
dual_receiver->NackMode() == kNack);
|
||||
if (dual_receiver_enabled_and_passive &&
|
||||
!jitter_buffer_.CompleteSequenceWithNextFrame()) {
|
||||
// Jitter buffer state might get corrupt with this frame.
|
||||
dual_receiver->CopyJitterBufferStateFromReceiver(*this);
|
||||
}
|
||||
found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(
|
||||
&frame_timestamp);
|
||||
}
|
||||
if (!found_frame)
|
||||
found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
|
||||
|
||||
if (!found_frame) {
|
||||
if (!found_frame)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// We have a frame - Set timing and render timestamp.
|
||||
timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
|
||||
@ -182,9 +160,6 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(
|
||||
frame->SetRenderTime(next_render_time_ms);
|
||||
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
|
||||
"SetRenderTS", "render_time", next_render_time_ms);
|
||||
if (dual_receiver != NULL) {
|
||||
dual_receiver->UpdateState(*frame);
|
||||
}
|
||||
if (!frame->Complete()) {
|
||||
// Update stats for incomplete frames.
|
||||
bool retransmitted = false;
|
||||
@ -229,9 +204,6 @@ void VCMReceiver::SetNackMode(VCMNackMode nackMode,
|
||||
// Default to always having NACK enabled in hybrid mode.
|
||||
jitter_buffer_.SetNackMode(nackMode, low_rtt_nack_threshold_ms,
|
||||
high_rtt_nack_threshold_ms);
|
||||
if (!master_) {
|
||||
state_ = kPassive; // The dual decoder defaults to passive.
|
||||
}
|
||||
}
|
||||
|
||||
void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
|
||||
@ -263,25 +235,6 @@ VCMNackStatus VCMReceiver::NackList(uint16_t* nack_list,
|
||||
return kNackOk;
|
||||
}
|
||||
|
||||
// Decide whether we should change decoder state. This should be done if the
|
||||
// dual decoder has caught up with the decoder decoding with packet losses.
|
||||
bool VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dual_frame,
|
||||
VCMReceiver& dual_receiver) const {
|
||||
if (dual_frame == NULL) {
|
||||
return false;
|
||||
}
|
||||
if (jitter_buffer_.LastDecodedTimestamp() == dual_frame->TimeStamp()) {
|
||||
dual_receiver.UpdateState(kWaitForPrimaryDecode);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void VCMReceiver::CopyJitterBufferStateFromReceiver(
|
||||
const VCMReceiver& receiver) {
|
||||
jitter_buffer_.CopyFrom(receiver.jitter_buffer_);
|
||||
}
|
||||
|
||||
VCMReceiverState VCMReceiver::State() const {
|
||||
CriticalSectionScoped cs(crit_sect_);
|
||||
return state_;
|
||||
@ -323,29 +276,4 @@ int VCMReceiver::RenderBufferSizeMs() {
|
||||
uint32_t render_end = timing_->RenderTimeMs(timestamp_end, now_ms);
|
||||
return render_end - render_start;
|
||||
}
|
||||
|
||||
void VCMReceiver::UpdateState(VCMReceiverState new_state) {
|
||||
CriticalSectionScoped cs(crit_sect_);
|
||||
assert(!(state_ == kPassive && new_state == kWaitForPrimaryDecode));
|
||||
state_ = new_state;
|
||||
}
|
||||
|
||||
void VCMReceiver::UpdateState(const VCMEncodedFrame& frame) {
|
||||
if (jitter_buffer_.nack_mode() == kNoNack) {
|
||||
// Dual decoder mode has not been enabled.
|
||||
return;
|
||||
}
|
||||
// Update the dual receiver state.
|
||||
if (frame.Complete() && frame.FrameType() == kVideoFrameKey) {
|
||||
UpdateState(kPassive);
|
||||
}
|
||||
if (State() == kWaitForPrimaryDecode &&
|
||||
frame.Complete() && !frame.MissingFrame()) {
|
||||
UpdateState(kPassive);
|
||||
}
|
||||
if (frame.MissingFrame() || !frame.Complete()) {
|
||||
// State was corrupted, enable dual receiver.
|
||||
UpdateState(kReceiving);
|
||||
}
|
||||
}
|
||||
} // namespace webrtc
|
||||
|
@ -50,8 +50,7 @@ class VCMReceiver {
|
||||
uint16_t frame_height);
|
||||
VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
|
||||
int64_t& next_render_time_ms,
|
||||
bool render_timing = true,
|
||||
VCMReceiver* dual_receiver = NULL);
|
||||
bool render_timing = true);
|
||||
void ReleaseFrame(VCMEncodedFrame* frame);
|
||||
void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate);
|
||||
void ReceivedFrameCount(VCMFrameCount* frame_count) const;
|
||||
@ -67,10 +66,6 @@ class VCMReceiver {
|
||||
VCMNackMode NackMode() const;
|
||||
VCMNackStatus NackList(uint16_t* nackList, uint16_t size,
|
||||
uint16_t* nack_list_length);
|
||||
|
||||
// Dual decoder.
|
||||
bool DualDecoderCaughtUp(VCMEncodedFrame* dual_frame,
|
||||
VCMReceiver& dual_receiver) const;
|
||||
VCMReceiverState State() const;
|
||||
|
||||
// Receiver video delay.
|
||||
@ -86,14 +81,10 @@ class VCMReceiver {
|
||||
int RenderBufferSizeMs();
|
||||
|
||||
private:
|
||||
void CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver);
|
||||
void UpdateState(VCMReceiverState new_state);
|
||||
void UpdateState(const VCMEncodedFrame& frame);
|
||||
static int32_t GenerateReceiverId();
|
||||
|
||||
CriticalSectionWrapper* crit_sect_;
|
||||
Clock* clock_;
|
||||
bool master_;
|
||||
VCMJitterBuffer jitter_buffer_;
|
||||
VCMTiming* timing_;
|
||||
scoped_ptr<EventWrapper> render_wait_event_;
|
||||
|
@ -81,8 +81,8 @@ class TestVCMReceiver : public ::testing::Test {
|
||||
|
||||
bool DecodeNextFrame() {
|
||||
int64_t render_time_ms = 0;
|
||||
VCMEncodedFrame* frame = receiver_.FrameForDecoding(0, render_time_ms,
|
||||
false, NULL);
|
||||
VCMEncodedFrame* frame =
|
||||
receiver_.FrameForDecoding(0, render_time_ms, false);
|
||||
if (!frame)
|
||||
return false;
|
||||
receiver_.ReleaseFrame(frame);
|
||||
|
@ -487,7 +487,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
// Store the sequence number for the first packet.
first_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (first_packet_seq_num_ != -1 &&
!IsNewerSequenceNumber(packet.seqNum, first_packet_seq_num_)) {
IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum)) {
LOG(LS_WARNING) << "Received packet with a sequence number which is out "
"of frame boundaries";
return -3;

@ -286,10 +286,6 @@ class VideoCodingModuleImpl : public VideoCodingModule {
return receiver_->Decode(maxWaitTimeMs);
}

virtual int32_t DecodeDualFrame(uint16_t maxWaitTimeMs) OVERRIDE {
return receiver_->DecodeDualFrame(maxWaitTimeMs);
}

virtual int32_t ResetDecoder() OVERRIDE { return receiver_->ResetDecoder(); }

virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const OVERRIDE {

@ -153,7 +153,6 @@ class VideoReceiver {
int RegisterRenderBufferSizeCallback(VCMRenderBufferSizeCallback* callback);

int32_t Decode(uint16_t maxWaitTimeMs);
int32_t DecodeDualFrame(uint16_t maxWaitTimeMs);
int32_t ResetDecoder();

int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const;
@ -206,11 +205,8 @@ class VideoReceiver {
CriticalSectionWrapper* _receiveCritSect;
bool _receiverInited GUARDED_BY(_receiveCritSect);
VCMTiming _timing;
VCMTiming _dualTiming;
VCMReceiver _receiver;
VCMReceiver _dualReceiver;
VCMDecodedFrameCallback _decodedFrameCallback;
VCMDecodedFrameCallback _dualDecodedFrameCallback;
VCMFrameTypeCallback* _frameTypeCallback GUARDED_BY(process_crit_sect_);
VCMReceiveStatisticsCallback* _receiveStatsCallback
GUARDED_BY(process_crit_sect_);
@ -221,7 +217,6 @@ class VideoReceiver {
VCMRenderBufferSizeCallback* render_buffer_callback_
GUARDED_BY(process_crit_sect_);
VCMGenericDecoder* _decoder;
VCMGenericDecoder* _dualDecoder;
#ifdef DEBUG_DECODER_BIT_STREAM
FILE* _bitStreamBeforeDecoder;
#endif

@ -161,100 +161,6 @@ TEST_F(VCMRobustnessTest, TestHardNackNoneDecoded) {
ASSERT_EQ(VCM_OK, vcm_->Process());
}

TEST_F(VCMRobustnessTest, TestDualDecoder) {
Sequence s1, s2;
EXPECT_CALL(request_callback_, ResendPackets(_, 1))
.With(Args<0, 1>(ElementsAre(4)))
.Times(1);

EXPECT_CALL(decoder_, Copy())
.Times(1)
.WillOnce(Return(&decoderCopy_));
EXPECT_CALL(decoderCopy_, Copy())
.Times(1)
.WillOnce(Return(&decoder_));

// Decode operations
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
Field(&EncodedImage::_completeFrame,
true)),
false, _, _, _))
.Times(1)
.InSequence(s1);
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
Field(&EncodedImage::_completeFrame,
false)),
false, _, _, _))
.Times(1)
.InSequence(s1);
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
Field(&EncodedImage::_completeFrame,
true)),
false, _, _, _))
.Times(1)
.InSequence(s1);
EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
Field(&EncodedImage::_completeFrame,
true)),
false, _, _, _))
.Times(1)
.InSequence(s1);

EXPECT_CALL(decoderCopy_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
Field(&EncodedImage::_completeFrame,
true)),
false, _, _, _))
.Times(1)
.InSequence(s2);
EXPECT_CALL(decoderCopy_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
Field(&EncodedImage::_completeFrame,
true)),
false, _, _, _))
.Times(1)
.InSequence(s2);


ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
VideoCodingModule::kDualDecoder, kWithErrors));

InsertPacket(0, 0, true, false, kVideoFrameKey);
InsertPacket(0, 1, false, false, kVideoFrameKey);
InsertPacket(0, 2, false, true, kVideoFrameKey);
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 0.

clock_->AdvanceTimeMilliseconds(33);
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
// Packet 4 missing.
InsertPacket(3000, 5, false, true, kVideoFrameDelta);
EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));

clock_->AdvanceTimeMilliseconds(33);
InsertPacket(6000, 6, true, false, kVideoFrameDelta);
InsertPacket(6000, 7, false, false, kVideoFrameDelta);
InsertPacket(6000, 8, false, true, kVideoFrameDelta);

EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 3000 incomplete.
// Spawn a decoder copy.
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // Expect no dual decoder action.

clock_->AdvanceTimeMilliseconds(10);
EXPECT_EQ(VCM_OK, vcm_->Process()); // Generate NACK list.

EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 6000 complete.
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // Expect no dual decoder action.

InsertPacket(3000, 4, false, false, kVideoFrameDelta);
EXPECT_EQ(1, vcm_->DecodeDualFrame(0)); // Dual decode of timestamp 3000.
EXPECT_EQ(1, vcm_->DecodeDualFrame(0)); // Dual decode of timestamp 6000.
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // No more frames.

InsertPacket(9000, 9, true, false, kVideoFrameDelta);
InsertPacket(9000, 10, false, false, kVideoFrameDelta);
InsertPacket(9000, 11, false, true, kVideoFrameDelta);
EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 9000 complete.
EXPECT_EQ(0, vcm_->DecodeDualFrame(0)); // Expect no dual decoder action.
}

TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
EXPECT_CALL(decoder_, InitDecode(_, _)).Times(1);
EXPECT_CALL(decoder_, Release()).Times(1);

@ -30,18 +30,14 @@ VideoReceiver::VideoReceiver(Clock* clock, EventFactory* event_factory)
_receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_receiverInited(false),
_timing(clock_),
_dualTiming(clock_, &_timing),
_receiver(&_timing, clock_, event_factory, true),
_dualReceiver(&_dualTiming, clock_, event_factory, false),
_decodedFrameCallback(_timing, clock_),
_dualDecodedFrameCallback(_dualTiming, clock_),
_frameTypeCallback(NULL),
_receiveStatsCallback(NULL),
_decoderTimingCallback(NULL),
_packetRequestCallback(NULL),
render_buffer_callback_(NULL),
_decoder(NULL),
_dualDecoder(NULL),
#ifdef DEBUG_DECODER_BIT_STREAM
_bitStreamBeforeDecoder(NULL),
#endif
@ -61,9 +57,6 @@ VideoReceiver::VideoReceiver(Clock* clock, EventFactory* event_factory)
}

VideoReceiver::~VideoReceiver() {
if (_dualDecoder != NULL) {
_codecDataBase.ReleaseDecoder(_dualDecoder);
}
delete _receiveCritSect;
#ifdef DEBUG_DECODER_BIT_STREAM
fclose(_bitStreamBeforeDecoder);
@ -163,8 +156,7 @@ int32_t VideoReceiver::Process() {

int32_t VideoReceiver::TimeUntilNextProcess() {
uint32_t timeUntilNextProcess = _receiveStatsTimer.TimeUntilProcess();
if ((_receiver.NackMode() != kNoNack) ||
(_dualReceiver.State() != kPassive)) {
if (_receiver.NackMode() != kNoNack) {
// We need a Process call more often if we are relying on
// retransmissions
timeUntilNextProcess =
@ -190,8 +182,6 @@ int32_t VideoReceiver::SetVideoProtection(VCMVideoProtection videoProtection,
bool enable) {
// By default, do not decode with errors.
_receiver.SetDecodeErrorMode(kNoErrors);
// The dual decoder should always be error free.
_dualReceiver.SetDecodeErrorMode(kNoErrors);
switch (videoProtection) {
case kProtectionNack:
case kProtectionNackReceiver: {
@ -205,22 +195,6 @@ int32_t VideoReceiver::SetVideoProtection(VCMVideoProtection videoProtection,
break;
}

case kProtectionDualDecoder: {
CriticalSectionScoped cs(_receiveCritSect);
if (enable) {
// Enable NACK but don't wait for retransmissions and don't
// add any extra delay.
_receiver.SetNackMode(kNack, 0, 0);
// Enable NACK and always wait for retransmissions and
// compensate with extra delay.
_dualReceiver.SetNackMode(kNack, -1, -1);
_receiver.SetDecodeErrorMode(kWithErrors);
} else {
_dualReceiver.SetNackMode(kNoNack, -1, -1);
}
break;
}

case kProtectionKeyOnLoss: {
CriticalSectionScoped cs(_receiveCritSect);
if (enable) {
@ -276,11 +250,6 @@ int32_t VideoReceiver::InitializeReceiver() {
return ret;
}

ret = _dualReceiver.Initialize();
if (ret < 0) {
return ret;
}

{
CriticalSectionScoped receive_cs(_receiveCritSect);
_codecDataBase.ResetReceiver();
@ -381,32 +350,8 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
supports_render_scheduling = _codecDataBase.SupportsRenderScheduling();
}

const bool dualReceiverEnabledNotReceiving = (
_dualReceiver.State() != kReceiving && _dualReceiver.NackMode() == kNack);

VCMEncodedFrame* frame =
_receiver.FrameForDecoding(maxWaitTimeMs,
nextRenderTimeMs,
supports_render_scheduling,
&_dualReceiver);

if (dualReceiverEnabledNotReceiving && _dualReceiver.State() == kReceiving) {
// Dual receiver is enabled (kNACK enabled), but was not receiving
// before the call to FrameForDecoding(). After the call the state
// changed to receiving, and therefore we must copy the primary decoder
// state to the dual decoder to make it possible for the dual decoder to
// start decoding retransmitted frames and recover.
CriticalSectionScoped cs(_receiveCritSect);
if (_dualDecoder != NULL) {
_codecDataBase.ReleaseDecoder(_dualDecoder);
}
_dualDecoder = _codecDataBase.CreateDecoderCopy();
if (_dualDecoder != NULL) {
_dualDecoder->RegisterDecodeCompleteCallback(&_dualDecodedFrameCallback);
} else {
_dualReceiver.Reset();
}
}
VCMEncodedFrame* frame = _receiver.FrameForDecoding(
maxWaitTimeMs, nextRenderTimeMs, supports_render_scheduling);

if (frame == NULL) {
return VCM_FRAME_NOT_READY;
@ -473,45 +418,6 @@ int32_t VideoReceiver::RequestKeyFrame() {
return VCM_OK;
}

int32_t VideoReceiver::DecodeDualFrame(uint16_t maxWaitTimeMs) {
CriticalSectionScoped cs(_receiveCritSect);
if (_dualReceiver.State() != kReceiving ||
_dualReceiver.NackMode() != kNack) {
// The dual receiver is currently not receiving or
// dual decoder mode is disabled.
return VCM_OK;
}
int64_t dummyRenderTime;
int32_t decodeCount = 0;
// The dual decoder's state is copied from the main decoder, which may
// decode with errors. Make sure that the dual decoder does not introduce
// error.
_dualReceiver.SetDecodeErrorMode(kNoErrors);
VCMEncodedFrame* dualFrame =
_dualReceiver.FrameForDecoding(maxWaitTimeMs, dummyRenderTime);
if (dualFrame != NULL && _dualDecoder != NULL) {
// Decode dualFrame and try to catch up
int32_t ret =
_dualDecoder->Decode(*dualFrame, clock_->TimeInMilliseconds());
if (ret != WEBRTC_VIDEO_CODEC_OK) {
LOG(LS_ERROR) << "Failed to decode frame with dual decoder. Error code: "
<< ret;
_dualReceiver.ReleaseFrame(dualFrame);
return VCM_CODEC_ERROR;
}
if (_receiver.DualDecoderCaughtUp(dualFrame, _dualReceiver)) {
// Copy the complete decoder state of the dual decoder
// to the primary decoder.
_codecDataBase.CopyDecoder(*_dualDecoder);
_codecDataBase.ReleaseDecoder(_dualDecoder);
_dualDecoder = NULL;
}
decodeCount++;
}
_dualReceiver.ReleaseFrame(dualFrame);
return decodeCount;
}

// Must be called from inside the receive side critical section.
int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
TRACE_EVENT_ASYNC_STEP1("webrtc",
@ -584,13 +490,6 @@ int32_t VideoReceiver::ResetDecoder() {
reset_key_request = true;
_decoder->Reset();
}
if (_dualReceiver.State() != kPassive) {
_dualReceiver.Initialize();
}
if (_dualDecoder != NULL) {
_codecDataBase.ReleaseDecoder(_dualDecoder);
_dualDecoder = NULL;
}
}
if (reset_key_request) {
CriticalSectionScoped cs(process_crit_sect_.get());
@ -646,19 +545,8 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
payloadLength = 0;
}
const VCMPacket packet(incomingPayload, payloadLength, rtpInfo);
int32_t ret;
if (_dualReceiver.State() != kPassive) {
ret = _dualReceiver.InsertPacket(
packet, rtpInfo.type.Video.width, rtpInfo.type.Video.height);
if (ret == VCM_FLUSH_INDICATOR) {
RequestKeyFrame();
ResetDecoder();
} else if (ret < 0) {
return ret;
}
}
ret = _receiver.InsertPacket(
packet, rtpInfo.type.Video.width, rtpInfo.type.Video.height);
int32_t ret = _receiver.InsertPacket(packet, rtpInfo.type.Video.width,
rtpInfo.type.Video.height);
// TODO(holmer): Investigate if this somehow should use the key frame
// request scheduling to throttle the requests.
if (ret == VCM_FLUSH_INDICATOR) {
@ -693,14 +581,10 @@ int32_t VideoReceiver::NackList(uint16_t* nackList, uint16_t* size) {
VCMNackStatus nackStatus = kNackOk;
uint16_t nack_list_length = 0;
// Collect sequence numbers from the default receiver
// if in normal nack mode. Otherwise collect them from
// the dual receiver if the dual receiver is receiving.
// if in normal nack mode.
if (_receiver.NackMode() != kNoNack) {
nackStatus = _receiver.NackList(nackList, *size, &nack_list_length);
}
if (nack_list_length == 0 && _dualReceiver.State() != kPassive) {
nackStatus = _dualReceiver.NackList(nackList, *size, &nack_list_length);
}
*size = nack_list_length;
if (nackStatus == kNackKeyFrameRequest) {
return RequestKeyFrame();
@ -724,7 +608,6 @@ int VideoReceiver::SetReceiverRobustnessMode(
switch (robustnessMode) {
case VideoCodingModule::kNone:
_receiver.SetNackMode(kNoNack, -1, -1);
_dualReceiver.SetNackMode(kNoNack, -1, -1);
if (decode_error_mode == kNoErrors) {
_keyRequestMode = kKeyOnLoss;
} else {
@ -734,7 +617,6 @@ int VideoReceiver::SetReceiverRobustnessMode(
case VideoCodingModule::kHardNack:
// Always wait for retransmissions (except when decoding with errors).
_receiver.SetNackMode(kNack, -1, -1);
_dualReceiver.SetNackMode(kNoNack, -1, -1);
_keyRequestMode = kKeyOnError;  // TODO(hlundin): On long NACK list?
break;
case VideoCodingModule::kSoftNack:
@ -745,21 +627,9 @@ int VideoReceiver::SetReceiverRobustnessMode(
// Enable hybrid NACK/FEC. Always wait for retransmissions and don't add
// extra delay when RTT is above kLowRttNackMs.
_receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
_dualReceiver.SetNackMode(kNoNack, -1, -1);
_keyRequestMode = kKeyOnError;
break;
#endif
case VideoCodingModule::kDualDecoder:
if (decode_error_mode == kNoErrors) {
return VCM_PARAMETER_ERROR;
}
// Enable NACK but don't wait for retransmissions and don't add any extra
// delay.
_receiver.SetNackMode(kNack, 0, 0);
// Enable NACK, compensate with extra delay and wait for retransmissions.
_dualReceiver.SetNackMode(kNack, -1, -1);
_keyRequestMode = kKeyOnError;
break;
case VideoCodingModule::kReferenceSelection:
#if 1
assert(false);  // TODO(hlundin): Not completed.
@ -769,13 +639,10 @@ int VideoReceiver::SetReceiverRobustnessMode(
return VCM_PARAMETER_ERROR;
}
_receiver.SetNackMode(kNoNack, -1, -1);
_dualReceiver.SetNackMode(kNoNack, -1, -1);
break;
#endif
}
_receiver.SetDecodeErrorMode(decode_error_mode);
// The dual decoder should never decode with errors.
_dualReceiver.SetDecodeErrorMode(kNoErrors);
return VCM_OK;
}

@ -793,8 +660,6 @@ void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
}
_receiver.SetNackSettings(
max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
_dualReceiver.SetNackSettings(
max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
}

int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) {

@ -338,7 +338,6 @@ int32_t VideoSender::SetVideoProtection(VCMVideoProtection videoProtection,
break;
}
case kProtectionNackReceiver:
case kProtectionDualDecoder:
case kProtectionKeyOnLoss:
case kProtectionKeyOnKeyLoss:
// Ignore decoder modes.

@ -77,8 +77,6 @@ class VcmPayloadSinkFactory::VcmPayloadSink
return -1;
}
}
while (decode_dual_frame && vcm_->DecodeDualFrame(0) == 1) {
}
return Process() ? 0 : -1;
}

@ -93,8 +91,6 @@ class VcmPayloadSinkFactory::VcmPayloadSink

bool Decode() {
vcm_->Decode(10000);
while (vcm_->DecodeDualFrame(0) == 1) {
}
return true;
}

@ -24,7 +24,7 @@ namespace {

const bool kConfigProtectionEnabled = true;
const webrtc::VCMVideoProtection kConfigProtectionMethod =
webrtc::kProtectionDualDecoder;
webrtc::kProtectionNack;
const float kConfigLossRate = 0.05f;
const uint32_t kConfigRttMs = 50;
const bool kConfigReordering = false;