Refactor jitter buffer to use separate lists for decodable and incomplete frames.

This changes the design of the jitter buffer to keep track of decodable frames from the point when packets are inserted into the buffer, instead of searching for decodable frames when they are needed.

To accomplish this the frame_list_, which previously contained all frames (incomplete or complete, continuous or not), is split into a list of decodable_frames_ (complete, continuous) and a list of incomplete_frames_ (either incomplete or non-continuous). These frame lists are updated every time a packet is inserted.

This is another step in the direction of doing most of the work in the jitter buffer only once, when packets are inserted, instead of doing it every time we look for a frame or try to get a nack list.

BUG=1798
TEST=vie_auto_test, trybots
R=mikhal@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1522005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4104 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
stefan@webrtc.org 2013-05-27 07:02:45 +00:00
parent ead3c6d508
commit 7f3f8bc5a6
7 changed files with 434 additions and 382 deletions

View File

@ -82,11 +82,21 @@ void VCMDecodingState::CopyFrom(const VCMDecodingState& state) {
in_initial_state_ = state.in_initial_state_;
}
void VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
if (ContinuousFrame(frame)) {
time_stamp_ = frame->TimeStamp();
sequence_num_ = frame->GetHighSeqNum();
bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
bool empty_packet = frame->GetHighSeqNum() == frame->GetLowSeqNum();
if (in_initial_state_ && empty_packet) {
// Drop empty packets as long as we are in the initial state.
return true;
}
if ((empty_packet && ContinuousSeqNum(frame->GetHighSeqNum())) ||
ContinuousFrame(frame)) {
// Continuous empty packets or continuous frames can be dropped if we
// advance the sequence number.
sequence_num_ = frame->GetHighSeqNum();
time_stamp_ = frame->TimeStamp();
return true;
}
return false;
}
void VCMDecodingState::UpdateOldPacket(const VCMPacket* packet) {
@ -139,11 +149,14 @@ bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
// Return true when in initial state.
// Note that when a method is not applicable it will return false.
assert(frame != NULL);
if (in_initial_state_) {
// Always start with a key frame.
if (frame->FrameType() == kVideoFrameKey) return true;
// A key frame is always considered continuous as it doesn't refer to any
// frames and therefore won't introduce any errors even if prior frames are
// missing.
if (frame->FrameType() == kVideoFrameKey)
return true;
// When in the initial state we always require a key frame to start decoding.
if (in_initial_state_)
return false;
}
if (!ContinuousLayer(frame->TemporalId(), frame->Tl0PicId())) {
// Base layers are not continuous or temporal layers are inactive.

View File

@ -32,7 +32,7 @@ class VCMDecodingState {
bool ContinuousFrame(const VCMFrameBuffer* frame) const;
void SetState(const VCMFrameBuffer* frame);
void CopyFrom(const VCMDecodingState& state);
void UpdateEmptyFrame(const VCMFrameBuffer* frame);
bool UpdateEmptyFrame(const VCMFrameBuffer* frame);
// Update the sequence number if the timestamp matches current state and the
// sequence number is higher than the current one. This accounts for packets
// arriving late.

View File

@ -73,6 +73,82 @@ bool HasNonEmptyState(VCMFrameBuffer* frame) {
return frame->GetState() != kStateEmpty;
}
// Inserts |frame| at its timestamp-sorted position (oldest first). Searches
// backwards, since a newly arrived frame usually belongs near the end.
void FrameList::InsertFrame(VCMFrameBuffer* frame) {
  const uint32_t timestamp = frame->TimeStamp();
  reverse_iterator rit =
      std::find_if(rbegin(), rend(), FrameSmallerTimestamp(timestamp));
  insert(rit.base(), frame);
}
// Returns the frame with |timestamp|, or NULL if no such frame exists.
// The frame is left in the list.
VCMFrameBuffer* FrameList::FindFrame(uint32_t timestamp) const {
  const_iterator it =
      std::find_if(begin(), end(), FrameEqualTimestamp(timestamp));
  return (it == end()) ? NULL : *it;
}
// Removes the frame with |timestamp| from the list and returns it, or
// returns NULL if no frame with that timestamp is present.
VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
  iterator it = std::find_if(begin(), end(), FrameEqualTimestamp(timestamp));
  if (it == end())
    return NULL;
  VCMFrameBuffer* const frame = *it;
  erase(it);
  return frame;
}
// Drops frames from the front of the list until a key frame is at the front
// (or the list is emptied). On return |*key_frame_it| points at that key
// frame, or at end() if none was found. Returns the number of frames dropped.
int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it) {
  int drop_count = 0;
  *key_frame_it = begin();
  while (!empty()) {
    // Throw at least one frame.
    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, -1,
                 "Recycling: type=%s, low seqnum=%u",
                 (**key_frame_it)->FrameType() == kVideoFrameKey ?
                 "key" : "delta", (**key_frame_it)->GetLowSeqNum());
    // Don't free a frame the decoder is currently using.
    if ((**key_frame_it)->GetState() != kStateDecoding) {
      (**key_frame_it)->SetState(kStateFree);
    }
    *key_frame_it = erase(*key_frame_it);
    ++drop_count;
    // Stop as soon as the new front frame is a key frame.
    if (*key_frame_it != end() &&
        (**key_frame_it)->FrameType() == kVideoFrameKey) {
      return drop_count;
    }
  }
  // No key frame found; the whole list was dropped and |*key_frame_it| is
  // end().
  return drop_count;
}
// Releases frames from the front of the list that are either older than
// |decoding_state| or empty frames that |decoding_state| agrees can be
// dropped. Stops at the first frame that must be kept. Returns the number of
// frames removed.
int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state) {
  int drop_count = 0;
  while (!empty()) {
    VCMFrameBuffer* oldest_frame = front();
    bool remove_frame = false;
    if (oldest_frame->GetState() == kStateEmpty && size() > 1) {
      // This frame is empty, try to update the last decoded state and drop it
      // if successful.
      remove_frame = decoding_state->UpdateEmptyFrame(oldest_frame);
    } else {
      remove_frame = decoding_state->IsOldFrame(oldest_frame);
    }
    if (!remove_frame) {
      break;
    }
    // Don't free a frame the decoder is currently using.
    if (front()->GetState() != kStateDecoding) {
      front()->SetState(kStateFree);
    }
    ++drop_count;
    TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
                         oldest_frame->TimeStamp());
    erase(begin());
  }
  if (empty()) {
    TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied",
                         "type", "CleanUpOldOrEmptyFrames");
  }
  return drop_count;
}
VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
EventFactory* event_factory,
int vcm_id,
@ -88,7 +164,8 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
packet_event_(event_factory->CreateEvent()),
max_number_of_frames_(kStartNumberOfFrames),
frame_buffers_(),
frame_list_(),
decodable_frames_(),
incomplete_frames_(),
last_decoded_state_(),
first_packet_since_reset_(true),
num_not_decodable_packets_(0),
@ -172,15 +249,18 @@ void VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs) {
frame_buffers_[i] = NULL;
}
}
frame_list_.clear();
for (int i = 0; i < max_number_of_frames_; i++) {
frame_buffers_[i] = new VCMFrameBuffer(*(rhs.frame_buffers_[i]));
if (frame_buffers_[i]->Length() > 0) {
FrameList::reverse_iterator rit = std::find_if(
frame_list_.rbegin(), frame_list_.rend(),
FrameSmallerTimestamp(frame_buffers_[i]->TimeStamp()));
frame_list_.insert(rit.base(), frame_buffers_[i]);
}
decodable_frames_.clear();
incomplete_frames_.clear();
int i = 0;
for (FrameList::const_iterator it = rhs.decodable_frames_.begin();
it != rhs.decodable_frames_.end(); ++it, ++i) {
frame_buffers_[i] = new VCMFrameBuffer(**it);
decodable_frames_.push_back(frame_buffers_[i]);
}
for (FrameList::const_iterator it = rhs.incomplete_frames_.begin();
it != rhs.incomplete_frames_.end(); ++it, ++i) {
frame_buffers_[i] = new VCMFrameBuffer(**it);
incomplete_frames_.push_back(frame_buffers_[i]);
}
rhs.crit_sect_->Leave();
crit_sect_->Leave();
@ -221,7 +301,8 @@ void VCMJitterBuffer::Stop() {
crit_sect_->Enter();
running_ = false;
last_decoded_state_.Reset();
frame_list_.clear();
decodable_frames_.clear();
incomplete_frames_.clear();
TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied", "type", "Stop");
for (int i = 0; i < kMaxNumberOfFrames; i++) {
if (frame_buffers_[i] != NULL) {
@ -246,7 +327,8 @@ bool VCMJitterBuffer::Running() const {
void VCMJitterBuffer::Flush() {
CriticalSectionScoped cs(crit_sect_);
// Erase all frames from the sorted list and set their state to free.
frame_list_.clear();
decodable_frames_.clear();
incomplete_frames_.clear();
TRACE_EVENT_INSTANT2("webrtc", "JB::FrameListEmptied", "type", "Flush",
"frames", max_number_of_frames_);
for (int i = 0; i < max_number_of_frames_; i++) {
@ -361,32 +443,13 @@ bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
CriticalSectionScoped cs(crit_sect_);
// Finding oldest frame ready for decoder, check sequence number and size
CleanUpOldOrEmptyFrames();
if (frame_list_.empty())
if (!decodable_frames_.empty())
return true;
VCMFrameBuffer* oldest_frame = frame_list_.front();
if (frame_list_.size() <= 1 &&
oldest_frame->GetState() != kStateComplete) {
if (incomplete_frames_.size() <= 1) {
// Frame not ready to be decoded.
return true;
}
if (oldest_frame->GetState() != kStateComplete) {
return false;
}
// See if we have lost a frame before this one.
if (last_decoded_state_.in_initial_state()) {
// Following start, reset or flush -> check for key frame.
if (oldest_frame->FrameType() != kVideoFrameKey) {
return false;
}
} else if (oldest_frame->GetLowSeqNum() == -1) {
return false;
} else if (!last_decoded_state_.ContinuousFrame(oldest_frame)) {
return false;
}
return true;
return false;
}
// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
@ -396,13 +459,12 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
TRACE_EVENT0("webrtc", "JB::NextCompleteTimestamp");
crit_sect_->Enter();
if (!running_) {
crit_sect_->Leave();
return false;
}
CleanUpOldOrEmptyFrames();
FrameList::iterator it = FindOldestCompleteContinuousFrame(
frame_list_.begin(), &last_decoded_state_);
if (it == frame_list_.end()) {
if (decodable_frames_.empty()) {
const int64_t end_wait_time_ms = clock_->TimeInMilliseconds() +
max_wait_time_ms;
int64_t wait_time_ms = max_wait_time_ms;
@ -412,7 +474,7 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
crit_sect_->Enter();
if (ret == kEventSignaled) {
// Are we closing down the Jitter buffer?
// Are we shutting down the jitter buffer?
if (!running_) {
crit_sect_->Leave();
return false;
@ -420,9 +482,7 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
// Finding oldest frame ready for decoder, but check
// sequence number and size
CleanUpOldOrEmptyFrames();
it = FindOldestCompleteContinuousFrame(
frame_list_.begin(), &last_decoded_state_);
if (it == frame_list_.end()) {
if (decodable_frames_.empty()) {
wait_time_ms = end_wait_time_ms - clock_->TimeInMilliseconds();
} else {
break;
@ -433,31 +493,19 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
}
// Inside |crit_sect_|.
} else {
// We already have a frame reset the event.
// We already have a frame, reset the event.
frame_event_->Reset();
}
if (!decode_with_errors_ && it == frame_list_.end()) {
// We're still missing a complete continuous frame.
// Look for a complete key frame if we're not decoding with errors.
it = find_if(frame_list_.begin(), frame_list_.end(),
CompleteKeyFrameCriteria());
if (decodable_frames_.empty()) {
crit_sect_->Leave();
return false;
}
if (it == frame_list_.end()) {
crit_sect_->Leave();
return false;
}
VCMFrameBuffer* oldest_frame = *it;
*timestamp = oldest_frame->TimeStamp();
*timestamp = decodable_frames_.front()->TimeStamp();
crit_sect_->Leave();
return true;
}
bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(
uint32_t* timestamp) {
bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
TRACE_EVENT0("webrtc", "JB::NextMaybeIncompleteTimestamp");
CriticalSectionScoped cs(crit_sect_);
if (!running_) {
@ -470,17 +518,17 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(
CleanUpOldOrEmptyFrames();
if (frame_list_.empty()) {
VCMFrameBuffer* oldest_frame = NextFrame();
if (!oldest_frame) {
return false;
}
VCMFrameBuffer* oldest_frame = frame_list_.front();
// If we have only one frame in the buffer, release it only if it is complete.
if (frame_list_.size() <= 1 && oldest_frame->GetState() != kStateComplete) {
if (decodable_frames_.empty() && incomplete_frames_.size() <= 1 &&
oldest_frame->GetState() == kStateIncomplete) {
// If we have only one frame in the buffer, release it only if it is
// complete.
return false;
}
// Always start with a key frame.
// Always start with a complete key frame.
if (last_decoded_state_.in_initial_state() &&
oldest_frame->FrameType() != kVideoFrameKey) {
return false;
@ -498,18 +546,13 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
return NULL;
}
// Extract the frame with the desired timestamp.
FrameList::iterator it = std::find_if(
frame_list_.begin(),
frame_list_.end(),
FrameEqualTimestamp(timestamp));
if (it == frame_list_.end()) {
return NULL;
VCMFrameBuffer* frame = decodable_frames_.PopFrame(timestamp);
if (!frame) {
frame = incomplete_frames_.PopFrame(timestamp);
if (!frame)
return NULL;
}
// We got the frame.
VCMFrameBuffer* frame = *it;
frame_list_.erase(it);
if (frame_list_.empty()) {
if (!NextFrame()) {
TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied",
"type", "ExtractAndSetDecode");
}
@ -585,16 +628,14 @@ VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
}
num_consecutive_old_packets_ = 0;
FrameList::iterator it = std::find_if(
frame_list_.begin(),
frame_list_.end(),
FrameEqualTimestamp(packet.timestamp));
if (it != frame_list_.end()) {
*frame = *it;
*frame = incomplete_frames_.FindFrame(packet.timestamp);
if (*frame) {
return kNoError;
}
*frame = decodable_frames_.FindFrame(packet.timestamp);
if (*frame) {
return kNoError;
}
// No match, return empty frame.
*frame = GetEmptyFrame();
if (*frame != NULL) {
@ -640,6 +681,23 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
// reset the delay estimate.
inter_frame_delay_.Reset(clock_->TimeInMilliseconds());
}
if (last_decoded_state_.IsOldPacket(&packet)) {
// This packet belongs to an old, already decoded frame, we want to update
// the last decoded sequence number.
last_decoded_state_.UpdateOldPacket(&packet);
frame->SetState(kStateFree);
TRACE_EVENT_INSTANT1("webrtc", "JB::DropLateFrame",
"timestamp", frame->TimeStamp());
drop_count_++;
// Flush() if this happens consistently.
num_consecutive_old_frames_++;
if (num_consecutive_old_frames_ > kMaxConsecutiveOldFrames) {
Flush();
return kFlushIndicator;
}
return kNoError;
}
num_consecutive_old_frames_ = 0;
// Empty packets may bias the jitter estimate (lacking size component),
// therefore don't let empty packet trigger the following updates:
@ -659,7 +717,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
}
}
VCMFrameBufferStateEnum state = frame->GetState();
VCMFrameBufferStateEnum previous_state = frame->GetState();
// Insert packet.
// Check for first packet. High sequence number will be -1 if neither an empty
// packet nor a media packet has been inserted.
@ -674,17 +732,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
ret = buffer_return;
if (buffer_return > 0) {
incoming_bit_count_ += packet.sizeBytes << 3;
// Insert each frame once on the arrival of the first packet
// belonging to that frame (media or empty).
if (state == kStateEmpty && first) {
ret = kFirstPacket;
FrameList::reverse_iterator rit = std::find_if(
frame_list_.rbegin(),
frame_list_.rend(),
FrameSmallerTimestamp(frame->TimeStamp()));
frame_list_.insert(rit.base(), frame);
}
if (first_packet_since_reset_) {
latest_received_sequence_number_ = packet.seqNum;
first_packet_since_reset_ = false;
@ -705,9 +752,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
case kTimeStampError:
case kSizeError: {
if (frame != NULL) {
// Will be released when it gets old.
frame->Reset();
frame->SetState(kStateEmpty);
frame->SetState(kStateFree);
}
break;
}
@ -715,17 +761,36 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
// Don't let the first packet be overridden by a complete session.
ret = kCompleteSession;
// Only update return value for a JB flush indicator.
if (UpdateFrameState(frame) == kFlushIndicator)
ret = kFlushIndicator;
UpdateFrameState(frame);
*retransmitted = (frame->GetNackCount() > 0);
if (IsContinuous(*frame) && previous_state != kStateComplete) {
if (!first) {
incomplete_frames_.PopFrame(packet.timestamp);
}
decodable_frames_.InsertFrame(frame);
FindAndInsertContinuousFrames(*frame);
// Signal that we have a decodable frame.
frame_event_->Set();
} else if (first) {
incomplete_frames_.InsertFrame(frame);
}
// Signal that we have a received packet.
packet_event_->Set();
break;
}
case kDecodableSession:
case kIncomplete: {
// Signal that we have a received packet.
packet_event_->Set();
// No point in storing empty continuous frames.
if (frame->GetState() == kStateEmpty &&
last_decoded_state_.UpdateEmptyFrame(frame)) {
frame->SetState(kStateFree);
ret = kNoError;
} else if (first) {
ret = kFirstPacket;
incomplete_frames_.InsertFrame(frame);
// Signal that we have received a packet.
packet_event_->Set();
}
break;
}
case kNoError:
@ -742,6 +807,66 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
return ret;
}
bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame,
const VCMDecodingState& decoding_state) const {
// Is this frame complete or decodable and continuous?
if ((frame.GetState() == kStateComplete ||
(decode_with_errors_ && frame.GetState() == kStateDecodable)) &&
decoding_state.ContinuousFrame(&frame)) {
return true;
} else {
return false;
}
}
bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
if (IsContinuousInState(frame, last_decoded_state_)) {
return true;
}
VCMDecodingState decoding_state;
decoding_state.CopyFrom(last_decoded_state_);
for (FrameList::const_iterator it = decodable_frames_.begin();
it != decodable_frames_.end(); ++it) {
VCMFrameBuffer* decodable_frame = *it;
if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
break;
}
decoding_state.SetState(decodable_frame);
if (IsContinuousInState(frame, decoding_state)) {
return true;
}
}
return false;
}
// Moves frames from |incomplete_frames_| to |decodable_frames_| that become
// continuous now that |new_frame| is decodable. Only frames with timestamps
// at or after |new_frame|'s are considered.
void VCMJitterBuffer::FindAndInsertContinuousFrames(
    const VCMFrameBuffer& new_frame) {
  // Build a decoding state as if |new_frame| had just been decoded.
  VCMDecodingState decoding_state;
  decoding_state.CopyFrom(last_decoded_state_);
  decoding_state.SetState(&new_frame);
  // When temporal layers are available, we search for a complete or decodable
  // frame until we hit one of the following:
  // 1. Continuous base or sync layer.
  // 2. The end of the list was reached.
  for (FrameList::iterator it = incomplete_frames_.begin();
       it != incomplete_frames_.end();) {
    VCMFrameBuffer* frame = *it;
    // Frames older than |new_frame| cannot be made continuous by it; skip.
    if (IsNewerTimestamp(new_frame.TimeStamp(), frame->TimeStamp())) {
      ++it;
      continue;
    }
    if (IsContinuousInState(*frame, decoding_state)) {
      // Promote the frame and extend the simulated decoding state with it,
      // so frames after it can be tested for continuity as well.
      decodable_frames_.InsertFrame(frame);
      it = incomplete_frames_.erase(it);
      decoding_state.SetState(frame);
    } else if (frame->TemporalId() <= 0) {
      // A non-continuous base layer frame blocks everything after it.
      break;
    } else {
      ++it;
    }
  }
}
void VCMJitterBuffer::SetMaxJitterEstimate(bool enable) {
CriticalSectionScoped cs(crit_sect_);
jitter_estimate_.SetMaxJitterEstimate(enable);
@ -809,16 +934,14 @@ VCMNackMode VCMJitterBuffer::nack_mode() const {
}
int VCMJitterBuffer::NonContinuousOrIncompleteDuration() {
if (frame_list_.empty()) {
if (incomplete_frames_.empty()) {
return 0;
}
FrameList::iterator start_it;
FrameList::iterator end_it;
RenderBuffer(&start_it, &end_it);
if (end_it == frame_list_.end())
end_it = frame_list_.begin();
return frame_list_.back()->TimeStamp() -
(*end_it)->TimeStamp();
uint32_t start_timestamp = incomplete_frames_.front()->TimeStamp();
if (!decodable_frames_.empty()) {
start_timestamp = decodable_frames_.back()->TimeStamp();
}
return incomplete_frames_.back()->TimeStamp() - start_timestamp;
}
uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
@ -841,13 +964,19 @@ uint16_t* VCMJitterBuffer::GetNackList(uint16_t* nack_list_size,
return NULL;
}
if (last_decoded_state_.in_initial_state()) {
bool first_frame_is_key = !frame_list_.empty() &&
frame_list_.front()->FrameType() == kVideoFrameKey &&
frame_list_.front()->HaveFirstPacket();
const bool first_frame_is_key = NextFrame() &&
NextFrame()->FrameType() == kVideoFrameKey &&
NextFrame()->HaveFirstPacket();
if (!first_frame_is_key) {
const bool have_non_empty_frame = frame_list_.end() != find_if(
frame_list_.begin(), frame_list_.end(), HasNonEmptyState);
LOG_F(LS_INFO) << "First frame is not key; Recycling.";
bool have_non_empty_frame = decodable_frames_.end() != find_if(
decodable_frames_.begin(), decodable_frames_.end(),
HasNonEmptyState);
if (!have_non_empty_frame) {
have_non_empty_frame = incomplete_frames_.end() != find_if(
incomplete_frames_.begin(), incomplete_frames_.end(),
HasNonEmptyState);
}
bool found_key_frame = RecycleFramesUntilKeyFrame();
if (!found_key_frame) {
*request_key_frame = have_non_empty_frame;
@ -870,10 +999,9 @@ uint16_t* VCMJitterBuffer::GetNackList(uint16_t* nack_list_size,
LOG_F(LS_INFO) << "Too long non-decodable duration: " <<
non_continuous_incomplete_duration << " > " <<
90 * max_incomplete_time_ms_;
FrameList::reverse_iterator rit = find_if(frame_list_.rbegin(),
frame_list_.rend(),
KeyFrameCriteria());
if (rit == frame_list_.rend()) {
FrameList::reverse_iterator rit = find_if(incomplete_frames_.rbegin(),
incomplete_frames_.rend(), KeyFrameCriteria());
if (rit == incomplete_frames_.rend()) {
// Request a key frame if we don't have one already.
*request_key_frame = true;
*nack_list_size = 0;
@ -897,6 +1025,14 @@ uint16_t* VCMJitterBuffer::GetNackList(uint16_t* nack_list_size,
return &nack_seq_nums_[0];
}
// Returns the oldest frame in the buffer, preferring decodable frames over
// incomplete ones. Returns NULL when both lists are empty.
VCMFrameBuffer* VCMJitterBuffer::NextFrame() const {
  if (!decodable_frames_.empty())
    return decodable_frames_.front();
  return incomplete_frames_.empty() ? NULL : incomplete_frames_.front();
}
bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
if (nack_mode_ == kNoNack) {
return true;
@ -985,73 +1121,17 @@ int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
return last_decoded_state_.time_stamp();
}
FrameList::iterator VCMJitterBuffer::FindLastContinuousAndComplete(
FrameList::iterator start_it) {
// Search for a complete and continuous sequence (starting from the last
// decoded state or current frame if in initial state).
VCMDecodingState previous_state;
previous_state.SetState(*start_it);
FrameList::iterator previous_it = start_it;
++start_it;
while (start_it != frame_list_.end()) {
start_it = FindOldestCompleteContinuousFrame(start_it, &previous_state);
if (start_it == frame_list_.end())
break;
previous_state.SetState(*start_it);
previous_it = start_it;
++start_it;
}
// Desired frame is the previous one.
return previous_it;
}
void VCMJitterBuffer::RenderBuffer(FrameList::iterator* start_it,
FrameList::iterator* end_it) {
*start_it = FindOldestCompleteContinuousFrame(
frame_list_.begin(), &last_decoded_state_);
if (!decode_with_errors_ && *start_it == frame_list_.end()) {
// No complete continuous frame found.
// Look for a complete key frame if we're not decoding with errors.
*start_it = find_if(frame_list_.begin(), frame_list_.end(),
CompleteKeyFrameCriteria());
}
if (*start_it == frame_list_.end()) {
*end_it = *start_it;
} else {
*end_it = *start_it;
// Look for the last complete key frame and use that as the end of the
// render buffer it's later than the last complete continuous frame.
FrameList::reverse_iterator rend(*end_it);
FrameList::reverse_iterator rit = find_if(frame_list_.rbegin(),
rend,
CompleteKeyFrameCriteria());
if (rit != rend) {
// A key frame was found. The reverse iterator base points to the
// frame after it, so subtracting 1.
*end_it = rit.base();
--*end_it;
}
*end_it = FindLastContinuousAndComplete(*end_it);
}
}
void VCMJitterBuffer::RenderBufferSize(uint32_t* timestamp_start,
uint32_t* timestamp_end) {
CriticalSectionScoped cs(crit_sect_);
CleanUpOldOrEmptyFrames();
*timestamp_start = 0;
*timestamp_end = 0;
if (frame_list_.empty()) {
if (decodable_frames_.empty()) {
return;
}
FrameList::iterator start_it;
FrameList::iterator end_it;
RenderBuffer(&start_it, &end_it);
if (start_it == frame_list_.end()) {
return;
}
*timestamp_start = (*start_it)->TimeStamp();
*timestamp_end = (*end_it)->TimeStamp();
*timestamp_start = decodable_frames_.front()->TimeStamp();
*timestamp_end = decodable_frames_.back()->TimeStamp();
}
// Set the frame state to free and remove it from the sorted
@ -1093,48 +1173,42 @@ VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() {
// Recycle oldest frames up to a key frame, used if jitter buffer is completely
// full.
bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
// Remove up to oldest key frame
while (!frame_list_.empty()) {
// Throw at least one frame.
drop_count_++;
FrameList::iterator it = frame_list_.begin();
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"Jitter buffer drop count:%d, low_seq %d, frame type: %s",
drop_count_, (*it)->GetLowSeqNum(),
(*it)->FrameType() == kVideoFrameKey ? "key" : "delta");
TRACE_EVENT_INSTANT0("webrtc", "JB::RecycleFramesUntilKeyFrame");
ReleaseFrameIfNotDecoding(*it);
it = frame_list_.erase(it);
if (it != frame_list_.end() && (*it)->FrameType() == kVideoFrameKey) {
// Reset last decoded state to make sure the next frame decoded is a key
// frame, and start NACKing from here.
// Note that the estimated low sequence number is correct for VP8
// streams because only the first packet of a key frame is marked.
last_decoded_state_.Reset();
DropPacketsFromNackList(EstimatedLowSequenceNumber(**it));
return true;
// First release incomplete frames, and only release decodable frames if there
// are no incomplete ones.
FrameList::iterator key_frame_it;
bool key_frame_found = false;
int dropped_frames = 0;
dropped_frames += incomplete_frames_.RecycleFramesUntilKeyFrame(
&key_frame_it);
key_frame_found = key_frame_it != incomplete_frames_.end();
if (dropped_frames == 0) {
dropped_frames += decodable_frames_.RecycleFramesUntilKeyFrame(
&key_frame_it);
key_frame_found = key_frame_it != decodable_frames_.end();
if (!key_frame_found) {
TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied", "type",
"RecycleFramesUntilKeyFrame");
}
}
if (frame_list_.empty()) {
TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied",
"type", "RecycleFramesUntilKeyFrame");
drop_count_ += dropped_frames;
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"Jitter buffer drop count:%u", drop_count_);
TRACE_EVENT_INSTANT0("webrtc", "JB::RecycleFramesUntilKeyFrame");
if (key_frame_found) {
// Reset last decoded state to make sure the next frame decoded is a key
// frame, and start NACKing from here.
last_decoded_state_.Reset();
DropPacketsFromNackList(EstimatedLowSequenceNumber(**key_frame_it));
} else if (decodable_frames_.empty()) {
last_decoded_state_.Reset(); // TODO(mikhal): No sync.
missing_sequence_numbers_.clear();
}
last_decoded_state_.Reset(); // TODO(mikhal): No sync.
missing_sequence_numbers_.clear();
return false;
return key_frame_found;
}
// Must be called under the critical section |crit_sect_|.
VCMFrameBufferEnum VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) {
if (frame == NULL) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_), "JB(0x%x) FB(0x%x): "
"UpdateFrameState NULL frame pointer", this, frame);
return kNoError;
}
int length = frame->Length();
void VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) {
if (master_) {
// Only trace the primary jitter buffer to make it possible to parse
// and plot the trace file.
@ -1142,43 +1216,17 @@ VCMFrameBufferEnum VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) {
VCMId(vcm_id_, receiver_id_),
"JB(0x%x) FB(0x%x): Complete frame added to jitter buffer,"
" size:%d type %d",
this, frame, length, frame->FrameType());
this, frame, frame->Length(), frame->FrameType());
}
bool frame_counted = false;
if (length != 0 && !frame->GetCountedFrame()) {
if (!frame->GetCountedFrame()) {
// Ignore ACK frames.
incoming_frame_count_++;
frame->SetCountedFrame(true);
frame_counted = true;
}
// Check if we should drop the frame. A complete frame can arrive too late.
if (last_decoded_state_.IsOldFrame(frame)) {
// Frame is older than the latest decoded frame, drop it. Will be
// released by CleanUpOldFrames later.
TRACE_EVENT_INSTANT1("webrtc", "JB::DropLateFrame",
"timestamp", frame->TimeStamp());
frame->Reset();
frame->SetState(kStateEmpty);
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"JB(0x%x) FB(0x%x): Dropping old frame in Jitter buffer",
this, frame);
drop_count_++;
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"Jitter buffer drop count: %d, consecutive drops: %u",
drop_count_, num_consecutive_old_frames_);
// Flush() if this happens consistently.
num_consecutive_old_frames_++;
if (num_consecutive_old_frames_ > kMaxConsecutiveOldFrames) {
Flush();
return kFlushIndicator;
}
return kNoError;
}
num_consecutive_old_frames_ = 0;
frame->SetState(kStateComplete);
if (frame->FrameType() == kVideoFrameKey) {
TRACE_EVENT_INSTANT2("webrtc", "JB::AddKeyFrame",
@ -1214,83 +1262,15 @@ VCMFrameBufferEnum VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) {
assert(false);
}
}
const FrameList::iterator it = FindOldestCompleteContinuousFrame(
frame_list_.begin(), &last_decoded_state_);
VCMFrameBuffer* old_frame = NULL;
if (it != frame_list_.end()) {
old_frame = *it;
}
// Only signal if this is the oldest frame.
// Not necessarily the case due to packet reordering or NACK.
if (!WaitForRetransmissions() || (old_frame != NULL && old_frame == frame)) {
frame_event_->Set();
}
return kNoError;
}
// Find oldest complete frame used for getting next frame to decode
// Must be called under critical section
FrameList::iterator VCMJitterBuffer::FindOldestCompleteContinuousFrame(
FrameList::iterator start_it,
const VCMDecodingState* decoding_state) {
// If we have more than one frame done since last time, pick oldest.
VCMFrameBuffer* oldest_frame = NULL;
// When temporal layers are available, we search for a complete or decodable
// frame until we hit one of the following:
// 1. Continuous base or sync layer.
// 2. The end of the list was reached.
for (; start_it != frame_list_.end(); ++start_it) {
oldest_frame = *start_it;
VCMFrameBufferStateEnum state = oldest_frame->GetState();
// Is this frame complete or decodable and continuous?
if ((state == kStateComplete ||
(decode_with_errors_ && state == kStateDecodable)) &&
decoding_state->ContinuousFrame(oldest_frame)) {
break;
} else {
int temporal_id = oldest_frame->TemporalId();
oldest_frame = NULL;
if (temporal_id <= 0) {
// When temporal layers are disabled or we have hit a base layer
// we break (regardless of continuity and completeness).
break;
}
}
}
if (oldest_frame == NULL) {
// No complete frame no point to continue.
return frame_list_.end();
}
// We have a complete continuous frame.
return start_it;
}
// Must be called under the critical section |crit_sect_|.
void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
while (frame_list_.size() > 0) {
VCMFrameBuffer* oldest_frame = frame_list_.front();
if (oldest_frame->GetState() == kStateEmpty && frame_list_.size() > 1) {
// This frame is empty, mark it as decoded, thereby making it old.
last_decoded_state_.UpdateEmptyFrame(oldest_frame);
}
if (last_decoded_state_.IsOldFrame(oldest_frame)) {
ReleaseFrameIfNotDecoding(frame_list_.front());
TRACE_EVENT_INSTANT1("webrtc", "JB::OldFrameDropped",
"timestamp", oldest_frame->TimeStamp());
TRACE_COUNTER1("webrtc", "JBDroppedLateFrames", drop_count_);
frame_list_.erase(frame_list_.begin());
} else {
break;
}
}
if (frame_list_.empty()) {
TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied",
"type", "CleanUpOldOrEmptyFrames");
}
drop_count_ +=
decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_);
drop_count_ +=
incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_);
TRACE_COUNTER1("webrtc", "JBDroppedLateFrames", drop_count_);
if (!last_decoded_state_.in_initial_state()) {
DropPacketsFromNackList(last_decoded_state_.sequence_num());
}

View File

@ -32,8 +32,6 @@ enum VCMNackMode {
kNoNack
};
typedef std::list<VCMFrameBuffer*> FrameList;
// forward declarations
class Clock;
class EventFactory;
@ -49,6 +47,15 @@ struct VCMJitterSample {
int64_t latest_packet_time;
};
// A list of frame buffers kept sorted by timestamp (oldest first, maintained
// by InsertFrame), with helpers for the common jitter buffer operations.
class FrameList : public std::list<VCMFrameBuffer*> {
 public:
  // Inserts |frame| at its timestamp-sorted position.
  void InsertFrame(VCMFrameBuffer* frame);
  // Returns the frame with |timestamp|, or NULL. The frame is not removed.
  VCMFrameBuffer* FindFrame(uint32_t timestamp) const;
  // Removes and returns the frame with |timestamp|, or NULL if not found.
  VCMFrameBuffer* PopFrame(uint32_t timestamp);
  // Drops frames from the front until a key frame is at the front;
  // |*key_frame_it| points to it (or end()). Returns the drop count.
  int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it);
  // Releases frames that are old or droppably empty relative to
  // |decoding_state|. Returns the number of frames dropped.
  int CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state);
};
class VCMJitterBuffer {
public:
VCMJitterBuffer(Clock* clock,
@ -120,6 +127,8 @@ class VCMJitterBuffer {
bool* retransmitted) const;
// Inserts a packet into a frame returned from GetFrame().
// If the return value is <= 0, |frame| is invalidated and the pointer must
// be dropped after this function returns.
VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
bool* retransmitted);
@ -175,6 +184,18 @@ class VCMJitterBuffer {
// existing frames if no free frames are available. Returns an error code if
// failing, or kNoError on success.
VCMFrameBufferEnum GetFrame(const VCMPacket& packet, VCMFrameBuffer** frame);
// Returns true if |frame| is continuous in |decoding_state|, not taking
// decodable frames into account.
bool IsContinuousInState(const VCMFrameBuffer& frame,
const VCMDecodingState& decoding_state) const;
// Returns true if |frame| is continuous in the |last_decoded_state_|, taking
// all decodable frames into account.
bool IsContinuous(const VCMFrameBuffer& frame) const;
// Looks for frames in |incomplete_frames_| which are continuous in
// |last_decoded_state_| taking all decodable frames into account. Starts
// the search from |new_frame|.
void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame);
VCMFrameBuffer* NextFrame() const;
// Returns true if the NACK list was updated to cover sequence numbers up to
// |sequence_number|. If false a key frame is needed to get into a state where
// we can continue decoding.
@ -202,19 +223,8 @@ class VCMJitterBuffer {
bool RecycleFramesUntilKeyFrame();
// Sets the state of |frame| to complete if it's not too old to be decoded.
// Also updates the frame statistics. Signals the |frame_event| if this is
// the next frame to be decoded.
VCMFrameBufferEnum UpdateFrameState(VCMFrameBuffer* frame);
// Finds the oldest complete frame, used for getting next frame to decode.
// Can return a decodable, incomplete frame when enabled.
FrameList::iterator FindOldestCompleteContinuousFrame(
FrameList::iterator start_it,
const VCMDecodingState* decoding_state);
FrameList::iterator FindLastContinuousAndComplete(
FrameList::iterator start_it);
void RenderBuffer(FrameList::iterator* start_it,
FrameList::iterator* end_it);
// Also updates the frame statistics.
void UpdateFrameState(VCMFrameBuffer* frame);
// Cleans the frame list in the JB from old/empty frames.
// Should only be called prior to actual use.
@ -263,7 +273,8 @@ class VCMJitterBuffer {
int max_number_of_frames_;
// Array of pointers to the frames in jitter buffer.
VCMFrameBuffer* frame_buffers_[kMaxNumberOfFrames];
FrameList frame_list_;
FrameList decodable_frames_;
FrameList incomplete_frames_;
VCMDecodingState last_decoded_state_;
bool first_packet_since_reset_;

View File

@ -69,10 +69,10 @@ class TestBasicJitterBuffer : public ::testing::Test {
VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
return frame;
}
int CheckOutFrame(VCMEncodedFrame* frame_out,
void CheckOutFrame(VCMEncodedFrame* frame_out,
unsigned int size,
bool startCode) {
EXPECT_FALSE(frame_out == NULL);
ASSERT_TRUE(frame_out);
const uint8_t* outData = frame_out->Buffer();
unsigned int i = 0;
@ -104,7 +104,6 @@ class TestBasicJitterBuffer : public ::testing::Test {
}
}
}
return 0;
}
uint16_t seq_num_;
@ -188,7 +187,9 @@ class TestRunningJitterBuffer : public ::testing::Test {
void DropFrame(int num_packets) {
stream_generator_->GenerateFrame(kVideoFrameDelta, num_packets, 0,
clock_->TimeInMilliseconds());
clock_->TimeInMilliseconds());
for (int i = 0; i < num_packets; ++i)
stream_generator_->DropLastPacket();
clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
}
@ -261,7 +262,7 @@ TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
&retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, size_, false));
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -286,7 +287,7 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
&retransmitted));
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 2 * size_, false));
CheckOutFrame(frame_out, 2 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -329,7 +330,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 100 * size_, false));
CheckOutFrame(frame_out, 100 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -383,7 +384,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 100 * size_, false));
CheckOutFrame(frame_out, 100 * size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
}
@ -428,7 +429,7 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
frame_out = DecodeCompleteFrame();;
EXPECT_EQ(0, CheckOutFrame(frame_out, 100 * size_, false));
CheckOutFrame(frame_out, 100 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -484,7 +485,7 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 2 * size_, false));
CheckOutFrame(frame_out, 2 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@ -492,7 +493,7 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 2 * size_, false));
CheckOutFrame(frame_out, 2 * size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
}
@ -527,7 +528,7 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 2 * size_, false));
CheckOutFrame(frame_out, 2 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -559,7 +560,7 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true));
CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -631,7 +632,7 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
// One of the packets has been discarded by the jitter buffer.
// Last frame can't be extracted yet.
if (i < 10) {
EXPECT_EQ(0, CheckOutFrame(frame_out, size_, false));
CheckOutFrame(frame_out, size_, false);
if (i == 0) {
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@ -716,7 +717,7 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 100 * size_, false));
CheckOutFrame(frame_out, 100 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -765,7 +766,7 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
&retransmitted));
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 100 * size_, false));
CheckOutFrame(frame_out, 100 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -789,7 +790,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(3000u, frame_out->TimeStamp());
EXPECT_EQ(0, CheckOutFrame(frame_out, size_, false));
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@ -828,7 +829,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(timestamp_, frame_out->TimeStamp());
EXPECT_EQ(0, CheckOutFrame(frame_out, size_, false));
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@ -879,7 +880,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 2 * size_, false));
CheckOutFrame(frame_out, 2 * size_, false);
jitter_buffer_->ReleaseFrame(frame_out);
@ -908,7 +909,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
frame_out = DecodeCompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, 2 * size_, false));
CheckOutFrame(frame_out, 2 * size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
}
@ -945,14 +946,14 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
EXPECT_EQ(0, CheckOutFrame(frame_out, size_, false));
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
EXPECT_EQ(2700u, frame_out2->TimeStamp());
EXPECT_EQ(0, CheckOutFrame(frame_out2, size_, false));
CheckOutFrame(frame_out2, size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
}
@ -990,14 +991,14 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
EXPECT_EQ(0, CheckOutFrame(frame_out, size_, false));
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
EXPECT_EQ(2700u, frame_out2->TimeStamp());
EXPECT_EQ(0, CheckOutFrame(frame_out2, size_, false));
CheckOutFrame(frame_out2, size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
}
@ -1090,7 +1091,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(first_key_frame_timestamp, frame_out->TimeStamp());
EXPECT_EQ(0, CheckOutFrame(frame_out, size_, false));
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
}
@ -1111,8 +1112,8 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
packet_->timestamp = timestamp_;
packet_->frameType = kFrameEmpty;
EXPECT_EQ(kFirstPacket, jitter_buffer_->InsertPacket(*packet_,
&retransmitted));
EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_,
&retransmitted));
VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
// Timestamp should never be the last TS inserted.
if (testFrame != NULL) {
@ -1184,7 +1185,7 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
// We can decode everything from a NALU until a packet has been lost.
// Thus we can decode the first packet of the first NALU and the second NALU
// which consists of one packet.
EXPECT_EQ(0, CheckOutFrame(frame_out, packet_->sizeBytes * 2, false));
CheckOutFrame(frame_out, packet_->sizeBytes * 2, false);
jitter_buffer_->ReleaseFrame(frame_out);
// Test reordered start frame + 1 lost.
@ -1251,7 +1252,7 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
// is the last.
frame_out = DecodeIncompleteFrame();
// Only last NALU is complete.
EXPECT_EQ(0, CheckOutFrame(frame_out, insertedLength, false));
CheckOutFrame(frame_out, insertedLength, false);
jitter_buffer_->ReleaseFrame(frame_out);
@ -1302,7 +1303,7 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
frame_out = DecodeCompleteFrame();
// Only last NALU is complete
EXPECT_EQ(0, CheckOutFrame(frame_out, packet_->sizeBytes, false));
CheckOutFrame(frame_out, packet_->sizeBytes, false);
}
TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
@ -1344,7 +1345,7 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
frame_out = DecodeIncompleteFrame();
EXPECT_EQ(0, CheckOutFrame(frame_out, packet_->sizeBytes, false));
CheckOutFrame(frame_out, packet_->sizeBytes, false);
}
TEST_F(TestRunningJitterBuffer, Full) {
@ -1366,16 +1367,19 @@ TEST_F(TestRunningJitterBuffer, Full) {
TEST_F(TestRunningJitterBuffer, EmptyPackets) {
// Make sure a frame can get complete even though empty packets are missing.
stream_generator_->GenerateFrame(kVideoFrameKey, 3, 3,
clock_->TimeInMilliseconds());
clock_->TimeInMilliseconds());
bool request_key_frame = false;
EXPECT_EQ(kFirstPacket, InsertPacketAndPop(4));
// Insert empty packet.
EXPECT_EQ(kNoError, InsertPacketAndPop(4));
EXPECT_FALSE(request_key_frame);
EXPECT_EQ(kIncomplete, InsertPacketAndPop(4));
// Insert 3 media packets.
EXPECT_EQ(kFirstPacket, InsertPacketAndPop(0));
EXPECT_FALSE(request_key_frame);
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
EXPECT_FALSE(request_key_frame);
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
EXPECT_FALSE(request_key_frame);
// Insert empty packet.
EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
EXPECT_FALSE(request_key_frame);
}
@ -1482,6 +1486,22 @@ TEST_F(TestRunningJitterBuffer, KeyDeltaKeyDelta) {
}
}
// Verifies that a complete frame is not handed out for decoding while it is
// non-continuous with the last decoded frame, and that it becomes decodable
// once the frame filling the gap arrives.
TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
  // Decode a key frame to leave the initial state.
  InsertFrame(kVideoFrameKey);
  EXPECT_TRUE(DecodeCompleteFrame());
  // Generate a one-packet delta frame followed by a two-packet delta frame.
  stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
      clock_->TimeInMilliseconds());
  clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
  stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
      clock_->TimeInMilliseconds());
  // Insert the second frame's two packets first (index 1 skips over the
  // first frame's packet). The frame completes but is not continuous, so it
  // must not be decodable yet.
  EXPECT_EQ(kFirstPacket, InsertPacketAndPop(1));
  EXPECT_EQ(kCompleteSession, InsertPacketAndPop(1));
  EXPECT_FALSE(DecodeCompleteFrame());
  // Inserting the earlier frame's packet closes the gap; both frames should
  // now come out in order.
  EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
  EXPECT_TRUE(DecodeCompleteFrame());
  EXPECT_TRUE(DecodeCompleteFrame());
}
TEST_F(TestJitterBufferNack, EmptyPackets) {
// Make sure empty packets doesn't clog the jitter buffer.
jitter_buffer_->SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
@ -1499,7 +1519,7 @@ TEST_F(TestJitterBufferNack, NackTooOldPackets) {
// old packet.
DropFrame(1);
// Insert a frame which should trigger a recycle until the next key frame.
EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_,
EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
kVideoFrameDelta));
EXPECT_FALSE(DecodeCompleteFrame());
@ -1555,7 +1575,7 @@ TEST_F(TestJitterBufferNack, NackListFull) {
EXPECT_TRUE(DecodeCompleteFrame());
// Generate and drop |kNackHistoryLength| packets to fill the NACK list.
DropFrame(max_nack_list_size_);
DropFrame(max_nack_list_size_ + 1);
// Insert a frame which should trigger a recycle until the next key frame.
EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
EXPECT_FALSE(DecodeCompleteFrame());
@ -1612,6 +1632,33 @@ TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
EXPECT_TRUE(list != NULL);
}
// Checks the |retransmitted| out-parameter of InsertPacket(): it stays false
// for packets arriving in normal order and is set for a packet whose
// sequence number was previously put on the NACK list.
TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
  stream_generator_->Init(0, 0, clock_->TimeInMilliseconds());
  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
                                   clock_->TimeInMilliseconds());
  bool was_retransmitted = false;
  VCMPacket media_packet;
  // The first packet of the frame arrives normally.
  stream_generator_->PopPacket(&media_packet, 0);
  EXPECT_EQ(kFirstPacket,
            jitter_buffer_->InsertPacket(media_packet, &was_retransmitted));
  EXPECT_FALSE(was_retransmitted);
  // Skip over the second packet and insert the third, leaving a gap.
  stream_generator_->PopPacket(&media_packet, 1);
  EXPECT_EQ(kIncomplete,
            jitter_buffer_->InsertPacket(media_packet, &was_retransmitted));
  EXPECT_FALSE(was_retransmitted);
  EXPECT_FALSE(DecodeCompleteFrame());
  // The missing packet should now be the single entry on the NACK list.
  uint16_t nack_size = 0;
  bool list_extended = false;
  uint16_t* nack_list = jitter_buffer_->GetNackList(&nack_size,
                                                    &list_extended);
  EXPECT_EQ(1, nack_size);
  ASSERT_TRUE(nack_list != NULL);
  stream_generator_->PopPacket(&media_packet, 0);
  EXPECT_EQ(media_packet.seqNum, nack_list[0]);
  // Delivering the NACKed packet must flag it as retransmitted and complete
  // the frame.
  EXPECT_EQ(kCompleteSession,
            jitter_buffer_->InsertPacket(media_packet, &was_retransmitted));
  EXPECT_TRUE(was_retransmitted);
  EXPECT_TRUE(DecodeCompleteFrame());
}
TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
stream_generator_->Init(0, 0, clock_->TimeInMilliseconds());
stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,

View File

@ -44,10 +44,6 @@ void StreamGenerator::GenerateFrame(FrameType type,
int num_empty_packets,
int64_t current_time) {
timestamp_ = 90 * (current_time - start_time_);
// Move the sequence number counter if all packets from the previous frame
// wasn't collected.
sequence_number_ += packets_.size();
packets_.clear();
for (int i = 0; i < num_media_packets; ++i) {
const int packet_size = (kFrameSize + num_media_packets / 2) /
num_media_packets;
@ -123,6 +119,10 @@ bool StreamGenerator::NextPacket(VCMPacket* packet) {
return true;
}
// Removes the most recently generated packet from the pending list,
// simulating network loss of that packet.
void StreamGenerator::DropLastPacket() {
  // Guard against popping an empty container, which is undefined behavior;
  // dropping from an empty stream is simply a no-op.
  if (!packets_.empty())
    packets_.pop_back();
}
uint16_t StreamGenerator::NextSequenceNumber() const {
if (packets_.empty())
return sequence_number_;

View File

@ -46,6 +46,7 @@ class StreamGenerator {
FrameType type);
bool PopPacket(VCMPacket* packet, int index);
void DropLastPacket();
bool GetPacket(VCMPacket* packet, int index);