Reland "Avoid critsect for protection- and qm setting callbacks in

VideoSender."

The original CL is uploaded as patch set 1, the fix in ps#2, and I'll rebase in ps#3.

BUG=4534
R=pbos@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/46769004

Cr-Commit-Position: refs/heads/master@{#9000}
mflodman 2015-04-14 21:28:08 +02:00
parent 73ba7a690f
commit fcf54bdabb
18 changed files with 383 additions and 458 deletions

View File

@ -88,7 +88,6 @@ ProducerFec::ProducerFec(ForwardErrorCorrection* fec)
media_packets_fec_(),
fec_packets_(),
num_frames_(0),
incomplete_frame_(false),
num_first_partition_(0),
minimum_media_packets_fec_(1),
params_(),
@ -125,9 +124,8 @@ RedPacket* ProducerFec::BuildRedPacket(const uint8_t* data_buffer,
size_t payload_length,
size_t rtp_header_length,
int red_pl_type) {
RedPacket* red_packet = new RedPacket(payload_length +
kREDForFECHeaderLength +
rtp_header_length);
RedPacket* red_packet = new RedPacket(
payload_length + kREDForFECHeaderLength + rtp_header_length);
int pl_type = data_buffer[1] & 0x7f;
red_packet->CreateHeader(data_buffer, rtp_header_length,
red_pl_type, pl_type);
@ -142,7 +140,7 @@ int ProducerFec::AddRtpPacketAndGenerateFec(const uint8_t* data_buffer,
if (media_packets_fec_.empty()) {
params_ = new_params_;
}
incomplete_frame_ = true;
bool complete_frame = false;
const bool marker_bit = (data_buffer[1] & kRtpMarkerBitMask) ? true : false;
if (media_packets_fec_.size() < ForwardErrorCorrection::kMaxMediaPackets) {
// Generic FEC can only protect up to kMaxMediaPackets packets.
@ -153,13 +151,13 @@ int ProducerFec::AddRtpPacketAndGenerateFec(const uint8_t* data_buffer,
}
if (marker_bit) {
++num_frames_;
incomplete_frame_ = false;
complete_frame = true;
}
// Produce FEC over at most |params_.max_fec_frames| frames, or as soon as:
// (1) the excess overhead (actual overhead - requested/target overhead) is
// less than |kMaxExcessOverhead|, and
// (2) at least |minimum_media_packets_fec_| media packets is reached.
if (!incomplete_frame_ &&
if (complete_frame &&
(num_frames_ == params_.max_fec_frames ||
(ExcessOverheadBelowMax() && MinimumMediaPacketsReached()))) {
assert(num_first_partition_ <=
@ -206,37 +204,43 @@ bool ProducerFec::MinimumMediaPacketsReached() {
}
bool ProducerFec::FecAvailable() const {
return (fec_packets_.size() > 0);
return !fec_packets_.empty();
}
RedPacket* ProducerFec::GetFecPacket(int red_pl_type,
int fec_pl_type,
uint16_t seq_num,
size_t rtp_header_length) {
if (fec_packets_.empty())
return NULL;
// Build FEC packet. The FEC packets in |fec_packets_| doesn't
// have RTP headers, so we're reusing the header from the last
// media packet.
ForwardErrorCorrection::Packet* packet_to_send = fec_packets_.front();
ForwardErrorCorrection::Packet* last_media_packet = media_packets_fec_.back();
RedPacket* return_packet = new RedPacket(packet_to_send->length +
kREDForFECHeaderLength +
rtp_header_length);
return_packet->CreateHeader(last_media_packet->data,
rtp_header_length,
red_pl_type,
fec_pl_type);
return_packet->SetSeqNum(seq_num);
return_packet->ClearMarkerBit();
return_packet->AssignPayload(packet_to_send->data, packet_to_send->length);
fec_packets_.pop_front();
if (fec_packets_.empty()) {
// Done with all the FEC packets. Reset for next run.
DeletePackets();
num_frames_ = 0;
size_t ProducerFec::NumAvailableFecPackets() const {
return fec_packets_.size();
}
std::vector<RedPacket*> ProducerFec::GetFecPackets(int red_pl_type,
int fec_pl_type,
uint16_t first_seq_num,
size_t rtp_header_length) {
std::vector<RedPacket*> fec_packets;
fec_packets.reserve(fec_packets_.size());
uint16_t sequence_number = first_seq_num;
while (!fec_packets_.empty()) {
// Build FEC packet. The FEC packets in |fec_packets_| doesn't
// have RTP headers, so we're reusing the header from the last
// media packet.
ForwardErrorCorrection::Packet* packet_to_send = fec_packets_.front();
ForwardErrorCorrection::Packet* last_media_packet =
media_packets_fec_.back();
RedPacket* red_packet = new RedPacket(
packet_to_send->length + kREDForFECHeaderLength + rtp_header_length);
red_packet->CreateHeader(last_media_packet->data, rtp_header_length,
red_pl_type, fec_pl_type);
red_packet->SetSeqNum(sequence_number++);
red_packet->ClearMarkerBit();
red_packet->AssignPayload(packet_to_send->data, packet_to_send->length);
fec_packets.push_back(red_packet);
fec_packets_.pop_front();
}
return return_packet;
DeletePackets();
num_frames_ = 0;
return fec_packets;
}
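For orientation, a minimal caller-side sketch of the new batch API, mirroring how RTPSenderVideo uses it further down. Here |producer_fec_| and |_rtpSender| stand for the members shown in rtp_sender_video.cc, and red_payload_type, fec_payload_type and rtp_header_length are placeholders; ownership of the returned RedPacket pointers passes to the caller.

// Caller-side sketch (not part of this CL).
uint16_t num_fec_packets = producer_fec_.NumAvailableFecPackets();
if (num_fec_packets > 0) {
  // Reserve consecutive sequence numbers for the whole FEC batch up front.
  uint16_t first_seq_num = _rtpSender.AllocateSequenceNumber(num_fec_packets);
  std::vector<RedPacket*> fec_packets = producer_fec_.GetFecPackets(
      red_payload_type, fec_payload_type, first_seq_num, rtp_header_length);
  for (RedPacket* packet : fec_packets) {
    // ... hand packet->data() / packet->length() to the network here ...
    delete packet;  // GetFecPackets transfers ownership to the caller.
  }
}
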
int ProducerFec::Overhead() const {

View File

@ -12,6 +12,7 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_PRODUCER_FEC_H_
#include <list>
#include <vector>
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
@ -45,6 +46,7 @@ class ProducerFec {
void SetFecParameters(const FecProtectionParams* params,
int max_fec_frames);
// The caller is expected to delete the memory when done.
RedPacket* BuildRedPacket(const uint8_t* data_buffer,
size_t payload_length,
size_t rtp_header_length,
@ -59,11 +61,14 @@ class ProducerFec {
bool MinimumMediaPacketsReached();
bool FecAvailable() const;
size_t NumAvailableFecPackets() const;
RedPacket* GetFecPacket(int red_pl_type,
int fec_pl_type,
uint16_t seq_num,
size_t rtp_header_length);
// GetFecPackets allocates memory and creates FEC packets, but the caller is
// assumed to delete the memory when done with the packets.
std::vector<RedPacket*> GetFecPackets(int red_pl_type,
int fec_pl_type,
uint16_t first_seq_num,
size_t rtp_header_length);
private:
void DeletePackets();
@ -72,7 +77,6 @@ class ProducerFec {
std::list<ForwardErrorCorrection::Packet*> media_packets_fec_;
std::list<ForwardErrorCorrection::Packet*> fec_packets_;
int num_frames_;
bool incomplete_frame_;
int num_first_partition_;
int minimum_media_packets_fec_;
FecProtectionParams params_;

View File

@ -77,19 +77,19 @@ TEST_F(ProducerFecTest, OneFrameFec) {
}
EXPECT_TRUE(producer_->FecAvailable());
uint16_t seq_num = generator_->NextSeqNum();
RedPacket* packet = producer_->GetFecPacket(kRedPayloadType,
kFecPayloadType,
seq_num,
kRtpHeaderSize);
std::vector<RedPacket*> packets = producer_->GetFecPackets(kRedPayloadType,
kFecPayloadType,
seq_num,
kRtpHeaderSize);
EXPECT_FALSE(producer_->FecAvailable());
ASSERT_TRUE(packet != NULL);
ASSERT_EQ(1u, packets.size());
VerifyHeader(seq_num, last_timestamp,
kRedPayloadType, kFecPayloadType, packet, false);
kRedPayloadType, kFecPayloadType, packets.front(), false);
while (!rtp_packets.empty()) {
delete rtp_packets.front();
rtp_packets.pop_front();
}
delete packet;
delete packets.front();
}
TEST_F(ProducerFecTest, TwoFrameFec) {
@ -120,39 +120,36 @@ TEST_F(ProducerFecTest, TwoFrameFec) {
}
EXPECT_TRUE(producer_->FecAvailable());
uint16_t seq_num = generator_->NextSeqNum();
RedPacket* packet = producer_->GetFecPacket(kRedPayloadType,
kFecPayloadType,
seq_num,
kRtpHeaderSize);
std::vector<RedPacket*> packets = producer_->GetFecPackets(kRedPayloadType,
kFecPayloadType,
seq_num,
kRtpHeaderSize);
EXPECT_FALSE(producer_->FecAvailable());
EXPECT_TRUE(packet != NULL);
VerifyHeader(seq_num, last_timestamp,
kRedPayloadType, kFecPayloadType, packet, false);
ASSERT_EQ(1u, packets.size());
VerifyHeader(seq_num, last_timestamp, kRedPayloadType, kFecPayloadType,
packets.front(), false);
while (!rtp_packets.empty()) {
delete rtp_packets.front();
rtp_packets.pop_front();
}
delete packet;
delete packets.front();
}
TEST_F(ProducerFecTest, BuildRedPacket) {
generator_->NewFrame(1);
RtpPacket* packet = generator_->NextPacket(0, 10);
RedPacket* red_packet = producer_->BuildRedPacket(packet->data,
packet->length -
kRtpHeaderSize,
kRtpHeaderSize,
kRedPayloadType);
rtc::scoped_ptr<RedPacket> red_packet(producer_->BuildRedPacket(
packet->data, packet->length - kRtpHeaderSize, kRtpHeaderSize,
kRedPayloadType));
EXPECT_EQ(packet->length + 1, red_packet->length());
VerifyHeader(packet->header.header.sequenceNumber,
packet->header.header.timestamp,
kRedPayloadType,
packet->header.header.payloadType,
red_packet,
red_packet.get(),
true); // Marker bit set.
for (int i = 0; i < 10; ++i)
EXPECT_EQ(i, red_packet->data()[kRtpHeaderSize + 1 + i]);
delete red_packet;
delete packet;
}

View File

@ -407,7 +407,7 @@ int32_t ModuleRtpRtcpImpl::SendOutgoingData(
}
return rtp_sender_.SendOutgoingData(
frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
payload_size, fragmentation, NULL, rtp_video_hdr);
payload_size, fragmentation, rtp_video_hdr);
}
bool ModuleRtpRtcpImpl::TimeToSendPacket(uint32_t ssrc,

View File

@ -323,14 +323,14 @@ int32_t RTPSender::RegisterPayload(
}
return -1;
}
int32_t ret_val = -1;
int32_t ret_val = 0;
RtpUtility::Payload* payload = NULL;
if (audio_configured_) {
// TODO(mflodman): Change to CreateAudioPayload and make static.
ret_val = audio_->RegisterAudioPayload(payload_name, payload_number,
frequency, channels, rate, payload);
} else {
ret_val = video_->RegisterVideoPayload(payload_name, payload_number, rate,
payload);
payload = video_->CreateVideoPayload(payload_name, payload_number, rate);
}
if (payload) {
payload_type_map_[payload_number] = payload;
@ -489,7 +489,6 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
VideoCodecInformation* codec_info,
const RTPVideoHeader* rtp_hdr) {
uint32_t ssrc;
{
@ -526,7 +525,7 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
ret_val =
video_->SendVideo(video_type, frame_type, payload_type,
capture_timestamp, capture_time_ms, payload_data,
payload_size, fragmentation, codec_info, rtp_hdr);
payload_size, fragmentation, rtp_hdr);
}
CriticalSectionScoped cs(statistics_crit_.get());
@ -745,7 +744,8 @@ int RTPSender::SelectiveRetransmissions() const {
int RTPSender::SetSelectiveRetransmissions(uint8_t settings) {
if (!video_)
return -1;
return video_->SetSelectiveRetransmissions(settings);
video_->SetSelectiveRetransmissions(settings);
return 0;
}
void RTPSender::OnReceivedNACK(const std::list<uint16_t>& nack_sequence_numbers,
@ -1086,9 +1086,11 @@ size_t RTPSender::RTPHeaderLength() const {
return rtp_header_length;
}
uint16_t RTPSender::IncrementSequenceNumber() {
uint16_t RTPSender::AllocateSequenceNumber(uint16_t packets_to_send) {
CriticalSectionScoped cs(send_critsect_.get());
return sequence_number_++;
uint16_t first_allocated_sequence_number = sequence_number_;
sequence_number_ += packets_to_send;
return first_allocated_sequence_number;
}
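AllocateSequenceNumber reserves a contiguous block of sequence numbers under send_critsect_, so media and FEC packets that are built and sent later (outside the lock) still get consecutive, non-colliding numbers. A small self-contained sketch of the same two steps, illustrating that the uint16_t counter wraps modulo 2^16; this is an illustration, not the WebRTC implementation:

#include <cstdint>
#include <iostream>

// Reserve |packets_to_send| numbers starting at the current counter value.
uint16_t AllocateSequenceNumber(uint16_t* counter, uint16_t packets_to_send) {
  uint16_t first = *counter;
  *counter += packets_to_send;  // uint16_t arithmetic wraps modulo 2^16.
  return first;
}

int main() {
  uint16_t seq = 65534;
  uint16_t first = AllocateSequenceNumber(&seq, 3);  // reserves 65534, 65535, 0
  std::cout << first << " " << seq << "\n";          // prints "65534 1"
  return 0;
}
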
void RTPSender::ResetDataCounters() {
@ -1729,14 +1731,6 @@ int32_t RTPSender::RED(int8_t *payload_type) const {
return audio_->RED(*payload_type);
}
// Video
VideoCodecInformation *RTPSender::CodecInformationVideo() {
if (audio_configured_) {
return NULL;
}
return video_->CodecInformationVideo();
}
RtpVideoCodecTypes RTPSender::VideoCodecType() const {
assert(!audio_configured_ && "Sender is an audio stream!");
return video_->VideoCodecType();
@ -1762,8 +1756,8 @@ int32_t RTPSender::SetGenericFECStatus(bool enable,
if (audio_configured_) {
return -1;
}
return video_->SetGenericFECStatus(enable, payload_type_red,
payload_type_fec);
video_->SetGenericFECStatus(enable, payload_type_red, payload_type_fec);
return 0;
}
int32_t RTPSender::GenericFECStatus(bool* enable,
@ -1772,8 +1766,8 @@ int32_t RTPSender::GenericFECStatus(bool* enable,
if (audio_configured_) {
return -1;
}
return video_->GenericFECStatus(
*enable, *payload_type_red, *payload_type_fec);
video_->GenericFECStatus(*enable, *payload_type_red, *payload_type_fec);
return 0;
}
int32_t RTPSender::SetFecParameters(
@ -1782,7 +1776,8 @@ int32_t RTPSender::SetFecParameters(
if (audio_configured_) {
return -1;
}
return video_->SetFecParameters(delta_params, key_params);
video_->SetFecParameters(delta_params, key_params);
return 0;
}
void RTPSender::BuildRtxPacket(uint8_t* buffer, size_t* length,

View File

@ -24,8 +24,8 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_packet_history.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/ssrc_database.h"
#include "webrtc/modules/rtp_rtcp/source/video_codec_information.h"
#define MAX_INIT_RTP_SEQ_NUMBER 32767 // 2^15 -1.
@ -61,7 +61,10 @@ class RTPSenderInterface {
bool inc_sequence_number = true) = 0;
virtual size_t RTPHeaderLength() const = 0;
virtual uint16_t IncrementSequenceNumber() = 0;
// Returns the next sequence number to use for a packet and allocates
// 'packets_to_send' number of sequence numbers. It's important all allocated
// sequence numbers are used in sequence to avoid perceived packet loss.
virtual uint16_t AllocateSequenceNumber(uint16_t packets_to_send) = 0;
virtual uint16_t SequenceNumber() const = 0;
virtual size_t MaxPayloadLength() const = 0;
virtual size_t MaxDataPayloadLength() const = 0;
@ -155,7 +158,6 @@ class RTPSender : public RTPSenderInterface {
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
VideoCodecInformation* codec_info = NULL,
const RTPVideoHeader* rtp_hdr = NULL);
// RTP header extension
@ -227,7 +229,7 @@ class RTPSender : public RTPSenderInterface {
const bool inc_sequence_number = true) override;
size_t RTPHeaderLength() const override;
uint16_t IncrementSequenceNumber() override;
uint16_t AllocateSequenceNumber(uint16_t packets_to_send) override;
size_t MaxPayloadLength() const override;
uint16_t PacketOverHead() const override;
@ -261,9 +263,6 @@ class RTPSender : public RTPSenderInterface {
// Get payload type for Redundant Audio Data RFC 2198.
int32_t RED(int8_t *payload_type) const;
// Video.
VideoCodecInformation *CodecInformationVideo();
RtpVideoCodecTypes VideoCodecType() const;
uint32_t MaxConfiguredBitrateVideo() const;

View File

@ -1353,7 +1353,7 @@ TEST_F(RtpSenderVideoTest, SendVideoWithCVO) {
rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload,
kTimestamp, 0, packet_, sizeof(packet_), NULL,
NULL, &hdr);
&hdr);
RtpHeaderExtensionMap map;
map.Register(kRtpExtensionVideoRotation, kVideoRotationExtensionId);

View File

@ -10,10 +10,12 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_video.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include "webrtc/base/checks.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
@ -33,8 +35,8 @@ struct RtpPacket {
RTPSenderVideo::RTPSenderVideo(Clock* clock, RTPSenderInterface* rtpSender)
: _rtpSender(*rtpSender),
crit_(CriticalSectionWrapper::CreateCriticalSection()),
_videoType(kRtpVideoGeneric),
_videoCodecInformation(NULL),
_maxBitrate(0),
_retransmissionSettings(kRetransmitBaseLayer),
@ -43,7 +45,6 @@ RTPSenderVideo::RTPSenderVideo(Clock* clock, RTPSenderInterface* rtpSender)
_fecEnabled(false),
_payloadTypeRED(-1),
_payloadTypeFEC(-1),
_numberFirstPartition(0),
delta_fec_params_(),
key_fec_params_(),
producer_fec_(&_fec),
@ -57,9 +58,6 @@ RTPSenderVideo::RTPSenderVideo(Clock* clock, RTPSenderInterface* rtpSender)
}
RTPSenderVideo::~RTPSenderVideo() {
if (_videoCodecInformation) {
delete _videoCodecInformation;
}
}
void RTPSenderVideo::SetVideoCodecType(RtpVideoCodecTypes videoType) {
@ -70,11 +68,11 @@ RtpVideoCodecTypes RTPSenderVideo::VideoCodecType() const {
return _videoType;
}
int32_t RTPSenderVideo::RegisterVideoPayload(
// Static.
RtpUtility::Payload* RTPSenderVideo::CreateVideoPayload(
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t maxBitRate,
RtpUtility::Payload*& payload) {
const uint32_t maxBitRate) {
RtpVideoCodecTypes videoType = kRtpVideoGeneric;
if (RtpUtility::StringCompare(payloadName, "VP8", 3)) {
videoType = kRtpVideoVp8;
@ -85,103 +83,94 @@ int32_t RTPSenderVideo::RegisterVideoPayload(
} else {
videoType = kRtpVideoGeneric;
}
payload = new RtpUtility::Payload;
RtpUtility::Payload* payload = new RtpUtility::Payload();
payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
strncpy(payload->name, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
payload->typeSpecific.Video.videoCodecType = videoType;
payload->typeSpecific.Video.maxRate = maxBitRate;
payload->audio = false;
return 0;
return payload;
}
int32_t RTPSenderVideo::SendVideoPacket(uint8_t* data_buffer,
const size_t payload_length,
const size_t rtp_header_length,
const uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType storage,
bool protect) {
if (_fecEnabled) {
int ret = 0;
size_t fec_overhead_sent = 0;
size_t video_sent = 0;
void RTPSenderVideo::SendVideoPacket(uint8_t* data_buffer,
const size_t payload_length,
const size_t rtp_header_length,
uint16_t seq_num,
const uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType storage) {
if (_rtpSender.SendToNetwork(data_buffer, payload_length, rtp_header_length,
capture_time_ms, storage,
PacedSender::kNormalPriority) == 0) {
_videoBitrate.Update(payload_length + rtp_header_length);
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketNormal", "timestamp", capture_timestamp,
"seqnum", seq_num);
} else {
LOG(LS_WARNING) << "Failed to send video packet " << seq_num;
}
}
RedPacket* red_packet = producer_fec_.BuildRedPacket(
data_buffer, payload_length, rtp_header_length, _payloadTypeRED);
void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
const size_t payload_length,
const size_t rtp_header_length,
uint16_t media_seq_num,
const uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType media_packet_storage,
bool protect) {
rtc::scoped_ptr<RedPacket> red_packet;
std::vector<RedPacket*> fec_packets;
StorageType fec_storage = kDontRetransmit;
uint16_t next_fec_sequence_number = 0;
{
// Only protect while creating RED and FEC packets, not when sending.
CriticalSectionScoped cs(crit_.get());
red_packet.reset(producer_fec_.BuildRedPacket(
data_buffer, payload_length, rtp_header_length, _payloadTypeRED));
if (protect) {
producer_fec_.AddRtpPacketAndGenerateFec(data_buffer, payload_length,
rtp_header_length);
}
uint16_t num_fec_packets = producer_fec_.NumAvailableFecPackets();
if (num_fec_packets > 0) {
next_fec_sequence_number =
_rtpSender.AllocateSequenceNumber(num_fec_packets);
fec_packets = producer_fec_.GetFecPackets(
_payloadTypeRED, _payloadTypeFEC, next_fec_sequence_number,
rtp_header_length);
DCHECK_EQ(num_fec_packets, fec_packets.size());
if (_retransmissionSettings & kRetransmitFECPackets)
fec_storage = kAllowRetransmission;
}
}
if (_rtpSender.SendToNetwork(
red_packet->data(), red_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, media_packet_storage,
PacedSender::kNormalPriority) == 0) {
_videoBitrate.Update(red_packet->length());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketRed", "timestamp", capture_timestamp,
"seqnum", _rtpSender.SequenceNumber());
// Sending the media packet with RED header.
int packet_success =
_rtpSender.SendToNetwork(red_packet->data(),
red_packet->length() - rtp_header_length,
rtp_header_length,
capture_time_ms,
storage,
PacedSender::kNormalPriority);
ret |= packet_success;
if (packet_success == 0) {
video_sent += red_packet->length();
}
delete red_packet;
red_packet = NULL;
if (protect) {
ret = producer_fec_.AddRtpPacketAndGenerateFec(
data_buffer, payload_length, rtp_header_length);
if (ret != 0)
return ret;
}
while (producer_fec_.FecAvailable()) {
red_packet =
producer_fec_.GetFecPacket(_payloadTypeRED,
_payloadTypeFEC,
_rtpSender.IncrementSequenceNumber(),
rtp_header_length);
StorageType storage = kDontRetransmit;
if (_retransmissionSettings & kRetransmitFECPackets) {
storage = kAllowRetransmission;
}
"seqnum", media_seq_num);
} else {
LOG(LS_WARNING) << "Failed to send RED packet " << media_seq_num;
}
for (RedPacket* fec_packet : fec_packets) {
if (_rtpSender.SendToNetwork(
fec_packet->data(), fec_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, fec_storage,
PacedSender::kNormalPriority) == 0) {
_fecOverheadRate.Update(fec_packet->length());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketFec", "timestamp", capture_timestamp,
"seqnum", _rtpSender.SequenceNumber());
// Sending FEC packet with RED header.
int packet_success =
_rtpSender.SendToNetwork(red_packet->data(),
red_packet->length() - rtp_header_length,
rtp_header_length,
capture_time_ms,
storage,
PacedSender::kNormalPriority);
ret |= packet_success;
if (packet_success == 0) {
fec_overhead_sent += red_packet->length();
}
delete red_packet;
red_packet = NULL;
"seqnum", next_fec_sequence_number);
} else {
LOG(LS_WARNING) << "Failed to send FEC packet "
<< next_fec_sequence_number;
}
_videoBitrate.Update(video_sent);
_fecOverheadRate.Update(fec_overhead_sent);
return ret;
delete fec_packet;
++next_fec_sequence_number;
}
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketNormal", "timestamp", capture_timestamp,
"seqnum", _rtpSender.SequenceNumber());
int ret = _rtpSender.SendToNetwork(data_buffer,
payload_length,
rtp_header_length,
capture_time_ms,
storage,
PacedSender::kNormalPriority);
if (ret == 0) {
_videoBitrate.Update(payload_length + rtp_header_length);
}
return ret;
}
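The core of the change is the locking pattern in SendVideoPacketAsRed(): crit_ is held only while building the RED media packet, generating FEC and reserving sequence numbers, and is released before any SendToNetwork call. A condensed sketch of that structure, with arguments elided as "..." for brevity:

// Condensed from SendVideoPacketAsRed() above; arguments elided.
rtc::scoped_ptr<RedPacket> red_packet;
std::vector<RedPacket*> fec_packets;
uint16_t next_fec_sequence_number = 0;
{
  // crit_ is held only while touching producer_fec_ state.
  CriticalSectionScoped cs(crit_.get());
  red_packet.reset(producer_fec_.BuildRedPacket(...));
  if (protect)
    producer_fec_.AddRtpPacketAndGenerateFec(...);
  uint16_t num_fec_packets = producer_fec_.NumAvailableFecPackets();
  if (num_fec_packets > 0) {
    next_fec_sequence_number =
        _rtpSender.AllocateSequenceNumber(num_fec_packets);
    fec_packets = producer_fec_.GetFecPackets(...);
  }
}  // Lock released before any network send.
_rtpSender.SendToNetwork(red_packet->data(), ...);
for (RedPacket* fec_packet : fec_packets) {
  _rtpSender.SendToNetwork(fec_packet->data(), ...);
  delete fec_packet;
}
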
int32_t RTPSenderVideo::SendRTPIntraRequest() {
@ -204,9 +193,10 @@ int32_t RTPSenderVideo::SendRTPIntraRequest() {
data, 0, length, -1, kDontStore, PacedSender::kNormalPriority);
}
int32_t RTPSenderVideo::SetGenericFECStatus(const bool enable,
const uint8_t payloadTypeRED,
const uint8_t payloadTypeFEC) {
void RTPSenderVideo::SetGenericFECStatus(const bool enable,
const uint8_t payloadTypeRED,
const uint8_t payloadTypeFEC) {
CriticalSectionScoped cs(crit_.get());
_fecEnabled = enable;
_payloadTypeRED = payloadTypeRED;
_payloadTypeFEC = payloadTypeFEC;
@ -215,19 +205,19 @@ int32_t RTPSenderVideo::SetGenericFECStatus(const bool enable,
delta_fec_params_.max_fec_frames = key_fec_params_.max_fec_frames = 1;
delta_fec_params_.fec_mask_type = key_fec_params_.fec_mask_type =
kFecMaskRandom;
return 0;
}
int32_t RTPSenderVideo::GenericFECStatus(bool& enable,
uint8_t& payloadTypeRED,
uint8_t& payloadTypeFEC) const {
void RTPSenderVideo::GenericFECStatus(bool& enable,
uint8_t& payloadTypeRED,
uint8_t& payloadTypeFEC) const {
CriticalSectionScoped cs(crit_.get());
enable = _fecEnabled;
payloadTypeRED = _payloadTypeRED;
payloadTypeFEC = _payloadTypeFEC;
return 0;
}
size_t RTPSenderVideo::FECPacketOverhead() const {
CriticalSectionScoped cs(crit_.get());
if (_fecEnabled) {
// Overhead is FEC headers plus RED for FEC header plus anything in RTP
// header beyond the 12 bytes base header (CSRC list, extensions...)
@ -240,14 +230,13 @@ size_t RTPSenderVideo::FECPacketOverhead() const {
return 0;
}
int32_t RTPSenderVideo::SetFecParameters(
const FecProtectionParams* delta_params,
const FecProtectionParams* key_params) {
assert(delta_params);
assert(key_params);
void RTPSenderVideo::SetFecParameters(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params) {
CriticalSectionScoped cs(crit_.get());
DCHECK(delta_params);
DCHECK(key_params);
delta_fec_params_ = *delta_params;
key_fec_params_ = *key_params;
return 0;
}
int32_t RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
@ -258,49 +247,26 @@ int32_t RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
const uint8_t* payloadData,
const size_t payloadSize,
const RTPFragmentationHeader* fragmentation,
VideoCodecInformation* codecInfo,
const RTPVideoHeader* rtpHdr) {
if (payloadSize == 0) {
return -1;
}
if (frameType == kVideoFrameKey) {
producer_fec_.SetFecParameters(&key_fec_params_, _numberFirstPartition);
} else {
producer_fec_.SetFecParameters(&delta_fec_params_, _numberFirstPartition);
rtc::scoped_ptr<RtpPacketizer> packetizer(
RtpPacketizer::Create(videoType, _rtpSender.MaxDataPayloadLength(),
&(rtpHdr->codecHeader), frameType));
StorageType storage = kDontStore;
bool fec_enabled = false;
{
CriticalSectionScoped cs(crit_.get());
FecProtectionParams* fec_params =
frameType == kVideoFrameKey ? &key_fec_params_ : &delta_fec_params_;
producer_fec_.SetFecParameters(fec_params, 0);
storage = packetizer->GetStorageType(_retransmissionSettings);
fec_enabled = _fecEnabled;
}
// Default setting for number of first partition packets:
// Will be extracted in SendVP8 for VP8 codec; other codecs use 0
_numberFirstPartition = 0;
return Send(videoType, frameType, payloadType, captureTimeStamp,
capture_time_ms, payloadData, payloadSize, fragmentation, rtpHdr)
? 0
: -1;
}
VideoCodecInformation* RTPSenderVideo::CodecInformationVideo() {
return _videoCodecInformation;
}
void RTPSenderVideo::SetMaxConfiguredBitrateVideo(const uint32_t maxBitrate) {
_maxBitrate = maxBitrate;
}
uint32_t RTPSenderVideo::MaxConfiguredBitrateVideo() const {
return _maxBitrate;
}
bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType,
const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const size_t payloadSize,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtpHdr) {
// Register CVO rtp header extension at the first time when we receive a frame
// with pending rotation.
RTPSenderInterface::CVOMode cvo_mode = RTPSenderInterface::kCVONone;
@ -311,10 +277,6 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType,
uint16_t rtp_header_length = _rtpSender.RTPHeaderLength();
size_t payload_bytes_to_send = payloadSize;
const uint8_t* data = payloadData;
size_t max_payload_length = _rtpSender.MaxDataPayloadLength();
rtc::scoped_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
videoType, max_payload_length, &(rtpHdr->codecHeader), frameType));
// TODO(changbin): we currently don't support to configure the codec to
// output multiple partitions for VP8. Should remove below check after the
@ -328,16 +290,14 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType,
while (!last) {
uint8_t dataBuffer[IP_PACKET_SIZE] = {0};
size_t payload_bytes_in_packet = 0;
if (!packetizer->NextPacket(
&dataBuffer[rtp_header_length], &payload_bytes_in_packet, &last)) {
return false;
if (!packetizer->NextPacket(&dataBuffer[rtp_header_length],
&payload_bytes_in_packet, &last)) {
return -1;
}
// Write RTP header.
// Set marker bit true if this is the last packet in frame.
_rtpSender.BuildRTPheader(
dataBuffer, payloadType, last, captureTimeStamp, capture_time_ms);
// According to
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
// ts_126114v120700p.pdf Section 7.4.5:
@ -350,7 +310,7 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType,
// value sent.
// Here we are adding it to every packet of every frame at this point.
if (!rtpHdr) {
assert(!_rtpSender.IsRtpHeaderExtensionRegistered(
DCHECK(!_rtpSender.IsRtpHeaderExtensionRegistered(
kRtpExtensionVideoRotation));
} else if (cvo_mode == RTPSenderInterface::kCVOActivated) {
// Checking whether CVO header extension is registered will require taking
@ -365,22 +325,29 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType,
_rtpSender.UpdateVideoRotation(dataBuffer, packetSize, rtp_header,
rtpHdr->rotation);
}
if (SendVideoPacket(dataBuffer,
payload_bytes_in_packet,
rtp_header_length,
captureTimeStamp,
capture_time_ms,
packetizer->GetStorageType(_retransmissionSettings),
packetizer->GetProtectionType() == kProtectedPacket)) {
LOG(LS_WARNING) << packetizer->ToString()
<< " failed to send packet number "
<< _rtpSender.SequenceNumber();
if (fec_enabled) {
SendVideoPacketAsRed(dataBuffer, payload_bytes_in_packet,
rtp_header_length, _rtpSender.SequenceNumber(),
captureTimeStamp, capture_time_ms, storage,
packetizer->GetProtectionType() == kProtectedPacket);
} else {
SendVideoPacket(dataBuffer, payload_bytes_in_packet, rtp_header_length,
_rtpSender.SequenceNumber(), captureTimeStamp,
capture_time_ms, storage);
}
}
TRACE_EVENT_ASYNC_END1(
"webrtc", "Video", capture_time_ms, "timestamp", _rtpSender.Timestamp());
return true;
return 0;
}
void RTPSenderVideo::SetMaxConfiguredBitrateVideo(const uint32_t maxBitrate) {
_maxBitrate = maxBitrate;
}
uint32_t RTPSenderVideo::MaxConfiguredBitrateVideo() const {
return _maxBitrate;
}
void RTPSenderVideo::ProcessBitrate() {
@ -397,12 +364,13 @@ uint32_t RTPSenderVideo::FecOverheadRate() const {
}
int RTPSenderVideo::SelectiveRetransmissions() const {
CriticalSectionScoped cs(crit_.get());
return _retransmissionSettings;
}
int RTPSenderVideo::SetSelectiveRetransmissions(uint8_t settings) {
void RTPSenderVideo::SetSelectiveRetransmissions(uint8_t settings) {
CriticalSectionScoped cs(crit_.get());
_retransmissionSettings = settings;
return 0;
}
} // namespace webrtc

View File

@ -13,6 +13,8 @@
#include <list>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
@ -37,10 +39,10 @@ class RTPSenderVideo {
size_t FECPacketOverhead() const;
int32_t RegisterVideoPayload(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t maxBitRate,
RtpUtility::Payload*& payload);
static RtpUtility::Payload* CreateVideoPayload(
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t maxBitRate);
int32_t SendVideo(const RtpVideoCodecTypes videoType,
const FrameType frameType,
@ -50,30 +52,27 @@ class RTPSenderVideo {
const uint8_t* payloadData,
const size_t payloadSize,
const RTPFragmentationHeader* fragmentation,
VideoCodecInformation* codecInfo,
const RTPVideoHeader* rtpHdr);
int32_t SendRTPIntraRequest();
void SetVideoCodecType(RtpVideoCodecTypes type);
VideoCodecInformation* CodecInformationVideo();
void SetMaxConfiguredBitrateVideo(const uint32_t maxBitrate);
uint32_t MaxConfiguredBitrateVideo() const;
// FEC
int32_t SetGenericFECStatus(const bool enable,
const uint8_t payloadTypeRED,
const uint8_t payloadTypeFEC);
void SetGenericFECStatus(const bool enable,
const uint8_t payloadTypeRED,
const uint8_t payloadTypeFEC);
int32_t GenericFECStatus(bool& enable,
uint8_t& payloadTypeRED,
uint8_t& payloadTypeFEC) const;
void GenericFECStatus(bool& enable,
uint8_t& payloadTypeRED,
uint8_t& payloadTypeFEC) const;
int32_t SetFecParameters(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params);
void SetFecParameters(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params);
void ProcessBitrate();
@ -81,45 +80,43 @@ class RTPSenderVideo {
uint32_t FecOverheadRate() const;
int SelectiveRetransmissions() const;
int SetSelectiveRetransmissions(uint8_t settings);
void SetSelectiveRetransmissions(uint8_t settings);
protected:
virtual int32_t SendVideoPacket(uint8_t* dataBuffer,
const size_t payloadLength,
const size_t rtpHeaderLength,
const uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType storage,
bool protect);
private:
void SendVideoPacket(uint8_t* dataBuffer,
const size_t payloadLength,
const size_t rtpHeaderLength,
uint16_t seq_num,
const uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType storage);
private:
bool Send(const RtpVideoCodecTypes videoType,
const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const size_t payloadSize,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtpHdr);
void SendVideoPacketAsRed(uint8_t* dataBuffer,
const size_t payloadLength,
const size_t rtpHeaderLength,
uint16_t video_seq_num,
const uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType media_packet_storage,
bool protect);
private:
RTPSenderInterface& _rtpSender;
// Should never be held when calling out of this class.
const rtc::scoped_ptr<CriticalSectionWrapper> crit_;
RtpVideoCodecTypes _videoType;
VideoCodecInformation* _videoCodecInformation;
uint32_t _maxBitrate;
int32_t _retransmissionSettings;
int32_t _retransmissionSettings GUARDED_BY(crit_);
// FEC
ForwardErrorCorrection _fec;
bool _fecEnabled;
int8_t _payloadTypeRED;
int8_t _payloadTypeFEC;
unsigned int _numberFirstPartition;
FecProtectionParams delta_fec_params_;
FecProtectionParams key_fec_params_;
ProducerFec producer_fec_;
bool _fecEnabled GUARDED_BY(crit_);
int8_t _payloadTypeRED GUARDED_BY(crit_);
int8_t _payloadTypeFEC GUARDED_BY(crit_);
FecProtectionParams delta_fec_params_ GUARDED_BY(crit_);
FecProtectionParams key_fec_params_ GUARDED_BY(crit_);
ProducerFec producer_fec_ GUARDED_BY(crit_);
// Bitrate used for FEC payload, RED headers, RTP headers for FEC packets
// and any padding overhead.
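The FEC members above are now annotated with GUARDED_BY(crit_) via the newly included webrtc/base/thread_annotations.h, so Clang's -Wthread-safety analysis can flag unlocked accesses, assuming the critical-section wrapper types carry matching lockable annotations. A small hypothetical sketch of what the analysis catches; none of these names are part of the CL:

// Hypothetical illustration only.
class Annotated {
 public:
  void Enable() {
    CriticalSectionScoped cs(crit_.get());
    enabled_ = true;   // OK: crit_ is held.
  }
  bool EnabledUnlocked() const {
    return enabled_;   // -Wthread-safety warning: enabled_ is
  }                    // GUARDED_BY(crit_) but crit_ is not held.
 private:
  const rtc::scoped_ptr<CriticalSectionWrapper> crit_;
  bool enabled_ GUARDED_BY(crit_);
};
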

View File

@ -82,7 +82,9 @@ public:
};
static VideoCodingModule* Create(
VideoEncoderRateObserver* encoder_rate_observer);
Clock* clock,
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback);
static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
@ -267,16 +269,6 @@ public:
virtual int32_t RegisterSendStatisticsCallback(
VCMSendStatisticsCallback* sendStats) = 0;
// Register a video quality settings callback which will be called when
// frame rate/dimensions need to be updated for video quality optimization
//
// Input:
// - videoQMSettings : The callback object to register.
//
// Return value : VCM_OK, on success.
// < 0, on error
virtual int32_t RegisterVideoQMCallback(VCMQMSettingsCallback* videoQMSettings) = 0;
// Register a video protection callback which will be called to deliver
// the requested FEC rate and NACK status (on/off).
//

View File

@ -245,7 +245,7 @@ uint32_t MediaOptimization::SetTargetRates(
// Update protection settings, when applicable.
float sent_video_rate_kbps = 0.0f;
if (selected_method) {
if (loss_prot_logic_->SelectedType() != kNone) {
// Update protection method with content metrics.
selected_method->UpdateContentMetrics(content_->ShortTermAvgData());

View File

@ -74,11 +74,13 @@ class VideoCodingModuleImpl : public VideoCodingModule {
VideoCodingModuleImpl(Clock* clock,
EventFactory* event_factory,
bool owns_event_factory,
VideoEncoderRateObserver* encoder_rate_observer)
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback)
: VideoCodingModule(),
sender_(new vcm::VideoSender(clock,
&post_encode_callback_,
encoder_rate_observer)),
encoder_rate_observer,
qm_settings_callback)),
receiver_(new vcm::VideoReceiver(clock, event_factory)),
own_event_factory_(owns_event_factory ? event_factory : NULL) {}
@ -161,11 +163,6 @@ class VideoCodingModuleImpl : public VideoCodingModule {
return sender_->RegisterSendStatisticsCallback(sendStats);
}
int32_t RegisterVideoQMCallback(
VCMQMSettingsCallback* videoQMSettings) override {
return sender_->RegisterVideoQMCallback(videoQMSettings);
}
int32_t RegisterProtectionCallback(
VCMProtectionCallback* protection) override {
return sender_->RegisterProtectionCallback(protection);
@ -359,10 +356,11 @@ int32_t VideoCodingModule::Codec(VideoCodecType codecType, VideoCodec* codec) {
}
VideoCodingModule* VideoCodingModule::Create(
VideoEncoderRateObserver* encoder_rate_observer) {
return new VideoCodingModuleImpl(Clock::GetRealTimeClock(),
new EventFactoryImpl, true,
encoder_rate_observer);
Clock* clock,
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback) {
return new VideoCodingModuleImpl(clock, new EventFactoryImpl, true,
encoder_rate_observer, qm_settings_callback);
}
VideoCodingModule* VideoCodingModule::Create(
@ -370,7 +368,8 @@ VideoCodingModule* VideoCodingModule::Create(
EventFactory* event_factory) {
assert(clock);
assert(event_factory);
return new VideoCodingModuleImpl(clock, event_factory, false, nullptr);
return new VideoCodingModuleImpl(clock, event_factory, false, nullptr,
nullptr);
}
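With this change the sender-side VCM receives its clock, encoder rate observer and QM settings callback at creation instead of via RegisterVideoQMCallback. A minimal sketch of a call site, modeled on the ViEEncoder and ViEChannel changes below; rate_observer and qm_callback are placeholders and may be nullptr when unused:

// Sketch of a sender-side call site after this change.
VideoEncoderRateObserver* rate_observer = nullptr;  // ViEEncoder passes itself.
VCMQMSettingsCallback* qm_callback = nullptr;       // ViEEncoder passes its QM callback.
VideoCodingModule* vcm = VideoCodingModule::Create(
    Clock::GetRealTimeClock(), rate_observer, qm_callback);
// ... use vcm ...
VideoCodingModule::Destroy(vcm);
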
void VideoCodingModule::Destroy(VideoCodingModule* module) {

View File

@ -58,7 +58,8 @@ class VideoSender {
VideoSender(Clock* clock,
EncodedImageCallback* post_encode_callback,
VideoEncoderRateObserver* encoder_rate_observer);
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback);
~VideoSender();
@ -99,7 +100,6 @@ class VideoSender {
int32_t RegisterTransportCallback(VCMPacketizationCallback* transport);
int32_t RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
int32_t RegisterVideoQMCallback(VCMQMSettingsCallback* videoQMSettings);
int32_t RegisterProtectionCallback(VCMProtectionCallback* protection);
void SetVideoProtection(bool enable, VCMVideoProtection videoProtection);
@ -139,7 +139,7 @@ class VideoSender {
VideoCodec current_codec_;
rtc::ThreadChecker main_thread_;
VCMQMSettingsCallback* qm_settings_callback_;
VCMQMSettingsCallback* const qm_settings_callback_;
VCMProtectionCallback* protection_callback_;
};

View File

@ -26,7 +26,7 @@ namespace vcm {
class DebugRecorder {
public:
DebugRecorder()
: cs_(CriticalSectionWrapper::CreateCriticalSection()), file_(NULL) {}
: cs_(CriticalSectionWrapper::CreateCriticalSection()), file_(nullptr) {}
~DebugRecorder() { Stop(); }
@ -44,7 +44,7 @@ class DebugRecorder {
CriticalSectionScoped cs(cs_.get());
if (file_) {
fclose(file_);
file_ = NULL;
file_ = nullptr;
}
}
@ -61,7 +61,8 @@ class DebugRecorder {
VideoSender::VideoSender(Clock* clock,
EncodedImageCallback* post_encode_callback,
VideoEncoderRateObserver* encoder_rate_observer)
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback)
: clock_(clock),
recorder_(new DebugRecorder()),
process_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
@ -70,16 +71,17 @@ VideoSender::VideoSender(Clock* clock,
_encodedFrameCallback(post_encode_callback),
_nextFrameTypes(1, kVideoFrameDelta),
_mediaOpt(clock_),
_sendStatsCallback(NULL),
_sendStatsCallback(nullptr),
_codecDataBase(encoder_rate_observer),
frame_dropper_enabled_(true),
_sendStatsTimer(1000, clock_),
current_codec_(),
qm_settings_callback_(NULL),
protection_callback_(NULL) {
qm_settings_callback_(qm_settings_callback),
protection_callback_(nullptr) {
// Allow VideoSender to be created on one thread but used on another, post
// construction. This is currently how this class is being used by at least
// one external project (diffractor).
_mediaOpt.EnableQM(qm_settings_callback_ != nullptr);
main_thread_.DetachFromThread();
}
@ -93,7 +95,7 @@ int32_t VideoSender::Process() {
if (_sendStatsTimer.TimeUntilProcess() == 0) {
_sendStatsTimer.Processed();
CriticalSectionScoped cs(process_crit_sect_.get());
if (_sendStatsCallback != NULL) {
if (_sendStatsCallback != nullptr) {
uint32_t bitRate = _mediaOpt.SentBitRate();
uint32_t frameRate = _mediaOpt.SentFrameRate();
_sendStatsCallback->SendStatistics(bitRate, frameRate);
@ -108,8 +110,8 @@ int32_t VideoSender::InitializeSender() {
DCHECK(main_thread_.CalledOnValidThread());
CriticalSectionScoped cs(_sendCritSect);
_codecDataBase.ResetSender();
_encoder = NULL;
_encodedFrameCallback.SetTransportCallback(NULL);
_encoder = nullptr;
_encodedFrameCallback.SetTransportCallback(nullptr);
_mediaOpt.Reset(); // Resetting frame dropper
return VCM_OK;
}
@ -124,7 +126,7 @@ int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t maxPayloadSize) {
DCHECK(main_thread_.CalledOnValidThread());
CriticalSectionScoped cs(_sendCritSect);
if (sendCodec == NULL) {
if (sendCodec == nullptr) {
return VCM_PARAMETER_ERROR;
}
@ -177,7 +179,7 @@ const VideoCodec& VideoSender::GetSendCodec() const {
int32_t VideoSender::SendCodecBlocking(VideoCodec* currentSendCodec) const {
CriticalSectionScoped cs(_sendCritSect);
if (currentSendCodec == NULL) {
if (currentSendCodec == nullptr) {
return VCM_PARAMETER_ERROR;
}
return _codecDataBase.SendCodec(currentSendCodec) ? 0 : -1;
@ -197,13 +199,13 @@ int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
CriticalSectionScoped cs(_sendCritSect);
if (externalEncoder == NULL) {
if (externalEncoder == nullptr) {
bool wasSendCodec = false;
const bool ret =
_codecDataBase.DeregisterExternalEncoder(payloadType, &wasSendCodec);
if (wasSendCodec) {
// Make sure the VCM doesn't use the de-registered codec
_encoder = NULL;
_encoder = nullptr;
}
return ret ? 0 : -1;
}
@ -216,7 +218,7 @@ int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
int32_t VideoSender::CodecConfigParameters(uint8_t* buffer,
int32_t size) const {
CriticalSectionScoped cs(_sendCritSect);
if (_encoder != NULL) {
if (_encoder != nullptr) {
return _encoder->CodecConfigParameters(buffer, size);
}
return VCM_UNINITIALIZED;
@ -266,7 +268,6 @@ int32_t VideoSender::SetChannelParameters(uint32_t target_bitrate,
// here? This effectively means that the network thread will be blocked for
// as much as frame encoding period.
CriticalSectionScoped sendCs(_sendCritSect);
uint32_t target_rate = _mediaOpt.SetTargetRates(target_bitrate,
lossRate,
rtt,
@ -274,10 +275,11 @@ int32_t VideoSender::SetChannelParameters(uint32_t target_bitrate,
qm_settings_callback_);
uint32_t input_frame_rate = _mediaOpt.InputFrameRate();
CriticalSectionScoped sendCs(_sendCritSect);
int32_t ret = VCM_UNINITIALIZED;
static_assert(VCM_UNINITIALIZED < 0, "VCM_UNINITIALIZED must be negative.");
if (_encoder != NULL) {
if (_encoder != nullptr) {
ret = _encoder->SetChannelParameters(lossRate, rtt);
if (ret >= 0) {
ret = _encoder->SetRates(target_rate, input_frame_rate);
@ -304,27 +306,13 @@ int32_t VideoSender::RegisterSendStatisticsCallback(
return VCM_OK;
}
// Register a video quality settings callback which will be called when frame
// rate/dimensions need to be updated for video quality optimization
int32_t VideoSender::RegisterVideoQMCallback(
VCMQMSettingsCallback* qm_settings_callback) {
CriticalSectionScoped cs(_sendCritSect);
DCHECK(qm_settings_callback_ == qm_settings_callback ||
!qm_settings_callback_ ||
!qm_settings_callback) << "Overwriting the previous callback?";
qm_settings_callback_ = qm_settings_callback;
_mediaOpt.EnableQM(qm_settings_callback_ != NULL);
return VCM_OK;
}
// Register a video protection callback which will be called to deliver the
// requested FEC rate and NACK status (on/off).
// Note: this callback is assumed to only be registered once and before it is
// used in this class.
int32_t VideoSender::RegisterProtectionCallback(
VCMProtectionCallback* protection_callback) {
CriticalSectionScoped cs(_sendCritSect);
DCHECK(protection_callback_ == protection_callback ||
!protection_callback_ ||
!protection_callback) << "Overwriting the previous callback?";
DCHECK(protection_callback == nullptr || protection_callback_ == nullptr);
protection_callback_ = protection_callback;
return VCM_OK;
}
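The new DCHECK enforces the comment above: the protection callback is installed once, before use, and may only be replaced after being cleared. A sketch of the allowed call sequence, where sender is a vcm::VideoSender and the callbacks are hypothetical VCMProtectionCallback implementations (e.g. a ViEChannelProtectionCallback as in vie_channel.cc):

sender.RegisterProtectionCallback(&callback_a);  // OK: no callback was set.
sender.RegisterProtectionCallback(nullptr);      // OK: clearing is always allowed.
sender.RegisterProtectionCallback(&callback_b);  // OK: the slot was empty again.
// Hits the DCHECK: overwriting a live callback with another non-null one.
// sender.RegisterProtectionCallback(&callback_c);  // while callback_b is set.
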
@ -359,7 +347,7 @@ int32_t VideoSender::AddVideoFrame(const I420VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics,
const CodecSpecificInfo* codecSpecificInfo) {
CriticalSectionScoped cs(_sendCritSect);
if (_encoder == NULL) {
if (_encoder == nullptr) {
return VCM_UNINITIALIZED;
}
// TODO(holmer): Add support for dropping frames per stream. Currently we
@ -398,7 +386,7 @@ int32_t VideoSender::IntraFrameRequest(int stream_index) {
return -1;
}
_nextFrameTypes[stream_index] = kVideoFrameKey;
if (_encoder != NULL && _encoder->InternalSource()) {
if (_encoder != nullptr && _encoder->InternalSource()) {
// Try to request the frame if we have an external encoder with
// internal source since AddVideoFrame never will be called.
if (_encoder->RequestFrame(_nextFrameTypes) == WEBRTC_VIDEO_CODEC_OK) {

View File

@ -177,7 +177,8 @@ class TestVideoSender : public ::testing::Test {
TestVideoSender() : clock_(1000), packetization_callback_(&clock_) {}
void SetUp() override {
sender_.reset(new VideoSender(&clock_, &post_encode_callback_, nullptr));
sender_.reset(
new VideoSender(&clock_, &post_encode_callback_, nullptr, nullptr));
EXPECT_EQ(0, sender_->InitializeSender());
EXPECT_EQ(0, sender_->RegisterTransportCallback(&packetization_callback_));
}

View File

@ -104,7 +104,9 @@ ViEChannel::ViEChannel(int32_t channel_id,
rtp_rtcp_cs_(CriticalSectionWrapper::CreateCriticalSection()),
send_payload_router_(new PayloadRouter()),
vcm_protection_callback_(new ViEChannelProtectionCallback(this)),
vcm_(VideoCodingModule::Create(nullptr)),
vcm_(VideoCodingModule::Create(Clock::GetRealTimeClock(),
nullptr,
nullptr)),
vie_receiver_(channel_id, vcm_, remote_bitrate_estimator, this),
vie_sender_(channel_id),
vie_sync_(vcm_, this),

View File

@ -111,10 +111,12 @@ ViEEncoder::ViEEncoder(int32_t channel_id,
: channel_id_(channel_id),
number_of_cores_(number_of_cores),
disable_default_encoder_(disable_default_encoder),
vcm_(*webrtc::VideoCodingModule::Create(this)),
vpm_(*webrtc::VideoProcessingModule::Create(ViEModuleId(-1, channel_id))),
vpm_(VideoProcessingModule::Create(ViEModuleId(-1, channel_id))),
qm_callback_(new QMVideoSettingsCallback(vpm_.get())),
vcm_(VideoCodingModule::Create(Clock::GetRealTimeClock(),
this,
qm_callback_.get())),
send_payload_router_(NULL),
vcm_protection_callback_(NULL),
callback_cs_(CriticalSectionWrapper::CreateCriticalSection()),
data_cs_(CriticalSectionWrapper::CreateCriticalSection()),
pacer_(pacer),
@ -137,7 +139,6 @@ ViEEncoder::ViEEncoder(int32_t channel_id,
picture_id_sli_(0),
has_received_rpsi_(false),
picture_id_rpsi_(0),
qm_callback_(NULL),
video_suspended_(false),
pre_encode_callback_(NULL),
start_ms_(Clock::GetRealTimeClock()->TimeInMilliseconds()),
@ -146,18 +147,13 @@ ViEEncoder::ViEEncoder(int32_t channel_id,
}
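vcm_ and vpm_ are now owned scoped_ptrs, and qm_callback_ is constructed before vcm_ so it can be handed to VideoCodingModule::Create in the initializer list; since C++ initializes members in declaration order, the new ordering in vie_encoder.h (vpm_, qm_callback_, vcm_) is what makes this safe. A reduced sketch of that dependency, using the names from the diff:

// Reduced sketch: members initialize in declaration order, so vpm_ and
// qm_callback_ must precede vcm_ for the initializers below to be valid.
class ViEEncoder {
  // ...
  const rtc::scoped_ptr<VideoProcessingModule> vpm_;
  const rtc::scoped_ptr<QMVideoSettingsCallback> qm_callback_;
  const rtc::scoped_ptr<VideoCodingModule> vcm_;
};

// In the constructor's initializer list:
//   vpm_(VideoProcessingModule::Create(ViEModuleId(-1, channel_id))),
//   qm_callback_(new QMVideoSettingsCallback(vpm_.get())),
//   vcm_(VideoCodingModule::Create(Clock::GetRealTimeClock(), this,
//                                  qm_callback_.get())),
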
bool ViEEncoder::Init() {
if (vcm_.InitializeSender() != 0) {
if (vcm_->InitializeSender() != 0) {
return false;
}
vpm_.EnableTemporalDecimation(true);
vpm_->EnableTemporalDecimation(true);
// Enable/disable content analysis: off by default for now.
vpm_.EnableContentAnalysis(false);
if (qm_callback_) {
delete qm_callback_;
}
qm_callback_ = new QMVideoSettingsCallback(&vpm_);
vpm_->EnableContentAnalysis(false);
if (!disable_default_encoder_) {
#ifdef VIDEOCODEC_VP8
@ -166,25 +162,23 @@ bool ViEEncoder::Init() {
VideoCodecType codec_type = webrtc::kVideoCodecI420;
#endif
VideoCodec video_codec;
if (vcm_.Codec(codec_type, &video_codec) != VCM_OK) {
if (vcm_->Codec(codec_type, &video_codec) != VCM_OK) {
return false;
}
{
CriticalSectionScoped cs(data_cs_.get());
send_padding_ = video_codec.numberOfSimulcastStreams > 1;
}
if (vcm_.RegisterSendCodec(&video_codec, number_of_cores_,
PayloadRouter::DefaultMaxPayloadLength()) != 0) {
if (vcm_->RegisterSendCodec(&video_codec, number_of_cores_,
PayloadRouter::DefaultMaxPayloadLength()) !=
0) {
return false;
}
}
if (vcm_.RegisterTransportCallback(this) != 0) {
if (vcm_->RegisterTransportCallback(this) != 0) {
return false;
}
if (vcm_.RegisterSendStatisticsCallback(this) != 0) {
return false;
}
if (vcm_.RegisterVideoQMCallback(qm_callback_) != 0) {
if (vcm_->RegisterSendStatisticsCallback(this) != 0) {
return false;
}
return true;
@ -194,28 +188,21 @@ void ViEEncoder::StartThreadsAndSetSharedMembers(
scoped_refptr<PayloadRouter> send_payload_router,
VCMProtectionCallback* vcm_protection_callback) {
DCHECK(send_payload_router_ == NULL);
DCHECK(vcm_protection_callback_ == NULL);
send_payload_router_ = send_payload_router;
vcm_protection_callback_ = vcm_protection_callback;
module_process_thread_.RegisterModule(&vcm_);
vcm_->RegisterProtectionCallback(vcm_protection_callback);
module_process_thread_.RegisterModule(vcm_.get());
}
void ViEEncoder::StopThreadsAndRemoveSharedMembers() {
vcm_.RegisterProtectionCallback(NULL);
vcm_protection_callback_ = NULL;
module_process_thread_.DeRegisterModule(&vcm_);
module_process_thread_.DeRegisterModule(&vpm_);
if (bitrate_allocator_)
bitrate_allocator_->RemoveBitrateObserver(bitrate_observer_.get());
module_process_thread_.DeRegisterModule(vcm_.get());
module_process_thread_.DeRegisterModule(vpm_.get());
}
ViEEncoder::~ViEEncoder() {
UpdateHistograms();
if (bitrate_allocator_)
bitrate_allocator_->RemoveBitrateObserver(bitrate_observer_.get());
VideoCodingModule::Destroy(&vcm_);
VideoProcessingModule::Destroy(&vpm_);
delete qm_callback_;
}
void ViEEncoder::UpdateHistograms() {
@ -225,7 +212,7 @@ void ViEEncoder::UpdateHistograms() {
return;
}
webrtc::VCMFrameCount frames;
if (vcm_.SentFrameCount(frames) != VCM_OK) {
if (vcm_->SentFrameCount(frames) != VCM_OK) {
return;
}
uint32_t total_frames = frames.numKeyFrames + frames.numDeltaFrames;
@ -263,11 +250,11 @@ void ViEEncoder::Restart() {
}
uint8_t ViEEncoder::NumberOfCodecs() {
return vcm_.NumberOfCodecs();
return vcm_->NumberOfCodecs();
}
int32_t ViEEncoder::GetCodec(uint8_t list_index, VideoCodec* video_codec) {
if (vcm_.Codec(list_index, video_codec) != 0) {
if (vcm_->Codec(list_index, video_codec) != 0) {
return -1;
}
return 0;
@ -279,7 +266,7 @@ int32_t ViEEncoder::RegisterExternalEncoder(webrtc::VideoEncoder* encoder,
if (encoder == NULL)
return -1;
if (vcm_.RegisterExternalEncoder(encoder, pl_type, internal_source) !=
if (vcm_->RegisterExternalEncoder(encoder, pl_type, internal_source) !=
VCM_OK) {
return -1;
}
@ -289,15 +276,15 @@ int32_t ViEEncoder::RegisterExternalEncoder(webrtc::VideoEncoder* encoder,
int32_t ViEEncoder::DeRegisterExternalEncoder(uint8_t pl_type) {
DCHECK(send_payload_router_ != NULL);
webrtc::VideoCodec current_send_codec;
if (vcm_.SendCodec(&current_send_codec) == VCM_OK) {
if (vcm_->SendCodec(&current_send_codec) == VCM_OK) {
uint32_t current_bitrate_bps = 0;
if (vcm_.Bitrate(&current_bitrate_bps) != 0) {
if (vcm_->Bitrate(&current_bitrate_bps) != 0) {
LOG(LS_WARNING) << "Failed to get the current encoder target bitrate.";
}
current_send_codec.startBitrate = (current_bitrate_bps + 500) / 1000;
}
if (vcm_.RegisterExternalEncoder(NULL, pl_type) != VCM_OK) {
if (vcm_->RegisterExternalEncoder(NULL, pl_type) != VCM_OK) {
return -1;
}
@ -317,8 +304,8 @@ int32_t ViEEncoder::DeRegisterExternalEncoder(uint8_t pl_type) {
// for realz. https://code.google.com/p/chromium/issues/detail?id=348222
current_send_codec.extra_options = NULL;
size_t max_data_payload_length = send_payload_router_->MaxPayloadLength();
if (vcm_.RegisterSendCodec(&current_send_codec, number_of_cores_,
max_data_payload_length) != VCM_OK) {
if (vcm_->RegisterSendCodec(&current_send_codec, number_of_cores_,
max_data_payload_length) != VCM_OK) {
LOG(LS_INFO) << "De-registered the currently used external encoder ("
<< static_cast<int>(pl_type) << ") and therefore tried to "
<< "register the corresponding internal encoder, but none "
@ -331,8 +318,8 @@ int32_t ViEEncoder::DeRegisterExternalEncoder(uint8_t pl_type) {
int32_t ViEEncoder::SetEncoder(const webrtc::VideoCodec& video_codec) {
DCHECK(send_payload_router_ != NULL);
// Setting target width and height for VPM.
if (vpm_.SetTargetResolution(video_codec.width, video_codec.height,
video_codec.maxFramerate) != VPM_OK) {
if (vpm_->SetTargetResolution(video_codec.width, video_codec.height,
video_codec.maxFramerate) != VPM_OK) {
return -1;
}
@ -372,15 +359,15 @@ int32_t ViEEncoder::SetEncoder(const webrtc::VideoCodec& video_codec) {
modified_video_codec.startBitrate = allocated_bitrate_bps / 1000;
size_t max_data_payload_length = send_payload_router_->MaxPayloadLength();
if (vcm_.RegisterSendCodec(&modified_video_codec, number_of_cores_,
max_data_payload_length) != VCM_OK) {
if (vcm_->RegisterSendCodec(&modified_video_codec, number_of_cores_,
max_data_payload_length) != VCM_OK) {
return -1;
}
return 0;
}
int32_t ViEEncoder::GetEncoder(VideoCodec* video_codec) {
*video_codec = vcm_.GetSendCodec();
*video_codec = vcm_->GetSendCodec();
return 0;
}
@ -388,7 +375,7 @@ int32_t ViEEncoder::GetCodecConfigParameters(
unsigned char config_parameters[kConfigParameterSize],
unsigned char& config_parameters_size) {
int32_t num_parameters =
vcm_.CodecConfigParameters(config_parameters, kConfigParameterSize);
vcm_->CodecConfigParameters(config_parameters, kConfigParameterSize);
if (num_parameters <= 0) {
config_parameters_size = 0;
return -1;
@ -405,7 +392,7 @@ int32_t ViEEncoder::ScaleInputImage(bool enable) {
LOG_F(LS_ERROR) << "Not supported.";
return -1;
}
vpm_.SetInputFrameResampleMode(resampling_mode);
vpm_->SetInputFrameResampleMode(resampling_mode);
return 0;
}
@ -424,14 +411,14 @@ int ViEEncoder::GetPaddingNeededBps(int bitrate_bps) const {
}
VideoCodec send_codec;
if (vcm_.SendCodec(&send_codec) != 0)
if (vcm_->SendCodec(&send_codec) != 0)
return 0;
SimulcastStream* stream_configs = send_codec.simulcastStream;
// Allocate the bandwidth between the streams.
std::vector<uint32_t> stream_bitrates = AllocateStreamBitrates(
bitrate_bps, stream_configs, send_codec.numberOfSimulcastStreams);
bool video_is_suspended = vcm_.VideoSuspended();
bool video_is_suspended = vcm_->VideoSuspended();
// Find the max amount of padding we can allow ourselves to send at this
// point, based on which streams are currently active and what our current
@ -551,7 +538,7 @@ void ViEEncoder::DeliverFrame(int id,
}
// Pass frame via preprocessor.
const int ret = vpm_.PreprocessFrame(video_frame, &decimated_frame);
const int ret = vpm_->PreprocessFrame(video_frame, &decimated_frame);
if (ret == 1) {
// Drop this frame.
return;
@ -587,7 +574,7 @@ void ViEEncoder::DeliverFrame(int id,
}
#ifdef VIDEOCODEC_VP8
if (vcm_.SendCodec() == webrtc::kVideoCodecVP8) {
if (vcm_->SendCodec() == webrtc::kVideoCodecVP8) {
webrtc::CodecSpecificInfo codec_specific_info;
codec_specific_info.codecType = webrtc::kVideoCodecVP8;
{
@ -604,12 +591,12 @@ void ViEEncoder::DeliverFrame(int id,
has_received_rpsi_ = false;
}
vcm_.AddVideoFrame(*output_frame, vpm_.ContentMetrics(),
&codec_specific_info);
vcm_->AddVideoFrame(*output_frame, vpm_->ContentMetrics(),
&codec_specific_info);
return;
}
#endif
vcm_.AddVideoFrame(*output_frame);
vcm_->AddVideoFrame(*output_frame);
}
void ViEEncoder::DelayChanged(int id, int frame_delay) {
@ -620,7 +607,7 @@ int ViEEncoder::GetPreferedFrameSettings(int* width,
int* frame_rate) {
webrtc::VideoCodec video_codec;
memset(&video_codec, 0, sizeof(video_codec));
if (vcm_.SendCodec(&video_codec) != VCM_OK) {
if (vcm_->SendCodec(&video_codec) != VCM_OK) {
return -1;
}
@ -631,13 +618,13 @@ int ViEEncoder::GetPreferedFrameSettings(int* width,
}
int ViEEncoder::SendKeyFrame() {
return vcm_.IntraFrameRequest(0);
return vcm_->IntraFrameRequest(0);
}
int32_t ViEEncoder::SendCodecStatistics(
uint32_t* num_key_frames, uint32_t* num_delta_frames) {
webrtc::VCMFrameCount sent_frames;
if (vcm_.SentFrameCount(sent_frames) != VCM_OK) {
if (vcm_->SentFrameCount(sent_frames) != VCM_OK) {
return -1;
}
*num_key_frames = sent_frames.numKeyFrames;
@ -651,14 +638,13 @@ uint32_t ViEEncoder::LastObservedBitrateBps() const {
}
int ViEEncoder::CodecTargetBitrate(uint32_t* bitrate) const {
if (vcm_.Bitrate(bitrate) != 0)
if (vcm_->Bitrate(bitrate) != 0)
return -1;
return 0;
}
int32_t ViEEncoder::UpdateProtectionMethod(bool nack, bool fec) {
DCHECK(send_payload_router_ != NULL);
DCHECK(vcm_protection_callback_ != NULL);
if (fec_enabled_ == fec && nack_enabled_ == nack) {
// No change needed, we're already in correct state.
@ -669,35 +655,30 @@ int32_t ViEEncoder::UpdateProtectionMethod(bool nack, bool fec) {
// Set Video Protection for VCM.
if (fec_enabled_ && nack_enabled_) {
vcm_.SetVideoProtection(webrtc::kProtectionNackFEC, true);
vcm_->SetVideoProtection(webrtc::kProtectionNackFEC, true);
} else {
vcm_.SetVideoProtection(webrtc::kProtectionFEC, fec_enabled_);
vcm_.SetVideoProtection(webrtc::kProtectionNackSender, nack_enabled_);
vcm_.SetVideoProtection(webrtc::kProtectionNackFEC, false);
vcm_->SetVideoProtection(webrtc::kProtectionFEC, fec_enabled_);
vcm_->SetVideoProtection(webrtc::kProtectionNackSender, nack_enabled_);
vcm_->SetVideoProtection(webrtc::kProtectionNackFEC, false);
}
if (fec_enabled_ || nack_enabled_) {
vcm_.RegisterProtectionCallback(vcm_protection_callback_);
// The send codec must be registered to set correct MTU.
webrtc::VideoCodec codec;
if (vcm_.SendCodec(&codec) == 0) {
if (vcm_->SendCodec(&codec) == 0) {
uint32_t current_bitrate_bps = 0;
if (vcm_.Bitrate(&current_bitrate_bps) != 0) {
if (vcm_->Bitrate(&current_bitrate_bps) != 0) {
LOG_F(LS_WARNING) <<
"Failed to get the current encoder target bitrate.";
}
// Convert to start bitrate in kbps.
codec.startBitrate = (current_bitrate_bps + 500) / 1000;
size_t max_payload_length = send_payload_router_->MaxPayloadLength();
if (vcm_.RegisterSendCodec(&codec, number_of_cores_,
max_payload_length) != 0) {
if (vcm_->RegisterSendCodec(&codec, number_of_cores_,
max_payload_length) != 0) {
return -1;
}
}
return 0;
} else {
// FEC and NACK are disabled.
vcm_.RegisterProtectionCallback(NULL);
}
return 0;
}
@ -709,12 +690,12 @@ void ViEEncoder::SetSenderBufferingMode(int target_delay_ms) {
}
if (target_delay_ms > 0) {
// Disable external frame-droppers.
vcm_.EnableFrameDropper(false);
vpm_.EnableTemporalDecimation(false);
vcm_->EnableFrameDropper(false);
vpm_->EnableTemporalDecimation(false);
} else {
// Real-time mode - enable frame droppers.
vpm_.EnableTemporalDecimation(true);
vcm_.EnableFrameDropper(true);
vpm_->EnableTemporalDecimation(true);
vcm_->EnableFrameDropper(true);
}
}
@ -804,7 +785,7 @@ void ViEEncoder::OnReceivedIntraFrameRequest(uint32_t ssrc) {
idx = stream_it->second;
}
// Release the critsect before triggering key frame.
vcm_.IntraFrameRequest(idx);
vcm_->IntraFrameRequest(idx);
}
void ViEEncoder::OnLocalSsrcChanged(uint32_t old_ssrc, uint32_t new_ssrc) {
@ -829,7 +810,7 @@ void ViEEncoder::OnLocalSsrcChanged(uint32_t old_ssrc, uint32_t new_ssrc) {
bool ViEEncoder::SetSsrcs(const std::list<unsigned int>& ssrcs) {
VideoCodec codec;
if (vcm_.SendCodec(&codec) != 0)
if (vcm_->SendCodec(&codec) != 0)
return false;
if (codec.numberOfSimulcastStreams > 0 &&
@ -863,11 +844,11 @@ void ViEEncoder::OnNetworkChanged(uint32_t bitrate_bps,
<< " packet loss " << fraction_lost
<< " rtt " << round_trip_time_ms;
DCHECK(send_payload_router_ != NULL);
vcm_.SetChannelParameters(bitrate_bps, fraction_lost, round_trip_time_ms);
bool video_is_suspended = vcm_.VideoSuspended();
vcm_->SetChannelParameters(bitrate_bps, fraction_lost, round_trip_time_ms);
bool video_is_suspended = vcm_->VideoSuspended();
VideoCodec send_codec;
if (vcm_.SendCodec(&send_codec) != 0) {
if (vcm_->SendCodec(&send_codec) != 0) {
return;
}
SimulcastStream* stream_configs = send_codec.simulcastStream;
@ -903,15 +884,15 @@ int32_t ViEEncoder::RegisterEffectFilter(ViEEffectFilter* effect_filter) {
}
int ViEEncoder::StartDebugRecording(const char* fileNameUTF8) {
return vcm_.StartDebugRecording(fileNameUTF8);
return vcm_->StartDebugRecording(fileNameUTF8);
}
int ViEEncoder::StopDebugRecording() {
return vcm_.StopDebugRecording();
return vcm_->StopDebugRecording();
}
void ViEEncoder::SuspendBelowMinBitrate() {
vcm_.SuspendBelowMinBitrate();
vcm_->SuspendBelowMinBitrate();
bitrate_allocator_->EnforceMinBitrate(false);
}
@ -928,11 +909,11 @@ void ViEEncoder::DeRegisterPreEncodeCallback() {
void ViEEncoder::RegisterPostEncodeImageCallback(
EncodedImageCallback* post_encode_callback) {
vcm_.RegisterPostEncodeImageCallback(post_encode_callback);
vcm_->RegisterPostEncodeImageCallback(post_encode_callback);
}
void ViEEncoder::DeRegisterPostEncodeImageCallback() {
vcm_.RegisterPostEncodeImageCallback(NULL);
vcm_->RegisterPostEncodeImageCallback(NULL);
}
void ViEEncoder::RegisterSendStatisticsProxy(

View File

@ -197,10 +197,10 @@ class ViEEncoder
const uint32_t number_of_cores_;
const bool disable_default_encoder_;
VideoCodingModule& vcm_;
VideoProcessingModule& vpm_;
const rtc::scoped_ptr<VideoProcessingModule> vpm_;
const rtc::scoped_ptr<QMVideoSettingsCallback> qm_callback_;
const rtc::scoped_ptr<VideoCodingModule> vcm_;
scoped_refptr<PayloadRouter> send_payload_router_;
VCMProtectionCallback* vcm_protection_callback_;
rtc::scoped_ptr<CriticalSectionWrapper> callback_cs_;
rtc::scoped_ptr<CriticalSectionWrapper> data_cs_;
@ -234,8 +234,6 @@ class ViEEncoder
uint64_t picture_id_rpsi_ GUARDED_BY(data_cs_);
std::map<unsigned int, int> ssrc_streams_ GUARDED_BY(data_cs_);
// Quality modes callback
QMVideoSettingsCallback* qm_callback_;
bool video_suspended_ GUARDED_BY(data_cs_);
I420FrameCallback* pre_encode_callback_ GUARDED_BY(callback_cs_);
const int64_t start_ms_;