Remove potential deadlock in RTPSenderAudio.
Removes the lock-order inversion formed by RTPSenderAudio->RTPSender calls by using much shorter lock scopes: the current RTPSenderAudio state is copied out under its lock before sending, so no RTPSender call is made while that lock is held. Thread-annotates the locked variables and removes one lock in RTPSenderAudio; as a bonus this fixes data races reported by voe_auto_test --automated under TSan (the DTMF data race). Also includes some cleanup of RTPSenderVideo, removing its send critsect completely since all methods that used it were always called from RTPSender under its send_critsect.

R=henrik.lundin@webrtc.org, stefan@webrtc.org, tommi@webrtc.org
BUG=3001, chromium:454654

Review URL: https://webrtc-codereview.appspot.com/41869004

Cr-Commit-Position: refs/heads/master@{#8348}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8348 4adac7df-926f-26a2-2b94-8c16560cd09d
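The pattern behind the fix: inside RTPSenderAudio, copy the state needed for the send path into locals while briefly holding the audio critsect, release it, and only then call back into RTPSender. A minimal sketch of that idea, using hypothetical class and member names and std::mutex instead of the CriticalSectionWrapper used in the actual code:

#include <cstdint>
#include <mutex>

class AudioSenderSketch {
 public:
  void SetRedPayloadType(int8_t pt) {
    std::lock_guard<std::mutex> lock(send_audio_mutex_);
    red_payload_type_ = pt;
  }

  int32_t SendAudio(/* payload, timestamps, ... */) {
    int8_t red_payload_type;
    uint16_t packet_size_samples;
    {
      // Short critical section: snapshot the state, then release the lock.
      std::lock_guard<std::mutex> lock(send_audio_mutex_);
      red_payload_type = red_payload_type_;
      packet_size_samples = packet_size_samples_;
    }
    // Call back into the RTP sender without holding send_audio_mutex_, so the
    // sender's own lock can never nest inside ours.
    return BuildAndSendPacket(red_payload_type, packet_size_samples);
  }

 private:
  int32_t BuildAndSendPacket(int8_t red_payload_type,
                             uint16_t packet_size_samples) {
    // Placeholder for the BuildRTPheader()/SendToNetwork() style calls.
    return 0;
  }

  std::mutex send_audio_mutex_;
  int8_t red_payload_type_ = -1;        // GUARDED_BY(send_audio_mutex_)
  uint16_t packet_size_samples_ = 160;  // GUARDED_BY(send_audio_mutex_)
};

Because no sender method is invoked while send_audio_mutex_ is held, the RTPSenderAudio -> RTPSender half of the lock-order inversion disappears.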
@@ -68,7 +68,6 @@ char kTSanDefaultSuppressions[] =
// https://code.google.com/p/webrtc/issues/detail?id=3509
"deadlock:webrtc::ProcessThreadImpl::RegisterModule\n"
"deadlock:webrtc::RTCPReceiver::SetSsrcs\n"
"deadlock:webrtc::RTPSenderAudio::RegisterAudioPayload\n"
"deadlock:webrtc::test::UdpSocketManagerPosixImpl::RemoveSocket\n"
"deadlock:webrtc::vcm::VideoReceiver::RegisterPacketRequestCallback\n"
"deadlock:webrtc::ViECaptureImpl::ConnectCaptureDevice\n"

@@ -570,16 +570,6 @@ class RtpRtcp : public Module {
*/
virtual int32_t SetAudioPacketSize(uint16_t packetSizeSamples) = 0;

/*
* SendTelephoneEventActive
*
* return true if we currently send a telephone event and 100 ms after an
* event is sent used to prevent the telephone event tone to be recorded
* by the microphone and send inband just after the tone has ended.
*/
virtual bool SendTelephoneEventActive(
int8_t& telephoneEvent) const = 0;

/*
* Send a TelephoneEvent tone using RFC 2833 (4733)
*

@@ -219,8 +219,6 @@ class MockRtpRtcp : public RtpRtcp {
int32_t(RtpAudioFeedback* messagesCallback));
MOCK_METHOD1(SetAudioPacketSize,
int32_t(const uint16_t packetSizeSamples));
MOCK_CONST_METHOD1(SendTelephoneEventActive,
bool(int8_t& telephoneEvent));
MOCK_METHOD3(SendTelephoneEventOutband,
int32_t(const uint8_t key, const uint16_t time_ms, const uint8_t level));
MOCK_METHOD1(SetSendREDPayloadType,

@@ -39,10 +39,9 @@ int32_t DTMFqueue::AddDTMF(uint8_t key, uint16_t len, uint8_t level) {

int8_t DTMFqueue::NextDTMF(uint8_t* dtmf_key, uint16_t* len, uint8_t* level) {
CriticalSectionScoped lock(dtmf_critsect_);

if (!PendingDTMF()) {
if (next_empty_index_ == 0)
return -1;
}

*dtmf_key = dtmf_key_[0];
*len = dtmf_length[0];
*level = dtmf_level_[0];
@@ -58,7 +57,13 @@ int8_t DTMFqueue::NextDTMF(uint8_t* dtmf_key, uint16_t* len, uint8_t* level) {
return 0;
}

bool DTMFqueue::PendingDTMF() { return (next_empty_index_ > 0); }
bool DTMFqueue::PendingDTMF() {
CriticalSectionScoped lock(dtmf_critsect_);
return next_empty_index_ > 0;
}

void DTMFqueue::ResetDTMF() { next_empty_index_ = 0; }
void DTMFqueue::ResetDTMF() {
CriticalSectionScoped lock(dtmf_critsect_);
next_empty_index_ = 0;
}
} // namespace webrtc

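The DTMFqueue change above takes the queue's critical section in PendingDTMF() and ResetDTMF() as well, so the index is never read or reset without the lock. A stand-alone sketch of the same guard, again with hypothetical names and std::mutex rather than the WebRTC wrappers:

#include <cstddef>
#include <mutex>

class DtmfQueueSketch {
 public:
  bool Pending() const {
    // Reading the index also needs the lock; an unsynchronized read here is
    // exactly the kind of race TSan reports.
    std::lock_guard<std::mutex> lock(mutex_);
    return next_empty_index_ > 0;
  }

  void Reset() {
    std::lock_guard<std::mutex> lock(mutex_);
    next_empty_index_ = 0;
  }

 private:
  mutable std::mutex mutex_;
  size_t next_empty_index_ = 0;
};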
@@ -904,11 +904,6 @@ int32_t ModuleRtpRtcpImpl::SendTelephoneEventOutband(
return rtp_sender_.SendTelephoneEvent(key, time_ms, level);
}

bool ModuleRtpRtcpImpl::SendTelephoneEventActive(
int8_t& telephone_event) const {
return rtp_sender_.SendTelephoneEventActive(&telephone_event);
}

// Set audio packet size, used to determine when it's time to send a DTMF
// packet in silence (CNG).
int32_t ModuleRtpRtcpImpl::SetAudioPacketSize(

@@ -266,8 +266,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
// packet in silence (CNG).
virtual int32_t SetAudioPacketSize(uint16_t packet_size_samples) OVERRIDE;

virtual bool SendTelephoneEventActive(int8_t& telephone_event) const OVERRIDE;

// Send a TelephoneEvent tone using RFC 2833 (4733).
virtual int32_t SendTelephoneEventOutband(uint8_t key,
uint16_t time_ms,

@@ -109,8 +109,9 @@ RTPSender::RTPSender(int32_t id,
total_bitrate_sent_(clock, bitrates_->total_bitrate_observer()),
id_(id),
audio_configured_(audio),
audio_(NULL),
video_(NULL),
audio_(audio ? new RTPSenderAudio(id, clock, this, audio_feedback)
: nullptr),
video_(audio ? nullptr : new RTPSenderVideo(clock, this)),
paced_sender_(paced_sender),
last_capture_time_ms_sent_(0),
send_critsect_(CriticalSectionWrapper::CreateCriticalSection()),
@@ -160,13 +161,6 @@ RTPSender::RTPSender(int32_t id,
// Random start, 16 bits. Can't be 0.
sequence_number_rtx_ = static_cast<uint16_t>(rand() + 1) & 0x7FFF;
sequence_number_ = static_cast<uint16_t>(rand() + 1) & 0x7FFF;

if (audio) {
audio_ = new RTPSenderAudio(id, clock_, this);
audio_->RegisterAudioCallback(audio_feedback);
} else {
video_ = new RTPSenderVideo(clock_, this);
}
}

RTPSender::~RTPSender() {
@@ -176,15 +170,12 @@ RTPSender::~RTPSender() {
ssrc_db_.ReturnSSRC(ssrc_);

SSRCDatabase::ReturnSSRCDatabase();
delete send_critsect_;
while (!payload_type_map_.empty()) {
std::map<int8_t, RtpUtility::Payload*>::iterator it =
payload_type_map_.begin();
delete it->second;
payload_type_map_.erase(it);
}
delete audio_;
delete video_;
}

void RTPSender::SetTargetBitrate(uint32_t bitrate) {
@ -241,7 +232,7 @@ int32_t RTPSender::SetTransmissionTimeOffset(int32_t transmission_time_offset) {
|
||||
transmission_time_offset < -(0x800000 - 1)) { // Word24.
|
||||
return -1;
|
||||
}
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
transmission_time_offset_ = transmission_time_offset;
|
||||
return 0;
|
||||
}
|
||||
@ -250,24 +241,24 @@ int32_t RTPSender::SetAbsoluteSendTime(uint32_t absolute_send_time) {
|
||||
if (absolute_send_time > 0xffffff) { // UWord24.
|
||||
return -1;
|
||||
}
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
absolute_send_time_ = absolute_send_time;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t RTPSender::RegisterRtpHeaderExtension(RTPExtensionType type,
|
||||
uint8_t id) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return rtp_header_extension_map_.Register(type, id);
|
||||
}
|
||||
|
||||
int32_t RTPSender::DeregisterRtpHeaderExtension(RTPExtensionType type) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return rtp_header_extension_map_.Deregister(type);
|
||||
}
|
||||
|
||||
size_t RTPSender::RtpHeaderExtensionTotalLength() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return rtp_header_extension_map_.GetTotalLengthInBytes();
|
||||
}
|
||||
|
||||
@ -278,7 +269,7 @@ int32_t RTPSender::RegisterPayload(
|
||||
uint8_t channels,
|
||||
uint32_t rate) {
|
||||
assert(payload_name);
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
std::map<int8_t, RtpUtility::Payload*>::iterator it =
|
||||
payload_type_map_.find(payload_number);
|
||||
@ -321,7 +312,7 @@ int32_t RTPSender::RegisterPayload(
|
||||
}
|
||||
|
||||
int32_t RTPSender::DeRegisterSendPayload(int8_t payload_type) {
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
|
||||
std::map<int8_t, RtpUtility::Payload*>::iterator it =
|
||||
payload_type_map_.find(payload_type);
|
||||
@ -336,12 +327,12 @@ int32_t RTPSender::DeRegisterSendPayload(int8_t payload_type) {
|
||||
}
|
||||
|
||||
void RTPSender::SetSendPayloadType(int8_t payload_type) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
payload_type_ = payload_type;
|
||||
}
|
||||
|
||||
int8_t RTPSender::SendPayloadType() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return payload_type_;
|
||||
}
|
||||
|
||||
@ -356,7 +347,7 @@ int32_t RTPSender::SetMaxPayloadLength(size_t max_payload_length,
|
||||
LOG(LS_ERROR) << "Invalid max payload length: " << max_payload_length;
|
||||
return -1;
|
||||
}
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
max_payload_length_ = max_payload_length;
|
||||
packet_over_head_ = packet_over_head;
|
||||
return 0;
|
||||
@ -365,7 +356,7 @@ int32_t RTPSender::SetMaxPayloadLength(size_t max_payload_length,
|
||||
size_t RTPSender::MaxDataPayloadLength() const {
|
||||
int rtx;
|
||||
{
|
||||
CriticalSectionScoped rtx_lock(send_critsect_);
|
||||
CriticalSectionScoped rtx_lock(send_critsect_.get());
|
||||
rtx = rtx_;
|
||||
}
|
||||
if (audio_configured_) {
|
||||
@ -384,33 +375,33 @@ size_t RTPSender::MaxPayloadLength() const {
|
||||
uint16_t RTPSender::PacketOverHead() const { return packet_over_head_; }
|
||||
|
||||
void RTPSender::SetRtxStatus(int mode) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
rtx_ = mode;
|
||||
}
|
||||
|
||||
int RTPSender::RtxStatus() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return rtx_;
|
||||
}
|
||||
|
||||
void RTPSender::SetRtxSsrc(uint32_t ssrc) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
ssrc_rtx_ = ssrc;
|
||||
}
|
||||
|
||||
uint32_t RTPSender::RtxSsrc() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return ssrc_rtx_;
|
||||
}
|
||||
|
||||
void RTPSender::SetRtxPayloadType(int payload_type) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
payload_type_rtx_ = payload_type;
|
||||
}
|
||||
|
||||
int32_t RTPSender::CheckPayloadType(int8_t payload_type,
|
||||
RtpVideoCodecTypes* video_type) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
if (payload_type < 0) {
|
||||
LOG(LS_ERROR) << "Invalid payload_type " << payload_type;
|
||||
@ -461,7 +452,7 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
|
||||
uint32_t ssrc;
|
||||
{
|
||||
// Drop this packet if we're not sending media packets.
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
ssrc = ssrc_;
|
||||
if (!sending_media_) {
|
||||
return 0;
|
||||
@ -514,7 +505,7 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
|
||||
|
||||
size_t RTPSender::TrySendRedundantPayloads(size_t bytes_to_send) {
|
||||
{
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
if ((rtx_ & kRtxRedundantPayloads) == 0)
|
||||
return 0;
|
||||
}
|
||||
@ -558,7 +549,7 @@ size_t RTPSender::TrySendPadData(size_t bytes) {
|
||||
int64_t capture_time_ms;
|
||||
uint32_t timestamp;
|
||||
{
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
timestamp = timestamp_;
|
||||
capture_time_ms = capture_time_ms_;
|
||||
if (last_timestamp_time_ms_ > 0) {
|
||||
@ -586,7 +577,7 @@ size_t RTPSender::SendPadData(uint32_t timestamp,
|
||||
int payload_type;
|
||||
bool over_rtx;
|
||||
{
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
// Only send padding packets following the last packet of a frame,
|
||||
// indicated by the marker bit.
|
||||
if (rtx_ == kRtxOff) {
|
||||
@ -681,7 +672,7 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id, int64_t min_resend_time) {
|
||||
}
|
||||
int rtx = kRtxOff;
|
||||
{
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
rtx = rtx_;
|
||||
}
|
||||
return PrepareAndSendPacket(data_buffer, length, capture_time_ms,
|
||||
@ -767,7 +758,7 @@ bool RTPSender::ProcessNACKBitRate(uint32_t now) {
|
||||
const uint32_t kAvgIntervalMs = 1000;
|
||||
uint32_t target_bitrate = GetTargetBitrate();
|
||||
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
if (target_bitrate == 0) {
|
||||
return true;
|
||||
@ -792,7 +783,7 @@ bool RTPSender::ProcessNACKBitRate(uint32_t now) {
|
||||
}
|
||||
|
||||
void RTPSender::UpdateNACKBitRate(uint32_t bytes, int64_t now) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
if (bytes == 0)
|
||||
return;
|
||||
nack_bitrate_.Update(bytes);
|
||||
@ -828,7 +819,7 @@ bool RTPSender::TimeToSendPacket(uint16_t sequence_number,
|
||||
}
|
||||
int rtx;
|
||||
{
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
rtx = rtx_;
|
||||
}
|
||||
return PrepareAndSendPacket(data_buffer,
|
||||
@ -869,7 +860,7 @@ bool RTPSender::PrepareAndSendPacket(uint8_t* buffer,
|
||||
UpdateAbsoluteSendTime(buffer_to_send_ptr, length, rtp_header, now_ms);
|
||||
bool ret = SendPacketToNetwork(buffer_to_send_ptr, length);
|
||||
if (ret) {
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
media_has_been_sent_ = true;
|
||||
}
|
||||
UpdateRtpStats(buffer_to_send_ptr, length, rtp_header, send_over_rtx,
|
||||
@ -927,7 +918,7 @@ bool RTPSender::IsFecPacket(const uint8_t* buffer,
|
||||
|
||||
size_t RTPSender::TimeToSendPadding(size_t bytes) {
|
||||
{
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
if (!sending_media_) return 0;
|
||||
}
|
||||
if (bytes == 0)
|
||||
@ -1003,7 +994,7 @@ int32_t RTPSender::SendToNetwork(
|
||||
return -1;
|
||||
|
||||
{
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
media_has_been_sent_ = true;
|
||||
}
|
||||
UpdateRtpStats(buffer, length, rtp_header, false, false);
|
||||
@ -1015,7 +1006,7 @@ void RTPSender::UpdateDelayStatistics(int64_t capture_time_ms, int64_t now_ms) {
|
||||
int avg_delay_ms = 0;
|
||||
int max_delay_ms = 0;
|
||||
{
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
ssrc = ssrc_;
|
||||
}
|
||||
{
|
||||
@ -1034,7 +1025,7 @@ void RTPSender::UpdateDelayStatistics(int64_t capture_time_ms, int64_t now_ms) {
|
||||
}
|
||||
|
||||
void RTPSender::ProcessBitrate() {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
total_bitrate_sent_.Process();
|
||||
nack_bitrate_.Process();
|
||||
if (audio_configured_) {
|
||||
@ -1044,7 +1035,7 @@ void RTPSender::ProcessBitrate() {
|
||||
}
|
||||
|
||||
size_t RTPSender::RTPHeaderLength() const {
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
size_t rtp_header_length = 12;
|
||||
rtp_header_length += sizeof(uint32_t) * csrcs_.size();
|
||||
rtp_header_length += RtpHeaderExtensionTotalLength();
|
||||
@ -1052,7 +1043,7 @@ size_t RTPSender::RTPHeaderLength() const {
|
||||
}
|
||||
|
||||
uint16_t RTPSender::IncrementSequenceNumber() {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return sequence_number_++;
|
||||
}
|
||||
|
||||
@ -1060,7 +1051,7 @@ void RTPSender::ResetDataCounters() {
|
||||
uint32_t ssrc;
|
||||
uint32_t ssrc_rtx;
|
||||
{
|
||||
CriticalSectionScoped ssrc_lock(send_critsect_);
|
||||
CriticalSectionScoped ssrc_lock(send_critsect_.get());
|
||||
ssrc = ssrc_;
|
||||
ssrc_rtx = ssrc_rtx_;
|
||||
}
|
||||
@ -1125,7 +1116,7 @@ int32_t RTPSender::BuildRTPheader(uint8_t* data_buffer,
|
||||
bool timestamp_provided,
|
||||
bool inc_sequence_number) {
|
||||
assert(payload_type >= 0);
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
if (timestamp_provided) {
|
||||
timestamp_ = start_timestamp_ + capture_timestamp;
|
||||
@ -1307,7 +1298,7 @@ void RTPSender::UpdateTransmissionTimeOffset(uint8_t* rtp_packet,
|
||||
size_t rtp_packet_length,
|
||||
const RTPHeader& rtp_header,
|
||||
int64_t time_diff_ms) const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
// Get id.
|
||||
uint8_t id = 0;
|
||||
if (rtp_header_extension_map_.GetId(kRtpExtensionTransmissionTimeOffset,
|
||||
@ -1355,7 +1346,7 @@ bool RTPSender::UpdateAudioLevel(uint8_t* rtp_packet,
|
||||
const RTPHeader& rtp_header,
|
||||
bool is_voiced,
|
||||
uint8_t dBov) const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
// Get id.
|
||||
uint8_t id = 0;
|
||||
@ -1397,7 +1388,7 @@ void RTPSender::UpdateAbsoluteSendTime(uint8_t* rtp_packet,
|
||||
size_t rtp_packet_length,
|
||||
const RTPHeader& rtp_header,
|
||||
int64_t now_ms) const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
// Get id.
|
||||
uint8_t id = 0;
|
||||
@ -1447,7 +1438,7 @@ void RTPSender::SetSendingStatus(bool enabled) {
|
||||
// Will be ignored if it's already configured via API.
|
||||
SetStartTimestamp(RTPtime, false);
|
||||
} else {
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
if (!ssrc_forced_) {
|
||||
// Generate a new SSRC.
|
||||
ssrc_db_.ReturnSSRC(ssrc_);
|
||||
@ -1464,22 +1455,22 @@ void RTPSender::SetSendingStatus(bool enabled) {
|
||||
}
|
||||
|
||||
void RTPSender::SetSendingMediaStatus(bool enabled) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
sending_media_ = enabled;
|
||||
}
|
||||
|
||||
bool RTPSender::SendingMedia() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return sending_media_;
|
||||
}
|
||||
|
||||
uint32_t RTPSender::Timestamp() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return timestamp_;
|
||||
}
|
||||
|
||||
void RTPSender::SetStartTimestamp(uint32_t timestamp, bool force) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
if (force) {
|
||||
start_timestamp_forced_ = true;
|
||||
start_timestamp_ = timestamp;
|
||||
@ -1491,13 +1482,13 @@ void RTPSender::SetStartTimestamp(uint32_t timestamp, bool force) {
|
||||
}
|
||||
|
||||
uint32_t RTPSender::StartTimestamp() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return start_timestamp_;
|
||||
}
|
||||
|
||||
uint32_t RTPSender::GenerateNewSSRC() {
|
||||
// If configured via API, return 0.
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
if (ssrc_forced_) {
|
||||
return 0;
|
||||
@ -1509,7 +1500,7 @@ uint32_t RTPSender::GenerateNewSSRC() {
|
||||
|
||||
void RTPSender::SetSSRC(uint32_t ssrc) {
|
||||
// This is configured via the API.
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
|
||||
if (ssrc_ == ssrc && ssrc_forced_) {
|
||||
return; // Since it's same ssrc, don't reset anything.
|
||||
@ -1526,24 +1517,24 @@ void RTPSender::SetSSRC(uint32_t ssrc) {
|
||||
}
|
||||
|
||||
uint32_t RTPSender::SSRC() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return ssrc_;
|
||||
}
|
||||
|
||||
void RTPSender::SetCsrcs(const std::vector<uint32_t>& csrcs) {
|
||||
assert(csrcs.size() <= kRtpCsrcSize);
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
csrcs_ = csrcs;
|
||||
}
|
||||
|
||||
void RTPSender::SetSequenceNumber(uint16_t seq) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
sequence_number_forced_ = true;
|
||||
sequence_number_ = seq;
|
||||
}
|
||||
|
||||
uint16_t RTPSender::SequenceNumber() const {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
return sequence_number_;
|
||||
}
|
||||
|
||||
@ -1557,13 +1548,6 @@ int32_t RTPSender::SendTelephoneEvent(uint8_t key,
|
||||
return audio_->SendTelephoneEvent(key, time_ms, level);
|
||||
}
|
||||
|
||||
bool RTPSender::SendTelephoneEventActive(int8_t *telephone_event) const {
|
||||
if (!audio_configured_) {
|
||||
return false;
|
||||
}
|
||||
return audio_->SendTelephoneEventActive(*telephone_event);
|
||||
}
|
||||
|
||||
int32_t RTPSender::SetAudioPacketSize(uint16_t packet_size_samples) {
|
||||
if (!audio_configured_) {
|
||||
return -1;
|
||||
@ -1647,7 +1631,7 @@ int32_t RTPSender::SetFecParameters(
|
||||
|
||||
void RTPSender::BuildRtxPacket(uint8_t* buffer, size_t* length,
|
||||
uint8_t* buffer_rtx) {
|
||||
CriticalSectionScoped cs(send_critsect_);
|
||||
CriticalSectionScoped cs(send_critsect_.get());
|
||||
uint8_t* data_buffer_rtx = buffer_rtx;
|
||||
// Add RTX header.
|
||||
RtpUtility::RtpHeaderParser rtp_parser(
|
||||
@ -1702,7 +1686,7 @@ uint32_t RTPSender::BitrateSent() const {
|
||||
|
||||
void RTPSender::SetRtpState(const RtpState& rtp_state) {
|
||||
SetStartTimestamp(rtp_state.start_timestamp, true);
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
sequence_number_ = rtp_state.sequence_number;
|
||||
sequence_number_forced_ = true;
|
||||
timestamp_ = rtp_state.timestamp;
|
||||
@ -1712,7 +1696,7 @@ void RTPSender::SetRtpState(const RtpState& rtp_state) {
|
||||
}
|
||||
|
||||
RtpState RTPSender::GetRtpState() const {
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
|
||||
RtpState state;
|
||||
state.sequence_number = sequence_number_;
|
||||
@ -1726,12 +1710,12 @@ RtpState RTPSender::GetRtpState() const {
|
||||
}
|
||||
|
||||
void RTPSender::SetRtxRtpState(const RtpState& rtp_state) {
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
sequence_number_rtx_ = rtp_state.sequence_number;
|
||||
}
|
||||
|
||||
RtpState RTPSender::GetRtxRtpState() const {
|
||||
CriticalSectionScoped lock(send_critsect_);
|
||||
CriticalSectionScoped lock(send_critsect_.get());
|
||||
|
||||
RtpState state;
|
||||
state.sequence_number = sequence_number_rtx_;
|
||||
|
@@ -221,8 +221,6 @@ class RTPSender : public RTPSenderInterface {
// Send a DTMF tone using RFC 2833 (4733).
int32_t SendTelephoneEvent(uint8_t key, uint16_t time_ms, uint8_t level);

bool SendTelephoneEventActive(int8_t *telephone_event) const;

// Set audio packet size, used to determine when it's time to send a DTMF
// packet in silence (CNG).
int32_t SetAudioPacketSize(uint16_t packet_size_samples);
@@ -334,13 +332,14 @@ class RTPSender : public RTPSenderInterface {
Bitrate total_bitrate_sent_;

int32_t id_;

const bool audio_configured_;
RTPSenderAudio *audio_;
RTPSenderVideo *video_;
scoped_ptr<RTPSenderAudio> audio_;
scoped_ptr<RTPSenderVideo> video_;

PacedSender *paced_sender_;
int64_t last_capture_time_ms_sent_;
CriticalSectionWrapper *send_critsect_;
scoped_ptr<CriticalSectionWrapper> send_critsect_;

Transport *transport_;
bool sending_media_ GUARDED_BY(send_critsect_);

@@ -16,15 +16,18 @@
#include "webrtc/system_wrappers/interface/trace_event.h"

namespace webrtc {
RTPSenderAudio::RTPSenderAudio(const int32_t id, Clock* clock,
RTPSender* rtpSender) :

static const int kDtmfFrequencyHz = 8000;

RTPSenderAudio::RTPSenderAudio(const int32_t id,
Clock* clock,
RTPSender* rtpSender,
RtpAudioFeedback* audio_feedback) :
_id(id),
_clock(clock),
_rtpSender(rtpSender),
_audioFeedbackCritsect(CriticalSectionWrapper::CreateCriticalSection()),
_audioFeedback(NULL),
_audioFeedback(audio_feedback),
_sendAudioCritsect(CriticalSectionWrapper::CreateCriticalSection()),
_frequency(8000),
_packetSizeSamples(160),
_dtmfEventIsOn(false),
_dtmfEventFirstPacketSent(false),
@@ -43,41 +46,21 @@ RTPSenderAudio::RTPSenderAudio(const int32_t id, Clock* clock,
_cngFBPayloadType(-1),
_lastPayloadType(-1),
_audioLevel_dBov(0) {
};

RTPSenderAudio::~RTPSenderAudio()
{
delete _sendAudioCritsect;
delete _audioFeedbackCritsect;
}

int32_t
RTPSenderAudio::RegisterAudioCallback(RtpAudioFeedback* messagesCallback)
{
CriticalSectionScoped cs(_audioFeedbackCritsect);
_audioFeedback = messagesCallback;
return 0;
RTPSenderAudio::~RTPSenderAudio() {
}

void
RTPSenderAudio::SetAudioFrequency(const uint32_t f)
{
CriticalSectionScoped cs(_sendAudioCritsect);
_frequency = f;
int RTPSenderAudio::AudioFrequency() const {
return kDtmfFrequencyHz;
}

int
RTPSenderAudio::AudioFrequency() const
{
CriticalSectionScoped cs(_sendAudioCritsect);
return _frequency;
}

// set audio packet size, used to determine when it's time to send a DTMF packet in silence (CNG)
// set audio packet size, used to determine when it's time to send a DTMF packet
// in silence (CNG)
int32_t
RTPSenderAudio::SetAudioPacketSize(const uint16_t packetSizeSamples)
{
CriticalSectionScoped cs(_sendAudioCritsect);
CriticalSectionScoped cs(_sendAudioCritsect.get());

_packetSizeSamples = packetSizeSamples;
return 0;
@ -90,27 +73,27 @@ int32_t RTPSenderAudio::RegisterAudioPayload(
|
||||
const uint8_t channels,
|
||||
const uint32_t rate,
|
||||
RtpUtility::Payload*& payload) {
|
||||
CriticalSectionScoped cs(_sendAudioCritsect);
|
||||
|
||||
if (RtpUtility::StringCompare(payloadName, "cn", 2)) {
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
// we can have multiple CNG payload types
|
||||
if (frequency == 8000) {
|
||||
_cngNBPayloadType = payloadType;
|
||||
|
||||
} else if (frequency == 16000) {
|
||||
_cngWBPayloadType = payloadType;
|
||||
|
||||
} else if (frequency == 32000) {
|
||||
_cngSWBPayloadType = payloadType;
|
||||
|
||||
} else if (frequency == 48000) {
|
||||
_cngFBPayloadType = payloadType;
|
||||
|
||||
} else {
|
||||
return -1;
|
||||
switch (frequency) {
|
||||
case 8000:
|
||||
_cngNBPayloadType = payloadType;
|
||||
break;
|
||||
case 16000:
|
||||
_cngWBPayloadType = payloadType;
|
||||
break;
|
||||
case 32000:
|
||||
_cngSWBPayloadType = payloadType;
|
||||
break;
|
||||
case 48000:
|
||||
_cngFBPayloadType = payloadType;
|
||||
break;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if (RtpUtility::StringCompare(payloadName, "telephone-event", 15)) {
|
||||
} else if (RtpUtility::StringCompare(payloadName, "telephone-event", 15)) {
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
// Don't add it to the list
|
||||
// we dont want to allow send with a DTMF payloadtype
|
||||
_dtmfPayloadType = payloadType;
|
||||
@ -122,78 +105,45 @@ int32_t RTPSenderAudio::RegisterAudioPayload(
|
||||
payload->typeSpecific.Audio.channels = channels;
|
||||
payload->typeSpecific.Audio.rate = rate;
|
||||
payload->audio = true;
|
||||
payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
|
||||
payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = '\0';
|
||||
strncpy(payload->name, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool
|
||||
RTPSenderAudio::MarkerBit(const FrameType frameType,
|
||||
const int8_t payloadType)
|
||||
const int8_t payload_type)
|
||||
{
|
||||
CriticalSectionScoped cs(_sendAudioCritsect);
|
||||
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
// for audio true for first packet in a speech burst
|
||||
bool markerBit = false;
|
||||
if(_lastPayloadType != payloadType)
|
||||
{
|
||||
if(_cngNBPayloadType != -1)
|
||||
{
|
||||
// we have configured NB CNG
|
||||
if(_cngNBPayloadType == payloadType)
|
||||
{
|
||||
// only set a marker bit when we change payload type to a non CNG
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(_cngWBPayloadType != -1)
|
||||
{
|
||||
// we have configured WB CNG
|
||||
if(_cngWBPayloadType == payloadType)
|
||||
{
|
||||
// only set a marker bit when we change payload type to a non CNG
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(_cngSWBPayloadType != -1)
|
||||
{
|
||||
// we have configured SWB CNG
|
||||
if(_cngSWBPayloadType == payloadType)
|
||||
{
|
||||
// only set a marker bit when we change payload type to a non CNG
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(_cngFBPayloadType != -1)
|
||||
{
|
||||
// we have configured SWB CNG
|
||||
if(_cngFBPayloadType == payloadType)
|
||||
{
|
||||
// only set a marker bit when we change payload type to a non CNG
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// payloadType differ
|
||||
if(_lastPayloadType == -1)
|
||||
{
|
||||
if(frameType != kAudioFrameCN)
|
||||
{
|
||||
// first packet and NOT CNG
|
||||
return true;
|
||||
if (_lastPayloadType != payload_type) {
|
||||
if (payload_type != -1 && (_cngNBPayloadType == payload_type ||
|
||||
_cngWBPayloadType == payload_type ||
|
||||
_cngSWBPayloadType == payload_type ||
|
||||
_cngFBPayloadType == payload_type)) {
|
||||
// Only set a marker bit when we change payload type to a non CNG
|
||||
return false;
|
||||
}
|
||||
|
||||
}else
|
||||
{
|
||||
// first packet and CNG
|
||||
_inbandVADactive = true;
|
||||
return false;
|
||||
}
|
||||
// payload_type differ
|
||||
if (_lastPayloadType == -1) {
|
||||
if (frameType != kAudioFrameCN) {
|
||||
// first packet and NOT CNG
|
||||
return true;
|
||||
} else {
|
||||
// first packet and CNG
|
||||
_inbandVADactive = true;
|
||||
return false;
|
||||
}
|
||||
// not first packet AND
|
||||
// not CNG AND
|
||||
// payloadType changed
|
||||
}
|
||||
|
||||
// set a marker bit when we change payload type
|
||||
markerBit = true;
|
||||
// not first packet AND
|
||||
// not CNG AND
|
||||
// payload_type changed
|
||||
|
||||
// set a marker bit when we change payload type
|
||||
markerBit = true;
|
||||
}
|
||||
|
||||
// For G.723 G.729, AMR etc we can have inband VAD
|
||||
@ -209,25 +159,6 @@ RTPSenderAudio::MarkerBit(const FrameType frameType,
|
||||
return markerBit;
|
||||
}
|
||||
|
||||
bool
|
||||
RTPSenderAudio::SendTelephoneEventActive(int8_t& telephoneEvent) const
|
||||
{
|
||||
if(_dtmfEventIsOn)
|
||||
{
|
||||
telephoneEvent = _dtmfKey;
|
||||
return true;
|
||||
}
|
||||
int64_t delaySinceLastDTMF = _clock->TimeInMilliseconds() -
|
||||
_dtmfTimeLastSent;
|
||||
if(delaySinceLastDTMF < 100)
|
||||
{
|
||||
telephoneEvent = _dtmfKey;
|
||||
return true;
|
||||
}
|
||||
telephoneEvent = -1;
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t RTPSenderAudio::SendAudio(
|
||||
const FrameType frameType,
|
||||
const int8_t payloadType,
|
||||
@ -241,11 +172,20 @@ int32_t RTPSenderAudio::SendAudio(
|
||||
bool dtmfToneStarted = false;
|
||||
uint16_t dtmfLengthMS = 0;
|
||||
uint8_t key = 0;
|
||||
int red_payload_type;
|
||||
uint8_t audio_level_dbov;
|
||||
int8_t dtmf_payload_type;
|
||||
uint16_t packet_size_samples;
|
||||
{
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
red_payload_type = _REDPayloadType;
|
||||
audio_level_dbov = _audioLevel_dBov;
|
||||
dtmf_payload_type = _dtmfPayloadType;
|
||||
packet_size_samples = _packetSizeSamples;
|
||||
}
|
||||
|
||||
// Check if we have pending DTMFs to send
|
||||
if (!_dtmfEventIsOn && PendingDTMF()) {
|
||||
CriticalSectionScoped cs(_sendAudioCritsect);
|
||||
|
||||
int64_t delaySinceLastDTMF = _clock->TimeInMilliseconds() -
|
||||
_dtmfTimeLastSent;
|
||||
|
||||
@ -255,83 +195,69 @@ int32_t RTPSenderAudio::SendAudio(
|
||||
if (NextDTMF(&key, &dtmfLengthMS, &_dtmfLevel) >= 0) {
|
||||
_dtmfEventFirstPacketSent = false;
|
||||
_dtmfKey = key;
|
||||
_dtmfLengthSamples = (_frequency / 1000) * dtmfLengthMS;
|
||||
_dtmfLengthSamples = (kDtmfFrequencyHz / 1000) * dtmfLengthMS;
|
||||
dtmfToneStarted = true;
|
||||
_dtmfEventIsOn = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (dtmfToneStarted) {
|
||||
CriticalSectionScoped cs(_audioFeedbackCritsect);
|
||||
if (_audioFeedback) {
|
||||
if (_audioFeedback)
|
||||
_audioFeedback->OnPlayTelephoneEvent(_id, key, dtmfLengthMS, _dtmfLevel);
|
||||
}
|
||||
}
|
||||
|
||||
// A source MAY send events and coded audio packets for the same time
|
||||
// but we don't support it
|
||||
{
|
||||
_sendAudioCritsect->Enter();
|
||||
|
||||
if (_dtmfEventIsOn) {
|
||||
if (frameType == kFrameEmpty) {
|
||||
// kFrameEmpty is used to drive the DTMF when in CN mode
|
||||
// it can be triggered more frequently than we want to send the
|
||||
// DTMF packets.
|
||||
if (_packetSizeSamples > (captureTimeStamp - _dtmfTimestampLastSent)) {
|
||||
// not time to send yet
|
||||
_sendAudioCritsect->Leave();
|
||||
return 0;
|
||||
}
|
||||
if (_dtmfEventIsOn) {
|
||||
if (frameType == kFrameEmpty) {
|
||||
// kFrameEmpty is used to drive the DTMF when in CN mode
|
||||
// it can be triggered more frequently than we want to send the
|
||||
// DTMF packets.
|
||||
if (packet_size_samples > (captureTimeStamp - _dtmfTimestampLastSent)) {
|
||||
// not time to send yet
|
||||
return 0;
|
||||
}
|
||||
_dtmfTimestampLastSent = captureTimeStamp;
|
||||
uint32_t dtmfDurationSamples = captureTimeStamp - _dtmfTimestamp;
|
||||
bool ended = false;
|
||||
bool send = true;
|
||||
|
||||
if (_dtmfLengthSamples > dtmfDurationSamples) {
|
||||
if (dtmfDurationSamples <= 0) {
|
||||
// Skip send packet at start, since we shouldn't use duration 0
|
||||
send = false;
|
||||
}
|
||||
} else {
|
||||
ended = true;
|
||||
_dtmfEventIsOn = false;
|
||||
_dtmfTimeLastSent = _clock->TimeInMilliseconds();
|
||||
}
|
||||
// don't hold the critsect while calling SendTelephoneEventPacket
|
||||
_sendAudioCritsect->Leave();
|
||||
if (send) {
|
||||
if (dtmfDurationSamples > 0xffff) {
|
||||
// RFC 4733 2.5.2.3 Long-Duration Events
|
||||
SendTelephoneEventPacket(ended, _dtmfTimestamp,
|
||||
static_cast<uint16_t>(0xffff), false);
|
||||
|
||||
// set new timestap for this segment
|
||||
_dtmfTimestamp = captureTimeStamp;
|
||||
dtmfDurationSamples -= 0xffff;
|
||||
_dtmfLengthSamples -= 0xffff;
|
||||
|
||||
return SendTelephoneEventPacket(
|
||||
ended,
|
||||
_dtmfTimestamp,
|
||||
static_cast<uint16_t>(dtmfDurationSamples),
|
||||
false);
|
||||
} else {
|
||||
if (SendTelephoneEventPacket(
|
||||
ended,
|
||||
_dtmfTimestamp,
|
||||
static_cast<uint16_t>(dtmfDurationSamples),
|
||||
!_dtmfEventFirstPacketSent) != 0) {
|
||||
return -1;
|
||||
}
|
||||
_dtmfEventFirstPacketSent = true;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
_sendAudioCritsect->Leave();
|
||||
_dtmfTimestampLastSent = captureTimeStamp;
|
||||
uint32_t dtmfDurationSamples = captureTimeStamp - _dtmfTimestamp;
|
||||
bool ended = false;
|
||||
bool send = true;
|
||||
|
||||
if (_dtmfLengthSamples > dtmfDurationSamples) {
|
||||
if (dtmfDurationSamples <= 0) {
|
||||
// Skip send packet at start, since we shouldn't use duration 0
|
||||
send = false;
|
||||
}
|
||||
} else {
|
||||
ended = true;
|
||||
_dtmfEventIsOn = false;
|
||||
_dtmfTimeLastSent = _clock->TimeInMilliseconds();
|
||||
}
|
||||
if (send) {
|
||||
if (dtmfDurationSamples > 0xffff) {
|
||||
// RFC 4733 2.5.2.3 Long-Duration Events
|
||||
SendTelephoneEventPacket(ended, dtmf_payload_type, _dtmfTimestamp,
|
||||
static_cast<uint16_t>(0xffff), false);
|
||||
|
||||
// set new timestap for this segment
|
||||
_dtmfTimestamp = captureTimeStamp;
|
||||
dtmfDurationSamples -= 0xffff;
|
||||
_dtmfLengthSamples -= 0xffff;
|
||||
|
||||
return SendTelephoneEventPacket(
|
||||
ended, dtmf_payload_type, _dtmfTimestamp,
|
||||
static_cast<uint16_t>(dtmfDurationSamples), false);
|
||||
} else {
|
||||
if (SendTelephoneEventPacket(ended, dtmf_payload_type, _dtmfTimestamp,
|
||||
static_cast<uint16_t>(dtmfDurationSamples),
|
||||
!_dtmfEventFirstPacketSent) != 0) {
|
||||
return -1;
|
||||
}
|
||||
_dtmfEventFirstPacketSent = true;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
if (payloadSize == 0 || payloadData == NULL) {
|
||||
if (frameType == kFrameEmpty) {
|
||||
@ -347,12 +273,12 @@ int32_t RTPSenderAudio::SendAudio(
|
||||
int32_t rtpHeaderLength = 0;
|
||||
uint16_t timestampOffset = 0;
|
||||
|
||||
if (_REDPayloadType >= 0 && fragmentation && !markerBit &&
|
||||
if (red_payload_type >= 0 && fragmentation && !markerBit &&
|
||||
fragmentation->fragmentationVectorSize > 1) {
|
||||
// have we configured RED? use its payload type
|
||||
// we need to get the current timestamp to calc the diff
|
||||
uint32_t oldTimeStamp = _rtpSender->Timestamp();
|
||||
rtpHeaderLength = _rtpSender->BuildRTPheader(dataBuffer, _REDPayloadType,
|
||||
rtpHeaderLength = _rtpSender->BuildRTPheader(dataBuffer, red_payload_type,
|
||||
markerBit, captureTimeStamp,
|
||||
_clock->TimeInMilliseconds());
|
||||
|
||||
@ -369,24 +295,21 @@ int32_t RTPSenderAudio::SendAudio(
|
||||
// Too large payload buffer.
|
||||
return -1;
|
||||
}
|
||||
{
|
||||
CriticalSectionScoped cs(_sendAudioCritsect);
|
||||
if (_REDPayloadType >= 0 && // Have we configured RED?
|
||||
fragmentation &&
|
||||
fragmentation->fragmentationVectorSize > 1 &&
|
||||
if (red_payload_type >= 0 && // Have we configured RED?
|
||||
fragmentation && fragmentation->fragmentationVectorSize > 1 &&
|
||||
!markerBit) {
|
||||
if (timestampOffset <= 0x3fff) {
|
||||
if(fragmentation->fragmentationVectorSize != 2) {
|
||||
if (fragmentation->fragmentationVectorSize != 2) {
|
||||
// we only support 2 codecs when using RED
|
||||
return -1;
|
||||
}
|
||||
// only 0x80 if we have multiple blocks
|
||||
dataBuffer[rtpHeaderLength++] = 0x80 +
|
||||
fragmentation->fragmentationPlType[1];
|
||||
dataBuffer[rtpHeaderLength++] =
|
||||
0x80 + fragmentation->fragmentationPlType[1];
|
||||
size_t blockLength = fragmentation->fragmentationLength[1];
|
||||
|
||||
// sanity blockLength
|
||||
if(blockLength > 0x3ff) { // block length 10 bits 1023 bytes
|
||||
if (blockLength > 0x3ff) { // block length 10 bits 1023 bytes
|
||||
return -1;
|
||||
}
|
||||
uint32_t REDheader = (timestampOffset << 10) + blockLength;
|
||||
@ -396,22 +319,22 @@ int32_t RTPSenderAudio::SendAudio(
|
||||
|
||||
dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
|
||||
// copy the RED data
|
||||
memcpy(dataBuffer+rtpHeaderLength,
|
||||
memcpy(dataBuffer + rtpHeaderLength,
|
||||
payloadData + fragmentation->fragmentationOffset[1],
|
||||
fragmentation->fragmentationLength[1]);
|
||||
|
||||
// copy the normal data
|
||||
memcpy(dataBuffer+rtpHeaderLength +
|
||||
fragmentation->fragmentationLength[1],
|
||||
memcpy(dataBuffer + rtpHeaderLength +
|
||||
fragmentation->fragmentationLength[1],
|
||||
payloadData + fragmentation->fragmentationOffset[0],
|
||||
fragmentation->fragmentationLength[0]);
|
||||
|
||||
payloadSize = fragmentation->fragmentationLength[0] +
|
||||
fragmentation->fragmentationLength[1];
|
||||
fragmentation->fragmentationLength[1];
|
||||
} else {
|
||||
// silence for too long send only new data
|
||||
dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
|
||||
memcpy(dataBuffer+rtpHeaderLength,
|
||||
memcpy(dataBuffer + rtpHeaderLength,
|
||||
payloadData + fragmentation->fragmentationOffset[0],
|
||||
fragmentation->fragmentationLength[0]);
|
||||
|
||||
@ -421,38 +344,34 @@ int32_t RTPSenderAudio::SendAudio(
|
||||
if (fragmentation && fragmentation->fragmentationVectorSize > 0) {
|
||||
// use the fragment info if we have one
|
||||
dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
|
||||
memcpy( dataBuffer+rtpHeaderLength,
|
||||
payloadData + fragmentation->fragmentationOffset[0],
|
||||
fragmentation->fragmentationLength[0]);
|
||||
memcpy(dataBuffer + rtpHeaderLength,
|
||||
payloadData + fragmentation->fragmentationOffset[0],
|
||||
fragmentation->fragmentationLength[0]);
|
||||
|
||||
payloadSize = fragmentation->fragmentationLength[0];
|
||||
} else {
|
||||
memcpy(dataBuffer+rtpHeaderLength, payloadData, payloadSize);
|
||||
memcpy(dataBuffer + rtpHeaderLength, payloadData, payloadSize);
|
||||
}
|
||||
}
|
||||
_lastPayloadType = payloadType;
|
||||
|
||||
// Update audio level extension, if included.
|
||||
{
|
||||
size_t packetSize = payloadSize + rtpHeaderLength;
|
||||
RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
|
||||
RTPHeader rtp_header;
|
||||
rtp_parser.Parse(rtp_header);
|
||||
_rtpSender->UpdateAudioLevel(dataBuffer, packetSize, rtp_header,
|
||||
(frameType == kAudioFrameSpeech),
|
||||
_audioLevel_dBov);
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
_lastPayloadType = payloadType;
|
||||
}
|
||||
} // end critical section
|
||||
TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp,
|
||||
"timestamp", _rtpSender->Timestamp(),
|
||||
"seqnum", _rtpSender->SequenceNumber());
|
||||
return _rtpSender->SendToNetwork(dataBuffer,
|
||||
payloadSize,
|
||||
rtpHeaderLength,
|
||||
-1,
|
||||
kAllowRetransmission,
|
||||
PacedSender::kHighPriority);
|
||||
}
|
||||
// Update audio level extension, if included.
|
||||
size_t packetSize = payloadSize + rtpHeaderLength;
|
||||
RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
|
||||
RTPHeader rtp_header;
|
||||
rtp_parser.Parse(rtp_header);
|
||||
_rtpSender->UpdateAudioLevel(dataBuffer, packetSize, rtp_header,
|
||||
(frameType == kAudioFrameSpeech),
|
||||
audio_level_dbov);
|
||||
TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp, "timestamp",
|
||||
_rtpSender->Timestamp(), "seqnum",
|
||||
_rtpSender->SequenceNumber());
|
||||
return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength,
|
||||
-1, kAllowRetransmission,
|
||||
PacedSender::kHighPriority);
|
||||
}
|
||||
|
||||
// Audio level magnitude and voice activity flag are set for each RTP packet
|
||||
int32_t
|
||||
@ -462,7 +381,7 @@ RTPSenderAudio::SetAudioLevel(const uint8_t level_dBov)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
CriticalSectionScoped cs(_sendAudioCritsect);
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
_audioLevel_dBov = level_dBov;
|
||||
return 0;
|
||||
}
|
||||
@ -475,6 +394,7 @@ RTPSenderAudio::SetRED(const int8_t payloadType)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
_REDPayloadType = payloadType;
|
||||
return 0;
|
||||
}
|
||||
@ -483,6 +403,7 @@ RTPSenderAudio::SetRED(const int8_t payloadType)
|
||||
int32_t
|
||||
RTPSenderAudio::RED(int8_t& payloadType) const
|
||||
{
|
||||
CriticalSectionScoped cs(_sendAudioCritsect.get());
|
||||
if(_REDPayloadType == -1)
|
||||
{
|
||||
// not configured
|
||||
@ -493,25 +414,25 @@ RTPSenderAudio::RED(int8_t& payloadType) const
|
||||
}
|
||||
|
||||
// Send a TelephoneEvent tone using RFC 2833 (4733)
|
||||
int32_t
|
||||
RTPSenderAudio::SendTelephoneEvent(const uint8_t key,
|
||||
const uint16_t time_ms,
|
||||
const uint8_t level)
|
||||
{
|
||||
// DTMF is protected by its own critsect
|
||||
if(_dtmfPayloadType < 0)
|
||||
{
|
||||
// TelephoneEvent payloadtype not configured
|
||||
return -1;
|
||||
int32_t RTPSenderAudio::SendTelephoneEvent(const uint8_t key,
|
||||
const uint16_t time_ms,
|
||||
const uint8_t level) {
|
||||
{
|
||||
CriticalSectionScoped lock(_sendAudioCritsect.get());
|
||||
if (_dtmfPayloadType < 0) {
|
||||
// TelephoneEvent payloadtype not configured
|
||||
return -1;
|
||||
}
|
||||
return AddDTMF(key, time_ms, level);
|
||||
}
|
||||
return AddDTMF(key, time_ms, level);
|
||||
}
|
||||
|
||||
int32_t
|
||||
RTPSenderAudio::SendTelephoneEventPacket(const bool ended,
|
||||
const uint32_t dtmfTimeStamp,
|
||||
const uint16_t duration,
|
||||
const bool markerBit)
|
||||
RTPSenderAudio::SendTelephoneEventPacket(bool ended,
|
||||
int8_t dtmf_payload_type,
|
||||
uint32_t dtmfTimeStamp,
|
||||
uint16_t duration,
|
||||
bool markerBit)
|
||||
{
|
||||
uint8_t dtmfbuffer[IP_PACKET_SIZE];
|
||||
uint8_t sendCount = 1;
|
||||
@ -524,10 +445,8 @@ RTPSenderAudio::SendTelephoneEventPacket(const bool ended,
|
||||
}
|
||||
do
|
||||
{
|
||||
_sendAudioCritsect->Enter();
|
||||
|
||||
//Send DTMF data
|
||||
_rtpSender->BuildRTPheader(dtmfbuffer, _dtmfPayloadType, markerBit,
|
||||
_rtpSender->BuildRTPheader(dtmfbuffer, dtmf_payload_type, markerBit,
|
||||
dtmfTimeStamp, _clock->TimeInMilliseconds());
|
||||
|
||||
// reset CSRC and X bit
|
||||
@ -547,19 +466,13 @@ RTPSenderAudio::SendTelephoneEventPacket(const bool ended,
|
||||
uint8_t volume = _dtmfLevel;
|
||||
|
||||
// First packet un-ended
|
||||
uint8_t E = 0x00;
|
||||
|
||||
if(ended)
|
||||
{
|
||||
E = 0x80;
|
||||
}
|
||||
uint8_t E = ended ? 0x80 : 0x00;
|
||||
|
||||
// First byte is Event number, equals key number
|
||||
dtmfbuffer[12] = _dtmfKey;
|
||||
dtmfbuffer[13] = E|R|volume;
|
||||
RtpUtility::AssignUWord16ToBuffer(dtmfbuffer + 14, duration);
|
||||
|
||||
_sendAudioCritsect->Leave();
|
||||
TRACE_EVENT_INSTANT2("webrtc_rtp",
|
||||
"Audio::SendTelephoneEvent",
|
||||
"timestamp", dtmfTimeStamp,
|
||||
|
@ -22,8 +22,10 @@ namespace webrtc {
|
||||
class RTPSenderAudio: public DTMFqueue
|
||||
{
|
||||
public:
|
||||
RTPSenderAudio(const int32_t id, Clock* clock,
|
||||
RTPSender* rtpSender);
|
||||
RTPSenderAudio(const int32_t id,
|
||||
Clock* clock,
|
||||
RTPSender* rtpSender,
|
||||
RtpAudioFeedback* audio_feedback);
|
||||
virtual ~RTPSenderAudio();
|
||||
|
||||
int32_t RegisterAudioPayload(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
|
||||
@ -48,13 +50,9 @@ public:
|
||||
int32_t SetAudioLevel(const uint8_t level_dBov);
|
||||
|
||||
// Send a DTMF tone using RFC 2833 (4733)
|
||||
int32_t SendTelephoneEvent(const uint8_t key,
|
||||
const uint16_t time_ms,
|
||||
const uint8_t level);
|
||||
|
||||
bool SendTelephoneEventActive(int8_t& telephoneEvent) const;
|
||||
|
||||
void SetAudioFrequency(const uint32_t f);
|
||||
int32_t SendTelephoneEvent(const uint8_t key,
|
||||
const uint16_t time_ms,
|
||||
const uint8_t level);
|
||||
|
||||
int AudioFrequency() const;
|
||||
|
||||
@ -64,52 +62,50 @@ public:
|
||||
// Get payload type for Redundant Audio Data RFC 2198
|
||||
int32_t RED(int8_t& payloadType) const;
|
||||
|
||||
int32_t RegisterAudioCallback(RtpAudioFeedback* messagesCallback);
|
||||
|
||||
protected:
|
||||
int32_t SendTelephoneEventPacket(const bool ended,
|
||||
const uint32_t dtmfTimeStamp,
|
||||
const uint16_t duration,
|
||||
const bool markerBit); // set on first packet in talk burst
|
||||
int32_t SendTelephoneEventPacket(bool ended,
|
||||
int8_t dtmf_payload_type,
|
||||
uint32_t dtmfTimeStamp,
|
||||
uint16_t duration,
|
||||
bool markerBit); // set on first packet in talk burst
|
||||
|
||||
bool MarkerBit(const FrameType frameType,
|
||||
const int8_t payloadType);
|
||||
|
||||
private:
|
||||
int32_t _id;
|
||||
Clock* _clock;
|
||||
RTPSender* _rtpSender;
|
||||
CriticalSectionWrapper* _audioFeedbackCritsect;
|
||||
RtpAudioFeedback* _audioFeedback;
|
||||
const int32_t _id;
|
||||
Clock* const _clock;
|
||||
RTPSender* const _rtpSender;
|
||||
RtpAudioFeedback* const _audioFeedback;
|
||||
|
||||
CriticalSectionWrapper* _sendAudioCritsect;
|
||||
scoped_ptr<CriticalSectionWrapper> _sendAudioCritsect;
|
||||
|
||||
uint32_t _frequency;
|
||||
uint16_t _packetSizeSamples;
|
||||
uint16_t _packetSizeSamples GUARDED_BY(_sendAudioCritsect);
|
||||
|
||||
// DTMF
|
||||
bool _dtmfEventIsOn;
|
||||
bool _dtmfEventFirstPacketSent;
|
||||
int8_t _dtmfPayloadType;
|
||||
uint32_t _dtmfTimestamp;
|
||||
uint8_t _dtmfKey;
|
||||
uint32_t _dtmfLengthSamples;
|
||||
uint8_t _dtmfLevel;
|
||||
int64_t _dtmfTimeLastSent;
|
||||
uint32_t _dtmfTimestampLastSent;
|
||||
// DTMF
|
||||
bool _dtmfEventIsOn;
|
||||
bool _dtmfEventFirstPacketSent;
|
||||
int8_t _dtmfPayloadType GUARDED_BY(_sendAudioCritsect);
|
||||
uint32_t _dtmfTimestamp;
|
||||
uint8_t _dtmfKey;
|
||||
uint32_t _dtmfLengthSamples;
|
||||
uint8_t _dtmfLevel;
|
||||
int64_t _dtmfTimeLastSent;
|
||||
uint32_t _dtmfTimestampLastSent;
|
||||
|
||||
int8_t _REDPayloadType;
|
||||
int8_t _REDPayloadType GUARDED_BY(_sendAudioCritsect);
|
||||
|
||||
// VAD detection, used for markerbit
|
||||
bool _inbandVADactive;
|
||||
int8_t _cngNBPayloadType;
|
||||
int8_t _cngWBPayloadType;
|
||||
int8_t _cngSWBPayloadType;
|
||||
int8_t _cngFBPayloadType;
|
||||
int8_t _lastPayloadType;
|
||||
// VAD detection, used for markerbit
|
||||
bool _inbandVADactive GUARDED_BY(_sendAudioCritsect);
|
||||
int8_t _cngNBPayloadType GUARDED_BY(_sendAudioCritsect);
|
||||
int8_t _cngWBPayloadType GUARDED_BY(_sendAudioCritsect);
|
||||
int8_t _cngSWBPayloadType GUARDED_BY(_sendAudioCritsect);
|
||||
int8_t _cngFBPayloadType GUARDED_BY(_sendAudioCritsect);
|
||||
int8_t _lastPayloadType GUARDED_BY(_sendAudioCritsect);
|
||||
|
||||
// Audio level indication (https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/)
|
||||
uint8_t _audioLevel_dBov;
|
||||
// Audio level indication
|
||||
// (https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/)
|
||||
uint8_t _audioLevel_dBov GUARDED_BY(_sendAudioCritsect);
|
||||
};
|
||||
} // namespace webrtc
|
||||
|
||||
|
@@ -33,7 +33,6 @@ struct RtpPacket {

RTPSenderVideo::RTPSenderVideo(Clock* clock, RTPSenderInterface* rtpSender)
: _rtpSender(*rtpSender),
_sendVideoCritsect(CriticalSectionWrapper::CreateCriticalSection()),
_videoType(kRtpVideoGeneric),
_videoCodecInformation(NULL),
_maxBitrate(0),
@@ -61,11 +60,9 @@ RTPSenderVideo::~RTPSenderVideo() {
if (_videoCodecInformation) {
delete _videoCodecInformation;
}
delete _sendVideoCritsect;
}

void RTPSenderVideo::SetVideoCodecType(RtpVideoCodecTypes videoType) {
CriticalSectionScoped cs(_sendVideoCritsect);
_videoType = videoType;
}

@@ -78,8 +75,6 @@ int32_t RTPSenderVideo::RegisterVideoPayload(
const int8_t payloadType,
const uint32_t maxBitRate,
RtpUtility::Payload*& payload) {
CriticalSectionScoped cs(_sendVideoCritsect);

RtpVideoCodecTypes videoType = kRtpVideoGeneric;
if (RtpUtility::StringCompare(payloadName, "VP8", 3)) {
videoType = kRtpVideoVp8;

@@ -106,7 +106,6 @@ class RTPSenderVideo {
private:
RTPSenderInterface& _rtpSender;

CriticalSectionWrapper* _sendVideoCritsect;
RtpVideoCodecTypes _videoType;
VideoCodecInformation* _videoCodecInformation;
uint32_t _maxBitrate;

@@ -203,7 +203,6 @@ TEST_F(RtpRtcpAudioTest, Basic) {
voice_codec.plfreq,
voice_codec.channels,
(voice_codec.rate < 0) ? 0 : voice_codec.rate));
printf("4\n");

const uint8_t test[5] = "test";
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,