Lint-cleaned video and audio receivers.

BUG=
TESTED=trybots

Review URL: https://webrtc-codereview.appspot.com/1093004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3471 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
phoglund@webrtc.org 2013-02-05 15:12:39 +00:00
parent c4e45f67c0
commit a7303bdfb5
4 changed files with 683 additions and 704 deletions

View File

@ -8,141 +8,122 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "rtp_receiver_audio.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h"
#include <math.h> // pow()
#include <cassert> // assert
#include <cstring> // memcpy()
#include <math.h> // pow()
#include "critical_section_wrapper.h"
#include "trace.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
RTPReceiverAudio::RTPReceiverAudio(const WebRtc_Word32 id,
RtpData* data_callback,
RtpAudioFeedback* incomingMessagesCallback)
RtpAudioFeedback* incoming_messages_callback)
: RTPReceiverStrategy(data_callback),
_id(id),
_criticalSectionRtpReceiverAudio(
id_(id),
critical_section_rtp_receiver_audio_(
CriticalSectionWrapper::CreateCriticalSection()),
_lastReceivedFrequency(8000),
_telephoneEvent(false),
_telephoneEventForwardToDecoder(false),
_telephoneEventDetectEndOfTone(false),
_telephoneEventPayloadType(-1),
_cngNBPayloadType(-1),
_cngWBPayloadType(-1),
_cngSWBPayloadType(-1),
_cngFBPayloadType(-1),
_cngPayloadType(-1),
_G722PayloadType(-1),
_lastReceivedG722(false),
_cbAudioFeedback(incomingMessagesCallback)
{
last_received_frequency_(8000),
telephone_event_(false),
telephone_event_forward_to_decoder_(false),
telephone_event_detect_end_of_tone_(false),
telephone_event_payload_type_(-1),
cng_nb_payload_type_(-1),
cng_wb_payload_type_(-1),
cng_swb_payload_type_(-1),
cng_fb_payload_type_(-1),
cng_payload_type_(-1),
g722_payload_type_(-1),
last_received_g722_(false),
cb_audio_feedback_(incoming_messages_callback) {
last_payload_.Audio.channels = 1;
}
WebRtc_UWord32
RTPReceiverAudio::AudioFrequency() const
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
if(_lastReceivedG722)
{
WebRtc_UWord32 RTPReceiverAudio::AudioFrequency() const {
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
if (last_received_g722_) {
return 8000;
}
return _lastReceivedFrequency;
return last_received_frequency_;
}
// Outband TelephoneEvent(DTMF) detection
WebRtc_Word32
RTPReceiverAudio::SetTelephoneEventStatus(const bool enable,
const bool forwardToDecoder,
const bool detectEndOfTone)
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
_telephoneEvent= enable;
_telephoneEventDetectEndOfTone = detectEndOfTone;
_telephoneEventForwardToDecoder = forwardToDecoder;
WebRtc_Word32 RTPReceiverAudio::SetTelephoneEventStatus(
const bool enable,
const bool forward_to_decoder,
const bool detect_end_of_tone) {
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
telephone_event_ = enable;
telephone_event_detect_end_of_tone_ = detect_end_of_tone;
telephone_event_forward_to_decoder_ = forward_to_decoder;
return 0;
}
// Is outband TelephoneEvent(DTMF) turned on/off?
bool
RTPReceiverAudio::TelephoneEvent() const
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
return _telephoneEvent;
bool RTPReceiverAudio::TelephoneEvent() const {
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
return telephone_event_;
}
// Is forwarding of outband telephone events turned on/off?
bool
RTPReceiverAudio::TelephoneEventForwardToDecoder() const
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
return _telephoneEventForwardToDecoder;
bool RTPReceiverAudio::TelephoneEventForwardToDecoder() const {
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
return telephone_event_forward_to_decoder_;
}
bool
RTPReceiverAudio::TelephoneEventPayloadType(const WebRtc_Word8 payloadType) const
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
return (_telephoneEventPayloadType == payloadType)?true:false;
bool RTPReceiverAudio::TelephoneEventPayloadType(
const WebRtc_Word8 payload_type) const {
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
return (telephone_event_payload_type_ == payload_type) ? true : false;
}
bool
RTPReceiverAudio::CNGPayloadType(const WebRtc_Word8 payloadType,
bool RTPReceiverAudio::CNGPayloadType(const WebRtc_Word8 payload_type,
WebRtc_UWord32* frequency,
bool* cngPayloadTypeHasChanged)
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
*cngPayloadTypeHasChanged = false;
bool* cng_payload_type_has_changed) {
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
*cng_payload_type_has_changed = false;
// We can have four CNG on 8000Hz, 16000Hz, 32000Hz and 48000Hz.
if(_cngNBPayloadType == payloadType)
{
if (cng_nb_payload_type_ == payload_type) {
*frequency = 8000;
if ((_cngPayloadType != -1) &&(_cngPayloadType !=_cngNBPayloadType))
*cngPayloadTypeHasChanged = true;
if (cng_payload_type_ != -1 && cng_payload_type_ != cng_nb_payload_type_)
*cng_payload_type_has_changed = true;
_cngPayloadType = _cngNBPayloadType;
cng_payload_type_ = cng_nb_payload_type_;
return true;
} else if(_cngWBPayloadType == payloadType)
{
} else if (cng_wb_payload_type_ == payload_type) {
// if last received codec is G.722 we must use frequency 8000
if(_lastReceivedG722)
{
if (last_received_g722_) {
*frequency = 8000;
} else
{
} else {
*frequency = 16000;
}
if ((_cngPayloadType != -1) &&(_cngPayloadType !=_cngWBPayloadType))
*cngPayloadTypeHasChanged = true;
_cngPayloadType = _cngWBPayloadType;
if (cng_payload_type_ != -1 && cng_payload_type_ != cng_wb_payload_type_)
*cng_payload_type_has_changed = true;
cng_payload_type_ = cng_wb_payload_type_;
return true;
}else if(_cngSWBPayloadType == payloadType)
{
} else if (cng_swb_payload_type_ == payload_type) {
*frequency = 32000;
if ((_cngPayloadType != -1) &&(_cngPayloadType !=_cngSWBPayloadType))
*cngPayloadTypeHasChanged = true;
_cngPayloadType = _cngSWBPayloadType;
if ((cng_payload_type_ != -1) &&
(cng_payload_type_ != cng_swb_payload_type_))
*cng_payload_type_has_changed = true;
cng_payload_type_ = cng_swb_payload_type_;
return true;
}else if(_cngFBPayloadType == payloadType)
{
} else if (cng_fb_payload_type_ == payload_type) {
*frequency = 48000;
if ((_cngPayloadType != -1) &&(_cngPayloadType !=_cngFBPayloadType))
*cngPayloadTypeHasChanged = true;
_cngPayloadType = _cngFBPayloadType;
if (cng_payload_type_ != -1 && cng_payload_type_ != cng_fb_payload_type_)
*cng_payload_type_has_changed = true;
cng_payload_type_ = cng_fb_payload_type_;
return true;
}else
{
} else {
// not CNG
if(_G722PayloadType == payloadType)
{
_lastReceivedG722 = true;
}else
{
_lastReceivedG722 = false;
if (g722_payload_type_ == payload_type) {
last_received_g722_ = true;
} else {
last_received_g722_ = false;
}
}
return false;
@ -154,59 +135,57 @@ bool RTPReceiverAudio::ShouldReportCsrcChanges(
return !TelephoneEventPayloadType(payload_type);
}
/*
Sample based or frame based codecs based on RFC 3551
NOTE! There is one error in the RFC, stating G.722 uses 8 bits/sample.
The correct rate is 4 bits/sample.
name of sampling default
encoding sample/frame bits/sample rate ms/frame ms/packet
Sample based audio codecs
DVI4 sample 4 var. 20
G722 sample 4 16,000 20
G726-40 sample 5 8,000 20
G726-32 sample 4 8,000 20
G726-24 sample 3 8,000 20
G726-16 sample 2 8,000 20
L8 sample 8 var. 20
L16 sample 16 var. 20
PCMA sample 8 var. 20
PCMU sample 8 var. 20
Frame based audio codecs
G723 frame N/A 8,000 30 30
G728 frame N/A 8,000 2.5 20
G729 frame N/A 8,000 10 20
G729D frame N/A 8,000 10 20
G729E frame N/A 8,000 10 20
GSM frame N/A 8,000 20 20
GSM-EFR frame N/A 8,000 20 20
LPC frame N/A 8,000 20 20
MPA frame N/A var. var.
G7221 frame N/A
*/
// - Sample based or frame based codecs based on RFC 3551
// -
// - NOTE! There is one error in the RFC, stating G.722 uses 8 bits/sample.
// - The correct rate is 4 bits/sample.
// -
// - name of sampling default
// - encoding sample/frame bits/sample rate ms/frame ms/packet
// -
// - Sample based audio codecs
// - DVI4 sample 4 var. 20
// - G722 sample 4 16,000 20
// - G726-40 sample 5 8,000 20
// - G726-32 sample 4 8,000 20
// - G726-24 sample 3 8,000 20
// - G726-16 sample 2 8,000 20
// - L8 sample 8 var. 20
// - L16 sample 16 var. 20
// - PCMA sample 8 var. 20
// - PCMU sample 8 var. 20
// -
// - Frame based audio codecs
// - G723 frame N/A 8,000 30 30
// - G728 frame N/A 8,000 2.5 20
// - G729 frame N/A 8,000 10 20
// - G729D frame N/A 8,000 10 20
// - G729E frame N/A 8,000 10 20
// - GSM frame N/A 8,000 20 20
// - GSM-EFR frame N/A 8,000 20 20
// - LPC frame N/A 8,000 20 20
// - MPA frame N/A var. var.
// -
// - G7221 frame N/A
WebRtc_Word32 RTPReceiverAudio::OnNewPayloadTypeCreated(
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payloadType,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payload_type,
const WebRtc_UWord32 frequency) {
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
if (ModuleRTPUtility::StringCompare(payloadName, "telephone-event", 15)) {
_telephoneEventPayloadType = payloadType;
if (ModuleRTPUtility::StringCompare(payload_name, "telephone-event", 15)) {
telephone_event_payload_type_ = payload_type;
}
if (ModuleRTPUtility::StringCompare(payloadName, "cn", 2)) {
if (ModuleRTPUtility::StringCompare(payload_name, "cn", 2)) {
// We can have four CNG payload types: 8000 Hz, 16000 Hz, 32000 Hz and 48000 Hz.
if (frequency == 8000) {
_cngNBPayloadType = payloadType;
cng_nb_payload_type_ = payload_type;
} else if (frequency == 16000) {
_cngWBPayloadType = payloadType;
cng_wb_payload_type_ = payload_type;
} else if (frequency == 32000) {
_cngSWBPayloadType = payloadType;
cng_swb_payload_type_ = payload_type;
} else if (frequency == 48000) {
_cngFBPayloadType = payloadType;
cng_fb_payload_type_ = payload_type;
} else {
assert(false);
return -1;
@ -216,50 +195,52 @@ WebRtc_Word32 RTPReceiverAudio::OnNewPayloadTypeCreated(
}
void RTPReceiverAudio::SendTelephoneEvents(
WebRtc_UWord8 numberOfNewEvents,
WebRtc_UWord8 newEvents[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS],
WebRtc_UWord8 numberOfRemovedEvents,
WebRtc_UWord8 removedEvents[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS]) {
WebRtc_UWord8 number_of_new_events,
WebRtc_UWord8 new_events[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS],
WebRtc_UWord8 number_of_removed_events,
WebRtc_UWord8 removed_events[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS]) {
// Copy these variables since we can't hold the critsect when we call the
// callback. _cbAudioFeedback and _id are immutable though.
bool telephoneEvent;
bool telephoneEventDetectEndOfTone;
// callback. cb_audio_feedback_ and id_ are immutable though.
bool telephone_event;
bool telephone_event_detect_end_of_tone;
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
telephoneEvent = _telephoneEvent;
telephoneEventDetectEndOfTone = _telephoneEventDetectEndOfTone;
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
telephone_event = telephone_event_;
telephone_event_detect_end_of_tone = telephone_event_detect_end_of_tone_;
}
if (telephoneEvent) {
for (int n = 0; n < numberOfNewEvents; ++n) {
_cbAudioFeedback->OnReceivedTelephoneEvent(
_id, newEvents[n], false);
if (telephone_event) {
for (int n = 0; n < number_of_new_events; ++n) {
cb_audio_feedback_->OnReceivedTelephoneEvent(id_, new_events[n], false);
}
if (telephoneEventDetectEndOfTone) {
for (int n = 0; n < numberOfRemovedEvents; ++n) {
_cbAudioFeedback->OnReceivedTelephoneEvent(
_id, removedEvents[n], true);
if (telephone_event_detect_end_of_tone) {
for (int n = 0; n < number_of_removed_events; ++n) {
cb_audio_feedback_->OnReceivedTelephoneEvent(
id_, removed_events[n], true);
}
}
}
}
WebRtc_Word32 RTPReceiverAudio::ParseRtpPacket(
WebRtcRTPHeader* rtpHeader,
const ModuleRTPUtility::PayloadUnion& specificPayload,
const bool isRed,
WebRtcRTPHeader* rtp_header,
const ModuleRTPUtility::PayloadUnion& specific_payload,
const bool is_red,
const WebRtc_UWord8* packet,
const WebRtc_UWord16 packetLength,
const WebRtc_Word64 timestampMs,
const bool isFirstPacket) {
const WebRtc_UWord16 packet_length,
const WebRtc_Word64 timestamp_ms,
const bool is_first_packet) {
const WebRtc_UWord8* payloadData =
ModuleRTPUtility::GetPayloadData(rtpHeader, packet);
const WebRtc_UWord16 payloadDataLength =
ModuleRTPUtility::GetPayloadDataLength(rtpHeader, packetLength);
const WebRtc_UWord8* payload_data =
ModuleRTPUtility::GetPayloadData(rtp_header, packet);
const WebRtc_UWord16 payload_data_length =
ModuleRTPUtility::GetPayloadDataLength(rtp_header, packet_length);
return ParseAudioCodecSpecific(rtpHeader, payloadData, payloadDataLength,
specificPayload.Audio, isRed);
return ParseAudioCodecSpecific(rtp_header,
payload_data,
payload_data_length,
specific_payload.Audio,
is_red);
}
WebRtc_Word32 RTPReceiverAudio::GetFrequencyHz() const {
@ -267,43 +248,41 @@ WebRtc_Word32 RTPReceiverAudio::GetFrequencyHz() const {
}
RTPAliveType RTPReceiverAudio::ProcessDeadOrAlive(
WebRtc_UWord16 lastPayloadLength) const {
WebRtc_UWord16 last_payload_length) const {
// Our CNG is 9 bytes; if it's a likely CNG the receiver needs to check
// kRtpNoRtp against NetEq speechType kOutputPLCtoCNG.
if(lastPayloadLength < 10) // our CNG is 9 bytes
{
// kRtpNoRtp against NetEq speech_type kOutputPLCtoCNG.
if (last_payload_length < 10) { // our CNG is 9 bytes
return kRtpNoRtp;
} else
{
} else {
return kRtpDead;
}
}
void RTPReceiverAudio::CheckPayloadChanged(
const WebRtc_Word8 payloadType,
ModuleRTPUtility::PayloadUnion* specificPayload,
bool* shouldResetStatistics,
bool* shouldDiscardChanges) {
*shouldDiscardChanges = false;
*shouldResetStatistics = false;
const WebRtc_Word8 payload_type,
ModuleRTPUtility::PayloadUnion* specific_payload,
bool* should_reset_statistics,
bool* should_discard_changes) {
*should_discard_changes = false;
*should_reset_statistics = false;
if (TelephoneEventPayloadType(payloadType)) {
if (TelephoneEventPayloadType(payload_type)) {
// Don't do callbacks for DTMF packets.
*shouldDiscardChanges = true;
*should_discard_changes = true;
return;
}
// frequency is updated for CNG
bool cngPayloadTypeHasChanged = false;
bool isCngPayloadType = CNGPayloadType(
payloadType, &specificPayload->Audio.frequency,
&cngPayloadTypeHasChanged);
bool cng_payload_type_has_changed = false;
bool is_cng_payload_type = CNGPayloadType(payload_type,
&specific_payload->Audio.frequency,
&cng_payload_type_has_changed);
*shouldResetStatistics = cngPayloadTypeHasChanged;
*should_reset_statistics = cng_payload_type_has_changed;
if (isCngPayloadType) {
if (is_cng_payload_type) {
// Don't do callbacks for DTMF packets.
*shouldDiscardChanges = true;
*should_discard_changes = true;
return;
}
}
@ -311,88 +290,82 @@ void RTPReceiverAudio::CheckPayloadChanged(
WebRtc_Word32 RTPReceiverAudio::InvokeOnInitializeDecoder(
RtpFeedback* callback,
const WebRtc_Word32 id,
const WebRtc_Word8 payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specificPayload) const {
if (-1 == callback->OnInitializeDecoder(
id, payloadType, payloadName, specificPayload.Audio.frequency,
specificPayload.Audio.channels, specificPayload.Audio.rate)) {
WEBRTC_TRACE(kTraceError, kTraceRtpRtcp, id,
const WebRtc_Word8 payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specific_payload) const {
if (-1 == callback->OnInitializeDecoder(id,
payload_type,
payload_name,
specific_payload.Audio.frequency,
specific_payload.Audio.channels,
specific_payload.Audio.rate)) {
WEBRTC_TRACE(kTraceError,
kTraceRtpRtcp,
id,
"Failed to create video decoder for payload type:%d",
payloadType);
payload_type);
return -1;
}
return 0;
}
// we are not allowed to have any critsects when calling CallbackOfReceivedPayloadData
WebRtc_Word32
RTPReceiverAudio::ParseAudioCodecSpecific(WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadLength,
const ModuleRTPUtility::AudioPayload& audioSpecific,
const bool isRED)
{
WebRtc_UWord8 newEvents[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS];
WebRtc_UWord8 removedEvents[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS];
WebRtc_UWord8 numberOfNewEvents = 0;
WebRtc_UWord8 numberOfRemovedEvents = 0;
// We are not allowed to have any critsects when calling data_callback.
WebRtc_Word32 RTPReceiverAudio::ParseAudioCodecSpecific(
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_length,
const ModuleRTPUtility::AudioPayload& audio_specific,
const bool is_red) {
WebRtc_UWord8 new_events[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS];
WebRtc_UWord8 removed_events[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS];
WebRtc_UWord8 number_of_new_events = 0;
WebRtc_UWord8 number_of_removed_events = 0;
if(payloadLength == 0)
{
if (payload_length == 0) {
return 0;
}
bool telephoneEventPacket = TelephoneEventPayloadType(rtpHeader->header.payloadType);
if(telephoneEventPacket)
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
bool telephone_event_packet =
TelephoneEventPayloadType(rtp_header->header.payloadType);
if (telephone_event_packet) {
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
// RFC 4733 2.3
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| event |E|R| volume | duration |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
if(payloadLength % 4 != 0)
{
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | event |E|R| volume | duration |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
if (payload_length % 4 != 0) {
return -1;
}
WebRtc_UWord8 numberOfEvents = payloadLength / 4;
WebRtc_UWord8 number_of_events = payload_length / 4;
// sanity
if(numberOfEvents >= MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS)
{
numberOfEvents = MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS;
if (number_of_events >= MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS) {
number_of_events = MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS;
}
for (int n = 0; n < numberOfEvents; n++)
{
bool end = (payloadData[(4*n)+1] & 0x80)? true:false;
for (int n = 0; n < number_of_events; ++n) {
bool end = (payload_data[(4 * n) + 1] & 0x80) ? true : false;
std::set<WebRtc_UWord8>::iterator event =
_telephoneEventReported.find(payloadData[4*n]);
telephone_event_reported_.find(payload_data[4 * n]);
if(event != _telephoneEventReported.end())
{
if (event != telephone_event_reported_.end()) {
// we have already seen this event
if(end)
{
removedEvents[numberOfRemovedEvents]= payloadData[4*n];
numberOfRemovedEvents++;
_telephoneEventReported.erase(payloadData[4*n]);
if (end) {
removed_events[number_of_removed_events] = payload_data[4 * n];
number_of_removed_events++;
telephone_event_reported_.erase(payload_data[4 * n]);
}
}else
{
if(end)
{
} else {
if (end) {
// don't add if it's an end of a tone
}else
{
newEvents[numberOfNewEvents] = payloadData[4*n];
numberOfNewEvents++;
_telephoneEventReported.insert(payloadData[4*n]);
} else {
new_events[number_of_new_events] = payload_data[4 * n];
number_of_new_events++;
telephone_event_reported_.insert(payload_data[4 * n]);
}
}
}
@ -404,60 +377,56 @@ RTPReceiverAudio::ParseAudioCodecSpecific(WebRtcRTPHeader* rtpHeader,
}
// This needs to be called without locks held.
SendTelephoneEvents(numberOfNewEvents, newEvents, numberOfRemovedEvents,
removedEvents);
SendTelephoneEvents(number_of_new_events,
new_events,
number_of_removed_events,
removed_events);
{
CriticalSectionScoped lock(_criticalSectionRtpReceiverAudio.get());
CriticalSectionScoped lock(critical_section_rtp_receiver_audio_.get());
if(! telephoneEventPacket )
{
_lastReceivedFrequency = audioSpecific.frequency;
if (!telephone_event_packet) {
last_received_frequency_ = audio_specific.frequency;
}
// Check if this is a CNG packet, receiver might want to know
WebRtc_UWord32 ignored;
bool alsoIgnored;
if(CNGPayloadType(rtpHeader->header.payloadType, &ignored, &alsoIgnored))
{
rtpHeader->type.Audio.isCNG=true;
rtpHeader->frameType = kAudioFrameCN;
}else
{
rtpHeader->frameType = kAudioFrameSpeech;
rtpHeader->type.Audio.isCNG=false;
bool also_ignored;
if (CNGPayloadType(rtp_header->header.payloadType,
&ignored,
&also_ignored)) {
rtp_header->type.Audio.isCNG = true;
rtp_header->frameType = kAudioFrameCN;
} else {
rtp_header->frameType = kAudioFrameSpeech;
rtp_header->type.Audio.isCNG = false;
}
// check if it's a DTMF event, hence something we can playout
if(telephoneEventPacket)
{
if(!_telephoneEventForwardToDecoder)
{
if (telephone_event_packet) {
if (!telephone_event_forward_to_decoder_) {
// don't forward event to decoder
return 0;
}
std::set<WebRtc_UWord8>::iterator first =
_telephoneEventReported.begin();
if(first != _telephoneEventReported.end() && *first > 15)
{
telephone_event_reported_.begin();
if (first != telephone_event_reported_.end() && *first > 15) {
// don't forward non DTMF events
return 0;
}
}
}
if(isRED && !(payloadData[0] & 0x80))
{
if (is_red && !(payload_data[0] & 0x80)) {
// We receive only one frame packed in a RED packet; remove the RED wrapper.
rtpHeader->header.payloadType = payloadData[0];
rtp_header->header.payloadType = payload_data[0];
// only one frame in the RED strip the one byte to help NetEq
return data_callback_->OnReceivedPayloadData(payloadData+1,
payloadLength-1,
rtpHeader);
return data_callback_->OnReceivedPayloadData(
payload_data + 1, payload_length - 1, rtp_header);
}
rtpHeader->type.Audio.channel = audioSpecific.channels;
rtp_header->type.Audio.channel = audio_specific.channels;
return data_callback_->OnReceivedPayloadData(
payloadData, payloadLength, rtpHeader);
payload_data, payload_length, rtp_header);
}
} // namespace webrtc

View File

@ -13,30 +13,30 @@
#include <set>
#include "rtp_receiver.h"
#include "rtp_receiver_strategy.h"
#include "rtp_rtcp_defines.h"
#include "rtp_utility.h"
#include "scoped_ptr.h"
#include "typedefs.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class CriticalSectionWrapper;
// Handles audio RTP packets. This class is thread-safe.
class RTPReceiverAudio : public RTPReceiverStrategy
{
class RTPReceiverAudio : public RTPReceiverStrategy {
public:
RTPReceiverAudio(const WebRtc_Word32 id,
RtpData* data_callback,
RtpAudioFeedback* incomingMessagesCallback);
RtpAudioFeedback* incoming_messages_callback);
WebRtc_UWord32 AudioFrequency() const;
// Outband TelephoneEvent (DTMF) detection
WebRtc_Word32 SetTelephoneEventStatus(const bool enable,
const bool forwardToDecoder,
const bool detectEndOfTone);
const bool forward_to_decoder,
const bool detect_end_of_tone);
// Is outband DTMF(AVT) turned on/off?
bool TelephoneEvent() const;
@ -44,95 +44,97 @@ public:
// Is forwarding of outband telephone events turned on/off?
bool TelephoneEventForwardToDecoder() const;
// Is TelephoneEvent configured with payload type payloadType
bool TelephoneEventPayloadType(const WebRtc_Word8 payloadType) const;
// Is TelephoneEvent configured with payload type payload_type
bool TelephoneEventPayloadType(const WebRtc_Word8 payload_type) const;
// Returns true if CNG is configured with payload type payloadType. If so,
// the frequency and cngPayloadTypeHasChanged are filled in.
bool CNGPayloadType(const WebRtc_Word8 payloadType,
// Returns true if CNG is configured with payload type payload_type. If so,
// the frequency and cng_payload_type_has_changed are filled in.
bool CNGPayloadType(const WebRtc_Word8 payload_type,
WebRtc_UWord32* frequency,
bool* cngPayloadTypeHasChanged);
bool* cng_payload_type_has_changed);
WebRtc_Word32 ParseRtpPacket(
WebRtcRTPHeader* rtpHeader,
const ModuleRTPUtility::PayloadUnion& specificPayload,
const bool isRed,
WebRtcRTPHeader* rtp_header,
const ModuleRTPUtility::PayloadUnion& specific_payload,
const bool is_red,
const WebRtc_UWord8* packet,
const WebRtc_UWord16 packetLength,
const WebRtc_Word64 timestampMs,
const bool isFirstPacket);
const WebRtc_UWord16 packet_length,
const WebRtc_Word64 timestamp_ms,
const bool is_first_packet);
WebRtc_Word32 GetFrequencyHz() const;
RTPAliveType ProcessDeadOrAlive(WebRtc_UWord16 lastPayloadLength) const;
RTPAliveType ProcessDeadOrAlive(WebRtc_UWord16 last_payload_length) const;
bool ShouldReportCsrcChanges(WebRtc_UWord8 payload_type) const;
WebRtc_Word32 OnNewPayloadTypeCreated(
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payloadType,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payload_type,
const WebRtc_UWord32 frequency);
WebRtc_Word32 InvokeOnInitializeDecoder(
RtpFeedback* callback,
const WebRtc_Word32 id,
const WebRtc_Word8 payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specificPayload) const;
const WebRtc_Word8 payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specific_payload) const;
// We do not allow codecs to have multiple payload types for audio, so we
// need to override the default behavior (which is to do nothing).
void PossiblyRemoveExistingPayloadType(
ModuleRTPUtility::PayloadTypeMap* payloadTypeMap,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const size_t payloadNameLength,
ModuleRTPUtility::PayloadTypeMap* payload_type_map,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const size_t payload_name_length,
const WebRtc_UWord32 frequency,
const WebRtc_UWord8 channels,
const WebRtc_UWord32 rate) const;
// We need to look out for special payload types here and sometimes reset
// statistics. In addition we sometimes need to tweak the frequency.
void CheckPayloadChanged(
const WebRtc_Word8 payloadType,
ModuleRTPUtility::PayloadUnion* specificPayload,
bool* shouldResetStatistics,
bool* shouldDiscardChanges);
void CheckPayloadChanged(const WebRtc_Word8 payload_type,
ModuleRTPUtility::PayloadUnion* specific_payload,
bool* should_reset_statistics,
bool* should_discard_changes);
private:
void SendTelephoneEvents(
WebRtc_UWord8 numberOfNewEvents,
WebRtc_UWord8 newEvents[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS],
WebRtc_UWord8 numberOfRemovedEvents,
WebRtc_UWord8 removedEvents[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS]);
WebRtc_UWord8 number_of_new_events,
WebRtc_UWord8 new_events[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS],
WebRtc_UWord8 number_of_removed_events,
WebRtc_UWord8 removed_events[MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS]);
WebRtc_Word32 ParseAudioCodecSpecific(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadLength,
const ModuleRTPUtility::AudioPayload& audioSpecific,
const bool isRED);
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_length,
const ModuleRTPUtility::AudioPayload& audio_specific,
const bool is_red);
WebRtc_Word32 _id;
scoped_ptr<CriticalSectionWrapper> _criticalSectionRtpReceiverAudio;
WebRtc_Word32 id_;
scoped_ptr<CriticalSectionWrapper> critical_section_rtp_receiver_audio_;
WebRtc_UWord32 _lastReceivedFrequency;
WebRtc_UWord32 last_received_frequency_;
bool _telephoneEvent;
bool _telephoneEventForwardToDecoder;
bool _telephoneEventDetectEndOfTone;
WebRtc_Word8 _telephoneEventPayloadType;
std::set<WebRtc_UWord8> _telephoneEventReported;
bool telephone_event_;
bool telephone_event_forward_to_decoder_;
bool telephone_event_detect_end_of_tone_;
WebRtc_Word8 telephone_event_payload_type_;
std::set<WebRtc_UWord8> telephone_event_reported_;
WebRtc_Word8 _cngNBPayloadType;
WebRtc_Word8 _cngWBPayloadType;
WebRtc_Word8 _cngSWBPayloadType;
WebRtc_Word8 _cngFBPayloadType;
WebRtc_Word8 _cngPayloadType;
WebRtc_Word8 cng_nb_payload_type_;
WebRtc_Word8 cng_wb_payload_type_;
WebRtc_Word8 cng_swb_payload_type_;
WebRtc_Word8 cng_fb_payload_type_;
WebRtc_Word8 cng_payload_type_;
// G722 is special since it uses the wrong number of RTP samples in timestamp VS. number of samples in the frame
WebRtc_Word8 _G722PayloadType;
bool _lastReceivedG722;
// G722 is special since it uses the wrong number of RTP samples in timestamp
// VS. number of samples in the frame
WebRtc_Word8 g722_payload_type_;
bool last_received_g722_;
RtpAudioFeedback* _cbAudioFeedback;
RtpAudioFeedback* cb_audio_feedback_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RECEIVER_AUDIO_H_

View File

@ -8,41 +8,41 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "rtp_receiver_video.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h"
#include <math.h>
#include <cassert> // assert
#include <cstring> // memcpy()
#include <math.h>
#include "critical_section_wrapper.h"
#include "receiver_fec.h"
#include "rtp_payload_registry.h"
#include "rtp_rtcp_impl.h"
#include "rtp_utility.h"
#include "trace.h"
#include "webrtc/modules/rtp_rtcp/source/receiver_fec.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
WebRtc_UWord32 BitRateBPS(WebRtc_UWord16 x )
{
WebRtc_UWord32 BitRateBPS(WebRtc_UWord16 x) {
return (x & 0x3fff) * WebRtc_UWord32(pow(10.0f, (2 + (x >> 14))));
}
RTPReceiverVideo::RTPReceiverVideo(
const WebRtc_Word32 id,
const RTPPayloadRegistry* rtpRtpPayloadRegistry,
const RTPPayloadRegistry* rtp_rtp_payload_registry,
RtpData* data_callback)
: RTPReceiverStrategy(data_callback),
_id(id),
_rtpRtpPayloadRegistry(rtpRtpPayloadRegistry),
_criticalSectionReceiverVideo(
id_(id),
rtp_rtp_payload_registry_(rtp_rtp_payload_registry),
critical_section_receiver_video_(
CriticalSectionWrapper::CreateCriticalSection()),
_currentFecFrameDecoded(false),
_receiveFEC(NULL) {
current_fec_frame_decoded_(false),
receive_fec_(NULL) {
}
RTPReceiverVideo::~RTPReceiverVideo() {
delete _criticalSectionReceiverVideo;
delete _receiveFEC;
delete critical_section_receiver_video_;
delete receive_fec_;
}
bool RTPReceiverVideo::ShouldReportCsrcChanges(
@ -52,35 +52,40 @@ bool RTPReceiverVideo::ShouldReportCsrcChanges(
}
WebRtc_Word32 RTPReceiverVideo::OnNewPayloadTypeCreated(
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payloadType,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payload_type,
const WebRtc_UWord32 frequency) {
if (ModuleRTPUtility::StringCompare(payloadName, "ULPFEC", 6)) {
if (ModuleRTPUtility::StringCompare(payload_name, "ULPFEC", 6)) {
// Enable FEC if not enabled.
if (_receiveFEC == NULL) {
_receiveFEC = new ReceiverFEC(_id, this);
if (receive_fec_ == NULL) {
receive_fec_ = new ReceiverFEC(id_, this);
}
_receiveFEC->SetPayloadTypeFEC(payloadType);
receive_fec_->SetPayloadTypeFEC(payload_type);
}
return 0;
}
WebRtc_Word32 RTPReceiverVideo::ParseRtpPacket(
WebRtcRTPHeader* rtpHeader,
const ModuleRTPUtility::PayloadUnion& specificPayload,
const bool isRed,
WebRtcRTPHeader* rtp_header,
const ModuleRTPUtility::PayloadUnion& specific_payload,
const bool is_red,
const WebRtc_UWord8* packet,
const WebRtc_UWord16 packetLength,
const WebRtc_Word64 timestampMs,
const bool isFirstPacket) {
const WebRtc_UWord8* payloadData =
ModuleRTPUtility::GetPayloadData(rtpHeader, packet);
const WebRtc_UWord16 payloadDataLength =
ModuleRTPUtility::GetPayloadDataLength(rtpHeader, packetLength);
return ParseVideoCodecSpecific(
rtpHeader, payloadData, payloadDataLength,
specificPayload.Video.videoCodecType, isRed, packet, packetLength,
timestampMs, isFirstPacket);
const WebRtc_UWord16 packet_length,
const WebRtc_Word64 timestamp_ms,
const bool is_first_packet) {
const WebRtc_UWord8* payload_data =
ModuleRTPUtility::GetPayloadData(rtp_header, packet);
const WebRtc_UWord16 payload_data_length =
ModuleRTPUtility::GetPayloadDataLength(rtp_header, packet_length);
return ParseVideoCodecSpecific(rtp_header,
payload_data,
payload_data_length,
specific_payload.Video.videoCodecType,
is_red,
packet,
packet_length,
timestamp_ms,
is_first_packet);
}
WebRtc_Word32 RTPReceiverVideo::GetFrequencyHz() const {
@ -88,22 +93,24 @@ WebRtc_Word32 RTPReceiverVideo::GetFrequencyHz() const {
}
RTPAliveType RTPReceiverVideo::ProcessDeadOrAlive(
WebRtc_UWord16 lastPayloadLength) const {
WebRtc_UWord16 last_payload_length) const {
return kRtpDead;
}
WebRtc_Word32 RTPReceiverVideo::InvokeOnInitializeDecoder(
RtpFeedback* callback,
const WebRtc_Word32 id,
const WebRtc_Word8 payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specificPayload) const {
const WebRtc_Word8 payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specific_payload) const {
// For video we just go with default values.
if (-1 == callback->OnInitializeDecoder(
id, payloadType, payloadName, kDefaultVideoFrequency, 1, 0)) {
WEBRTC_TRACE(kTraceError, kTraceRtpRtcp, id,
id, payload_type, payload_name, kDefaultVideoFrequency, 1, 0)) {
WEBRTC_TRACE(kTraceError,
kTraceRtpRtcp,
id,
"Failed to create video decoder for payload type:%d",
payloadType);
payload_type);
return -1;
}
return 0;
@ -113,269 +120,269 @@ WebRtc_Word32 RTPReceiverVideo::InvokeOnInitializeDecoder(
// we are not allowed to have any critsects when calling
// CallbackOfReceivedPayloadData
WebRtc_Word32 RTPReceiverVideo::ParseVideoCodecSpecific(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength,
const RtpVideoCodecTypes videoType,
const bool isRED,
const WebRtc_UWord8* incomingRtpPacket,
const WebRtc_UWord16 incomingRtpPacketSize,
const WebRtc_Word64 nowMS,
const bool isFirstPacket) {
WebRtc_Word32 retVal = 0;
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length,
const RtpVideoCodecTypes video_type,
const bool is_red,
const WebRtc_UWord8* incoming_rtp_packet,
const WebRtc_UWord16 incoming_rtp_packet_size,
const WebRtc_Word64 now_ms,
const bool is_first_packet) {
WebRtc_Word32 ret_val = 0;
_criticalSectionReceiverVideo->Enter();
critical_section_receiver_video_->Enter();
if (isRED) {
if(_receiveFEC == NULL) {
_criticalSectionReceiverVideo->Leave();
if (is_red) {
if (receive_fec_ == NULL) {
critical_section_receiver_video_->Leave();
return -1;
}
bool FECpacket = false;
retVal = _receiveFEC->AddReceivedFECPacket(
rtpHeader,
incomingRtpPacket,
payloadDataLength,
FECpacket);
if (retVal != -1) {
retVal = _receiveFEC->ProcessReceivedFEC();
ret_val = receive_fec_->AddReceivedFECPacket(
rtp_header, incoming_rtp_packet, payload_data_length, FECpacket);
if (ret_val != -1) {
ret_val = receive_fec_->ProcessReceivedFEC();
}
_criticalSectionReceiverVideo->Leave();
critical_section_receiver_video_->Leave();
if(retVal == 0 && FECpacket) {
if (ret_val == 0 && FECpacket) {
// Callback with the received FEC packet.
// The normal packets are delivered after parsing.
// This contains the original RTP packet header but with
// empty payload and data length.
rtpHeader->frameType = kFrameEmpty;
rtp_header->frameType = kFrameEmpty;
// We need this for the routing.
WebRtc_Word32 retVal = SetCodecType(videoType, rtpHeader);
if(retVal != 0) {
return retVal;
WebRtc_Word32 ret_val = SetCodecType(video_type, rtp_header);
if (ret_val != 0) {
return ret_val;
}
// Pass the length of FEC packets so that they can be accounted for in
// the bandwidth estimator.
retVal = data_callback_->OnReceivedPayloadData(NULL, payloadDataLength,
rtpHeader);
ret_val = data_callback_->OnReceivedPayloadData(
NULL, payload_data_length, rtp_header);
}
} else {
// will leave the _criticalSectionReceiverVideo critsect
retVal = ParseVideoCodecSpecificSwitch(rtpHeader,
payloadData,
payloadDataLength,
videoType,
isFirstPacket);
// will leave the critical_section_receiver_video_ critsect
ret_val = ParseVideoCodecSpecificSwitch(rtp_header,
payload_data,
payload_data_length,
video_type,
is_first_packet);
}
return retVal;
return ret_val;
}
WebRtc_Word32 RTPReceiverVideo::BuildRTPheader(
const WebRtcRTPHeader* rtpHeader,
WebRtc_UWord8* dataBuffer) const {
dataBuffer[0] = static_cast<WebRtc_UWord8>(0x80); // version 2
dataBuffer[1] = static_cast<WebRtc_UWord8>(rtpHeader->header.payloadType);
if (rtpHeader->header.markerBit) {
dataBuffer[1] |= kRtpMarkerBitMask; // MarkerBit is 1
const WebRtcRTPHeader* rtp_header,
WebRtc_UWord8* data_buffer) const {
data_buffer[0] = static_cast<WebRtc_UWord8>(0x80); // version 2
data_buffer[1] = static_cast<WebRtc_UWord8>(rtp_header->header.payloadType);
if (rtp_header->header.markerBit) {
data_buffer[1] |= kRtpMarkerBitMask; // MarkerBit is 1
}
ModuleRTPUtility::AssignUWord16ToBuffer(dataBuffer + 2,
rtpHeader->header.sequenceNumber);
ModuleRTPUtility::AssignUWord32ToBuffer(dataBuffer + 4,
rtpHeader->header.timestamp);
ModuleRTPUtility::AssignUWord32ToBuffer(dataBuffer + 8,
rtpHeader->header.ssrc);
ModuleRTPUtility::AssignUWord16ToBuffer(data_buffer + 2,
rtp_header->header.sequenceNumber);
ModuleRTPUtility::AssignUWord32ToBuffer(data_buffer + 4,
rtp_header->header.timestamp);
ModuleRTPUtility::AssignUWord32ToBuffer(data_buffer + 8,
rtp_header->header.ssrc);
WebRtc_Word32 rtpHeaderLength = 12;
WebRtc_Word32 rtp_header_length = 12;
// Add the CSRCs if any
if (rtpHeader->header.numCSRCs > 0) {
if (rtpHeader->header.numCSRCs > 16) {
if (rtp_header->header.numCSRCs > 0) {
if (rtp_header->header.numCSRCs > 16) {
// error
assert(false);
}
WebRtc_UWord8* ptr = &dataBuffer[rtpHeaderLength];
for (WebRtc_UWord32 i = 0; i < rtpHeader->header.numCSRCs; ++i) {
WebRtc_UWord8* ptr = &data_buffer[rtp_header_length];
for (WebRtc_UWord32 i = 0; i < rtp_header->header.numCSRCs; ++i) {
ModuleRTPUtility::AssignUWord32ToBuffer(ptr,
rtpHeader->header.arrOfCSRCs[i]);
rtp_header->header.arrOfCSRCs[i]);
ptr += 4;
}
dataBuffer[0] = (dataBuffer[0]&0xf0) | rtpHeader->header.numCSRCs;
data_buffer[0] = (data_buffer[0] & 0xf0) | rtp_header->header.numCSRCs;
// Update length of header
rtpHeaderLength += sizeof(WebRtc_UWord32)*rtpHeader->header.numCSRCs;
rtp_header_length += sizeof(WebRtc_UWord32) * rtp_header->header.numCSRCs;
}
return rtpHeaderLength;
return rtp_header_length;
}
WebRtc_Word32 RTPReceiverVideo::ReceiveRecoveredPacketCallback(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength) {
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length) {
// TODO(pwestin) Re-factor this to avoid the messy critsect handling.
_criticalSectionReceiverVideo->Enter();
critical_section_receiver_video_->Enter();
_currentFecFrameDecoded = true;
current_fec_frame_decoded_ = true;
ModuleRTPUtility::Payload* payload = NULL;
if (_rtpRtpPayloadRegistry->PayloadTypeToPayload(
rtpHeader->header.payloadType, payload) != 0) {
_criticalSectionReceiverVideo->Leave();
if (rtp_rtp_payload_registry_->PayloadTypeToPayload(
rtp_header->header.payloadType, payload) != 0) {
critical_section_receiver_video_->Leave();
return -1;
}
// here we can re-create the original lost packet so that we can use it for
// the relay we need to re-create the RED header too
WebRtc_UWord8 recoveredPacket[IP_PACKET_SIZE];
WebRtc_UWord16 rtpHeaderLength = (WebRtc_UWord16)BuildRTPheader(
rtpHeader, recoveredPacket);
WebRtc_UWord8 recovered_packet[IP_PACKET_SIZE];
WebRtc_UWord16 rtp_header_length =
(WebRtc_UWord16) BuildRTPheader(rtp_header, recovered_packet);
const WebRtc_UWord8 REDForFECHeaderLength = 1;
const WebRtc_UWord8 kREDForFECHeaderLength = 1;
// replace pltype
recoveredPacket[1] &= 0x80; // Reset.
recoveredPacket[1] += _rtpRtpPayloadRegistry->red_payload_type();
recovered_packet[1] &= 0x80; // Reset.
recovered_packet[1] += rtp_rtp_payload_registry_->red_payload_type();
// add RED header
recoveredPacket[rtpHeaderLength] = rtpHeader->header.payloadType;
recovered_packet[rtp_header_length] = rtp_header->header.payloadType;
// f-bit always 0
memcpy(recoveredPacket + rtpHeaderLength + REDForFECHeaderLength, payloadData,
payloadDataLength);
memcpy(recovered_packet + rtp_header_length + kREDForFECHeaderLength,
payload_data,
payload_data_length);
// A recovered packet can be the first packet, but we lack the ability to
// detect it at the moment since we do not store the history of recently
// received packets. Most codecs like VP8 deal with this in other ways.
bool isFirstPacket = false;
bool is_first_packet = false;
return ParseVideoCodecSpecificSwitch(
rtpHeader,
payloadData,
payloadDataLength,
rtp_header,
payload_data,
payload_data_length,
payload->typeSpecific.Video.videoCodecType,
isFirstPacket);
is_first_packet);
}
WebRtc_Word32 RTPReceiverVideo::SetCodecType(const RtpVideoCodecTypes videoType,
WebRtcRTPHeader* rtpHeader) const {
switch (videoType) {
WebRtc_Word32 RTPReceiverVideo::SetCodecType(
const RtpVideoCodecTypes video_type,
WebRtcRTPHeader* rtp_header) const {
switch (video_type) {
case kRtpNoVideo:
rtpHeader->type.Video.codec = kRTPVideoGeneric;
rtp_header->type.Video.codec = kRTPVideoGeneric;
break;
case kRtpVp8Video:
rtpHeader->type.Video.codec = kRTPVideoVP8;
rtp_header->type.Video.codec = kRTPVideoVP8;
break;
case kRtpFecVideo:
rtpHeader->type.Video.codec = kRTPVideoFEC;
rtp_header->type.Video.codec = kRTPVideoFEC;
break;
}
return 0;
}
WebRtc_Word32 RTPReceiverVideo::ParseVideoCodecSpecificSwitch(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength,
const RtpVideoCodecTypes videoType,
const bool isFirstPacket) {
WebRtc_Word32 retVal = SetCodecType(videoType, rtpHeader);
if (retVal != 0) {
_criticalSectionReceiverVideo->Leave();
return retVal;
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length,
const RtpVideoCodecTypes video_type,
const bool is_first_packet) {
WebRtc_Word32 ret_val = SetCodecType(video_type, rtp_header);
if (ret_val != 0) {
critical_section_receiver_video_->Leave();
return ret_val;
}
WEBRTC_TRACE(kTraceStream, kTraceRtpRtcp, _id, "%s(timestamp:%u)",
__FUNCTION__, rtpHeader->header.timestamp);
WEBRTC_TRACE(kTraceStream,
kTraceRtpRtcp,
id_,
"%s(timestamp:%u)",
__FUNCTION__,
rtp_header->header.timestamp);
// All receive functions release _criticalSectionReceiverVideo before
// All receive functions release critical_section_receiver_video_ before
// returning.
switch (videoType) {
switch (video_type) {
case kRtpNoVideo:
rtpHeader->type.Video.isFirstPacket = isFirstPacket;
return ReceiveGenericCodec(rtpHeader, payloadData, payloadDataLength);
rtp_header->type.Video.isFirstPacket = is_first_packet;
return ReceiveGenericCodec(rtp_header, payload_data, payload_data_length);
case kRtpVp8Video:
return ReceiveVp8Codec(rtpHeader, payloadData, payloadDataLength);
return ReceiveVp8Codec(rtp_header, payload_data, payload_data_length);
case kRtpFecVideo:
break;
}
_criticalSectionReceiverVideo->Leave();
critical_section_receiver_video_->Leave();
return -1;
}
WebRtc_Word32 RTPReceiverVideo::ReceiveVp8Codec(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength) {
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length) {
bool success;
ModuleRTPUtility::RTPPayload parsedPacket;
if (payloadDataLength == 0) {
ModuleRTPUtility::RTPPayload parsed_packet;
if (payload_data_length == 0) {
success = true;
parsedPacket.info.VP8.dataLength = 0;
parsed_packet.info.VP8.dataLength = 0;
} else {
ModuleRTPUtility::RTPPayloadParser rtpPayloadParser(kRtpVp8Video,
payloadData,
payloadDataLength,
_id);
ModuleRTPUtility::RTPPayloadParser rtp_payload_parser(
kRtpVp8Video, payload_data, payload_data_length, id_);
success = rtpPayloadParser.Parse(parsedPacket);
success = rtp_payload_parser.Parse(parsed_packet);
}
// from here down we only work on local data
_criticalSectionReceiverVideo->Leave();
critical_section_receiver_video_->Leave();
if (!success) {
return -1;
}
if (parsedPacket.info.VP8.dataLength == 0) {
if (parsed_packet.info.VP8.dataLength == 0) {
// we have an "empty" VP8 packet, it's ok, could be one way video
// Inform the jitter buffer about this packet.
rtpHeader->frameType = kFrameEmpty;
if (data_callback_->OnReceivedPayloadData(NULL, 0, rtpHeader) != 0) {
rtp_header->frameType = kFrameEmpty;
if (data_callback_->OnReceivedPayloadData(NULL, 0, rtp_header) != 0) {
return -1;
}
return 0;
}
rtpHeader->frameType = (parsedPacket.frameType == ModuleRTPUtility::kIFrame) ?
kVideoFrameKey : kVideoFrameDelta;
rtp_header->frameType = (parsed_packet.frameType == ModuleRTPUtility::kIFrame)
? kVideoFrameKey : kVideoFrameDelta;
RTPVideoHeaderVP8 *toHeader = &rtpHeader->type.Video.codecHeader.VP8;
ModuleRTPUtility::RTPPayloadVP8 *fromHeader = &parsedPacket.info.VP8;
RTPVideoHeaderVP8* to_header = &rtp_header->type.Video.codecHeader.VP8;
ModuleRTPUtility::RTPPayloadVP8* from_header = &parsed_packet.info.VP8;
rtpHeader->type.Video.isFirstPacket = fromHeader->beginningOfPartition
&& (fromHeader->partitionID == 0);
toHeader->nonReference = fromHeader->nonReferenceFrame;
toHeader->pictureId = fromHeader->hasPictureID ? fromHeader->pictureID :
kNoPictureId;
toHeader->tl0PicIdx = fromHeader->hasTl0PicIdx ? fromHeader->tl0PicIdx :
kNoTl0PicIdx;
if (fromHeader->hasTID) {
toHeader->temporalIdx = fromHeader->tID;
toHeader->layerSync = fromHeader->layerSync;
rtp_header->type.Video.isFirstPacket =
from_header->beginningOfPartition && (from_header->partitionID == 0);
to_header->nonReference = from_header->nonReferenceFrame;
to_header->pictureId =
from_header->hasPictureID ? from_header->pictureID : kNoPictureId;
to_header->tl0PicIdx =
from_header->hasTl0PicIdx ? from_header->tl0PicIdx : kNoTl0PicIdx;
if (from_header->hasTID) {
to_header->temporalIdx = from_header->tID;
to_header->layerSync = from_header->layerSync;
} else {
toHeader->temporalIdx = kNoTemporalIdx;
toHeader->layerSync = false;
to_header->temporalIdx = kNoTemporalIdx;
to_header->layerSync = false;
}
toHeader->keyIdx = fromHeader->hasKeyIdx ? fromHeader->keyIdx : kNoKeyIdx;
to_header->keyIdx = from_header->hasKeyIdx ? from_header->keyIdx : kNoKeyIdx;
toHeader->frameWidth = fromHeader->frameWidth;
toHeader->frameHeight = fromHeader->frameHeight;
to_header->frameWidth = from_header->frameWidth;
to_header->frameHeight = from_header->frameHeight;
toHeader->partitionId = fromHeader->partitionID;
toHeader->beginningOfPartition = fromHeader->beginningOfPartition;
to_header->partitionId = from_header->partitionID;
to_header->beginningOfPartition = from_header->beginningOfPartition;
if(data_callback_->OnReceivedPayloadData(parsedPacket.info.VP8.data,
parsedPacket.info.VP8.dataLength,
rtpHeader) != 0) {
if (data_callback_->OnReceivedPayloadData(parsed_packet.info.VP8.data,
parsed_packet.info.VP8.dataLength,
rtp_header) != 0) {
return -1;
}
return 0;
}
WebRtc_Word32 RTPReceiverVideo::ReceiveGenericCodec(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength) {
rtpHeader->frameType = kVideoFrameKey;
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length) {
rtp_header->frameType = kVideoFrameKey;
_criticalSectionReceiverVideo->Leave();
critical_section_receiver_video_->Leave();
if (data_callback_->OnReceivedPayloadData(payloadData, payloadDataLength,
rtpHeader) != 0) {
if (data_callback_->OnReceivedPayloadData(
payload_data, payload_data_length, rtp_header) != 0) {
return -1;
}
return 0;

View File

@ -11,12 +11,12 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RECEIVER_VIDEO_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RECEIVER_VIDEO_H_
#include "bitrate.h"
#include "rtp_receiver_strategy.h"
#include "rtp_rtcp_defines.h"
#include "rtp_utility.h"
#include "scoped_ptr.h"
#include "typedefs.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class CriticalSectionWrapper;
@ -35,7 +35,7 @@ class RTPReceiverVideo : public RTPReceiverStrategy {
WebRtc_Word32 ParseRtpPacket(
WebRtcRTPHeader* rtp_header,
const ModuleRTPUtility::PayloadUnion& specificPayload,
const ModuleRTPUtility::PayloadUnion& specific_payload,
const bool is_red,
const WebRtc_UWord8* packet,
const WebRtc_UWord16 packet_length,
@ -44,71 +44,72 @@ class RTPReceiverVideo : public RTPReceiverStrategy {
WebRtc_Word32 GetFrequencyHz() const;
RTPAliveType ProcessDeadOrAlive(WebRtc_UWord16 lastPayloadLength) const;
RTPAliveType ProcessDeadOrAlive(WebRtc_UWord16 last_payload_length) const;
bool ShouldReportCsrcChanges(WebRtc_UWord8 payload_type) const;
WebRtc_Word32 OnNewPayloadTypeCreated(
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payloadType,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const WebRtc_Word8 payload_type,
const WebRtc_UWord32 frequency);
WebRtc_Word32 InvokeOnInitializeDecoder(
RtpFeedback* callback,
const WebRtc_Word32 id,
const WebRtc_Word8 payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specificPayload) const;
const WebRtc_Word8 payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const ModuleRTPUtility::PayloadUnion& specific_payload) const;
virtual WebRtc_Word32 ReceiveRecoveredPacketCallback(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength);
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length);
void SetPacketOverHead(WebRtc_UWord16 packetOverHead);
void SetPacketOverHead(WebRtc_UWord16 packet_over_head);
protected:
WebRtc_Word32 SetCodecType(const RtpVideoCodecTypes videoType,
WebRtcRTPHeader* rtpHeader) const;
WebRtc_Word32 SetCodecType(const RtpVideoCodecTypes video_type,
WebRtcRTPHeader* rtp_header) const;
WebRtc_Word32 ParseVideoCodecSpecificSwitch(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength,
const RtpVideoCodecTypes videoType,
const bool isFirstPacket);
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length,
const RtpVideoCodecTypes video_type,
const bool is_first_packet);
WebRtc_Word32 ReceiveGenericCodec(WebRtcRTPHeader *rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength);
WebRtc_Word32 ReceiveGenericCodec(WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length);
WebRtc_Word32 ReceiveVp8Codec(WebRtcRTPHeader *rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength);
WebRtc_Word32 ReceiveVp8Codec(WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length);
WebRtc_Word32 BuildRTPheader(const WebRtcRTPHeader* rtpHeader,
WebRtc_UWord8* dataBuffer) const;
WebRtc_Word32 BuildRTPheader(const WebRtcRTPHeader* rtp_header,
WebRtc_UWord8* data_buffer) const;
private:
WebRtc_Word32 ParseVideoCodecSpecific(
WebRtcRTPHeader* rtpHeader,
const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadDataLength,
const RtpVideoCodecTypes videoType,
const bool isRED,
const WebRtc_UWord8* incomingRtpPacket,
const WebRtc_UWord16 incomingRtpPacketSize,
const WebRtc_Word64 nowMS,
const bool isFirstPacket);
WebRtcRTPHeader* rtp_header,
const WebRtc_UWord8* payload_data,
const WebRtc_UWord16 payload_data_length,
const RtpVideoCodecTypes video_type,
const bool is_red,
const WebRtc_UWord8* incoming_rtp_packet,
const WebRtc_UWord16 incoming_rtp_packet_size,
const WebRtc_Word64 now_ms,
const bool is_first_packet);
WebRtc_Word32 _id;
const RTPPayloadRegistry* _rtpRtpPayloadRegistry;
WebRtc_Word32 id_;
const RTPPayloadRegistry* rtp_rtp_payload_registry_;
CriticalSectionWrapper* _criticalSectionReceiverVideo;
CriticalSectionWrapper* critical_section_receiver_video_;
// FEC
bool _currentFecFrameDecoded;
ReceiverFEC* _receiveFEC;
bool current_fec_frame_decoded_;
ReceiverFEC* receive_fec_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RECEIVER_VIDEO_H_