1. Make a clear distinction between codec-internal FEC and RED; confusing mentions of FEC in the old code are replaced by RED.
2. Add two new APIs to configure codec-internal FEC.
3. Add a test and listen to the results. This is based on modifying EncodeDecodeTest and deriving a new class from it. The new ACM gives good results. The old ACM does not use NetEq 4, so FEC won't be decoded.

BUG=
R=tina.legrand@webrtc.org, turaj@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/11759004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6233 4adac7df-926f-26a2-2b94-8c16560cd09d
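For reference, a minimal usage sketch of the two new codec-internal FEC APIs. This is a sketch only: it assumes SetCodecFEC() and SetPacketLossRate() are exposed on the public AudioCodingModule interface (mirroring the AudioCodingModuleImpl declarations in this change), that a codec with internal FEC such as Opus is already registered as the send codec, and the helper name is made up:

  // Sketch: configure codec-internal FEC on an ACM instance (hypothetical
  // helper; error handling trimmed).
  void ConfigureCodecInternalFec(webrtc::AudioCodingModule* acm) {
    // Enable codec-internal FEC; returns -1 if the send codec has no internal
    // FEC, or if RED is already enabled.
    if (acm->SetCodecFEC(true) != 0) {
      return;
    }
    // Tell the encoder the expected packet loss rate (0-100, in percent) so it
    // can size its in-band redundancy accordingly.
    acm->SetPacketLossRate(10);
  }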
@@ -59,6 +59,7 @@ ACMGenericCodec::ACMGenericCodec()
num_lpc_params_(kNewCNGNumPLCParams),
sent_cn_previous_(false),
prev_frame_cng_(0),
has_internal_fec_(false),
neteq_decode_lock_(NULL),
codec_wrapper_lock_(*RWLockWrapper::CreateRWLock()),
last_timestamp_(0xD87F3F9F),

@@ -560,6 +560,46 @@ class ACMGenericCodec {
//
virtual AudioDecoder* Decoder(int /* codec_id */) { return NULL; }

///////////////////////////////////////////////////////////////////////////
// bool HasInternalFEC()
// Used to check if the codec has internal FEC.
//
// Return value:
// true if the codec has an internal FEC, e.g. Opus.
// false otherwise.
//
bool HasInternalFEC() const { return has_internal_fec_; }

///////////////////////////////////////////////////////////////////////////
// int SetFEC();
// Sets the codec internal FEC. No effects on codecs that do not provide
// internal FEC.
//
// Input:
// -enable_fec : if true FEC will be enabled otherwise the FEC is
// disabled.
//
// Return value:
// -1 if failed, or the codec does not support FEC
// 0 if succeeded.
//
virtual int SetFEC(bool /* enable_fec */) { return -1; }

///////////////////////////////////////////////////////////////////////////
// int SetPacketLossRate()
// Sets expected packet loss rate for encoding. Some encoders provide packet
// loss gnostic encoding to make stream less sensitive to packet losses,
// through e.g., FEC. No effects on codecs that do not provide such encoding.
//
// Input:
// -loss_rate : expected packet loss rate (0 -- 100 inclusive).
//
// Return value:
// -1 if failed, or codec does not support packet loss gnostic encoding,
// 0 if succeeded.
//
virtual int SetPacketLossRate(int /* loss_rate */) { return -1; }

protected:
///////////////////////////////////////////////////////////////////////////
// All the functions with FunctionNameSafe(...) contain the actual
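As an illustration of the hooks documented above, a hypothetical subclass sketch follows. "ACMFooCodec" is a placeholder name, only the FEC-related members are shown, and the remaining virtual methods a real ACMGenericCodec subclass must implement are omitted:

  // Hypothetical ACMGenericCodec subclass for a codec with internal FEC.
  class ACMFooCodec : public ACMGenericCodec {
   public:
    ACMFooCodec() : fec_enabled_(false), packet_loss_rate_(0) {
      has_internal_fec_ = true;  // Advertise internal FEC to the ACM.
    }
    virtual int SetFEC(bool enable_fec) OVERRIDE {
      // A real wrapper would forward to its encoder here and return -1 on
      // failure; this stub always succeeds.
      fec_enabled_ = enable_fec;
      return 0;
    }
    virtual int SetPacketLossRate(int loss_rate) OVERRIDE {
      packet_loss_rate_ = loss_rate;  // Expected loss rate, 0-100 percent.
      return 0;
    }
   private:
    bool fec_enabled_;
    int packet_loss_rate_;
  };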
@@ -899,6 +939,9 @@ class ACMGenericCodec {
bool sent_cn_previous_;
int16_t prev_frame_cng_;

// FEC.
bool has_internal_fec_;

WebRtcACMCodecParams encoder_params_;

// Used as a global lock for all available decoders

@@ -75,6 +75,8 @@ ACMOpus::ACMOpus(int16_t codec_id)
// Opus has internal DTX, but we dont use it for now.
has_internal_dtx_ = false;

has_internal_fec_ = true;

if (codec_id_ != ACMCodecDB::kOpus) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
"Wrong codec id for Opus.");
@@ -198,6 +200,31 @@ int16_t ACMOpus::SetBitRateSafe(const int32_t rate) {
return -1;
}

int ACMOpus::SetFEC(bool enable_fec) {
// Ask the encoder to enable FEC.
if (enable_fec) {
if (WebRtcOpus_EnableFec(encoder_inst_ptr_) == 0) {
fec_enabled_ = true;
return 0;
}
} else {
if (WebRtcOpus_DisableFec(encoder_inst_ptr_) == 0) {
fec_enabled_ = false;
return 0;
}
}
return -1;
}

int ACMOpus::SetPacketLossRate(int loss_rate) {
// Ask the encoder to change the target packet loss rate.
if (WebRtcOpus_SetPacketLossRate(encoder_inst_ptr_, loss_rate) == 0) {
packet_loss_rate_ = loss_rate;
return 0;
}
return -1;
}

#endif // WEBRTC_CODEC_OPUS

} // namespace acm2
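For context, the WebRtcOpus_* calls used above can also be exercised directly. A minimal sketch that only uses the functions appearing in this change; the helper name is made up, the OpusEncInst pointer type is assumed from the wrapper's usual API, and encoder creation/teardown are omitted:

  // Sketch: enable Opus in-band FEC on an existing encoder instance and set
  // the loss rate it should protect against. Both calls return 0 on success.
  int EnableOpusFec(OpusEncInst* encoder, int expected_loss_percent) {
    if (WebRtcOpus_EnableFec(encoder) != 0) {
      return -1;  // FEC could not be enabled.
    }
    return WebRtcOpus_SetPacketLossRate(encoder, expected_loss_percent);
  }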
@@ -32,6 +32,10 @@ class ACMOpus : public ACMGenericCodec {

int16_t InternalInitEncoder(WebRtcACMCodecParams *codec_params);

virtual int SetFEC(bool enable_fec) OVERRIDE;

virtual int SetPacketLossRate(int loss_rate) OVERRIDE;

protected:
void DestructEncoderSafe();

@@ -45,6 +49,9 @@ class ACMOpus : public ACMGenericCodec {
uint16_t sample_freq_;
uint16_t bitrate_;
int channels_;

bool fec_enabled_;
int packet_loss_rate_;
};

} // namespace acm2

@@ -39,11 +39,11 @@ enum {
kMaxPacketSize = 2560
};

// Maximum number of payloads that can be packed in one RED payload. For
// regular FEC, we only pack two payloads. In case of dual-streaming, in worst
// case we might pack 3 payloads in one RED payload.
// Maximum number of payloads that can be packed in one RED packet. For
// regular RED, we only pack two payloads. In case of dual-streaming, in worst
// case we might pack 3 payloads in one RED packet.
enum {
kNumFecFragmentationVectors = 2,
kNumRedFragmentationVectors = 2,
kMaxNumFragmentationVectors = 3
};
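For orientation, this is roughly how the fragmentation header is filled for a regular two-fragment RED packet in ProcessSingleStream() further down. A simplified sketch with illustrative values; send_codec_payload_type is a made-up name for the previous payload's type, and fragmentation_ is assumed to have been sized via ResetFragmentation() as in this file:

  // Sketch: fragment 0 carries the new primary payload, fragment 1 the stored
  // redundant payload from the previous packet.
  fragmentation_.fragmentationVectorSize = kNumRedFragmentationVectors;  // 2
  fragmentation_.fragmentationPlType[0] = current_payload_type;     // new
  fragmentation_.fragmentationPlType[1] = send_codec_payload_type;  // redundant
  fragmentation_.fragmentationTimeDiff[0] = 0;
  fragmentation_.fragmentationTimeDiff[1] =
      static_cast<uint16_t>(rtp_timestamp - last_red_timestamp_);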
@@ -136,8 +136,9 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(
acm_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
vad_callback_(NULL),
is_first_red_(true),
fec_enabled_(false),
last_fec_timestamp_(0),
red_enabled_(false),
last_red_timestamp_(0),
codec_fec_enabled_(false),
previous_pltype_(255),
aux_rtp_header_(NULL),
receiver_initialized_(false),
@@ -349,7 +350,7 @@ int AudioCodingModuleImpl::ProcessDualStream() {
int16_t len_bytes = MAX_PAYLOAD_SIZE_BYTE;
WebRtcACMEncodingType encoding_type;
if (secondary_encoder_->Encode(red_buffer_, &len_bytes,
&last_fec_timestamp_,
&last_red_timestamp_,
&encoding_type) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"ProcessDual(): Encoding of secondary encoder Failed");
@@ -372,7 +373,7 @@ int AudioCodingModuleImpl::ProcessDualStream() {
index_primary = secondary_ready_to_encode ?
TimestampLessThan(primary_timestamp, secondary_timestamp) : 0;
index_primary += has_previous_payload ?
TimestampLessThan(primary_timestamp, last_fec_timestamp_) : 0;
TimestampLessThan(primary_timestamp, last_red_timestamp_) : 0;
}

if (secondary_ready_to_encode) {
@@ -384,7 +385,7 @@ int AudioCodingModuleImpl::ProcessDualStream() {

if (has_previous_payload) {
index_previous_secondary = primary_ready_to_encode ?
(1 - TimestampLessThan(primary_timestamp, last_fec_timestamp_)) : 0;
(1 - TimestampLessThan(primary_timestamp, last_red_timestamp_)) : 0;
// If secondary is ready it always have a timestamp larger than previous
// secondary. So the index is either 0 or 1.
index_previous_secondary += secondary_ready_to_encode ? 1 : 0;
@@ -405,7 +406,7 @@ int AudioCodingModuleImpl::ProcessDualStream() {
} else if (index_secondary == 0) {
current_timestamp = secondary_timestamp;
} else {
current_timestamp = last_fec_timestamp_;
current_timestamp = last_red_timestamp_;
}

fragmentation_.fragmentationVectorSize = 0;
@@ -420,7 +421,7 @@ int AudioCodingModuleImpl::ProcessDualStream() {
fragmentation_.fragmentationPlType[index_previous_secondary] =
secondary_send_codec_inst_.pltype;
fragmentation_.fragmentationTimeDiff[index_previous_secondary] =
static_cast<uint16_t>(current_timestamp - last_fec_timestamp_);
static_cast<uint16_t>(current_timestamp - last_red_timestamp_);
fragmentation_.fragmentationVectorSize++;
}
@@ -462,7 +463,7 @@ int AudioCodingModuleImpl::ProcessDualStream() {
{
CriticalSectionScoped lock(callback_crit_sect_);
if (packetization_callback_ != NULL) {
// Callback with payload data, including redundant data (FEC/RED).
// Callback with payload data, including redundant data (RED).
if (packetization_callback_->SendData(kAudioFrameSpeech,
my_red_payload_type,
current_timestamp, stream,
@@ -495,7 +496,7 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
FrameType frame_type = kAudioFrameSpeech;
uint8_t current_payload_type = 0;
bool has_data_to_send = false;
bool fec_active = false;
bool red_active = false;
RTPFragmentationHeader my_fragmentation;

// Keep the scope of the ACM critical section limited.
@@ -562,15 +563,15 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
// Redundancy encode is done here. The two bitstreams packetized into
// one RTP packet and the fragmentation points are set.
// Only apply RED on speech data.
if ((fec_enabled_) &&
if ((red_enabled_) &&
((encoding_type == kActiveNormalEncoded) ||
(encoding_type == kPassiveNormalEncoded))) {
// FEC is enabled within this scope.
// RED is enabled within this scope.
//
// Note that, a special solution exists for iSAC since it is the only
// codec for which GetRedPayload has a non-empty implementation.
//
// Summary of the FEC scheme below (use iSAC as example):
// Summary of the RED scheme below (use iSAC as example):
//
// 1st (is_first_red_ is true) encoded iSAC frame (primary #1) =>
// - call GetRedPayload() and store redundancy for packet #1 in
@@ -581,7 +582,7 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
// - store primary #2 in 1st fragment of RED buffer and send the
// combined packet
// - the transmitted packet contains primary #2 (new) and
// reduncancy for packet #1 (old)
// redundancy for packet #1 (old)
// - call GetRed_Payload() and store redundancy for packet #2 in
// second fragment of RED buffer
//
@@ -604,19 +605,19 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
//
// Hence, even if every second packet is dropped, perfect
// reconstruction is possible.
fec_active = true;
red_active = true;

has_data_to_send = false;
// Skip the following part for the first packet in a RED session.
if (!is_first_red_) {
// Rearrange stream such that FEC packets are included.
// Rearrange stream such that RED packets are included.
// Replace stream now that we have stored current stream.
memcpy(stream + fragmentation_.fragmentationOffset[1], red_buffer_,
fragmentation_.fragmentationLength[1]);
// Update the fragmentation time difference vector, in number of
// timestamps.
uint16_t time_since_last = static_cast<uint16_t>(
rtp_timestamp - last_fec_timestamp_);
rtp_timestamp - last_red_timestamp_);

// Update fragmentation vectors.
fragmentation_.fragmentationPlType[1] =
@@ -630,7 +631,7 @@ int AudioCodingModuleImpl::ProcessSingleStream() {

// Insert new packet payload type.
fragmentation_.fragmentationPlType[0] = current_payload_type;
last_fec_timestamp_ = rtp_timestamp;
last_red_timestamp_ = rtp_timestamp;

// Can be modified by the GetRedPayload() call if iSAC is utilized.
red_length_bytes = length_bytes;
@@ -650,7 +651,7 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
if (codecs_[current_send_codec_idx_]->GetRedPayload(
red_buffer_, &red_length_bytes) == -1) {
// The codec was not iSAC => use current encoder output as redundant
// data instead (trivial FEC scheme).
// data instead (trivial RED scheme).
memcpy(red_buffer_, stream, red_length_bytes);
}

@@ -658,7 +659,7 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
// Update payload type with RED payload type.
current_payload_type = red_pltype_;
// We have packed 2 payloads.
fragmentation_.fragmentationVectorSize = kNumFecFragmentationVectors;
fragmentation_.fragmentationVectorSize = kNumRedFragmentationVectors;

// Copy to local variable, as it will be used outside ACM lock.
my_fragmentation.CopyFrom(fragmentation_);
@@ -672,8 +673,8 @@ int AudioCodingModuleImpl::ProcessSingleStream() {
CriticalSectionScoped lock(callback_crit_sect_);

if (packetization_callback_ != NULL) {
if (fec_active) {
// Callback with payload data, including redundant data (FEC/RED).
if (red_active) {
// Callback with payload data, including redundant data (RED).
packetization_callback_->SendData(frame_type, current_payload_type,
rtp_timestamp, stream, length_bytes,
&my_fragmentation);
@@ -713,14 +714,14 @@ int AudioCodingModuleImpl::InitializeSender() {
}
}

// Initialize FEC/RED.
// Initialize RED.
is_first_red_ = true;
if (fec_enabled_ || secondary_encoder_.get() != NULL) {
if (red_enabled_ || secondary_encoder_.get() != NULL) {
if (red_buffer_ != NULL) {
memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);
}
if (fec_enabled_) {
ResetFragmentation(kNumFecFragmentationVectors);
if (red_enabled_) {
ResetFragmentation(kNumRedFragmentationVectors);
} else {
ResetFragmentation(0);
}
@@ -1031,10 +1032,20 @@ int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {

// Everything is fine so we can replace the previous codec with this one.
if (send_codec_registered_) {
// If we change codec we start fresh with FEC.
// If we change codec we start fresh with RED.
// This is not strictly required by the standard.
is_first_red_ = true;
codec_ptr->SetVAD(&dtx_enabled_, &vad_enabled_, &vad_mode_);

if (!codec_ptr->HasInternalFEC()) {
codec_fec_enabled_ = false;
} else {
if (codec_ptr->SetFEC(codec_fec_enabled_) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot set codec FEC");
return -1;
}
}
}

current_send_codec_idx_ = codec_id;
@@ -1120,8 +1131,18 @@ int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
}
send_codec_inst_.rate = send_codec.rate;
}
previous_pltype_ = send_codec_inst_.pltype;

if (!codecs_[codec_id]->HasInternalFEC()) {
codec_fec_enabled_ = false;
} else {
if (codecs_[codec_id]->SetFEC(codec_fec_enabled_) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot set codec FEC");
return -1;
}
}

previous_pltype_ = send_codec_inst_.pltype;
return 0;
}
}
@@ -1384,41 +1405,86 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
}

/////////////////////////////////////////
// (FEC) Forward Error Correction
// (RED) Redundant Coding
//

bool AudioCodingModuleImpl::FECStatus() const {
bool AudioCodingModuleImpl::REDStatus() const {
CriticalSectionScoped lock(acm_crit_sect_);
return fec_enabled_;

return red_enabled_;
}

// Configure FEC status i.e on/off.
int AudioCodingModuleImpl::SetFECStatus(
// Configure RED status i.e on/off.
int AudioCodingModuleImpl::SetREDStatus(
#ifdef WEBRTC_CODEC_RED
bool enable_fec) {
bool enable_red) {
CriticalSectionScoped lock(acm_crit_sect_);

if (fec_enabled_ != enable_fec) {
if (enable_red == true && codec_fec_enabled_ == true) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
"Codec internal FEC and RED cannot be co-enabled.");
return -1;
}

if (red_enabled_ != enable_red) {
// Reset the RED buffer.
memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);

// Reset fragmentation buffers.
ResetFragmentation(kNumFecFragmentationVectors);
// Set fec_enabled_.
fec_enabled_ = enable_fec;
ResetFragmentation(kNumRedFragmentationVectors);
// Set red_enabled_.
red_enabled_ = enable_red;
}
is_first_red_ = true; // Make sure we restart FEC.
is_first_red_ = true; // Make sure we restart RED.
return 0;
#else
bool /* enable_fec */) {
fec_enabled_ = false;
bool /* enable_red */) {
red_enabled_ = false;
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
" WEBRTC_CODEC_RED is undefined => fec_enabled_ = %d",
fec_enabled_);
" WEBRTC_CODEC_RED is undefined => red_enabled_ = %d",
red_enabled_);
return -1;
#endif
}

/////////////////////////////////////////
// (FEC) Forward Error Correction (codec internal)
//

bool AudioCodingModuleImpl::CodecFEC() const {
return codec_fec_enabled_;
}

int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
CriticalSectionScoped lock(acm_crit_sect_);

if (enable_codec_fec == true && red_enabled_ == true) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
"Codec internal FEC and RED cannot be co-enabled.");
return -1;
}

// Set codec FEC.
if (HaveValidEncoder("SetCodecFEC") &&
codecs_[current_send_codec_idx_]->SetFEC(enable_codec_fec) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Set codec internal FEC failed.");
return -1;
}
codec_fec_enabled_ = enable_codec_fec;
return 0;
}

int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
if (HaveValidEncoder("SetPacketLossRate") &&
codecs_[current_send_codec_idx_]->SetPacketLossRate(loss_rate) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Set packet loss rate failed.");
return -1;
}
return 0;
}
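Taken together, SetREDStatus() and SetCodecFEC() enforce that RED and codec-internal FEC are never on at the same time. A small sketch of the resulting behavior (assuming both setters are reachable through the public AudioCodingModule interface; the function name is made up):

  // Sketch: RED and codec-internal FEC are mutually exclusive.
  void RedVersusCodecFec(webrtc::AudioCodingModule* acm) {
    acm->SetCodecFEC(true);   // Returns 0 if the send codec has internal FEC.
    acm->SetREDStatus(true);  // Returns -1: codec FEC is already enabled.
    acm->SetCodecFEC(false);  // Disable codec FEC first...
    acm->SetREDStatus(true);  // ...now succeeds (if WEBRTC_CODEC_RED is defined).
  }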

/////////////////////////////////////////
// (VAD) Voice Activity Detection
//

@@ -92,14 +92,27 @@ class AudioCodingModuleImpl : public AudioCodingModule {
int Add10MsData(const AudioFrame& audio_frame);

/////////////////////////////////////////
// (FEC) Forward Error Correction
// (RED) Redundant Coding
//

// Configure FEC status i.e on/off.
int SetFECStatus(bool enable_fec);
// Configure RED status i.e. on/off.
int SetREDStatus(bool enable_red);

// Get RED status.
bool REDStatus() const;

/////////////////////////////////////////
// (FEC) Forward Error Correction (codec internal)
//

// Configure FEC status i.e. on/off.
int SetCodecFEC(bool enabled_codec_fec);

// Get FEC status.
bool FECStatus() const;
bool CodecFEC() const;

// Set target packet loss rate
int SetPacketLossRate(int loss_rate);

/////////////////////////////////////////
// (VAD) Voice Activity Detection
@@ -313,21 +326,24 @@ class AudioCodingModuleImpl : public AudioCodingModule {
CriticalSectionWrapper* acm_crit_sect_;
ACMVADCallback* vad_callback_;

// RED/FEC.
// RED.
bool is_first_red_;
bool fec_enabled_;
bool red_enabled_;

// TODO(turajs): |red_buffer_| is allocated in constructor, why having them
// as pointers and not an array. If concerned about the memory, then make a
// set-up function to allocate them only when they are going to be used, i.e.
// FEC or Dual-streaming is enabled.
// RED or Dual-streaming is enabled.
uint8_t* red_buffer_;

// TODO(turajs): we actually don't need |fragmentation_| as a member variable.
// It is sufficient to keep the length & payload type of previous payload in
// member variables.
RTPFragmentationHeader fragmentation_;
uint32_t last_fec_timestamp_;
uint32_t last_red_timestamp_;

// Codec internal FEC
bool codec_fec_enabled_;

// This is to keep track of CN instances where we can send DTMFs.
uint8_t previous_pltype_;