Remove the state_ member from AudioDecoder
The subclasses that need a state pointer should declare it themselves, with the right type rather than void*, to get rid of all those casts.

Two small but not quite trivial cleanups are included because they blocked the state_ removal:

- AudioDecoderG722Stereo now inherits directly from AudioDecoder instead of being a subclass of AudioDecoderG722.
- AudioDecoder now has a CngDecoderInstance member function, which is implemented only by AudioDecoderCng. This replaces the previous practice of calling AudioDecoder::state() and casting the result to a CNG_dec_inst*. It still isn't pretty, but the blemish is now plainly visible in the AudioDecoder class declaration.

R=henrik.lundin@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/24169005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7644 4adac7df-926f-26a2-2b94-8c16560cd09d
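At a call site, the change boils down to replacing the state()-plus-cast pattern with the new virtual accessor; condensed from the ComfortNoise hunks below:

  // Before: every caller had to know the concrete type hiding behind the void*.
  CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());

  // After: no cast at the call site; only AudioDecoderCng returns a real
  // instance, while the base-class implementation hits FATAL() << "Not a CNG decoder".
  CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();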
@@ -277,7 +277,6 @@ ACMISAC::ACMISAC(int16_t codec_id)
     return;
   }
   codec_inst_ptr_->inst = NULL;
-  state_ = codec_inst_ptr_;
 }

 ACMISAC::~ACMISAC() {
@@ -12,6 +12,7 @@

 #include <assert.h>

+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"

 namespace webrtc {
@@ -51,6 +52,11 @@ bool AudioDecoder::PacketHasFec(const uint8_t* encoded,
   return false;
 }

+CNG_dec_inst* AudioDecoder::CngDecoderInstance() {
+  FATAL() << "Not a CNG decoder";
+  return NULL;
+}
+
 bool AudioDecoder::CodecSupported(NetEqDecoder codec_type) {
   switch (codec_type) {
     case kDecoderPCMu:
@@ -103,17 +103,17 @@ AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(int num_channels) {
 // iLBC
 #ifdef WEBRTC_CODEC_ILBC
 AudioDecoderIlbc::AudioDecoderIlbc() {
-  WebRtcIlbcfix_DecoderCreate(reinterpret_cast<iLBC_decinst_t**>(&state_));
+  WebRtcIlbcfix_DecoderCreate(&dec_state_);
 }

 AudioDecoderIlbc::~AudioDecoderIlbc() {
-  WebRtcIlbcfix_DecoderFree(static_cast<iLBC_decinst_t*>(state_));
+  WebRtcIlbcfix_DecoderFree(dec_state_);
 }

 int AudioDecoderIlbc::Decode(const uint8_t* encoded, size_t encoded_len,
                              int16_t* decoded, SpeechType* speech_type) {
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcIlbcfix_Decode(static_cast<iLBC_decinst_t*>(state_),
+  int16_t ret = WebRtcIlbcfix_Decode(dec_state_,
                                      reinterpret_cast<const int16_t*>(encoded),
                                      static_cast<int16_t>(encoded_len), decoded,
                                      &temp_type);
@@ -122,12 +122,11 @@ int AudioDecoderIlbc::Decode(const uint8_t* encoded, size_t encoded_len,
 }

 int AudioDecoderIlbc::DecodePlc(int num_frames, int16_t* decoded) {
-  return WebRtcIlbcfix_NetEqPlc(static_cast<iLBC_decinst_t*>(state_),
-                                decoded, num_frames);
+  return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
 }

 int AudioDecoderIlbc::Init() {
-  return WebRtcIlbcfix_Decoderinit30Ms(static_cast<iLBC_decinst_t*>(state_));
+  return WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
 }
 #endif

@@ -135,19 +134,18 @@ int AudioDecoderIlbc::Init() {
 #ifdef WEBRTC_CODEC_ISAC
 AudioDecoderIsac::AudioDecoderIsac(int decode_sample_rate_hz) {
   DCHECK(decode_sample_rate_hz == 16000 || decode_sample_rate_hz == 32000);
-  WebRtcIsac_Create(reinterpret_cast<ISACStruct**>(&state_));
-  WebRtcIsac_SetDecSampRate(static_cast<ISACStruct*>(state_),
-                            decode_sample_rate_hz);
+  WebRtcIsac_Create(&isac_state_);
+  WebRtcIsac_SetDecSampRate(isac_state_, decode_sample_rate_hz);
 }

 AudioDecoderIsac::~AudioDecoderIsac() {
-  WebRtcIsac_Free(static_cast<ISACStruct*>(state_));
+  WebRtcIsac_Free(isac_state_);
 }

 int AudioDecoderIsac::Decode(const uint8_t* encoded, size_t encoded_len,
                              int16_t* decoded, SpeechType* speech_type) {
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcIsac_Decode(static_cast<ISACStruct*>(state_),
+  int16_t ret = WebRtcIsac_Decode(isac_state_,
                                   encoded,
                                   static_cast<int16_t>(encoded_len), decoded,
                                   &temp_type);
@@ -159,7 +157,7 @@ int AudioDecoderIsac::DecodeRedundant(const uint8_t* encoded,
                                       size_t encoded_len, int16_t* decoded,
                                       SpeechType* speech_type) {
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcIsac_DecodeRcu(static_cast<ISACStruct*>(state_),
+  int16_t ret = WebRtcIsac_DecodeRcu(isac_state_,
                                      encoded,
                                      static_cast<int16_t>(encoded_len), decoded,
                                      &temp_type);
@@ -168,12 +166,11 @@ int AudioDecoderIsac::DecodeRedundant(const uint8_t* encoded,
 }

 int AudioDecoderIsac::DecodePlc(int num_frames, int16_t* decoded) {
-  return WebRtcIsac_DecodePlc(static_cast<ISACStruct*>(state_),
-                              decoded, num_frames);
+  return WebRtcIsac_DecodePlc(isac_state_, decoded, num_frames);
 }

 int AudioDecoderIsac::Init() {
-  return WebRtcIsac_DecoderInit(static_cast<ISACStruct*>(state_));
+  return WebRtcIsac_DecoderInit(isac_state_);
 }

 int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
@@ -181,7 +178,7 @@ int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
                                      uint16_t rtp_sequence_number,
                                      uint32_t rtp_timestamp,
                                      uint32_t arrival_timestamp) {
-  return WebRtcIsac_UpdateBwEstimate(static_cast<ISACStruct*>(state_),
+  return WebRtcIsac_UpdateBwEstimate(isac_state_,
                                      payload,
                                      static_cast<int32_t>(payload_len),
                                      rtp_sequence_number,
@@ -190,24 +187,24 @@ int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
 }

 int AudioDecoderIsac::ErrorCode() {
-  return WebRtcIsac_GetErrorCode(static_cast<ISACStruct*>(state_));
+  return WebRtcIsac_GetErrorCode(isac_state_);
 }
 #endif

 // iSAC fix
 #ifdef WEBRTC_CODEC_ISACFX
 AudioDecoderIsacFix::AudioDecoderIsacFix() {
-  WebRtcIsacfix_Create(reinterpret_cast<ISACFIX_MainStruct**>(&state_));
+  WebRtcIsacfix_Create(&isac_state_);
 }

 AudioDecoderIsacFix::~AudioDecoderIsacFix() {
-  WebRtcIsacfix_Free(static_cast<ISACFIX_MainStruct*>(state_));
+  WebRtcIsacfix_Free(isac_state_);
 }

 int AudioDecoderIsacFix::Decode(const uint8_t* encoded, size_t encoded_len,
                                 int16_t* decoded, SpeechType* speech_type) {
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcIsacfix_Decode(static_cast<ISACFIX_MainStruct*>(state_),
+  int16_t ret = WebRtcIsacfix_Decode(isac_state_,
                                      encoded,
                                      static_cast<int16_t>(encoded_len), decoded,
                                      &temp_type);
@@ -216,7 +213,7 @@ int AudioDecoderIsacFix::Decode(const uint8_t* encoded, size_t encoded_len,
 }

 int AudioDecoderIsacFix::Init() {
-  return WebRtcIsacfix_DecoderInit(static_cast<ISACFIX_MainStruct*>(state_));
+  return WebRtcIsacfix_DecoderInit(isac_state_);
 }

 int AudioDecoderIsacFix::IncomingPacket(const uint8_t* payload,
@@ -225,32 +222,32 @@ int AudioDecoderIsacFix::IncomingPacket(const uint8_t* payload,
                                         uint32_t rtp_timestamp,
                                         uint32_t arrival_timestamp) {
   return WebRtcIsacfix_UpdateBwEstimate(
-      static_cast<ISACFIX_MainStruct*>(state_),
+      isac_state_,
       payload,
       static_cast<int32_t>(payload_len),
       rtp_sequence_number, rtp_timestamp, arrival_timestamp);
 }

 int AudioDecoderIsacFix::ErrorCode() {
-  return WebRtcIsacfix_GetErrorCode(static_cast<ISACFIX_MainStruct*>(state_));
+  return WebRtcIsacfix_GetErrorCode(isac_state_);
 }
 #endif

 // G.722
 #ifdef WEBRTC_CODEC_G722
 AudioDecoderG722::AudioDecoderG722() {
-  WebRtcG722_CreateDecoder(reinterpret_cast<G722DecInst**>(&state_));
+  WebRtcG722_CreateDecoder(&dec_state_);
 }

 AudioDecoderG722::~AudioDecoderG722() {
-  WebRtcG722_FreeDecoder(static_cast<G722DecInst*>(state_));
+  WebRtcG722_FreeDecoder(dec_state_);
 }

 int AudioDecoderG722::Decode(const uint8_t* encoded, size_t encoded_len,
                              int16_t* decoded, SpeechType* speech_type) {
   int16_t temp_type = 1;  // Default is speech.
   int16_t ret = WebRtcG722_Decode(
-      static_cast<G722DecInst*>(state_),
+      dec_state_,
       const_cast<int16_t*>(reinterpret_cast<const int16_t*>(encoded)),
       static_cast<int16_t>(encoded_len), decoded, &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
@@ -258,7 +255,7 @@ int AudioDecoderG722::Decode(const uint8_t* encoded, size_t encoded_len,
 }

 int AudioDecoderG722::Init() {
-  return WebRtcG722_DecoderInit(static_cast<G722DecInst*>(state_));
+  return WebRtcG722_DecoderInit(dec_state_);
 }

 int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
@@ -267,18 +264,15 @@ int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
   return static_cast<int>(2 * encoded_len / channels_);
 }

-AudioDecoderG722Stereo::AudioDecoderG722Stereo()
-    : AudioDecoderG722(),
-      state_left_(state_),  // Base member |state_| is used for left channel.
-      state_right_(NULL) {
+AudioDecoderG722Stereo::AudioDecoderG722Stereo() {
   channels_ = 2;
-  // |state_left_| already created by the base class AudioDecoderG722.
-  WebRtcG722_CreateDecoder(reinterpret_cast<G722DecInst**>(&state_right_));
+  WebRtcG722_CreateDecoder(&dec_state_left_);
+  WebRtcG722_CreateDecoder(&dec_state_right_);
 }

 AudioDecoderG722Stereo::~AudioDecoderG722Stereo() {
-  // |state_left_| will be freed by the base class AudioDecoderG722.
-  WebRtcG722_FreeDecoder(static_cast<G722DecInst*>(state_right_));
+  WebRtcG722_FreeDecoder(dec_state_left_);
+  WebRtcG722_FreeDecoder(dec_state_right_);
 }

 int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
@@ -289,13 +283,13 @@ int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
   SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
   // Decode left and right.
   int16_t ret = WebRtcG722_Decode(
-      static_cast<G722DecInst*>(state_left_),
+      dec_state_left_,
      reinterpret_cast<int16_t*>(encoded_deinterleaved),
      static_cast<int16_t>(encoded_len / 2), decoded, &temp_type);
   if (ret >= 0) {
     int decoded_len = ret;
     ret = WebRtcG722_Decode(
-        static_cast<G722DecInst*>(state_right_),
+        dec_state_right_,
        reinterpret_cast<int16_t*>(&encoded_deinterleaved[encoded_len / 2]),
        static_cast<int16_t>(encoded_len / 2), &decoded[decoded_len], &temp_type);
     if (ret == decoded_len) {
@@ -317,11 +311,10 @@ int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
 }

 int AudioDecoderG722Stereo::Init() {
-  int ret = WebRtcG722_DecoderInit(static_cast<G722DecInst*>(state_right_));
-  if (ret != 0) {
-    return ret;
-  }
-  return AudioDecoderG722::Init();
+  int r = WebRtcG722_DecoderInit(dec_state_left_);
+  if (r != 0)
+    return r;
+  return WebRtcG722_DecoderInit(dec_state_right_);
 }

 // Split the stereo packet and place left and right channel after each other
@@ -401,18 +394,17 @@ int AudioDecoderCelt::DecodePlc(int num_frames, int16_t* decoded) {
 AudioDecoderOpus::AudioDecoderOpus(int num_channels) {
   DCHECK(num_channels == 1 || num_channels == 2);
   channels_ = num_channels;
-  WebRtcOpus_DecoderCreate(reinterpret_cast<OpusDecInst**>(&state_),
-                           static_cast<int>(channels_));
+  WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
 }

 AudioDecoderOpus::~AudioDecoderOpus() {
-  WebRtcOpus_DecoderFree(static_cast<OpusDecInst*>(state_));
+  WebRtcOpus_DecoderFree(dec_state_);
 }

 int AudioDecoderOpus::Decode(const uint8_t* encoded, size_t encoded_len,
                              int16_t* decoded, SpeechType* speech_type) {
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcOpus_DecodeNew(static_cast<OpusDecInst*>(state_), encoded,
+  int16_t ret = WebRtcOpus_DecodeNew(dec_state_, encoded,
                                      static_cast<int16_t>(encoded_len), decoded,
                                      &temp_type);
   if (ret > 0)
@@ -425,7 +417,7 @@ int AudioDecoderOpus::DecodeRedundant(const uint8_t* encoded,
                                       size_t encoded_len, int16_t* decoded,
                                       SpeechType* speech_type) {
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcOpus_DecodeFec(static_cast<OpusDecInst*>(state_), encoded,
+  int16_t ret = WebRtcOpus_DecodeFec(dec_state_, encoded,
                                      static_cast<int16_t>(encoded_len), decoded,
                                      &temp_type);
   if (ret > 0)
@@ -435,12 +427,12 @@ int AudioDecoderOpus::DecodeRedundant(const uint8_t* encoded,
 }

 int AudioDecoderOpus::Init() {
-  return WebRtcOpus_DecoderInitNew(static_cast<OpusDecInst*>(state_));
+  return WebRtcOpus_DecoderInitNew(dec_state_);
 }

 int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
                                      size_t encoded_len) {
-  return WebRtcOpus_DurationEst(static_cast<OpusDecInst*>(state_),
+  return WebRtcOpus_DurationEst(dec_state_,
                                 encoded, static_cast<int>(encoded_len));
 }

@@ -458,19 +450,15 @@ bool AudioDecoderOpus::PacketHasFec(const uint8_t* encoded,
 #endif

 AudioDecoderCng::AudioDecoderCng() {
-  WebRtcCng_CreateDec(reinterpret_cast<CNG_dec_inst**>(&state_));
-  assert(state_);
+  CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
 }

 AudioDecoderCng::~AudioDecoderCng() {
-  if (state_) {
-    WebRtcCng_FreeDec(static_cast<CNG_dec_inst*>(state_));
-  }
+  WebRtcCng_FreeDec(dec_state_);
 }

 int AudioDecoderCng::Init() {
-  assert(state_);
-  return WebRtcCng_InitDec(static_cast<CNG_dec_inst*>(state_));
+  return WebRtcCng_InitDec(dec_state_);
 }

 }  // namespace webrtc
@@ -19,6 +19,22 @@
 #include "webrtc/engine_configurations.h"
 #endif
 #include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
+#ifdef WEBRTC_CODEC_G722
+#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
+#endif
+#ifdef WEBRTC_CODEC_ISACFX
+#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
+#endif
 #include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
 #include "webrtc/typedefs.h"

@@ -109,6 +125,7 @@ class AudioDecoderIlbc : public AudioDecoder {
   virtual int Init();

  private:
+  iLBC_decinst_t* dec_state_;
   DISALLOW_COPY_AND_ASSIGN(AudioDecoderIlbc);
 };
 #endif
@@ -133,6 +150,7 @@ class AudioDecoderIsac : public AudioDecoder {
   virtual int ErrorCode();

  private:
+  ISACStruct* isac_state_;
   DISALLOW_COPY_AND_ASSIGN(AudioDecoderIsac);
 };
 #endif
@@ -153,6 +171,7 @@ class AudioDecoderIsacFix : public AudioDecoder {
   virtual int ErrorCode();

  private:
+  ISACFIX_MainStruct* isac_state_;
   DISALLOW_COPY_AND_ASSIGN(AudioDecoderIsacFix);
 };
 #endif
@@ -169,10 +188,11 @@ class AudioDecoderG722 : public AudioDecoder {
   virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len);

  private:
+  G722DecInst* dec_state_;
   DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722);
 };

-class AudioDecoderG722Stereo : public AudioDecoderG722 {
+class AudioDecoderG722Stereo : public AudioDecoder {
  public:
   AudioDecoderG722Stereo();
   virtual ~AudioDecoderG722Stereo();
@@ -189,8 +209,8 @@ class AudioDecoderG722Stereo : public AudioDecoderG722 {
   void SplitStereoPacket(const uint8_t* encoded, size_t encoded_len,
                          uint8_t* encoded_deinterleaved);

-  void* const state_left_;
-  void* state_right_;
+  G722DecInst* dec_state_left_;
+  G722DecInst* dec_state_right_;

   DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722Stereo);
 };
@@ -229,6 +249,7 @@ class AudioDecoderOpus : public AudioDecoder {
   virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;

  private:
+  OpusDecInst* dec_state_;
   DISALLOW_COPY_AND_ASSIGN(AudioDecoderOpus);
 };
 #endif
@@ -252,7 +273,10 @@ class AudioDecoderCng : public AudioDecoder {
                              uint32_t rtp_timestamp,
                              uint32_t arrival_timestamp) { return -1; }

+  virtual CNG_dec_inst* CngDecoderInstance() OVERRIDE { return dec_state_; }
+
  private:
+  CNG_dec_inst* dec_state_;
   DISALLOW_COPY_AND_ASSIGN(AudioDecoderCng);
 };

@@ -36,7 +36,7 @@ int ComfortNoise::UpdateParameters(Packet* packet) {
     return kUnknownPayloadType;
   }
   decoder_database_->SetActiveCngDecoder(packet->header.payloadType);
-  CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());
+  CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
   int16_t ret = WebRtcCng_UpdateSid(cng_inst,
                                     packet->payload,
                                     packet->payload_length);
@@ -72,7 +72,7 @@ int ComfortNoise::Generate(size_t requested_length,
   if (!cng_decoder) {
     return kUnknownPayloadType;
   }
-  CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());
+  CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
   // The expression &(*output)[0][0] is a pointer to the first element in
   // the first channel.
   if (WebRtcCng_Generate(cng_inst, &(*output)[0][0],
@@ -14,6 +14,7 @@
 #include <stdlib.h>  // NULL

 #include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
 #include "webrtc/typedefs.h"

 namespace webrtc {
@@ -63,7 +64,7 @@ class AudioDecoder {
   // Used by PacketDuration below. Save the value -1 for errors.
   enum { kNotImplemented = -2 };

-  AudioDecoder() : channels_(1), state_(NULL) {}
+  AudioDecoder() : channels_(1) {}
   virtual ~AudioDecoder() {}

   // Decodes |encode_len| bytes from |encoded| and writes the result in
@@ -114,8 +115,9 @@ class AudioDecoder {
   // Returns true if the packet has FEC and false otherwise.
   virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;

-  // Returns the underlying decoder state.
-  void* state() { return state_; }
+  // If this is a CNG decoder, return the underlying CNG_dec_inst*. If this
+  // isn't a CNG decoder, don't call this method.
+  virtual CNG_dec_inst* CngDecoderInstance();

   // Returns true if |codec_type| is supported.
   static bool CodecSupported(NetEqDecoder codec_type);
@@ -134,7 +136,6 @@ class AudioDecoder {
   static SpeechType ConvertSpeechType(int16_t type);

   size_t channels_;
-  void* state_;

  private:
   DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
@@ -147,9 +147,9 @@ int Normal::Process(const int16_t* input,
     AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();

     if (cng_decoder) {
-      CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());
       // Generate long enough for 32kHz.
-      if (WebRtcCng_Generate(cng_inst, cng_output, kCngLength, 0) < 0) {
+      if (WebRtcCng_Generate(cng_decoder->CngDecoderInstance(), cng_output,
+                             kCngLength, 0) < 0) {
         // Error returned; set return vector to all zeros.
         memset(cng_output, 0, sizeof(cng_output));
       }