Remove the state_ member from AudioDecoder

The subclasses that need a state pointer now declare it themselves,
with the right type instead of void*, which gets rid of all the casts.

Two small but not quite trivial cleanups are included because they
blocked the state_ removal:

  - AudioDecoderG722Stereo now inherits directly from AudioDecoder
    instead of being a subclass of AudioDecoderG722.

  - AudioDecoder now has a CngDecoderInstance member function, which
    only AudioDecoderCng overrides (the base-class version should never
    be called). This replaces the previous practice of calling
    AudioDecoder::state() and casting the result to a CNG_dec_inst*.
    It still isn't pretty, but the blemish is now plainly visible in
    the AudioDecoder class declaration; a sketch of the resulting
    interface follows.
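
In outline, the new shape is roughly as follows. This is only a
sketch: the opaque types are stand-in forward declarations rather than
the real codec headers, the constructors are simplified (the real ones
call the WebRtc*_Create functions shown in the diff), and the default
CngDecoderInstance body returns NULL where the real one dies with
FATAL().

  #include <cstddef>  // for NULL

  // Stand-in opaque types; the real ones come from the codec headers.
  struct ISACStruct;
  struct G722DecInst;
  struct CNG_dec_inst;

  class AudioDecoder {
   public:
    virtual ~AudioDecoder() {}
    // Only AudioDecoderCng overrides this; nobody else should call it.
    virtual CNG_dec_inst* CngDecoderInstance() { return NULL; }
  };

  // The common case: a state pointer with the right type, so call
  // sites need no casts.
  class AudioDecoderIsac : public AudioDecoder {
   public:
    AudioDecoderIsac() : isac_state_(NULL) {}  // real ctor: WebRtcIsac_Create()
   private:
    ISACStruct* isac_state_;
  };

  // Cleanup 1: the stereo decoder owns both channel states itself and
  // no longer derives from AudioDecoderG722.
  class AudioDecoderG722Stereo : public AudioDecoder {
   public:
    AudioDecoderG722Stereo() : dec_state_left_(NULL), dec_state_right_(NULL) {}
   private:
    G722DecInst* dec_state_left_;
    G722DecInst* dec_state_right_;
  };

  // Cleanup 2: the CNG decoder exposes its state through the explicit
  // accessor instead of a void* that callers must cast.
  class AudioDecoderCng : public AudioDecoder {
   public:
    AudioDecoderCng() : dec_state_(NULL) {}  // real ctor: WebRtcCng_CreateDec()
    virtual CNG_dec_inst* CngDecoderInstance() { return dec_state_; }
   private:
    CNG_dec_inst* dec_state_;
  };

Callers that used to write static_cast<CNG_dec_inst*>(decoder->state())
now just call decoder->CngDecoderInstance(), as the ComfortNoise and
Normal changes below show.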

R=henrik.lundin@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/24169005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7623 4adac7df-926f-26a2-2b94-8c16560cd09d
kwiberg@webrtc.org 2014-11-04 21:18:47 +00:00
parent 7c29e8c2f3
commit 9e525585fd
6 changed files with 84 additions and 68 deletions

View File

@@ -277,7 +277,6 @@ ACMISAC::ACMISAC(int16_t codec_id)
return;
}
codec_inst_ptr_->inst = NULL;
state_ = codec_inst_ptr_;
}
ACMISAC::~ACMISAC() {

View File

@@ -103,17 +103,17 @@ AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(int num_channels) {
// iLBC
#ifdef WEBRTC_CODEC_ILBC
AudioDecoderIlbc::AudioDecoderIlbc() {
WebRtcIlbcfix_DecoderCreate(reinterpret_cast<iLBC_decinst_t**>(&state_));
WebRtcIlbcfix_DecoderCreate(&dec_state_);
}
AudioDecoderIlbc::~AudioDecoderIlbc() {
WebRtcIlbcfix_DecoderFree(static_cast<iLBC_decinst_t*>(state_));
WebRtcIlbcfix_DecoderFree(dec_state_);
}
int AudioDecoderIlbc::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIlbcfix_Decode(static_cast<iLBC_decinst_t*>(state_),
int16_t ret = WebRtcIlbcfix_Decode(dec_state_,
reinterpret_cast<const int16_t*>(encoded),
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@@ -122,12 +122,11 @@ int AudioDecoderIlbc::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderIlbc::DecodePlc(int num_frames, int16_t* decoded) {
return WebRtcIlbcfix_NetEqPlc(static_cast<iLBC_decinst_t*>(state_),
decoded, num_frames);
return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
}
int AudioDecoderIlbc::Init() {
return WebRtcIlbcfix_Decoderinit30Ms(static_cast<iLBC_decinst_t*>(state_));
return WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
}
#endif
@@ -135,19 +134,18 @@ int AudioDecoderIlbc::Init() {
#ifdef WEBRTC_CODEC_ISAC
AudioDecoderIsac::AudioDecoderIsac(int decode_sample_rate_hz) {
DCHECK(decode_sample_rate_hz == 16000 || decode_sample_rate_hz == 32000);
WebRtcIsac_Create(reinterpret_cast<ISACStruct**>(&state_));
WebRtcIsac_SetDecSampRate(static_cast<ISACStruct*>(state_),
decode_sample_rate_hz);
WebRtcIsac_Create(&isac_state_);
WebRtcIsac_SetDecSampRate(isac_state_, decode_sample_rate_hz);
}
AudioDecoderIsac::~AudioDecoderIsac() {
WebRtcIsac_Free(static_cast<ISACStruct*>(state_));
WebRtcIsac_Free(isac_state_);
}
int AudioDecoderIsac::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIsac_Decode(static_cast<ISACStruct*>(state_),
int16_t ret = WebRtcIsac_Decode(isac_state_,
encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@@ -159,7 +157,7 @@ int AudioDecoderIsac::DecodeRedundant(const uint8_t* encoded,
size_t encoded_len, int16_t* decoded,
SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIsac_DecodeRcu(static_cast<ISACStruct*>(state_),
int16_t ret = WebRtcIsac_DecodeRcu(isac_state_,
encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@@ -168,12 +166,11 @@ int AudioDecoderIsac::DecodeRedundant(const uint8_t* encoded,
}
int AudioDecoderIsac::DecodePlc(int num_frames, int16_t* decoded) {
return WebRtcIsac_DecodePlc(static_cast<ISACStruct*>(state_),
decoded, num_frames);
return WebRtcIsac_DecodePlc(isac_state_, decoded, num_frames);
}
int AudioDecoderIsac::Init() {
return WebRtcIsac_DecoderInit(static_cast<ISACStruct*>(state_));
return WebRtcIsac_DecoderInit(isac_state_);
}
int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
@@ -181,7 +178,7 @@ int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
uint16_t rtp_sequence_number,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) {
return WebRtcIsac_UpdateBwEstimate(static_cast<ISACStruct*>(state_),
return WebRtcIsac_UpdateBwEstimate(isac_state_,
payload,
static_cast<int32_t>(payload_len),
rtp_sequence_number,
@@ -190,24 +187,24 @@ int AudioDecoderIsac::IncomingPacket(const uint8_t* payload,
}
int AudioDecoderIsac::ErrorCode() {
return WebRtcIsac_GetErrorCode(static_cast<ISACStruct*>(state_));
return WebRtcIsac_GetErrorCode(isac_state_);
}
#endif
// iSAC fix
#ifdef WEBRTC_CODEC_ISACFX
AudioDecoderIsacFix::AudioDecoderIsacFix() {
WebRtcIsacfix_Create(reinterpret_cast<ISACFIX_MainStruct**>(&state_));
WebRtcIsacfix_Create(&isac_state_);
}
AudioDecoderIsacFix::~AudioDecoderIsacFix() {
WebRtcIsacfix_Free(static_cast<ISACFIX_MainStruct*>(state_));
WebRtcIsacfix_Free(isac_state_);
}
int AudioDecoderIsacFix::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcIsacfix_Decode(static_cast<ISACFIX_MainStruct*>(state_),
int16_t ret = WebRtcIsacfix_Decode(isac_state_,
encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
@@ -216,7 +213,7 @@ int AudioDecoderIsacFix::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderIsacFix::Init() {
return WebRtcIsacfix_DecoderInit(static_cast<ISACFIX_MainStruct*>(state_));
return WebRtcIsacfix_DecoderInit(isac_state_);
}
int AudioDecoderIsacFix::IncomingPacket(const uint8_t* payload,
@@ -225,32 +222,32 @@ int AudioDecoderIsacFix::IncomingPacket(const uint8_t* payload,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) {
return WebRtcIsacfix_UpdateBwEstimate(
static_cast<ISACFIX_MainStruct*>(state_),
isac_state_,
payload,
static_cast<int32_t>(payload_len),
rtp_sequence_number, rtp_timestamp, arrival_timestamp);
}
int AudioDecoderIsacFix::ErrorCode() {
return WebRtcIsacfix_GetErrorCode(static_cast<ISACFIX_MainStruct*>(state_));
return WebRtcIsacfix_GetErrorCode(isac_state_);
}
#endif
// G.722
#ifdef WEBRTC_CODEC_G722
AudioDecoderG722::AudioDecoderG722() {
WebRtcG722_CreateDecoder(reinterpret_cast<G722DecInst**>(&state_));
WebRtcG722_CreateDecoder(&dec_state_);
}
AudioDecoderG722::~AudioDecoderG722() {
WebRtcG722_FreeDecoder(static_cast<G722DecInst*>(state_));
WebRtcG722_FreeDecoder(dec_state_);
}
int AudioDecoderG722::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcG722_Decode(
static_cast<G722DecInst*>(state_),
dec_state_,
const_cast<int16_t*>(reinterpret_cast<const int16_t*>(encoded)),
static_cast<int16_t>(encoded_len), decoded, &temp_type);
*speech_type = ConvertSpeechType(temp_type);
@@ -258,7 +255,7 @@ int AudioDecoderG722::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderG722::Init() {
return WebRtcG722_DecoderInit(static_cast<G722DecInst*>(state_));
return WebRtcG722_DecoderInit(dec_state_);
}
int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
@@ -267,18 +264,15 @@ int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
return static_cast<int>(2 * encoded_len / channels_);
}
AudioDecoderG722Stereo::AudioDecoderG722Stereo()
: AudioDecoderG722(),
state_left_(state_), // Base member |state_| is used for left channel.
state_right_(NULL) {
AudioDecoderG722Stereo::AudioDecoderG722Stereo() {
channels_ = 2;
// |state_left_| already created by the base class AudioDecoderG722.
WebRtcG722_CreateDecoder(reinterpret_cast<G722DecInst**>(&state_right_));
WebRtcG722_CreateDecoder(&dec_state_left_);
WebRtcG722_CreateDecoder(&dec_state_right_);
}
AudioDecoderG722Stereo::~AudioDecoderG722Stereo() {
// |state_left_| will be freed by the base class AudioDecoderG722.
WebRtcG722_FreeDecoder(static_cast<G722DecInst*>(state_right_));
WebRtcG722_FreeDecoder(dec_state_left_);
WebRtcG722_FreeDecoder(dec_state_right_);
}
int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
@@ -289,13 +283,13 @@ int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
// Decode left and right.
int16_t ret = WebRtcG722_Decode(
static_cast<G722DecInst*>(state_left_),
dec_state_left_,
reinterpret_cast<int16_t*>(encoded_deinterleaved),
static_cast<int16_t>(encoded_len / 2), decoded, &temp_type);
if (ret >= 0) {
int decoded_len = ret;
ret = WebRtcG722_Decode(
static_cast<G722DecInst*>(state_right_),
dec_state_right_,
reinterpret_cast<int16_t*>(&encoded_deinterleaved[encoded_len / 2]),
static_cast<int16_t>(encoded_len / 2), &decoded[decoded_len], &temp_type);
if (ret == decoded_len) {
@@ -317,11 +311,10 @@ int AudioDecoderG722Stereo::Decode(const uint8_t* encoded, size_t encoded_len,
}
int AudioDecoderG722Stereo::Init() {
int ret = WebRtcG722_DecoderInit(static_cast<G722DecInst*>(state_right_));
if (ret != 0) {
return ret;
}
return AudioDecoderG722::Init();
int r = WebRtcG722_DecoderInit(dec_state_left_);
if (r != 0)
return r;
return WebRtcG722_DecoderInit(dec_state_right_);
}
// Split the stereo packet and place left and right channel after each other
@@ -401,18 +394,17 @@ int AudioDecoderCelt::DecodePlc(int num_frames, int16_t* decoded) {
AudioDecoderOpus::AudioDecoderOpus(int num_channels) {
DCHECK(num_channels == 1 || num_channels == 2);
channels_ = num_channels;
WebRtcOpus_DecoderCreate(reinterpret_cast<OpusDecInst**>(&state_),
static_cast<int>(channels_));
WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
}
AudioDecoderOpus::~AudioDecoderOpus() {
WebRtcOpus_DecoderFree(static_cast<OpusDecInst*>(state_));
WebRtcOpus_DecoderFree(dec_state_);
}
int AudioDecoderOpus::Decode(const uint8_t* encoded, size_t encoded_len,
int16_t* decoded, SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcOpus_DecodeNew(static_cast<OpusDecInst*>(state_), encoded,
int16_t ret = WebRtcOpus_DecodeNew(dec_state_, encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
if (ret > 0)
@@ -425,7 +417,7 @@ int AudioDecoderOpus::DecodeRedundant(const uint8_t* encoded,
size_t encoded_len, int16_t* decoded,
SpeechType* speech_type) {
int16_t temp_type = 1; // Default is speech.
int16_t ret = WebRtcOpus_DecodeFec(static_cast<OpusDecInst*>(state_), encoded,
int16_t ret = WebRtcOpus_DecodeFec(dec_state_, encoded,
static_cast<int16_t>(encoded_len), decoded,
&temp_type);
if (ret > 0)
@@ -435,12 +427,12 @@ int AudioDecoderOpus::DecodeRedundant(const uint8_t* encoded,
}
int AudioDecoderOpus::Init() {
return WebRtcOpus_DecoderInitNew(static_cast<OpusDecInst*>(state_));
return WebRtcOpus_DecoderInitNew(dec_state_);
}
int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
size_t encoded_len) {
return WebRtcOpus_DurationEst(static_cast<OpusDecInst*>(state_),
return WebRtcOpus_DurationEst(dec_state_,
encoded, static_cast<int>(encoded_len));
}
@@ -458,19 +450,15 @@ bool AudioDecoderOpus::PacketHasFec(const uint8_t* encoded,
#endif
AudioDecoderCng::AudioDecoderCng() {
WebRtcCng_CreateDec(reinterpret_cast<CNG_dec_inst**>(&state_));
assert(state_);
DCHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
}
AudioDecoderCng::~AudioDecoderCng() {
if (state_) {
WebRtcCng_FreeDec(static_cast<CNG_dec_inst*>(state_));
}
WebRtcCng_FreeDec(dec_state_);
}
int AudioDecoderCng::Init() {
assert(state_);
return WebRtcCng_InitDec(static_cast<CNG_dec_inst*>(state_));
return WebRtcCng_InitDec(dec_state_);
}
} // namespace webrtc

View File

@@ -19,6 +19,22 @@
#include "webrtc/engine_configurations.h"
#endif
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
#ifdef WEBRTC_CODEC_G722
#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
#endif
#ifdef WEBRTC_CODEC_ILBC
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
#endif
#ifdef WEBRTC_CODEC_ISACFX
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
#endif
#ifdef WEBRTC_CODEC_ISAC
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
#endif
#ifdef WEBRTC_CODEC_OPUS
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#endif
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
#include "webrtc/typedefs.h"
@@ -109,6 +125,7 @@ class AudioDecoderIlbc : public AudioDecoder {
virtual int Init();
private:
iLBC_decinst_t* dec_state_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderIlbc);
};
#endif
@@ -133,6 +150,7 @@ class AudioDecoderIsac : public AudioDecoder {
virtual int ErrorCode();
private:
ISACStruct* isac_state_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderIsac);
};
#endif
@@ -153,6 +171,7 @@ class AudioDecoderIsacFix : public AudioDecoder {
virtual int ErrorCode();
private:
ISACFIX_MainStruct* isac_state_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderIsacFix);
};
#endif
@@ -169,10 +188,11 @@ class AudioDecoderG722 : public AudioDecoder {
virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len);
private:
G722DecInst* dec_state_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722);
};
class AudioDecoderG722Stereo : public AudioDecoderG722 {
class AudioDecoderG722Stereo : public AudioDecoder {
public:
AudioDecoderG722Stereo();
virtual ~AudioDecoderG722Stereo();
@@ -189,8 +209,8 @@ class AudioDecoderG722Stereo : public AudioDecoderG722 {
void SplitStereoPacket(const uint8_t* encoded, size_t encoded_len,
uint8_t* encoded_deinterleaved);
void* const state_left_;
void* state_right_;
G722DecInst* dec_state_left_;
G722DecInst* dec_state_right_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722Stereo);
};
@@ -229,6 +249,7 @@ class AudioDecoderOpus : public AudioDecoder {
virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
private:
OpusDecInst* dec_state_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderOpus);
};
#endif
@@ -252,7 +273,10 @@ class AudioDecoderCng : public AudioDecoder {
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) { return -1; }
virtual CNG_dec_inst* CngDecoderInstance() OVERRIDE { return dec_state_; }
private:
CNG_dec_inst* dec_state_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderCng);
};

View File

@@ -36,7 +36,7 @@ int ComfortNoise::UpdateParameters(Packet* packet) {
return kUnknownPayloadType;
}
decoder_database_->SetActiveCngDecoder(packet->header.payloadType);
CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());
CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
int16_t ret = WebRtcCng_UpdateSid(cng_inst,
packet->payload,
packet->payload_length);
@@ -72,7 +72,7 @@ int ComfortNoise::Generate(size_t requested_length,
if (!cng_decoder) {
return kUnknownPayloadType;
}
CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());
CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
// The expression &(*output)[0][0] is a pointer to the first element in
// the first channel.
if (WebRtcCng_Generate(cng_inst, &(*output)[0][0],

View File

@@ -13,7 +13,9 @@
#include <stdlib.h> // NULL
#include "webrtc/base/checks.h"
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -63,7 +65,7 @@ class AudioDecoder {
// Used by PacketDuration below. Save the value -1 for errors.
enum { kNotImplemented = -2 };
AudioDecoder() : channels_(1), state_(NULL) {}
AudioDecoder() : channels_(1) {}
virtual ~AudioDecoder() {}
// Decodes |encode_len| bytes from |encoded| and writes the result in
@@ -114,8 +116,12 @@ class AudioDecoder {
// Returns true if the packet has FEC and false otherwise.
virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
// Returns the underlying decoder state.
void* state() { return state_; }
// If this is a CNG decoder, return the underlying CNG_dec_inst*. If this
// isn't a CNG decoder, don't call this method.
virtual CNG_dec_inst* CngDecoderInstance() {
FATAL() << "Not a CNG decoder";
return NULL;
}
// Returns true if |codec_type| is supported.
static bool CodecSupported(NetEqDecoder codec_type);
@@ -134,7 +140,6 @@
static SpeechType ConvertSpeechType(int16_t type);
size_t channels_;
void* state_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioDecoder);

View File

@@ -147,9 +147,9 @@ int Normal::Process(const int16_t* input,
AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
if (cng_decoder) {
CNG_dec_inst* cng_inst = static_cast<CNG_dec_inst*>(cng_decoder->state());
// Generate long enough for 32kHz.
if (WebRtcCng_Generate(cng_inst, cng_output, kCngLength, 0) < 0) {
if (WebRtcCng_Generate(cng_decoder->CngDecoderInstance(), cng_output,
kCngLength, 0) < 0) {
// Error returned; set return vector to all zeros.
memset(cng_output, 0, sizeof(cng_output));
}