Dual-stream implementation, not including VoE APIs.

Review URL: https://webrtc-codereview.appspot.com/933015

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3230 4adac7df-926f-26a2-2b94-8c16560cd09d
turaj@webrtc.org 2012-12-03 22:13:31 +00:00
parent 277ec8e3f5
commit 226db898f7
7 changed files with 1305 additions and 235 deletions

View File

@@ -13,8 +13,10 @@
#include "audio_coding_module_typedefs.h"
#include "module.h"
// TODO(turajs): If possible, forward declare and remove the following include.
#include "module_common_types.h"
namespace webrtc {
// forward declarations
@@ -204,6 +206,10 @@ class AudioCodingModule: public Module {
// Note: If a stereo codec is registered as send codec, VAD/DTX will
// automatically be turned off, since it is not supported for stereo sending.
//
// Note: If a secondary encoder is already registered, and the new send-codec
// has a sampling rate that does not match the secondary encoder, the
// secondary encoder will be unregistered.
//
// Input:
// -sendCodec : Parameters of the codec to be registered, cf.
// common_types.h for the definition of
@@ -215,6 +221,33 @@ class AudioCodingModule: public Module {
//
virtual WebRtc_Word32 RegisterSendCodec(const CodecInst& sendCodec) = 0;
///////////////////////////////////////////////////////////////////////////
// int RegisterSecondarySendCodec()
// Register a secondary encoder to enable dual-streaming. If a secondary
// codec is already registered, it will be removed before the new one is
// registered.
//
// Note: The secondary encoder will be unregistered if a primary codec
// is set with a sampling rate which does not match that of the existing
// secondary codec.
//
// Input:
// -send_codec : Parameters of the codec to be registered, cf.
// common_types.h for the definition of
// CodecInst.
//
// Return value:
// -1 if failed to register,
// 0 if succeeded.
//
virtual int RegisterSecondarySendCodec(const CodecInst& send_codec) = 0;
///////////////////////////////////////////////////////////////////////////
// void UnregisterSecondarySendCodec()
// Unregister the secondary encoder to disable dual-streaming.
//
virtual void UnregisterSecondarySendCodec() = 0;
///////////////////////////////////////////////////////////////////////////
// WebRtc_Word32 SendCodec()
// Get parameters for the codec currently registered as send codec.
@@ -228,6 +261,19 @@ class AudioCodingModule: public Module {
//
virtual WebRtc_Word32 SendCodec(CodecInst& currentSendCodec) const = 0;
///////////////////////////////////////////////////////////////////////////
// int SecondarySendCodec()
// Get the codec parameters for the current secondary send codec.
//
// Output:
// -secondary_codec : parameters of the secondary send codec.
//
// Return value:
// -1 if failed to get send codec,
// 0 if succeeded.
//
virtual int SecondarySendCodec(CodecInst* secondary_codec) const = 0;
///////////////////////////////////////////////////////////////////////////
// WebRtc_Word32 SendFrequency()
// Get the sampling frequency of the current encoder in Hertz.
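Taken together, the declarations above suggest the following calling sequence. This is a minimal sketch based only on this header and the unit test further down; the codec values are illustrative and error checks are omitted.

// Sketch only: codec choices are illustrative; see dual_stream_unittest.cc.
AudioCodingModule* acm = AudioCodingModule::Create(0);
acm->InitializeSender();

CodecInst primary;    // e.g. L16, looked up via AudioCodingModule::Codec().
CodecInst secondary;  // e.g. iSAC; must match the primary's sampling rate.
// ... populate |primary| and |secondary| from the codec database ...

// The primary codec must be registered first; registering the secondary
// codec activates dual-streaming (and turns off VAD/DTX, see the Api test).
acm->RegisterSendCodec(primary);
acm->RegisterSecondarySendCodec(secondary);

CodecInst current_secondary;
if (acm->SecondarySendCodec(&current_secondary) == 0) {
  // Dual-streaming is active; outgoing payloads arrive RED-packetized.
}

acm->UnregisterSecondarySendCodec();  // Back to single-stream operation.
AudioCodingModule::Destroy(acm);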

View File

@@ -29,7 +29,7 @@ enum {
// Interval for sending new CNG parameters (SID frames) is 100 msec.
enum {
kAcmSidIntervalMsec = 100
kCngSidIntervalMsec = 100
};
// We set some of the variables to invalid values as a check point
@@ -70,7 +70,6 @@ ACMGenericCodec::ACMGenericCodec()
for (int i = 0; i < MAX_FRAME_SIZE_10MSEC; i++) {
_vadLabel[i] = 0;
}
// Nullify memory for encoder and decoder, and set payload type to an
// invalid value.
memset(&_encoderParams, 0, sizeof(WebRtcACMCodecParams));
@@ -143,7 +142,7 @@ int32_t ACMGenericCodec::Add10MsDataSafe(const uint32_t timestamp,
_lastTimestamp = timestamp;
// If the data exceeds the buffer size, we through away the oldest data and
// If the data exceeds the buffer size, we throw away the oldest data and
// add the newly received 10 msec at the end.
if ((_inAudioIxWrite + lengthSmpl * audioChannel) > AUDIO_BUFFER_SIZE_W16) {
// Get the number of samples to be overwritten.
@@ -191,29 +190,27 @@ int32_t ACMGenericCodec::Add10MsDataSafe(const uint32_t timestamp,
return 0;
}
bool ACMGenericCodec::HasFrameToEncode() const {
ReadLockScoped lockCodec(_codecWrapperLock);
if (_inAudioIxWrite < _frameLenSmpl * _noChannels)
return false;
return true;
}
int16_t ACMGenericCodec::Encode(uint8_t* bitStream,
int16_t* bitStreamLenByte,
uint32_t* timeStamp,
WebRtcACMEncodingType* encodingType) {
WriteLockScoped lockCodec(_codecWrapperLock);
ReadLockScoped lockNetEq(*_netEqDecodeLock);
return EncodeSafe(bitStream, bitStreamLenByte, timeStamp, encodingType);
}
int16_t ACMGenericCodec::EncodeSafe(uint8_t* bitStream,
int16_t* bitStreamLenByte,
uint32_t* timeStamp,
WebRtcACMEncodingType* encodingType) {
// Only encode if we have enough data to encode. If not wait until we have a
// full frame to encode.
if (_inAudioIxWrite < _frameLenSmpl * _noChannels) {
// There is not enough audio.
if (!HasFrameToEncode()) {
// There is not enough audio
*timeStamp = 0;
*bitStreamLenByte = 0;
// Doesn't really matter what this parameter set to.
// Doesn't really matter what this parameter is set to.
*encodingType = kNoEncoding;
return 0;
}
WriteLockScoped lockCodec(_codecWrapperLock);
ReadLockScoped lockNetEq(*_netEqDecodeLock);
// Not all codecs accept the whole frame to be pushed into the encoder at
// once. Some codecs need to be fed a specific number of samples different
@@ -230,7 +227,6 @@ int16_t ACMGenericCodec::EncodeSafe(uint8_t* bitStream,
"EncodeSafe: error, basic coding sample block is negative");
return -1;
}
// This makes the internal encoder read from the beginning of the buffer.
_inAudioIxRead = 0;
*timeStamp = _inTimestamp[0];
@@ -932,7 +928,7 @@ int16_t ACMGenericCodec::EnableDTX() {
}
uint16_t freqHz;
EncoderSampFreq(freqHz);
if (WebRtcCng_InitEnc(_ptrDTXInst, freqHz, kAcmSidIntervalMsec,
if (WebRtcCng_InitEnc(_ptrDTXInst, freqHz, kCngSidIntervalMsec,
_numLPCParams) < 0) {
// Couldn't initialize, has to return -1, and free the memory.
WebRtcCng_FreeEnc(_ptrDTXInst);

View File

@@ -711,20 +711,20 @@ class ACMGenericCodec {
///////////////////////////////////////////////////////////////////////////
// REDPayloadISAC()
// This is an iSAC-specific function. The function is called to get RED
// paylaod from a default-encoder.
// payload from a default-encoder.
//
// Inputs:
// -isacRate : the target rate of the main payload. A RED
// paylaod is generated according to the rate of
// main paylaod. Note that we are not specifying the
// payload is generated according to the rate of
// main payload. Note that we are not specifying the
// rate of RED payload, but the main payload.
// -isacBwEstimate : bandwidth information should be inserted in
// RED payload.
//
// Output:
// -payload : pointer to a buffer where the RED paylaod will
// -payload : pointer to a buffer where the RED payload will
// written to.
// -paylaodLenBytes : a place-holder to write the length of the RED
// -payloadLenBytes : a place-holder to write the length of the RED
// payload in Bytes.
//
// Return value:
@@ -749,6 +749,13 @@ class ACMGenericCodec {
return false;
}
///////////////////////////////////////////////////////////////////////////
// HasFrameToEncode()
// Returns true if there is enough audio buffered for encoding, such that
// calling Encode() will return a payload.
//
bool HasFrameToEncode() const;
protected:
///////////////////////////////////////////////////////////////////////////
// All the functions with FunctionNameSafe(...) contain the actual
@@ -758,15 +765,6 @@ class ACMGenericCodec {
// and return value we refer to FunctionName()
//
///////////////////////////////////////////////////////////////////////////
// See Encode() for the description of function, input(s)/output(s) and
// return value.
//
WebRtc_Word16 EncodeSafe(WebRtc_UWord8* bitStream,
WebRtc_Word16* bitStreamLenByte,
WebRtc_UWord32* timeStamp,
WebRtcACMEncodingType* encodingType);
///////////////////////////////////////////////////////////////////////////
// See Decode() for the description of function, input(s)/output(s) and
// return value.
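With HasFrameToEncode() public, a caller can cheaply test for a buffered frame before calling Encode(), which takes the codec write lock. A minimal caller sketch, assuming |codec| points to a registered ACMGenericCodec and using an illustrative buffer size:

// Sketch only; |codec| is an ACMGenericCodec* and the buffer is illustrative.
uint8_t bitstream[MAX_PAYLOAD_SIZE_BYTE];
int16_t len_bytes = 0;
uint32_t rtp_timestamp = 0;
WebRtcACMEncodingType encoding_type = kNoEncoding;

if (codec->HasFrameToEncode()) {
  // Encode() acquires the codec write lock and the NetEq read lock itself,
  // so no external locking is needed here.
  if (codec->Encode(bitstream, &len_bytes, &rtp_timestamp,
                    &encoding_type) < 0) {
    // Handle the encoding error.
  }
  // |len_bytes| may still be 0, e.g. if DTX decides not to emit a packet.
}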

View File

@@ -119,6 +119,10 @@
'<(webrtc_root)/test/test.gyp:test_support_main',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/modules/modules.gyp:webrtc_utility',
],
'include_dirs': [
'<(webrtc_root)/common_audio/resampler/include',
],
'defines': [
'<@(audio_coding_defines)',
@@ -127,6 +131,7 @@
'../test/ACMTest.cc',
'../test/APITest.cc',
'../test/Channel.cc',
'../test/dual_stream_unittest.cc',
'../test/EncodeDecodeTest.cc',
'../test/iSACTest.cc',
'../test/PCMFile.cc',

View File

@@ -16,6 +16,7 @@
#include "acm_resampler.h"
#include "common_types.h"
#include "engine_configurations.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
@@ -59,6 +60,17 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// Can be called multiple times for Codec, CNG, RED.
WebRtc_Word32 RegisterSendCodec(const CodecInst& send_codec);
// Register a secondary codec for dual-streaming. Dual-streaming is activated
// right after the secondary codec is registered.
int RegisterSecondarySendCodec(const CodecInst& send_codec);
// Unregister the secondary codec. Dual-streaming is deactivated right after
// the secondary codec is deregistered.
void UnregisterSecondarySendCodec();
// Get the secondary codec.
int SecondarySendCodec(CodecInst* secondary_codec) const;
// Get current send codec.
WebRtc_Word32 SendCodec(CodecInst& current_codec) const;
@@ -248,6 +260,35 @@ class AudioCodingModuleImpl : public AudioCodingModule {
WebRtc_Word16 mirror_id,
ACMNetEQ::JB jitter_buffer);
// Set VAD/DTX status. This function does not acquire a lock, and it is
// meant to be called only from inside a critical section.
int SetVADSafe(bool enable_dtx, bool enable_vad, ACMVADMode mode);
// Process buffered audio when dual-streaming is not enabled (this function
// is also used when RED is enabled).
int ProcessSingleStream();
// Process buffered audio when dual-streaming is enabled, i.e. when a
// secondary send codec is registered.
int ProcessDualStream();
// Preprocess the input audio, including resampling and down-mixing if
// required, before pushing the audio into the encoder's buffer.
//
// in_frame: input audio frame
// ptr_out: pointer to the output audio frame; the output is valid only if
// preprocessing was required.
//
// Return value:
// -1: if an error is encountered.
// kPreprocessingSuccessful: if preprocessing was performed successfully.
// kNoPreprocessingRequired: if there was no need for preprocessing. In
// this case |*ptr_out| is not updated and
// |in_frame| has to be used for further
// operations.
int PreprocessToAddData(const AudioFrame& in_frame,
const AudioFrame** ptr_out);
private:
// Change required states after starting to receive the codec corresponding
// to |index|.
@@ -261,15 +302,12 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// is a stereo codec, RED or CN.
bool IsCodecForSlave(int index) const;
// Returns true if the |codec| is RED.
bool IsCodecRED(const CodecInst* codec) const;
// ...or if its |index| is RED.
bool IsCodecRED(int index) const;
int EncodeFragmentation(int fragmentation_index, int payload_type,
uint32_t current_timestamp,
ACMGenericCodec* _secondary_encoder,
uint8_t* stream);
// Returns true if the codec at |index| is CN.
bool IsCodecCN(int index) const;
// ...or if the |codec| is CN.
bool IsCodecCN(const CodecInst* codec) const;
void ResetFragmentation(int vector_size);
AudioPacketizationCallback* _packetizationCallback;
WebRtc_Word32 _id;
@@ -305,8 +343,15 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// RED/FEC.
bool _isFirstRED;
bool _fecEnabled;
// TODO(turajs): |_redBuffer| is allocated in the constructor, so why keep it
// as a pointer rather than an array? If memory is a concern, add a set-up
// function to allocate it only when it is going to be used, i.e. when FEC or
// dual-streaming is enabled.
WebRtc_UWord8* _redBuffer;
RTPFragmentationHeader* _fragmentation;
// TODO(turajs): we actually don't need |_fragmentation| as a member variable.
// It is sufficient to keep the length & payload type of the previous payload
// in member variables.
RTPFragmentationHeader _fragmentation;
WebRtc_UWord32 _lastFECTimestamp;
// If no RED is registered as receive codec this
// will have an invalid value.
@@ -334,7 +379,9 @@ class AudioCodingModuleImpl : public AudioCodingModule {
CriticalSectionWrapper* _callbackCritSect;
AudioFrame _audioFrame;
AudioFrame _preprocess_frame;
CodecInst _secondarySendCodecInst;
scoped_ptr<ACMGenericCodec> _secondaryEncoder;
#ifdef ACM_QA_TEST
FILE* _outgoingPL;
FILE* _incomingPL;

View File

@@ -0,0 +1,515 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "../source/acm_common_defs.h"
#include "gtest/gtest.h"
#include "audio_coding_module.h"
#include "PCMFile.h"
#include "module_common_types.h"
#include "scoped_ptr.h"
#include "testsupport/fileutils.h"
#include "typedefs.h"
namespace webrtc {
class DualStreamTest : public AudioPacketizationCallback,
public ::testing::Test {
protected:
DualStreamTest();
~DualStreamTest();
WebRtc_Word32 SendData(FrameType frameType, WebRtc_UWord8 payload_type,
WebRtc_UWord32 timestamp,
const WebRtc_UWord8* payload_data,
WebRtc_UWord16 payload_size,
const RTPFragmentationHeader* fragmentation);
void Perform(bool start_in_sync, int num_channels_input);
void InitializeSender(int frame_size_primary_samples,
int num_channels_primary,
int sampling_rate);
void PopulateCodecInstances(int frame_size_primary_ms,
int num_channels_primary,
int sampling_rate);
void Validate(bool start_in_sync, int tolerance);
bool EqualTimestamp(int stream, int position);
int EqualPayloadLength(int stream, int position);
bool EqualPayloadData(int stream, int position);
static const int kMaxNumStoredPayloads = 2;
enum {kPrimary = 0, kSecondary, kMaxNumStreams};
AudioCodingModule* acm_dual_stream_;
AudioCodingModule* acm_ref_primary_;
AudioCodingModule* acm_ref_secondary_;
CodecInst primary_encoder_;
CodecInst secondary_encoder_;
CodecInst red_encoder_;
int payload_ref_is_stored_[kMaxNumStreams][kMaxNumStoredPayloads];
int payload_dual_is_stored_[kMaxNumStreams][kMaxNumStoredPayloads];
uint32_t timestamp_ref_[kMaxNumStreams][kMaxNumStoredPayloads];
uint32_t timestamp_dual_[kMaxNumStreams][kMaxNumStoredPayloads];
int payload_len_ref_[kMaxNumStreams][kMaxNumStoredPayloads];
int payload_len_dual_[kMaxNumStreams][kMaxNumStoredPayloads];
uint8_t payload_data_ref_[kMaxNumStreams]
[MAX_PAYLOAD_SIZE_BYTE * kMaxNumStoredPayloads];
uint8_t payload_data_dual_[kMaxNumStreams]
[MAX_PAYLOAD_SIZE_BYTE * kMaxNumStoredPayloads];
int num_received_payloads_dual_[kMaxNumStreams];
int num_received_payloads_ref_[kMaxNumStreams];
int num_compared_payloads_[kMaxNumStreams];
uint32_t last_timestamp_[kMaxNumStreams];
bool received_payload_[kMaxNumStreams];
};
DualStreamTest::DualStreamTest()
: acm_dual_stream_(AudioCodingModule::Create(0)),
acm_ref_primary_(AudioCodingModule::Create(1)),
acm_ref_secondary_(AudioCodingModule::Create(2)),
payload_ref_is_stored_(),
payload_dual_is_stored_(),
timestamp_ref_(),
num_received_payloads_dual_(),
num_received_payloads_ref_(),
num_compared_payloads_(),
last_timestamp_(),
received_payload_() {}
DualStreamTest::~DualStreamTest() {
AudioCodingModule::Destroy(acm_dual_stream_);
AudioCodingModule::Destroy(acm_ref_primary_);
AudioCodingModule::Destroy(acm_ref_secondary_);
}
void DualStreamTest::PopulateCodecInstances(int frame_size_primary_ms,
int num_channels_primary,
int sampling_rate) {
CodecInst my_codec;
// Invalid values, used to check later on whether the codecs are found in the database.
primary_encoder_.pltype = -1;
secondary_encoder_.pltype = -1;
red_encoder_.pltype = -1;
for (int n = 0; n < AudioCodingModule::NumberOfCodecs(); n++) {
AudioCodingModule::Codec(n, my_codec);
if (strcmp(my_codec.plname, "ISAC") == 0 &&
my_codec.plfreq == sampling_rate) {
my_codec.rate = 32000;
my_codec.pacsize = 30 * sampling_rate / 1000;
memcpy(&secondary_encoder_, &my_codec, sizeof(my_codec));
} else if (strcmp(my_codec.plname, "L16") == 0 &&
my_codec.channels == num_channels_primary &&
my_codec.plfreq == sampling_rate) {
my_codec.pacsize = frame_size_primary_ms * sampling_rate / 1000;
memcpy(&primary_encoder_, &my_codec, sizeof(my_codec));
} else if (strcmp(my_codec.plname, "red") == 0) {
memcpy(&red_encoder_, &my_codec, sizeof(my_codec));
}
}
ASSERT_GE(primary_encoder_.pltype, 0);
ASSERT_GE(secondary_encoder_.pltype, 0);
ASSERT_GE(red_encoder_.pltype, 0);
}
void DualStreamTest::InitializeSender(int frame_size_primary_samples,
int num_channels_primary,
int sampling_rate) {
ASSERT_TRUE(acm_dual_stream_ != NULL);
ASSERT_TRUE(acm_ref_primary_ != NULL);
ASSERT_TRUE(acm_ref_secondary_ != NULL);
ASSERT_EQ(0, acm_dual_stream_->InitializeSender());
ASSERT_EQ(0, acm_ref_primary_->InitializeSender());
ASSERT_EQ(0, acm_ref_secondary_->InitializeSender());
PopulateCodecInstances(frame_size_primary_samples, num_channels_primary,
sampling_rate);
ASSERT_EQ(0, acm_ref_primary_->RegisterSendCodec(primary_encoder_));
ASSERT_EQ(0, acm_ref_secondary_->RegisterSendCodec(secondary_encoder_));
ASSERT_EQ(0, acm_dual_stream_->RegisterSendCodec(primary_encoder_));
ASSERT_EQ(0,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
ASSERT_EQ(0, acm_ref_primary_->RegisterTransportCallback(this));
ASSERT_EQ(0, acm_ref_secondary_->RegisterTransportCallback(this));
ASSERT_EQ(0, acm_dual_stream_->RegisterTransportCallback(this));
}
void DualStreamTest::Perform(bool start_in_sync, int num_channels_input) {
PCMFile pcm_file;
std::string file_name = test::ResourcePath(
(num_channels_input == 1) ? "audio_coding/testfile32kHz" :
"audio_coding/teststereo32kHz", "pcm");
pcm_file.Open(file_name, 32000, "rb");
pcm_file.ReadStereo(num_channels_input == 2);
AudioFrame audio_frame;
int tolerance = 0;
if (num_channels_input == 2 && primary_encoder_.channels == 2 &&
secondary_encoder_.channels == 1) {
tolerance = 12;
}
if (!start_in_sync) {
pcm_file.Read10MsData(audio_frame);
// Unregister the secondary codec and feed only the primary.
acm_dual_stream_->UnregisterSecondarySendCodec();
EXPECT_EQ(0, acm_dual_stream_->Add10MsData(audio_frame));
EXPECT_EQ(0, acm_ref_primary_->Add10MsData(audio_frame));
ASSERT_EQ(0,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
}
const int kNumFramesToProcess = 100;
int frame_cntr = 0;
while (!pcm_file.EndOfFile() && frame_cntr < kNumFramesToProcess) {
pcm_file.Read10MsData(audio_frame);
frame_cntr++;
EXPECT_EQ(0, acm_dual_stream_->Add10MsData(audio_frame));
EXPECT_EQ(0, acm_ref_primary_->Add10MsData(audio_frame));
EXPECT_EQ(0, acm_ref_secondary_->Add10MsData(audio_frame));
EXPECT_GE(acm_dual_stream_->Process(), 0);
EXPECT_GE(acm_ref_primary_->Process(), 0);
EXPECT_GE(acm_ref_secondary_->Process(), 0);
if (start_in_sync || frame_cntr > 7) {
// If we haven't started in sync, the first few audio frames might differ
// slightly due to the difference in the states of the resamplers of the
// dual-stream ACM and the reference ACMs.
Validate(start_in_sync, tolerance);
} else {
// SendData stores the payloads; since we are not comparing them here, we
// have to free the space by resetting these flags.
memset(payload_ref_is_stored_, 0, sizeof(payload_ref_is_stored_));
memset(payload_dual_is_stored_, 0, sizeof(payload_dual_is_stored_));
}
}
pcm_file.Close();
// Make sure that the numbers of received payloads match. In the case of the
// secondary encoder, the dual-stream ACM might deliver one payload fewer. The
// reason is that some secondary payloads are stored to be sent with a payload
// generated later, and the input file may end before that "next" payload is
// produced.
EXPECT_EQ(num_received_payloads_ref_[kPrimary],
num_received_payloads_dual_[kPrimary]);
EXPECT_TRUE(num_received_payloads_ref_[kSecondary] ==
num_received_payloads_dual_[kSecondary] ||
num_received_payloads_ref_[kSecondary] ==
(num_received_payloads_dual_[kSecondary] + 1));
// Make sure all received payloads are compared.
if (start_in_sync) {
EXPECT_EQ(num_received_payloads_dual_[kPrimary],
num_compared_payloads_[kPrimary]);
EXPECT_EQ(num_received_payloads_dual_[kSecondary],
num_compared_payloads_[kSecondary]);
} else {
// In the asynchronous test we don't compare the first couple of frames, so
// we should account for them in our counting.
EXPECT_GE(num_compared_payloads_[kPrimary],
num_received_payloads_dual_[kPrimary] - 4);
EXPECT_GE(num_compared_payloads_[kSecondary],
num_received_payloads_dual_[kSecondary] - 4);
}
}
bool DualStreamTest::EqualTimestamp(int stream_index, int position) {
if (timestamp_dual_[stream_index][position] !=
timestamp_ref_[stream_index][position]) {
return false;
}
return true;
}
int DualStreamTest::EqualPayloadLength(int stream_index, int position) {
return abs(payload_len_dual_[stream_index][position] -
payload_len_ref_[stream_index][position]);
}
bool DualStreamTest::EqualPayloadData(int stream_index, int position) {
assert(payload_len_dual_[stream_index][position] ==
payload_len_ref_[stream_index][position]);
int offset = position * MAX_PAYLOAD_SIZE_BYTE;
for (int n = 0; n < payload_len_dual_[stream_index][position]; n++) {
if (payload_data_dual_[stream_index][offset + n] !=
payload_data_ref_[stream_index][offset + n]) {
return false;
}
}
return true;
}
void DualStreamTest::Validate(bool start_in_sync, int tolerance) {
for (int stream_index = 0; stream_index < kMaxNumStreams; stream_index++) {
int my_tolerance = stream_index == kPrimary ? 0 : tolerance;
for (int position = 0; position < kMaxNumStoredPayloads; position++) {
if (payload_ref_is_stored_[stream_index][position] == 1 &&
payload_dual_is_stored_[stream_index][position] == 1) {
// Check timestamps only if the codecs started in sync or this is the
// primary stream.
if (start_in_sync || stream_index == 0)
EXPECT_TRUE(EqualTimestamp(stream_index, position));
EXPECT_LE(EqualPayloadLength(stream_index, position), my_tolerance);
if (my_tolerance == 0)
EXPECT_TRUE(EqualPayloadData(stream_index, position));
num_compared_payloads_[stream_index]++;
payload_ref_is_stored_[stream_index][position] = 0;
payload_dual_is_stored_[stream_index][position] = 0;
}
}
}
}
WebRtc_Word32 DualStreamTest::SendData(
FrameType frameType, WebRtc_UWord8 payload_type, WebRtc_UWord32 timestamp,
const WebRtc_UWord8* payload_data, WebRtc_UWord16 payload_size,
const RTPFragmentationHeader* fragmentation) {
int position;
int stream_index;
if (payload_type == red_encoder_.pltype) {
if (fragmentation == NULL) {
assert(false);
return -1;
}
// As the oldest payloads are at the higher indices of the fragmentation
// vector, we loop backward to be able to check that the timestamp
// increments are correct.
for (int n = fragmentation->fragmentationVectorSize - 1; n >= 0; --n) {
if (fragmentation->fragmentationPlType[n] == primary_encoder_.pltype) {
// Received primary payload from dual stream.
stream_index = kPrimary;
} else if (fragmentation->fragmentationPlType[n] ==
secondary_encoder_.pltype) {
// Received secondary payload from dual stream.
stream_index = kSecondary;
} else {
assert(false);
return -1;
}
num_received_payloads_dual_[stream_index]++;
if (payload_dual_is_stored_[stream_index][0] == 0) {
position = 0;
} else if (payload_dual_is_stored_[stream_index][1] == 0) {
position = 1;
} else {
assert(false);
return -1;
}
timestamp_dual_[stream_index][position] = timestamp -
fragmentation->fragmentationTimeDiff[n];
payload_len_dual_[stream_index][position] =
fragmentation->fragmentationLength[n];
memcpy(
&payload_data_dual_[stream_index][position * MAX_PAYLOAD_SIZE_BYTE],
&payload_data[fragmentation->fragmentationOffset[n]],
fragmentation->fragmentationLength[n]);
payload_dual_is_stored_[stream_index][position] = 1;
// Check if timestamps are incremented correctly.
if (received_payload_[stream_index]) {
int t = timestamp_dual_[stream_index][position] -
last_timestamp_[stream_index];
if ((stream_index == kPrimary) && (t != primary_encoder_.pacsize)) {
assert(false);
return -1;
}
if ((stream_index == kSecondary) && (t != secondary_encoder_.pacsize)) {
assert(false);
return -1;
}
} else {
received_payload_[stream_index] = true;
}
last_timestamp_[stream_index] = timestamp_dual_[stream_index][position];
}
} else {
if (fragmentation != NULL) {
assert(false);
return -1;
}
if (payload_type == primary_encoder_.pltype) {
stream_index = kPrimary;
} else if (payload_type == secondary_encoder_.pltype) {
stream_index = kSecondary;
} else {
assert(false);
return -1;
}
num_received_payloads_ref_[stream_index]++;
if (payload_ref_is_stored_[stream_index][0] == 0) {
position = 0;
} else if (payload_ref_is_stored_[stream_index][1] == 0) {
position = 1;
} else {
assert(false);
return -1;
}
timestamp_ref_[stream_index][position] = timestamp;
payload_len_ref_[stream_index][position] = payload_size;
memcpy(&payload_data_ref_[stream_index][position * MAX_PAYLOAD_SIZE_BYTE],
payload_data, payload_size);
payload_ref_is_stored_[stream_index][position] = 1;
}
return 0;
}
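To make the backward loop concrete, a worked example of the timestamp reconstruction with illustrative values:

// Worked example (illustrative values): a RED packet arrives with
// timestamp 4800, carrying the current primary block and one older
// secondary block (iSAC, 30 ms at 16 kHz, i.e. pacsize == 480).
//
//   fragmentationTimeDiff[0] == 0    ->  primary   = 4800 - 0   == 4800
//   fragmentationTimeDiff[1] == 480  ->  secondary = 4800 - 480 == 4320
//
// If the previous secondary timestamp was 3840, the increment is
// 4320 - 3840 == 480 == secondary_encoder_.pacsize, which is exactly the
// check performed against |last_timestamp_| above.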
// Mono input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncMonoInputMonoPrimaryWb20Ms) {
InitializeSender(20, 1, 16000);
Perform(true, 1);
}
// Mono input, stereo primary WB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncMonoInputStereoPrimaryWb20Ms) {
InitializeSender(20, 2, 16000);
Perform(true, 1);
}
// Mono input, mono primary SWB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncMonoInputMonoPrimarySwb20Ms) {
InitializeSender(20, 1, 32000);
Perform(true, 1);
}
// Mono input, stereo primary SWB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncMonoInputStereoPrimarySwb20Ms) {
InitializeSender(20, 2, 32000);
Perform(true, 1);
}
// Mono input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest, BitExactSyncMonoInputMonoPrimaryWb40Ms) {
InitializeSender(40, 1, 16000);
Perform(true, 1);
}
// Mono input, stereo primary WB 40 ms frame.
TEST_F(DualStreamTest, BitExactSyncMonoInputStereoPrimaryWb40Ms) {
InitializeSender(40, 2, 16000);
Perform(true, 1);
}
// Stereo input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncStereoInputMonoPrimaryWb20Ms) {
InitializeSender(20, 1, 16000);
Perform(true, 2);
}
// Stereo input, stereo primary WB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncStereoInputStereoPrimaryWb20Ms) {
InitializeSender(20, 2, 16000);
Perform(true, 2);
}
// Stereo input, mono primary SWB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncStereoInputMonoPrimarySwb20Ms) {
InitializeSender(20, 1, 32000);
Perform(true, 2);
}
// Stereo input, stereo primary SWB 20 ms frame.
TEST_F(DualStreamTest, BitExactSyncStereoInputStereoPrimarySwb20Ms) {
InitializeSender(20, 2, 32000);
Perform(true, 2);
}
// Stereo input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest, BitExactSyncStereoInputMonoPrimaryWb40Ms) {
InitializeSender(40, 1, 16000);
Perform(true, 2);
}
// Stereo input, stereo primary WB 40 ms frame.
TEST_F(DualStreamTest, BitExactSyncStereoInputStereoPrimaryWb40Ms) {
InitializeSender(40, 2, 16000);
Perform(true, 2);
}
// Asynchronous tests: the ACM is fed with data before the secondary coder is
// registered.
// Mono input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest, BitExactAsyncMonoInputMonoPrimaryWb20Ms) {
InitializeSender(20, 1, 16000);
Perform(false, 1);
}
// Mono input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest, BitExactAsyncMonoInputMonoPrimaryWb40Ms) {
InitializeSender(40, 1, 16000);
Perform(false, 1);
}
TEST_F(DualStreamTest, Api) {
PopulateCodecInstances(20, 1, 16000);
CodecInst my_codec;
ASSERT_EQ(0, acm_dual_stream_->InitializeSender());
ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
// Not allowed to register a secondary codec before a primary codec is
// registered.
ASSERT_EQ(-1,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
ASSERT_EQ(0, acm_dual_stream_->RegisterSendCodec(primary_encoder_));
ASSERT_EQ(0, acm_dual_stream_->SetVAD(true, true, VADNormal));
// Make sure VAD is activated.
bool vad_status;
bool dtx_status;
ACMVADMode vad_mode;
EXPECT_EQ(0, acm_dual_stream_->VAD(vad_status, dtx_status, vad_mode));
EXPECT_TRUE(vad_status);
EXPECT_TRUE(dtx_status);
EXPECT_EQ(VADNormal, vad_mode);
ASSERT_EQ(0,
acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
ASSERT_EQ(0, acm_dual_stream_->SecondarySendCodec(&my_codec));
ASSERT_EQ(0, memcmp(&my_codec, &secondary_encoder_, sizeof(my_codec)));
// Test that VAD gets disabled after registering the secondary codec.
EXPECT_EQ(0, acm_dual_stream_->VAD(vad_status, dtx_status, vad_mode));
EXPECT_FALSE(vad_status);
EXPECT_FALSE(dtx_status);
// Activating VAD should fail.
ASSERT_EQ(-1, acm_dual_stream_->SetVAD(true, true, VADNormal));
// Unregister the secondary encoder; it should then be possible to activate
// VAD.
acm_dual_stream_->UnregisterSecondarySendCodec();
// Querying the secondary send codec should now fail.
ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
ASSERT_EQ(0, acm_dual_stream_->SetVAD(true, true, VADVeryAggr));
// Make sure VAD is activated.
EXPECT_EQ(0, acm_dual_stream_->VAD(vad_status, dtx_status, vad_mode));
EXPECT_TRUE(vad_status);
EXPECT_TRUE(dtx_status);
EXPECT_EQ(VADVeryAggr, vad_mode);
}
} // namespace webrtc