Split of stereo packets moved

In this CL I have rewritten the way we handle stereo packets in VoE.
Before this change we split the packets in the RTP module and added two packets to ACM, one for the left channel and one for the right. This led to timing problems when a different thread called RecOut in between the two calls that add a stereo packet to ACM. (RecOut is called on the receiving side to pull audio data and decode packets.)
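To illustrate the new flow, here is a minimal standalone sketch; the type and function names below are made up for the example and are not the actual WebRTC interfaces. ACM reorders (or, for true-stereo codecs like CELT, duplicates) the payload so that the first half holds the master/left data and the second half the slave/right data, and then makes a single call that inserts both halves into NetEQ, so RecOut can never run between the master and slave insertions.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in for the NetEQ master/slave packet buffers.
struct FakeNetEq {
  std::vector<uint8_t> master;
  std::vector<uint8_t> slave;
};

// Illustrative "true stereo" split (the CELT case in this CL): duplicate the
// payload so both master and slave get the full packet. The caller must
// provide a buffer that has room for twice the original payload.
void SplitStereoPacketDuplicate(uint8_t* payload, int32_t* payload_length) {
  assert(payload != NULL);
  assert(*payload_length > 0);
  memcpy(&payload[*payload_length], &payload[0], *payload_length);
  *payload_length *= 2;
}

// Single insertion point: the first half goes into master, the second half
// into slave. Because both insertions happen inside one call, no RecOut can
// interleave between them.
void RecIn(FakeNetEq* neteq, const uint8_t* payload, int32_t length,
           int channels) {
  int32_t half = (channels == 2) ? length / 2 : length;
  neteq->master.assign(payload, payload + half);
  if (channels == 2) {
    neteq->slave.assign(payload + half, payload + length);
  }
}

Conceptually, IncomingPacket() in ACM copies the payload to a local buffer, calls the codec's SplitStereoPacket(), and then calls RecIn() once with the channel count set to 2.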

While doing the change I also took the opportunity to change some functions so that the data stream is uint8 everywhere.

The list of files in this CL is long, but it should be fairly easy to review. In some of the tests it is difficult to see what has changed, but I can explain offline.

Reviewers:
Björn - /src/modules/audio_coding
Patrik - /src/modules/rtp_rtcp
Patrik - /src/modules/utility
Henrik A - /src/voice_engine

BUG=410
TEST=voe_cmd_test

Review URL: https://webrtc-codereview.appspot.com/473003

git-svn-id: http://webrtc.googlecode.com/svn/trunk@2012 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
tina.legrand@webrtc.org 2012-04-12 11:02:38 +00:00
parent ce33035dee
commit 16b6b90a82
35 changed files with 406 additions and 442 deletions

View File

@ -546,7 +546,7 @@ class AudioCodingModule: public Module {
// -1 if failed to push in the payload // -1 if failed to push in the payload
// 0 if payload is successfully pushed in. // 0 if payload is successfully pushed in.
// //
virtual WebRtc_Word32 IncomingPacket(const WebRtc_Word8* incomingPayload, virtual WebRtc_Word32 IncomingPacket(const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLengthByte, const WebRtc_Word32 payloadLengthByte,
const WebRtcRTPHeader& rtpInfo) = 0; const WebRtcRTPHeader& rtpInfo) = 0;
@ -574,7 +574,7 @@ class AudioCodingModule: public Module {
// -1 if failed to push in the payload // -1 if failed to push in the payload
// 0 if payload is successfully pushed in. // 0 if payload is successfully pushed in.
// //
virtual WebRtc_Word32 IncomingPayload(const WebRtc_Word8* incomingPayload, virtual WebRtc_Word32 IncomingPayload(const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLengthByte, const WebRtc_Word32 payloadLengthByte,
const WebRtc_UWord8 payloadType, const WebRtc_UWord8 payloadType,
const WebRtc_UWord32 timestamp = 0) = 0; const WebRtc_UWord32 timestamp = 0) = 0;

View File

@ -103,6 +103,9 @@ int16_t ACMCELT::SetBitRateSafe(const int32_t /*rate*/) {
return -1; return -1;
} }
void ACMCELT::SplitStereoPacket(uint8_t* /*payload*/,
int32_t* /*payload_length*/) {}
#else //===================== Actual Implementation ======================= #else //===================== Actual Implementation =======================
ACMCELT::ACMCELT(int16_t codecID) ACMCELT::ACMCELT(int16_t codecID)
@ -327,6 +330,19 @@ int16_t ACMCELT::SetBitRateSafe(const int32_t rate) {
} }
} }
// Copy the stereo packet so that NetEq will insert into both master and slave.
void ACMCELT::SplitStereoPacket(uint8_t* payload, int32_t* payload_length) {
// Check for valid inputs.
assert(payload != NULL);
assert(*payload_length > 0);
// Duplicate the payload.
memcpy(&payload[*payload_length], &payload[0],
sizeof(uint8_t) * (*payload_length));
// Double the size of the packet.
*payload_length *= 2;
}
#endif #endif
} // namespace webrtc } // namespace webrtc

View File

@ -61,6 +61,8 @@ class ACMCELT : public ACMGenericCodec {
int16_t SetBitRateSafe(const int32_t rate); int16_t SetBitRateSafe(const int32_t rate);
void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
CELT_encinst_t_* enc_inst_ptr_; CELT_encinst_t_* enc_inst_ptr_;
CELT_decinst_t_* dec_inst_ptr_; CELT_decinst_t_* dec_inst_ptr_;
uint16_t sampling_freq_; uint16_t sampling_freq_;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -99,6 +99,9 @@ WebRtc_Word16 ACMG722::UnregisterFromNetEqSafe(
return -1; return -1;
} }
void ACMG722::SplitStereoPacket(uint8_t* /*payload*/,
int32_t* /*payload_length*/) {}
#else //===================== Actual Implementation ======================= #else //===================== Actual Implementation =======================
// Encoder and decoder memory // Encoder and decoder memory
@ -332,6 +335,35 @@ WebRtc_Word16 ACMG722::UnregisterFromNetEqSafe(ACMNetEQ* netEq,
return netEq->RemoveCodec(kDecoderG722); return netEq->RemoveCodec(kDecoderG722);
} }
// Split the stereo packet and place left and right channel after each other
// in the payload vector.
void ACMG722::SplitStereoPacket(uint8_t* payload, int32_t* payload_length) {
uint8_t right_byte;
// Check for valid inputs.
assert(payload != NULL);
assert(*payload_length > 0);
// Regroup the 4 bits/sample so that the payload becomes |l1 l2| |r1 r2| |l3 l4| |r3 r4| ...,
// where "lx" is the 4 bits representing left sample number x, and "rx" the
// corresponding right sample. Two samples fit in one byte, represented with |...|.
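// For example, input bytes 0xAB 0xCD (l1 = 0xA, r1 = 0xB, l2 = 0xC, r2 = 0xD)
// are regrouped into 0xAC 0xBD.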
for (int i = 0; i < *payload_length; i += 2) {
right_byte = ((payload[i] & 0x0F) << 4) + (payload[i + 1] & 0x0F);
payload[i] = (payload[i] & 0xF0) + (payload[i + 1] >> 4);
payload[i + 1] = right_byte;
}
// Move one byte representing right channel each loop, and place it at the
// end of the bytestream vector. After looping the data is reordered to:
// |l1 l2| |l3 l4| ... |l(N-1) lN| |r1 r2| |r3 r4| ... |r(N-1) r(N)|,
// where N is the total number of samples.
for (int i = 0; i < *payload_length / 2; i++) {
right_byte = payload[i + 1];
memmove(&payload[i + 1], &payload[i + 2], *payload_length - i - 2);
payload[*payload_length - 1] = right_byte;
}
}
#endif #endif
} // namespace webrtc } // namespace webrtc

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -63,6 +63,8 @@ class ACMG722: public ACMGenericCodec {
WebRtc_Word16 UnregisterFromNetEqSafe(ACMNetEQ* netEq, WebRtc_Word16 UnregisterFromNetEqSafe(ACMNetEQ* netEq,
WebRtc_Word16 payloadType); WebRtc_Word16 payloadType);
void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
ACMG722EncStr* _ptrEncStr; ACMG722EncStr* _ptrEncStr;
ACMG722DecStr* _ptrDecStr; ACMG722DecStr* _ptrDecStr;

View File

@ -141,6 +141,23 @@ public:
WebRtc_Word16* audioSamples, WebRtc_Word16* audioSamples,
WebRtc_Word8* speechType); WebRtc_Word8* speechType);
///////////////////////////////////////////////////////////////////////////
// void SplitStereoPacket()
// This function is used to split stereo payloads into left and right channels.
// Codecs that have stereo support have their own implementation of the
// function.
//
// Input/Output:
// -payload : a vector with the received payload data.
// The function will reorder the data so that
// first half holds the left channel data, and the
// second half the right channel data.
// -payload_length : length of payload in bytes. Will be changed to
// twice the input in case of true stereo, where
// we simply copy the data and return it both for
// left channel and right channel decoding.
virtual void SplitStereoPacket(WebRtc_UWord8* /* payload */,
WebRtc_Word32* /* payload_length */) {}
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
// bool EncoderInitialized(); // bool EncoderInitialized();

View File

@ -513,10 +513,12 @@ ACMNetEQ::NetworkStatistics(
WebRtc_Word32 WebRtc_Word32
ACMNetEQ::RecIn( ACMNetEQ::RecIn(
const WebRtc_Word8* incomingPayload, const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLength, const WebRtc_Word32 payloadLength,
const WebRtcRTPHeader& rtpInfo) const WebRtcRTPHeader& rtpInfo)
{ {
WebRtc_Word16 payload_length = static_cast<WebRtc_Word16>(payloadLength);
// translate to NetEq struct // translate to NetEq struct
WebRtcNetEQ_RTPInfo netEqRTPInfo; WebRtcNetEQ_RTPInfo netEqRTPInfo;
netEqRTPInfo.payloadType = rtpInfo.header.payloadType; netEqRTPInfo.payloadType = rtpInfo.header.payloadType;
@ -536,28 +538,32 @@ ACMNetEQ::RecIn(
(_currentSampFreqKHz * nowInMs); (_currentSampFreqKHz * nowInMs);
int status; int status;
// In case of stereo payload, first half of the data should be pushed into
if(rtpInfo.type.Audio.channel == 1) // master, and the second half into slave.
{ if (rtpInfo.type.Audio.channel == 2) {
if(!_isInitialized[0]) payload_length = payload_length / 2;
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"RecIn: NetEq is not initialized.");
return -1;
}
// PUSH into Master
status = WebRtcNetEQ_RecInRTPStruct(_inst[0], &netEqRTPInfo,
(WebRtc_UWord8 *)incomingPayload, (WebRtc_Word16)payloadLength,
recvTimestamp);
if(status < 0)
{
LogError("RecInRTPStruct", 0);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"RecIn: NetEq, error in pushing in Master");
return -1;
}
} }
else if(rtpInfo.type.Audio.channel == 2)
// Check that master is initialized.
if(!_isInitialized[0])
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"RecIn: NetEq is not initialized.");
return -1;
}
// PUSH into Master
status = WebRtcNetEQ_RecInRTPStruct(_inst[0], &netEqRTPInfo,
incomingPayload, payload_length, recvTimestamp);
if(status < 0)
{
LogError("RecInRTPStruct", 0);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"RecIn: NetEq, error in pushing in Master");
return -1;
}
// If the received stream is stereo, insert second half of packet into slave.
if(rtpInfo.type.Audio.channel == 2)
{ {
if(!_isInitialized[1]) if(!_isInitialized[1])
{ {
@ -567,8 +573,9 @@ ACMNetEQ::RecIn(
} }
// PUSH into Slave // PUSH into Slave
status = WebRtcNetEQ_RecInRTPStruct(_inst[1], &netEqRTPInfo, status = WebRtcNetEQ_RecInRTPStruct(_inst[1], &netEqRTPInfo,
(WebRtc_UWord8 *)incomingPayload, (WebRtc_Word16)payloadLength, &incomingPayload[payload_length],
recvTimestamp); payload_length,
recvTimestamp);
if(status < 0) if(status < 0)
{ {
LogError("RecInRTPStruct", 1); LogError("RecInRTPStruct", 1);
@ -577,14 +584,6 @@ ACMNetEQ::RecIn(
return -1; return -1;
} }
} }
else
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"RecIn: NetEq, error invalid numbe of channels %d \
(1, for Master stream, and 2, for slave stream, are valid values)",
rtpInfo.type.Audio.channel);
return -1;
}
return 0; return 0;
} }
@ -677,6 +676,7 @@ ACMNetEQ::RecOut(
} }
} }
} }
if(payloadLenSample != payloadLenSampleSlave) if(payloadLenSample != payloadLenSampleSlave)
{ {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id, WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,

View File

@ -85,7 +85,7 @@ public:
// <0 if NetEQ returned an error. // <0 if NetEQ returned an error.
// //
WebRtc_Word32 RecIn( WebRtc_Word32 RecIn(
const WebRtc_Word8* incomingPayload, const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLength, const WebRtc_Word32 payloadLength,
const WebRtcRTPHeader& rtpInfo); const WebRtcRTPHeader& rtpInfo);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -67,7 +67,7 @@ void AcmNetEqTest::InsertZeroPacket(uint16_t sequence_number,
rtp_header.header.payloadType = payload_type; rtp_header.header.payloadType = payload_type;
rtp_header.header.markerBit = marker_bit; rtp_header.header.markerBit = marker_bit;
rtp_header.type.Audio.channel = 1; rtp_header.type.Audio.channel = 1;
ASSERT_EQ(0, neteq_.RecIn(reinterpret_cast<WebRtc_Word8*>(payload), ASSERT_EQ(0, neteq_.RecIn(reinterpret_cast<WebRtc_UWord8*>(payload),
len_payload_bytes, rtp_header)); len_payload_bytes, rtp_header));
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -135,6 +135,8 @@ ACMPCM16B::UnregisterFromNetEqSafe(
} }
void ACMPCM16B::SplitStereoPacket(uint8_t* /*payload*/,
int32_t* /*payload_length*/) {}
#else //===================== Actual Implementation ======================= #else //===================== Actual Implementation =======================
@ -329,6 +331,29 @@ the stored payload type",
} }
} }
// Split the stereo packet and place left and right channel after each other
// in the payload vector.
void ACMPCM16B::SplitStereoPacket(uint8_t* payload, int32_t* payload_length) {
uint8_t right_byte_msb;
uint8_t right_byte_lsb;
// Check for valid inputs.
assert(payload != NULL);
assert(*payload_length > 0);
// Move the two bytes representing the right channel each loop, and place them at the
// end of the bytestream vector. After looping the data is reordered to:
// l1 l2 l3 l4 ... l(N-1) lN r1 r2 r3 r4 ... r(N-1) r(N),
// where N is the total number of samples.
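// For example, with 16-bit samples the interleaved byte stream
// L1a L1b R1a R1b L2a L2b R2a R2b is reordered to
// L1a L1b L2a L2b R1a R1b R2a R2b.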
for (int i = 0; i < *payload_length / 2; i += 2) {
right_byte_msb = payload[i + 2];
right_byte_lsb = payload[i + 3];
memmove(&payload[i + 2], &payload[i + 4], *payload_length - i - 4);
payload[*payload_length - 2] = right_byte_msb;
payload[*payload_length - 1] = right_byte_lsb;
}
}
#endif #endif
} // namespace webrtc } // namespace webrtc

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -61,6 +61,8 @@ protected:
void InternalDestructEncoderInst( void InternalDestructEncoderInst(
void* ptrInst); void* ptrInst);
void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
WebRtc_Word32 _samplingFreqHz; WebRtc_Word32 _samplingFreqHz;
}; };

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -162,4 +162,24 @@ the stored payload type",
return netEq->RemoveCodec(kDecoderPCMa); return netEq->RemoveCodec(kDecoderPCMa);
} }
// Split the stereo packet and place left and right channel after each other
// in the payload vector.
void ACMPCMA::SplitStereoPacket(uint8_t* payload, int32_t* payload_length) {
uint8_t right_byte;
// Check for valid inputs.
assert(payload != NULL);
assert(*payload_length > 0);
// Move the one byte representing the right channel each loop, and place it at the
// end of the bytestream vector. After looping the data is reordered to:
// l1 l2 l3 l4 ... l(N-1) lN r1 r2 r3 r4 ... r(N-1) r(N),
// where N is the total number of samples.
for (int i = 0; i < *payload_length / 2; i ++) {
right_byte = payload[i + 1];
memmove(&payload[i + 1], &payload[i + 2], *payload_length - i - 2);
payload[*payload_length - 1] = right_byte;
}
}
} // namespace webrtc } // namespace webrtc

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -60,6 +60,8 @@ protected:
void InternalDestructEncoderInst( void InternalDestructEncoderInst(
void* ptrInst); void* ptrInst);
void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -164,4 +164,24 @@ the stored payload type",
return netEq->RemoveCodec(kDecoderPCMu); return netEq->RemoveCodec(kDecoderPCMu);
} }
// Split the stereo packet and place left and right channel after each other
// in the payload vector.
void ACMPCMU::SplitStereoPacket(uint8_t* payload, int32_t* payload_length) {
uint8_t right_byte;
// Check for valid inputs.
assert(payload != NULL);
assert(*payload_length > 0);
// Move the one byte representing the right channel each loop, and place it at the
// end of the bytestream vector. After looping the data is reordered to:
// l1 l2 l3 l4 ... l(N-1) lN r1 r2 r3 r4 ... r(N-1) r(N),
// where N is the total number of samples.
for (int i = 0; i < *payload_length / 2; i ++) {
right_byte = payload[i + 1];
memmove(&payload[i + 1], &payload[i + 2], *payload_length - i - 2);
payload[*payload_length - 1] = right_byte;
}
}
} // namespace webrtc } // namespace webrtc

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -60,6 +60,8 @@ protected:
void InternalDestructEncoderInst( void InternalDestructEncoderInst(
void* ptrInst); void* ptrInst);
void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -44,6 +44,11 @@ enum {
kACMToneEnd = 999 kACMToneEnd = 999
}; };
// Maximum number of bytes in one packet (PCM16B, 20 ms packets, stereo)
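// (e.g. PCM16B at 32 kHz: 20 ms * 32 samples/ms * 2 bytes/sample * 2 channels
// = 2560 bytes)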
enum {
kMaxPacketSize = 2560
};
AudioCodingModuleImpl::AudioCodingModuleImpl( AudioCodingModuleImpl::AudioCodingModuleImpl(
const WebRtc_Word32 id): const WebRtc_Word32 id):
_packetizationCallback(NULL), _packetizationCallback(NULL),
@ -62,7 +67,8 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(
_stereoSend(false), _stereoSend(false),
_prev_received_channel(0), _prev_received_channel(0),
_expected_channels(1), _expected_channels(1),
_currentSendCodecIdx(-1), // invalid value _currentSendCodecIdx(-1), // invalid value
_current_receive_codec_idx(-1), // invalid value
_sendCodecRegistered(false), _sendCodecRegistered(false),
_acmCritSect(CriticalSectionWrapper::CreateCriticalSection()), _acmCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_vadCallback(NULL), _vadCallback(NULL),
@ -1777,10 +1783,14 @@ AudioCodingModuleImpl::ReceiveCodec(
// Incoming packet from network parsed and ready for decode // Incoming packet from network parsed and ready for decode
WebRtc_Word32 WebRtc_Word32
AudioCodingModuleImpl::IncomingPacket( AudioCodingModuleImpl::IncomingPacket(
const WebRtc_Word8* incomingPayload, const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLength, const WebRtc_Word32 payloadLength,
const WebRtcRTPHeader& rtpInfo) const WebRtcRTPHeader& rtpInfo)
{ {
WebRtcRTPHeader rtp_header;
memcpy(&rtp_header, &rtpInfo, sizeof(WebRtcRTPHeader));
if (payloadLength < 0) if (payloadLength < 0)
{ {
// Log error // Log error
@ -1807,7 +1817,7 @@ AudioCodingModuleImpl::IncomingPacket(
if(rtpInfo.header.payloadType == _receiveREDPayloadType) if(rtpInfo.header.payloadType == _receiveREDPayloadType)
{ {
// get the primary payload-type. // get the primary payload-type.
myPayloadType = (WebRtc_UWord8)(incomingPayload[0] & 0x7F); myPayloadType = incomingPayload[0] & 0x7F;
} }
else else
{ {
@ -1841,12 +1851,14 @@ AudioCodingModuleImpl::IncomingPacket(
} }
_codecs[i]->UpdateDecoderSampFreq(i); _codecs[i]->UpdateDecoderSampFreq(i);
_netEq.SetReceivedStereo(_stereoReceive[i]); _netEq.SetReceivedStereo(_stereoReceive[i]);
_current_receive_codec_idx = i;
// If we have a change in expected number of channels, // If we have a change in expected number of channels,
// flush packet buffers in NetEQ. // flush packet buffers in NetEQ.
if ((_stereoReceive[i] && (_expected_channels == 1)) || if ((_stereoReceive[i] && (_expected_channels == 1)) ||
(!_stereoReceive[i] && (_expected_channels == 2))) { (!_stereoReceive[i] && (_expected_channels == 2))) {
_netEq.FlushBuffers(); _netEq.FlushBuffers();
_codecs[i]->ResetDecoder(myPayloadType);
} }
// Store number of channels we expect to receive for the // Store number of channels we expect to receive for the
@ -1868,28 +1880,21 @@ AudioCodingModuleImpl::IncomingPacket(
} }
} }
// Check that number of received channels match the setup for the // Split the payload for stereo packets, so that first half of payload
// received codec. // vector holds left channel, and second half holds right channel.
if (_expected_channels == 2) { if (_expected_channels == 2) {
if ((_prev_received_channel == 1) && (rtpInfo.type.Audio.channel == 1)) { // Create a new vector for the payload, maximum payload size.
// We expect every second call to this function to be for channel 2, WebRtc_Word32 length = payloadLength;
// since we are in stereo-receive mode. WebRtc_UWord8 payload[kMaxPacketSize];
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id, assert(payloadLength <= kMaxPacketSize);
"IncomingPacket() Error, payload is" memcpy(payload, incomingPayload, payloadLength);
"mono, but codec registered as stereo."); _codecs[_current_receive_codec_idx]->SplitStereoPacket(payload, &length);
return -1; rtp_header.type.Audio.channel = 2;
} // Insert packet into NetEQ.
_prev_received_channel = rtpInfo.type.Audio.channel; return _netEq.RecIn(payload, length, rtp_header);
} else if (rtpInfo.type.Audio.channel == 2) { } else {
// Codec is registered as mono, but we receive a stereo packet. return _netEq.RecIn(incomingPayload, payloadLength, rtp_header);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"IncomingPacket() Error, payload is"
"stereo, but codec registered as mono.");
return -1;
} }
// Insert packet into NetEQ.
return _netEq.RecIn(incomingPayload, payloadLength, rtpInfo);
} }
// Minimum playout delay (Used for lip-sync) // Minimum playout delay (Used for lip-sync)
@ -2263,9 +2268,10 @@ AudioCodingModuleImpl::RegisterVADCallback(
return 0; return 0;
} }
// TODO(tlegrand): Modify this function to work for stereo, and add tests.
WebRtc_Word32 WebRtc_Word32
AudioCodingModuleImpl::IncomingPayload( AudioCodingModuleImpl::IncomingPayload(
const WebRtc_Word8* incomingPayload, const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLength, const WebRtc_Word32 payloadLength,
const WebRtc_UWord8 payloadType, const WebRtc_UWord8 payloadType,
const WebRtc_UWord32 timestamp) const WebRtc_UWord32 timestamp)

View File

@ -187,14 +187,14 @@ public:
// incoming packet from network parsed and ready for decode // incoming packet from network parsed and ready for decode
WebRtc_Word32 IncomingPacket( WebRtc_Word32 IncomingPacket(
const WebRtc_Word8* incomingPayload, const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLength, const WebRtc_Word32 payloadLength,
const WebRtcRTPHeader& rtpInfo); const WebRtcRTPHeader& rtpInfo);
// Incoming payloads, without rtp-info, the rtp-info will be created in ACM. // Incoming payloads, without rtp-info, the rtp-info will be created in ACM.
// One usage for this API is when pre-encoded files are pushed in ACM. // One usage for this API is when pre-encoded files are pushed in ACM.
WebRtc_Word32 IncomingPayload( WebRtc_Word32 IncomingPayload(
const WebRtc_Word8* incomingPayload, const WebRtc_UWord8* incomingPayload,
const WebRtc_Word32 payloadLength, const WebRtc_Word32 payloadLength,
const WebRtc_UWord8 payloadType, const WebRtc_UWord8 payloadType,
const WebRtc_UWord32 timestamp = 0); const WebRtc_UWord32 timestamp = 0);
@ -328,6 +328,7 @@ private:
int _prev_received_channel; int _prev_received_channel;
int _expected_channels; int _expected_channels;
WebRtc_Word32 _currentSendCodecIdx; WebRtc_Word32 _currentSendCodecIdx;
int _current_receive_codec_idx;
bool _sendCodecRegistered; bool _sendCodecRegistered;
ACMResampler _inputResampler; ACMResampler _inputResampler;
ACMResampler _outputResampler; ACMResampler _outputResampler;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -129,13 +129,8 @@ Channel::SendData(
} }
} }
status = _receiverACM->IncomingPacket(_payloadData, payloadDataSize,
//status = _receiverACM->IncomingPayload((WebRtc_Word8*)_payloadData, payloadSize, payloadType, timeStamp); rtpInfo);
status = _receiverACM->IncomingPacket((WebRtc_Word8*)_payloadData, payloadDataSize, rtpInfo);
//delete [] payloadData;
return status; return status;
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -86,7 +86,7 @@ class Receiver {
PCMFile _pcmFile; PCMFile _pcmFile;
WebRtc_Word16* _playoutBuffer; WebRtc_Word16* _playoutBuffer;
WebRtc_UWord16 _playoutLengthSmpls; WebRtc_UWord16 _playoutLengthSmpls;
WebRtc_Word8 _incomingPayload[MAX_INCOMING_PAYLOAD]; WebRtc_UWord8 _incomingPayload[MAX_INCOMING_PAYLOAD];
WebRtc_UWord16 _payloadSizeBytes; WebRtc_UWord16 _payloadSizeBytes;
WebRtc_UWord16 _realPayloadSizeBytes; WebRtc_UWord16 _realPayloadSizeBytes;
WebRtc_Word32 _frequency; WebRtc_Word32 _frequency;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -106,7 +106,7 @@ RTPBuffer::Write(const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeStamp
WebRtc_UWord16 WebRtc_UWord16
RTPBuffer::Read(WebRtcRTPHeader* rtpInfo, RTPBuffer::Read(WebRtcRTPHeader* rtpInfo,
WebRtc_Word8* payloadData, WebRtc_UWord8* payloadData,
WebRtc_UWord16 payloadSize, WebRtc_UWord16 payloadSize,
WebRtc_UWord32* offset) WebRtc_UWord32* offset)
{ {
@ -213,7 +213,7 @@ void RTPFile::Write(const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeSt
} }
WebRtc_UWord16 RTPFile::Read(WebRtcRTPHeader* rtpInfo, WebRtc_UWord16 RTPFile::Read(WebRtcRTPHeader* rtpInfo,
WebRtc_Word8* payloadData, WebRtc_UWord8* payloadData,
WebRtc_UWord16 payloadSize, WebRtc_UWord16 payloadSize,
WebRtc_UWord32* offset) WebRtc_UWord32* offset)
{ {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -32,7 +32,7 @@ public:
// Returns the packet's payload size. Zero should be treated as an // Returns the packet's payload size. Zero should be treated as an
// end-of-stream (in the case that EndOfFile() is true) or an error. // end-of-stream (in the case that EndOfFile() is true) or an error.
virtual WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo, virtual WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo,
WebRtc_Word8* payloadData, WebRtc_UWord8* payloadData,
WebRtc_UWord16 payloadSize, WebRtc_UWord16 payloadSize,
WebRtc_UWord32* offset) = 0; WebRtc_UWord32* offset) = 0;
virtual bool EndOfFile() const = 0; virtual bool EndOfFile() const = 0;
@ -68,7 +68,7 @@ public:
const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData, const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency); const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency);
WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo, WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo,
WebRtc_Word8* payloadData, WebRtc_UWord8* payloadData,
WebRtc_UWord16 payloadSize, WebRtc_UWord16 payloadSize,
WebRtc_UWord32* offset); WebRtc_UWord32* offset);
virtual bool EndOfFile() const; virtual bool EndOfFile() const;
@ -90,7 +90,7 @@ public:
const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData, const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency); const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency);
WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo, WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo,
WebRtc_Word8* payloadData, WebRtc_UWord8* payloadData,
WebRtc_UWord16 payloadSize, WebRtc_UWord16 payloadSize,
WebRtc_UWord32* offset); WebRtc_UWord32* offset);
bool EndOfFile() const { return _rtpEOF; } bool EndOfFile() const { return _rtpEOF; }

View File

@ -77,7 +77,8 @@ TestPack::SendData(
rtpInfo.type.Audio.channel = 1; rtpInfo.type.Audio.channel = 1;
memcpy(_payloadData, payloadData, payloadDataSize); memcpy(_payloadData, payloadData, payloadDataSize);
status = _receiverACM->IncomingPacket((WebRtc_Word8*)_payloadData, payloadDataSize, rtpInfo); status = _receiverACM->IncomingPacket(_payloadData, payloadDataSize,
rtpInfo);
_payloadSize = payloadDataSize; _payloadSize = payloadDataSize;
_timeStampDiff = timeStamp - _lastInTimestamp; _timeStampDiff = timeStamp - _lastInTimestamp;

View File

@ -30,8 +30,8 @@ _timeStampDiff(0),
_lastInTimestamp(0), _lastInTimestamp(0),
_totalBytes(0), _totalBytes(0),
_payloadSize(0), _payloadSize(0),
_noChannels(1), _codec_mode(kNotSet),
_codecType(0) _lost_packet(false)
{ {
} }
TestPackStereo::~TestPackStereo() TestPackStereo::~TestPackStereo()
@ -46,101 +46,48 @@ TestPackStereo::RegisterReceiverACM(AudioCodingModule* acm)
} }
WebRtc_Word32 WebRtc_Word32 TestPackStereo::SendData(
TestPackStereo::SendData( const FrameType frameType,
const FrameType frameType, const WebRtc_UWord8 payloadType,
const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeStamp,
const WebRtc_UWord32 timeStamp, const WebRtc_UWord8* payloadData,
const WebRtc_UWord8* payloadData, const WebRtc_UWord16 payloadSize,
const WebRtc_UWord16 payloadSize, const RTPFragmentationHeader* fragmentation) {
const RTPFragmentationHeader* fragmentation) WebRtcRTPHeader rtpInfo;
{ WebRtc_Word32 status = 0;
WebRtcRTPHeader rtpInfo;
WebRtc_Word32 status;
WebRtc_UWord16 payloadDataSize = payloadSize;
WebRtc_UWord8 payloadDataMaster[60 * 32 * 2 * 2];
WebRtc_UWord8 payloadDataSlave[60 * 32 * 2 * 2];
rtpInfo.header.markerBit = false; rtpInfo.header.markerBit = false;
rtpInfo.header.ssrc = 0; rtpInfo.header.ssrc = 0;
rtpInfo.header.sequenceNumber = _seqNo++; rtpInfo.header.sequenceNumber = _seqNo++;
rtpInfo.header.payloadType = payloadType; rtpInfo.header.payloadType = payloadType;
rtpInfo.header.timestamp = timeStamp; rtpInfo.header.timestamp = timeStamp;
if(frameType == kFrameEmpty) if (frameType == kFrameEmpty) {
{ // Skip this frame
// Skip this frame return 0;
return 0; }
}
if(frameType != kAudioFrameCN)
{
rtpInfo.type.Audio.isCNG = false;
// For stereo we need to call ACM with two incoming packets, one for each channel. if (_lost_packet == false) {
// Different packet-splitting depending on codec. if (frameType != kAudioFrameCN) {
if (_codecType == 0) { rtpInfo.type.Audio.isCNG = false;
// one byte per sample rtpInfo.type.Audio.channel = (int) _codec_mode;
for (int i=0, j=0; i<payloadDataSize; i+=2, j++)
{
payloadDataMaster[j] = payloadData[i];
payloadDataSlave[j] = payloadData[i+1];
}
} else if (_codecType == 1) {
// two bytes per sample
for (int i=0, j=0; i<payloadDataSize; i+=4, j+=2)
{
payloadDataMaster[j] = payloadData[i];
payloadDataMaster[j+1] = payloadData[i+1];
payloadDataSlave[j] = payloadData[i+2];
payloadDataSlave[j+1] = payloadData[i+3];
}
} else if (_codecType == 2) {
// frameBased
memcpy(payloadDataMaster, &payloadData[0], payloadDataSize/2);
memcpy(payloadDataSlave, &payloadData[payloadDataSize/2], payloadDataSize/2);
} else if (_codecType == 3) {
// four bits per sample
for (int i=0, j=0; i<payloadDataSize; i+=2, j++)
{
payloadDataMaster[j] = (payloadData[i] & 0xF0) + (payloadData[i+1] >> 4);
payloadDataSlave[j] = ((payloadData[i] & 0x0F) << 4) + (payloadData[i+1] & 0x0F);
}
} else if (_codecType == 4) {
// True stereo, call both master and slave with whole stream.
memcpy(payloadDataMaster, payloadData, payloadSize);
memcpy(payloadDataSlave, payloadData, payloadSize);
payloadDataSize = payloadSize*2;
}
}
else
{
// If CNG packet, send the same packet to both master and slave.
rtpInfo.type.Audio.isCNG = true;
memcpy(payloadDataMaster, payloadData, payloadSize);
memcpy(payloadDataSlave, payloadData, payloadSize);
payloadDataSize = payloadSize*2;
}
if (_codecType != 5) {
// Call ACM with two packets, one for each channel
rtpInfo.type.Audio.channel = 1;
status = _receiverACM->IncomingPacket((WebRtc_Word8*)payloadDataMaster, payloadDataSize/2, rtpInfo);
rtpInfo.type.Audio.channel = 2;
status = _receiverACM->IncomingPacket((WebRtc_Word8*)payloadDataSlave, payloadDataSize/2, rtpInfo);
} else { } else {
// Mono case, call ACM with one packet. rtpInfo.type.Audio.isCNG = true;
rtpInfo.type.Audio.channel = 1; rtpInfo.type.Audio.channel = (int) kMono;
status = _receiverACM->IncomingPacket((WebRtc_Word8*)payloadData, payloadDataSize, rtpInfo);
} }
status = _receiverACM->IncomingPacket(payloadData, payloadSize,
rtpInfo);
if (frameType != kAudioFrameCN) { if (frameType != kAudioFrameCN) {
_payloadSize = payloadDataSize; _payloadSize = payloadSize;
} else { } else {
_payloadSize = -1; _payloadSize = -1;
} }
_timeStampDiff = timeStamp - _lastInTimestamp; _timeStampDiff = timeStamp - _lastInTimestamp;
_lastInTimestamp = timeStamp; _lastInTimestamp = timeStamp;
_totalBytes += payloadDataSize; _totalBytes += payloadSize;
return status; }
return status;
} }
WebRtc_UWord16 WebRtc_UWord16
@ -162,10 +109,12 @@ TestPackStereo::ResetPayloadSize()
_payloadSize = 0; _payloadSize = 0;
} }
void void TestPackStereo::set_codec_mode(enum StereoMonoMode mode) {
TestPackStereo::SetCodecType(int codecType) _codec_mode = mode;
{ }
_codecType = codecType;
void TestPackStereo::set_lost_packet(bool lost) {
_lost_packet = lost;
} }
TestStereo::TestStereo(int testMode): TestStereo::TestStereo(int testMode):
@ -388,16 +337,17 @@ void TestStereo::Perform()
audio_channels = 2; audio_channels = 2;
codec_channels = 2; codec_channels = 2;
// All codecs are tested for all allowed sampling frequencies, rates and packet sizes // All codecs are tested for all allowed sampling frequencies, rates and
// packet sizes.
#ifdef WEBRTC_CODEC_G722 #ifdef WEBRTC_CODEC_G722
if(_testMode != 0) { if(_testMode != 0) {
printf("=======================================================================\n"); printf("===========================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-stereo\n"); printf("Test type: Stereo-to-stereo\n");
} else { } else {
printf("."); printf(".");
} }
_channelA2B->SetCodecType(3); _channelA2B->set_codec_mode(kStereo);
_testCntr++; _testCntr++;
OpenOutFile(_testCntr); OpenOutFile(_testCntr);
char codecG722[] = "G722"; char codecG722[] = "G722";
@ -428,13 +378,13 @@ void TestStereo::Perform()
#endif #endif
#ifdef WEBRTC_CODEC_PCM16 #ifdef WEBRTC_CODEC_PCM16
if(_testMode != 0) { if(_testMode != 0) {
printf("=======================================================================\n"); printf("===========================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-stereo\n"); printf("Test type: Stereo-to-stereo\n");
} else { } else {
printf("."); printf(".");
} }
_channelA2B->SetCodecType(1); _channelA2B->set_codec_mode(kStereo);
_testCntr++; _testCntr++;
OpenOutFile(_testCntr); OpenOutFile(_testCntr);
char codecL16[] = "L16"; char codecL16[] = "L16";
@ -458,7 +408,7 @@ void TestStereo::Perform()
_outFileB.Close(); _outFileB.Close();
if(_testMode != 0) { if(_testMode != 0) {
printf("=======================================================================\n"); printf("===========================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-stereo\n"); printf("Test type: Stereo-to-stereo\n");
} else { } else {
@ -486,7 +436,7 @@ void TestStereo::Perform()
_outFileB.Close(); _outFileB.Close();
if(_testMode != 0) { if(_testMode != 0) {
printf("=======================================================================\n"); printf("===========================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-stereo\n"); printf("Test type: Stereo-to-stereo\n");
} else { } else {
@ -510,13 +460,13 @@ void TestStereo::Perform()
#define PCMA_AND_PCMU #define PCMA_AND_PCMU
#ifdef PCMA_AND_PCMU #ifdef PCMA_AND_PCMU
if(_testMode != 0) { if(_testMode != 0) {
printf("=======================================================================\n"); printf("===========================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-stereo\n"); printf("Test type: Stereo-to-stereo\n");
} else { } else {
printf("."); printf(".");
} }
_channelA2B->SetCodecType(0); _channelA2B->set_codec_mode(kStereo);
audio_channels = 2; audio_channels = 2;
codec_channels = 2; codec_channels = 2;
_testCntr++; _testCntr++;
@ -547,7 +497,7 @@ void TestStereo::Perform()
_acmA->SetVAD(false, false, VADNormal); _acmA->SetVAD(false, false, VADNormal);
_outFileB.Close(); _outFileB.Close();
if(_testMode != 0) { if(_testMode != 0) {
printf("=======================================================================\n"); printf("===========================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-stereo\n"); printf("Test type: Stereo-to-stereo\n");
} else { } else {
@ -583,13 +533,13 @@ void TestStereo::Perform()
#endif #endif
#ifdef WEBRTC_CODEC_CELT #ifdef WEBRTC_CODEC_CELT
if(_testMode != 0) { if(_testMode != 0) {
printf("=======================================================================\n"); printf("===========================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-stereo\n"); printf("Test type: Stereo-to-stereo\n");
} else { } else {
printf("."); printf(".");
} }
_channelA2B->SetCodecType(4); _channelA2B->set_codec_mode(kStereo);
audio_channels = 2; audio_channels = 2;
codec_channels = 2; codec_channels = 2;
_testCntr++; _testCntr++;
@ -624,7 +574,7 @@ void TestStereo::Perform()
printf("Test type: Mono-to-stereo\n"); printf("Test type: Mono-to-stereo\n");
} }
_testCntr++; _testCntr++;
_channelA2B->SetCodecType(3); _channelA2B->set_codec_mode(kStereo);
OpenOutFile(_testCntr); OpenOutFile(_testCntr);
RegisterSendCodec('A', codecG722, 16000, 64000, 160, codec_channels, RegisterSendCodec('A', codecG722, 16000, 64000, 160, codec_channels,
g722_pltype_); g722_pltype_);
@ -638,7 +588,7 @@ void TestStereo::Perform()
printf("Test type: Mono-to-stereo\n"); printf("Test type: Mono-to-stereo\n");
} }
_testCntr++; _testCntr++;
_channelA2B->SetCodecType(1); _channelA2B->set_codec_mode(kStereo);
OpenOutFile(_testCntr); OpenOutFile(_testCntr);
RegisterSendCodec('A', codecL16, 8000, 128000, 80, codec_channels, RegisterSendCodec('A', codecL16, 8000, 128000, 80, codec_channels,
l16_8khz_pltype_); l16_8khz_pltype_);
@ -674,7 +624,7 @@ void TestStereo::Perform()
printf("Test type: Mono-to-stereo\n"); printf("Test type: Mono-to-stereo\n");
} }
_testCntr++; _testCntr++;
_channelA2B->SetCodecType(0); _channelA2B->set_codec_mode(kStereo);
OpenOutFile(_testCntr); OpenOutFile(_testCntr);
RegisterSendCodec('A', codecPCMU, 8000, 64000, 80, codec_channels, RegisterSendCodec('A', codecPCMU, 8000, 64000, 80, codec_channels,
pcmu_pltype_); pcmu_pltype_);
@ -691,7 +641,7 @@ void TestStereo::Perform()
printf("Test type: Mono-to-stereo\n"); printf("Test type: Mono-to-stereo\n");
} }
_testCntr++; _testCntr++;
_channelA2B->SetCodecType(4); _channelA2B->set_codec_mode(kStereo);
OpenOutFile(_testCntr); OpenOutFile(_testCntr);
RegisterSendCodec('A', codecCELT, 32000, 64000, 320, codec_channels, RegisterSendCodec('A', codecCELT, 32000, 64000, 320, codec_channels,
celt_pltype_); celt_pltype_);
@ -704,7 +654,7 @@ void TestStereo::Perform()
// //
audio_channels = 2; audio_channels = 2;
codec_channels = 1; codec_channels = 1;
_channelA2B->SetCodecType(5); _channelA2B->set_codec_mode(kMono);
// Register receivers as mono. // Register receivers as mono.
for(WebRtc_UWord8 n = 0; n < numEncoders; n++) { for(WebRtc_UWord8 n = 0; n < numEncoders; n++) {
@ -767,7 +717,7 @@ void TestStereo::Perform()
Run(_channelA2B, audio_channels, codec_channels); Run(_channelA2B, audio_channels, codec_channels);
_outFileB.Close(); _outFileB.Close();
if(_testMode != 0) { if(_testMode != 0) {
printf("===============================================================\n"); printf("==============================================================\n");
printf("Test number: %d\n",_testCntr + 1); printf("Test number: %d\n",_testCntr + 1);
printf("Test type: Stereo-to-mono\n"); printf("Test type: Stereo-to-mono\n");
} }
@ -819,7 +769,8 @@ void TestStereo::Perform()
#endif #endif
printf(" G.711\n"); printf(" G.711\n");
printf("\nTo complete the test, listen to the %d number of output files.\n", _testCntr); printf("\nTo complete the test, listen to the %d number of output "
"files.\n", _testCntr);
} else { } else {
printf("Done!\n"); printf("Done!\n");
} }
@ -845,15 +796,23 @@ WebRtc_Word16 TestStereo::RegisterSendCodec(char side,
{ {
if(_testMode != 0) { if(_testMode != 0) {
// Print out codec and settings // Print out codec and settings
printf("Codec: %s Freq: %d Rate: %d PackSize: %d", codecName, samplingFreqHz, rate, packSize); printf("Codec: %s Freq: %d Rate: %d PackSize: %d", codecName,
samplingFreqHz, rate, packSize);
} }
// Store packetsize in samples, used to validate the recieved packet // Store packetsize in samples, used to validate the received packet
_packSizeSamp = packSize; _packSizeSamp = packSize;
// Store the expected packet size in bytes, used to validate the recieved packet // Store the expected packet size in bytes, used to validate the received
// Add 0.875 to always round up to a whole byte // packet. Add 0.875 to always round up to a whole byte.
_packSizeBytes = (WebRtc_UWord16)((float)(packSize*rate)/(float)(samplingFreqHz*8)+0.875); // For Celt the packet size in bytes is already counting the stereo part.
if (!strcmp(codecName, "CELT")) {
_packSizeBytes = (WebRtc_UWord16)((float)(packSize*rate)/
(float)(samplingFreqHz*8)+0.875) / channels;
} else {
_packSizeBytes = (WebRtc_UWord16)((float)(packSize*rate)/
(float)(samplingFreqHz*8)+0.875);
}
// Set pointer to the ACM where to register the codec // Set pointer to the ACM where to register the codec
AudioCodingModule* myACM; AudioCodingModule* myACM;
@ -881,7 +840,8 @@ WebRtc_Word16 TestStereo::RegisterSendCodec(char side,
CodecInst myCodecParam; CodecInst myCodecParam;
// Get all codec parameters before registering // Get all codec parameters before registering
CHECK_ERROR(AudioCodingModule::Codec(codecName, myCodecParam, samplingFreqHz)); CHECK_ERROR(AudioCodingModule::Codec(codecName, myCodecParam,
samplingFreqHz));
myCodecParam.rate = rate; myCodecParam.rate = rate;
myCodecParam.pacsize = packSize; myCodecParam.pacsize = packSize;
myCodecParam.pltype = payload_type; myCodecParam.pltype = payload_type;
@ -892,8 +852,8 @@ WebRtc_Word16 TestStereo::RegisterSendCodec(char side,
return 0; return 0;
} }
void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels) void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,
{ int percent_loss) {
AudioFrame audioFrame; AudioFrame audioFrame;
WebRtc_Word32 outFreqHzB = _outFileB.SamplingFrequency(); WebRtc_Word32 outFreqHzB = _outFileB.SamplingFrequency();
@ -905,8 +865,20 @@ void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels)
// Only run 1 second for each test case // Only run 1 second for each test case
// TODO(tlegrand): either remove |_counter| or start using it as the comment // TODO(tlegrand): either remove |_counter| or start using it as the comment
// above says. Now |_counter| is always 0. // above says. Now |_counter| is always 0.
while(_counter<1000) while(1)
{ {
// Simulate packet loss by setting |packet_loss_| to "true" in
// |percent_loss| percent of the loops.
if (percent_loss > 0) {
if (_counter == floor((100 / percent_loss) + 0.5)) {
_counter = 0;
channel->set_lost_packet(true);
} else {
channel->set_lost_packet(false);
}
_counter++;
}
// Add 10 msec to ACM // Add 10 msec to ACM
if (in_channels == 1) { if (in_channels == 1) {
if (_in_file_mono.EndOfFile()) { if (_in_file_mono.EndOfFile()) {
@ -967,6 +939,8 @@ void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels)
if (_in_file_stereo.EndOfFile()) { if (_in_file_stereo.EndOfFile()) {
_in_file_stereo.Rewind(); _in_file_stereo.Rewind();
} }
// Reset in case we ended with a lost packet
channel->set_lost_packet(false);
} }
void TestStereo::OpenOutFile(WebRtc_Word16 testNumber) void TestStereo::OpenOutFile(WebRtc_Word16 testNumber)

View File

@ -11,12 +11,20 @@
#ifndef TEST_STEREO_H #ifndef TEST_STEREO_H
#define TEST_STEREO_H #define TEST_STEREO_H
#include <math.h>
#include "ACMTest.h" #include "ACMTest.h"
#include "Channel.h" #include "Channel.h"
#include "PCMFile.h" #include "PCMFile.h"
namespace webrtc { namespace webrtc {
enum StereoMonoMode {
kNotSet,
kMono,
kStereo
};
class TestPackStereo : public AudioPacketizationCallback class TestPackStereo : public AudioPacketizationCallback
{ {
public: public:
@ -35,8 +43,8 @@ public:
WebRtc_UWord16 GetPayloadSize(); WebRtc_UWord16 GetPayloadSize();
WebRtc_UWord32 GetTimeStampDiff(); WebRtc_UWord32 GetTimeStampDiff();
void ResetPayloadSize(); void ResetPayloadSize();
void SetCodecType(int codecType); void set_codec_mode(StereoMonoMode mode);
void set_lost_packet(bool lost);
private: private:
AudioCodingModule* _receiverACM; AudioCodingModule* _receiverACM;
@ -46,8 +54,9 @@ private:
WebRtc_UWord32 _lastInTimestamp; WebRtc_UWord32 _lastInTimestamp;
WebRtc_UWord64 _totalBytes; WebRtc_UWord64 _totalBytes;
WebRtc_UWord16 _payloadSize; WebRtc_UWord16 _payloadSize;
WebRtc_UWord16 _noChannels; StereoMonoMode _codec_mode;
int _codecType; // Simulate packet losses
bool _lost_packet;
}; };
class TestStereo : public ACMTest class TestStereo : public ACMTest
@ -69,7 +78,8 @@ private:
int channels, int channels,
int payload_type); int payload_type);
void Run(TestPackStereo* channel, int in_channels, int out_channels); void Run(TestPackStereo* channel, int in_channels, int out_channels,
int percent_loss = 0);
void OpenOutFile(WebRtc_Word16 testNumber); void OpenOutFile(WebRtc_Word16 testNumber);
void DisplaySendReceiveCodec(); void DisplaySendReceiveCodec();
@ -95,7 +105,6 @@ private:
WebRtc_UWord16 _packSizeSamp; WebRtc_UWord16 _packSizeSamp;
WebRtc_UWord16 _packSizeBytes; WebRtc_UWord16 _packSizeBytes;
int _counter; int _counter;
int _codecType;
// Payload types for stereo codecs and CNG // Payload types for stereo codecs and CNG
int g722_pltype_; int g722_pltype_;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* *
* Use of this source code is governed by a BSD-style license * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source * that can be found in the LICENSE file in the root of the source
@ -61,7 +61,7 @@ void PopulateTests(std::vector<ACMTest*>* tests)
tests->push_back(new webrtc::TwoWayCommunication(0)); tests->push_back(new webrtc::TwoWayCommunication(0));
tests->push_back(new webrtc::TestAllCodecs(0)); tests->push_back(new webrtc::TestAllCodecs(0));
tests->push_back(new webrtc::TestStereo(0)); tests->push_back(new webrtc::TestStereo(0));
tests->push_back(new webrtc::SpatialAudio(0)); // tests->push_back(new webrtc::SpatialAudio(0));
tests->push_back(new webrtc::TestVADDTX(0)); tests->push_back(new webrtc::TestVADDTX(0));
tests->push_back(new webrtc::TestFEC(0)); tests->push_back(new webrtc::TestFEC(0));
tests->push_back(new webrtc::ISACTest(0)); tests->push_back(new webrtc::ISACTest(0));
@ -81,7 +81,7 @@ void PopulateTests(std::vector<ACMTest*>* tests)
#ifdef ACM_TEST_STEREO #ifdef ACM_TEST_STEREO
printf(" ACM stereo test\n"); printf(" ACM stereo test\n");
tests->push_back(new webrtc::TestStereo(1)); tests->push_back(new webrtc::TestStereo(1));
tests->push_back(new webrtc::SpatialAudio(2)); //tests->push_back(new webrtc::SpatialAudio(2));
#endif #endif
#ifdef ACM_TEST_VAD_DTX #ifdef ACM_TEST_VAD_DTX
printf(" ACM VAD-DTX test\n"); printf(" ACM VAD-DTX test\n");

View File

@ -586,7 +586,7 @@ decoder_CELT::decoder_CELT(WebRtc_UWord8 pt, WebRtc_UWord16 fs)
: :
NETEQTEST_Decoder(kDecoderCELT_32, fs, "CELT", pt) NETEQTEST_Decoder(kDecoderCELT_32, fs, "CELT", pt)
{ {
if (WebRtcCelt_CreateDec((CELT_decinst_t **) &_decoder, 1)) if (WebRtcCelt_CreateDec((CELT_decinst_t **) &_decoder, 2))
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
@ -603,6 +603,27 @@ int decoder_CELT::loadToNetEQ(NETEQTEST_NetEQClass & neteq)
return(NETEQTEST_Decoder::loadToNetEQ(neteq, codecInst)); return(NETEQTEST_Decoder::loadToNetEQ(neteq, codecInst));
} }
decoder_CELTslave::decoder_CELTslave(WebRtc_UWord8 pt, WebRtc_UWord16 fs)
:
NETEQTEST_Decoder(kDecoderCELT_32, fs, "CELT", pt)
{
if (WebRtcCelt_CreateDec((CELT_decinst_t **) &_decoder, 2))
exit(EXIT_FAILURE);
}
decoder_CELTslave::~decoder_CELTslave()
{
WebRtcCelt_FreeDec((CELT_decinst_t *) _decoder);
}
int decoder_CELTslave::loadToNetEQ(NETEQTEST_NetEQClass & neteq)
{
WebRtcNetEQ_CodecDef codecInst;
SET_CELTSLAVE_FUNCTIONS(codecInst);
return(NETEQTEST_Decoder::loadToNetEQ(neteq, codecInst));
}
#endif #endif
#ifdef CODEC_RED #ifdef CODEC_RED

View File

@ -272,6 +272,13 @@ public:
virtual ~decoder_CELT(); virtual ~decoder_CELT();
int loadToNetEQ(NETEQTEST_NetEQClass & neteq); int loadToNetEQ(NETEQTEST_NetEQClass & neteq);
}; };
class decoder_CELTslave : public NETEQTEST_Decoder
{
public:
decoder_CELTslave(WebRtc_UWord8 pt = 0, WebRtc_UWord16 fs = 32000);
virtual ~decoder_CELTslave();
int loadToNetEQ(NETEQTEST_NetEQClass & neteq);
};
class decoder_RED : public NETEQTEST_Decoder class decoder_RED : public NETEQTEST_Decoder
{ {

View File

@ -598,6 +598,12 @@ int NETEQTEST_RTPpacket::splitStereo(NETEQTEST_RTPpacket* slaveRtp,
splitStereoFrame(slaveRtp); splitStereoFrame(slaveRtp);
break; break;
} }
case stereoModeDuplicate:
{
// frame based codec, send the whole packet to both master and slave
splitStereoDouble(slaveRtp);
break;
}
case stereoModeMono: case stereoModeMono:
{ {
assert(false); assert(false);
@ -780,6 +786,17 @@ void NETEQTEST_RTPpacket::splitStereoFrame(NETEQTEST_RTPpacket* slaveRtp)
_payloadLen /= 2; _payloadLen /= 2;
slaveRtp->_payloadLen = _payloadLen; slaveRtp->_payloadLen = _payloadLen;
} }
void NETEQTEST_RTPpacket::splitStereoDouble(NETEQTEST_RTPpacket* slaveRtp)
{
if(!_payloadPtr || !slaveRtp || !slaveRtp->_payloadPtr
|| _payloadLen <= 0 || slaveRtp->_memSize < _memSize)
{
return;
}
memcpy(slaveRtp->_payloadPtr, _payloadPtr, _payloadLen);
slaveRtp->_payloadLen = _payloadLen;
}
// Get the RTP header for the RED payload indicated by argument index. // Get the RTP header for the RED payload indicated by argument index.
// The first RED payload is index = 0. // The first RED payload is index = 0.

View File

@ -20,7 +20,8 @@ enum stereoModes {
stereoModeMono, stereoModeMono,
stereoModeSample1, stereoModeSample1,
stereoModeSample2, stereoModeSample2,
stereoModeFrame stereoModeFrame,
stereoModeDuplicate
}; };
class NETEQTEST_RTPpacket class NETEQTEST_RTPpacket
@ -98,6 +99,7 @@ private:
int calcPadLength(int i_P) const; int calcPadLength(int i_P) const;
void splitStereoSample(NETEQTEST_RTPpacket* slaveRtp, int stride); void splitStereoSample(NETEQTEST_RTPpacket* slaveRtp, int stride);
void splitStereoFrame(NETEQTEST_RTPpacket* slaveRtp); void splitStereoFrame(NETEQTEST_RTPpacket* slaveRtp);
void splitStereoDouble(NETEQTEST_RTPpacket* slaveRtp);
}; };
#endif //NETEQTEST_RTPPACKET_H #endif //NETEQTEST_RTPPACKET_H

View File

@ -695,9 +695,6 @@ int main(int argc, char* argv[])
printf(" RecIn complexity : %.2f MCPS\n", NetEQvector[0]->getRecInTime() / ((float) 1000*(simClock-start_clock))); printf(" RecIn complexity : %.2f MCPS\n", NetEQvector[0]->getRecInTime() / ((float) 1000*(simClock-start_clock)));
printf(" RecOut complexity : %.2f MCPS\n", NetEQvector[0]->getRecOutTime() / ((float) 1000*(simClock-start_clock))); printf(" RecOut complexity : %.2f MCPS\n", NetEQvector[0]->getRecOutTime() / ((float) 1000*(simClock-start_clock)));
delete rtp;
delete slaveRtp;
free_coders(decoders); free_coders(decoders);
//free_coders(0 /* first channel */); //free_coders(0 /* first channel */);
// if (stereoMode > stereoModeMono) { // if (stereoMode > stereoModeMono) {
@ -1196,6 +1193,11 @@ void parsePtypeFile(FILE *ptypeFile, std::map<WebRtc_UWord8, decoderStruct>* dec
break; break;
} }
case kDecoderCELT_32:
{
tempDecoder.stereo = stereoModeDuplicate;
break;
}
// fixed-rate frame codecs // fixed-rate frame codecs
// case kDecoderG729: // case kDecoderG729:
// case NETEQ_CODEC_G729D: // case NETEQ_CODEC_G729D:
@ -1462,7 +1464,10 @@ void createAndInsertDecoders (NETEQTEST_NetEQClass *neteq, std::map<WebRtc_UWord
#endif #endif
#ifdef CODEC_CELT_32 #ifdef CODEC_CELT_32
case kDecoderCELT_32: case kDecoderCELT_32:
if (channelNumber == 0)
*dec = new decoder_CELT( pt, 32000 ); *dec = new decoder_CELT( pt, 32000 );
else
*dec = new decoder_CELTslave( pt, 32000 );
break; break;
#endif #endif
#ifdef CODEC_RED #ifdef CODEC_RED

View File

@ -783,7 +783,6 @@ WebRtc_Word32 RTPReceiver::IncomingRTPPacket(
video_specific.videoCodecType = kRtpNoVideo; video_specific.videoCodecType = kRtpNoVideo;
AudioPayload audio_specific; AudioPayload audio_specific;
audio_specific.bitsPerSample = 0;
audio_specific.channels = 0; audio_specific.channels = 0;
audio_specific.frequency = 0; audio_specific.frequency = 0;

View File

@ -224,45 +224,13 @@ ModuleRTPUtility::Payload* RTPReceiverAudio::RegisterReceiveAudioPayload(
return NULL; return NULL;
} }
} }
WebRtc_UWord8 bitsPerSample = 0; // zero implies frame based
bool isTrueStereo = false; // Default value
if (ModuleRTPUtility::StringCompare(payloadName, "DVI4", 4)) {
bitsPerSample = 4;
} else if(ModuleRTPUtility::StringCompare(payloadName, "G722", 4)) {
if(ModuleRTPUtility::StringCompare(payloadName, "G7221", 5)) {
// frame based
} else {
_G722PayloadType = payloadType;
bitsPerSample = 4;
}
} else if(ModuleRTPUtility::StringCompare(payloadName,"G726-40",7)) {
bitsPerSample = 5;
} else if(ModuleRTPUtility::StringCompare(payloadName,"G726-32",7)) {
bitsPerSample = 4;
} else if(ModuleRTPUtility::StringCompare(payloadName,"G726-24",7)) {
bitsPerSample = 3;
} else if(ModuleRTPUtility::StringCompare(payloadName,"G726-16",7)) {
bitsPerSample = 2;
} else if(ModuleRTPUtility::StringCompare(payloadName,"L8",2)) {
bitsPerSample = 8;
} else if(ModuleRTPUtility::StringCompare(payloadName,"L16",3)) {
bitsPerSample = 16;
} else if(ModuleRTPUtility::StringCompare(payloadName,"PCMU",4)) {
bitsPerSample = 8;
} else if(ModuleRTPUtility::StringCompare(payloadName,"PCMA",4)) {
bitsPerSample = 8;
} else if(ModuleRTPUtility::StringCompare(payloadName,"CELT",4))
{
isTrueStereo = true;
}
ModuleRTPUtility::Payload* payload = new ModuleRTPUtility::Payload;
payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
strncpy(payload->name, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
payload->typeSpecific.Audio.frequency = frequency;
payload->typeSpecific.Audio.channels = channels;
payload->typeSpecific.Audio.bitsPerSample = bitsPerSample;
payload->typeSpecific.Audio.rate = rate;
payload->typeSpecific.Audio.trueStereoCodec = isTrueStereo;
payload->audio = true;
return payload;
}
@ -404,187 +372,8 @@ RTPReceiverAudio::ParseAudioCodecSpecific(WebRtcRTPHeader* rtpHeader,
payloadLength-1,
rtpHeader);
}
if(audioSpecific.channels > 1)
{
WebRtc_Word32 retVal = 0;
WebRtc_UWord16 channelLength = payloadLength/audioSpecific.channels;
if(audioSpecific.bitsPerSample > 0)
{
rtpHeader->type.Audio.channel = audioSpecific.channels;
return CallbackOfReceivedPayloadData(payloadData, payloadLength, rtpHeader);
// sanity
assert((payloadLength*8)%audioSpecific.bitsPerSample == 0);
// sample based codec
// build matrix
WebRtc_UWord8 matrix[IP_PACKET_SIZE];
WebRtc_UWord32 offsetBytes = 0;
WebRtc_UWord32 offsetBytesInsert = 0;
// initialize matrix to 0
memset(matrix, 0, audioSpecific.channels*channelLength);
switch(audioSpecific.bitsPerSample)
{
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
{
WebRtc_UWord32 offsetSamples = 0;
WebRtc_UWord32 offsetSamplesInsert = 0;
WebRtc_UWord16 bitMask = (WebRtc_UWord16)ModuleRTPUtility::pow2(audioSpecific.bitsPerSample)-1;
WebRtc_UWord16 samplesPerChannel =payloadLength*8/audioSpecific.bitsPerSample/audioSpecific.channels;
for(WebRtc_UWord32 i = 0; i < samplesPerChannel; i++)
{
WebRtc_UWord8 insertShift = (WebRtc_UWord8)((offsetSamplesInsert+audioSpecific.bitsPerSample)%16);
insertShift = 16 - insertShift; // inverse the calculation
for(WebRtc_UWord32 j = 0; j < audioSpecific.channels; j++)
{
// get sample
WebRtc_UWord16 s = payloadData[offsetBytes] << 8;
// check that we don't read outside the memory
if(offsetBytes < (WebRtc_UWord32)payloadLength - 1)
{
s += payloadData[offsetBytes+1];
}
WebRtc_UWord8 readShift = (WebRtc_UWord8)((offsetSamples+audioSpecific.bitsPerSample)%16);
readShift = 16 - readShift; // inverse the calculation
s >>= readShift;
s &= bitMask;
// prepare for reading next sample
offsetSamples += audioSpecific.bitsPerSample;
if(readShift <= audioSpecific.bitsPerSample)
{
// next does not fit
// or fit exactly
offsetSamples -= 8;
offsetBytes++;
}
// insert sample into matrix
WebRtc_UWord32 columOffset = j*channelLength;
WebRtc_UWord16 insert = s << insertShift;
#if defined(WEBRTC_LITTLE_ENDIAN)
matrix[columOffset+offsetBytesInsert] |= static_cast<WebRtc_UWord8>(insert>>8);
matrix[columOffset+offsetBytesInsert+1] |= static_cast<WebRtc_UWord8>(insert);
#else
WebRtc_UWord16* matrixU16 = (WebRtc_UWord16*)&(matrix[columOffset+offsetBytesInsert]);
matrixU16[0] |= (s << insertShift);
#endif
}
// prepare for writing next sample
offsetSamplesInsert += audioSpecific.bitsPerSample;
if(insertShift <= audioSpecific.bitsPerSample)
{
// next does not fit
// or fit exactly
offsetSamplesInsert -= 8;
offsetBytesInsert++;
}
}
}
break;
case 8:
{
WebRtc_UWord32 sample = 0;
for(WebRtc_UWord32 i = 0; i < channelLength; i++)
{
for(WebRtc_UWord32 j = 0; j < audioSpecific.channels; j++)
{
WebRtc_UWord32 columOffset = j*channelLength;
matrix[columOffset + i] = payloadData[sample++];
}
}
}
break;
case 16:
{
WebRtc_UWord32 sample = 0;
for(WebRtc_UWord32 i = 0; i < channelLength; i +=2)
{
for(WebRtc_UWord32 j = 0; j < audioSpecific.channels; j++)
{
WebRtc_UWord32 columOffset = j*channelLength;
matrix[columOffset + i] = payloadData[sample++];
matrix[columOffset + i + 1] = payloadData[sample++];
}
}
}
break;
default:
assert(false);
return -1;
}
// we support 16 bits sample
// callback for all channels
for(int channel = 0; channel < audioSpecific.channels && retVal == 0; channel++)
{
// one callback per channel
rtpHeader->type.Audio.channel = channel+1;
if(channel == 0)
{
// include the original packet only in the first callback
retVal = CallbackOfReceivedPayloadData(&matrix[channel*channelLength],
channelLength,
rtpHeader);
} else
{
retVal = CallbackOfReceivedPayloadData(&matrix[channel*channelLength],
channelLength,
rtpHeader);
}
}
} else if (audioSpecific.trueStereoCodec)
{
// One callback with the whole payload for each channel.
for(int channel = 1; (channel <= audioSpecific.channels) &&
(retVal == 0); channel++)
{
// One callback per channel.
rtpHeader->type.Audio.channel = channel;
retVal = CallbackOfReceivedPayloadData(payloadData,
payloadLength,
rtpHeader);
}
} else
{
for(int channel = 1; channel <= audioSpecific.channels && retVal == 0; channel++)
{
// one callback per channel
rtpHeader->type.Audio.channel = channel;
if(channel == 1)
{
// include the original packet only in the first callback
retVal = CallbackOfReceivedPayloadData(payloadData,
channelLength,
rtpHeader);
} else
{
retVal = CallbackOfReceivedPayloadData(payloadData,
channelLength,
rtpHeader);
}
payloadData += channelLength;
}
}
return retVal;
}else
{
rtpHeader->type.Audio.channel = 1;
return CallbackOfReceivedPayloadData(payloadData,
payloadLength,
rtpHeader);
}
}
} // namespace webrtc
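With the per-channel splitting code above removed, the receive path in this file reduces to: record the channel count in the RTP header struct and hand the complete payload to the registered callback exactly once. A self-contained sketch of that flow, using stand-in types rather than the real WebRTC structs:

    #include <cstdint>

    // Stand-ins for the header bookkeeping; the real structs carry more state.
    struct ToyAudioHeader { uint8_t channel; };
    struct ToyRtpHeader   { ToyAudioHeader audio; };

    // Hypothetical callback type standing in for CallbackOfReceivedPayloadData().
    typedef int32_t (*PayloadCallback)(const uint8_t* payloadData,
                                       uint16_t payloadLength,
                                       const ToyRtpHeader* rtpHeader);

    // One callback with the whole (possibly stereo) payload; the receiver no
    // longer de-interleaves samples or issues one callback per channel.
    int32_t DeliverAudioPayload(const uint8_t* payloadData,
                                uint16_t payloadLength,
                                uint8_t channels,
                                ToyRtpHeader* rtpHeader,
                                PayloadCallback callback) {
      rtpHeader->audio.channel = channels;  // 1 = mono, 2 = stereo
      return callback(payloadData, payloadLength, rtpHeader);
    }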

View File

@ -40,9 +40,7 @@ namespace ModuleRTPUtility
{
WebRtc_UWord32 frequency;
WebRtc_UWord8 channels;
WebRtc_UWord8 bitsPerSample;
WebRtc_UWord32 rate;
bool trueStereoCodec;
};
struct VideoPayload
{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@ -69,7 +69,7 @@ WebRtc_Word32 AudioCoder::Decode(AudioFrame& decodedAudio,
{
const WebRtc_UWord8 payloadType = _receiveCodec.pltype;
_decodeTimestamp += _receiveCodec.pacsize;
if(_acm->IncomingPayload(incomingPayload,
if(_acm->IncomingPayload((const WebRtc_UWord8*) incomingPayload,
payloadLength,
payloadType,
_decodeTimestamp) == -1)
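The only functional change in this hunk is the pointer type handed to the ACM. A small sketch of the pattern used here and at the other call sites in this CL, with a stand-in for the ACM interface: the payload travels as unsigned bytes, and a caller that still holds a signed buffer casts once at the boundary.

    #include <cstddef>
    #include <cstdint>

    // Stand-in for the ACM call; only the argument types matter for this sketch.
    struct ToyAcm {
      int32_t IncomingPayload(const uint8_t* payload, int32_t lengthBytes,
                              uint8_t payloadType, uint32_t timestamp) {
        return (payload != NULL && lengthBytes > 0) ? 0 : -1;
      }
    };

    // Caller that stores encoded data as signed chars: cast at the boundary
    // instead of pushing signed pointers through the rest of the stack.
    int32_t DecodePayload(ToyAcm* acm, const char* encoded, int32_t lengthBytes,
                          uint8_t payloadType, uint32_t timestamp) {
      return acm->IncomingPayload(reinterpret_cast<const uint8_t*>(encoded),
                                  lengthBytes, payloadType, timestamp);
    }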

View File

@ -805,7 +805,7 @@ Channel::OnReceivedPayloadData(const WebRtc_UWord8* payloadData,
}
// Push the incoming payload (parsed and ready for decoding) into the ACM
if (_audioCodingModule.IncomingPacket((const WebRtc_Word8*) payloadData,
if (_audioCodingModule.IncomingPacket(payloadData,
payloadSize,
*rtpHeader) != 0)
{