diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc index d89a2637a..3480564c6 100644 --- a/webrtc/voice_engine/channel.cc +++ b/webrtc/voice_engine/channel.cc @@ -33,12 +33,12 @@ namespace webrtc { namespace voe { -WebRtc_Word32 +int32_t Channel::SendData(FrameType frameType, - WebRtc_UWord8 payloadType, - WebRtc_UWord32 timeStamp, - const WebRtc_UWord8* payloadData, - WebRtc_UWord16 payloadSize, + uint8_t payloadType, + uint32_t timeStamp, + const uint8_t* payloadData, + uint16_t payloadSize, const RTPFragmentationHeader* fragmentation) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), @@ -81,8 +81,8 @@ Channel::SendData(FrameType frameType, return 0; } -WebRtc_Word32 -Channel::InFrameType(WebRtc_Word16 frameType) +int32_t +Channel::InFrameType(int16_t frameType) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::InFrameType(frameType=%d)", frameType); @@ -93,7 +93,7 @@ Channel::InFrameType(WebRtc_Word16 frameType) return 0; } -WebRtc_Word32 +int32_t Channel::OnRxVadDetected(const int vadDecision) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), @@ -129,8 +129,8 @@ Channel::SendPacket(int channel, const void *data, int len) // API if (_insertExtraRTPPacket) { - WebRtc_UWord8* rtpHdr = (WebRtc_UWord8*)data; - WebRtc_UWord8 M_PT(0); + uint8_t* rtpHdr = (uint8_t*)data; + uint8_t M_PT(0); if (_extraMarkerBit) { M_PT = 0x80; // set the M-bit @@ -140,11 +140,11 @@ Channel::SendPacket(int channel, const void *data, int len) _insertExtraRTPPacket = false; // insert one packet only } - WebRtc_UWord8* bufferToSendPtr = (WebRtc_UWord8*)data; - WebRtc_Word32 bufferLength = len; + uint8_t* bufferToSendPtr = (uint8_t*)data; + int32_t bufferLength = len; // Dump the RTP packet to a file (if RTP dump is enabled). - if (_rtpDumpOut.DumpPacket((const WebRtc_UWord8*)data, len) == -1) + if (_rtpDumpOut.DumpPacket((const uint8_t*)data, len) == -1) { WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId), @@ -162,13 +162,13 @@ Channel::SendPacket(int channel, const void *data, int len) { // Allocate memory for encryption buffer one time only _encryptionRTPBufferPtr = - new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes]; + new uint8_t[kVoiceEngineMaxIpPacketSizeBytes]; memset(_encryptionRTPBufferPtr, 0, kVoiceEngineMaxIpPacketSizeBytes); } // Perform encryption (SRTP or external) - WebRtc_Word32 encryptedBufferLength = 0; + int32_t encryptedBufferLength = 0; _encryptionPtr->encrypt(_channelId, bufferToSendPtr, _encryptionRTPBufferPtr, @@ -244,11 +244,11 @@ Channel::SendRTCPPacket(int channel, const void *data, int len) } } - WebRtc_UWord8* bufferToSendPtr = (WebRtc_UWord8*)data; - WebRtc_Word32 bufferLength = len; + uint8_t* bufferToSendPtr = (uint8_t*)data; + int32_t bufferLength = len; // Dump the RTCP packet to a file (if RTP dump is enabled). - if (_rtpDumpOut.DumpPacket((const WebRtc_UWord8*)data, len) == -1) + if (_rtpDumpOut.DumpPacket((const uint8_t*)data, len) == -1) { WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId), @@ -266,11 +266,11 @@ Channel::SendRTCPPacket(int channel, const void *data, int len) { // Allocate memory for encryption buffer one time only _encryptionRTCPBufferPtr = - new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes]; + new uint8_t[kVoiceEngineMaxIpPacketSizeBytes]; } // Perform encryption (SRTP or external). 
- WebRtc_Word32 encryptedBufferLength = 0; + int32_t encryptedBufferLength = 0; _encryptionPtr->encrypt_rtcp(_channelId, bufferToSendPtr, _encryptionRTCPBufferPtr, @@ -332,10 +332,10 @@ Channel::SendRTCPPacket(int channel, const void *data, int len) } void -Channel::OnPlayTelephoneEvent(const WebRtc_Word32 id, - const WebRtc_UWord8 event, - const WebRtc_UWord16 lengthMs, - const WebRtc_UWord8 volume) +Channel::OnPlayTelephoneEvent(const int32_t id, + const uint8_t event, + const uint16_t lengthMs, + const uint8_t volume) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::OnPlayTelephoneEvent(id=%d, event=%u, lengthMs=%u," @@ -356,14 +356,14 @@ Channel::OnPlayTelephoneEvent(const WebRtc_Word32 id, } void -Channel::OnIncomingSSRCChanged(const WebRtc_Word32 id, - const WebRtc_UWord32 SSRC) +Channel::OnIncomingSSRCChanged(const int32_t id, + const uint32_t SSRC) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::OnIncomingSSRCChanged(id=%d, SSRC=%d)", id, SSRC); - WebRtc_Word32 channel = VoEChannelId(id); + int32_t channel = VoEChannelId(id); assert(channel == _channelId); // Reset RTP-module counters since a new incoming RTP stream is detected @@ -382,15 +382,15 @@ Channel::OnIncomingSSRCChanged(const WebRtc_Word32 id, } } -void Channel::OnIncomingCSRCChanged(const WebRtc_Word32 id, - const WebRtc_UWord32 CSRC, +void Channel::OnIncomingCSRCChanged(const int32_t id, + const uint32_t CSRC, const bool added) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::OnIncomingCSRCChanged(id=%d, CSRC=%d, added=%d)", id, CSRC, added); - WebRtc_Word32 channel = VoEChannelId(id); + int32_t channel = VoEChannelId(id); assert(channel == _channelId); if (_rtpObserver) @@ -405,18 +405,18 @@ void Channel::OnIncomingCSRCChanged(const WebRtc_Word32 id, } void -Channel::OnApplicationDataReceived(const WebRtc_Word32 id, - const WebRtc_UWord8 subType, - const WebRtc_UWord32 name, - const WebRtc_UWord16 length, - const WebRtc_UWord8* data) +Channel::OnApplicationDataReceived(const int32_t id, + const uint8_t subType, + const uint32_t name, + const uint16_t length, + const uint8_t* data) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::OnApplicationDataReceived(id=%d, subType=%u," " name=%u, length=%u)", id, subType, name, length); - WebRtc_Word32 channel = VoEChannelId(id); + int32_t channel = VoEChannelId(id); assert(channel == _channelId); if (_rtcpObserver) @@ -434,14 +434,14 @@ Channel::OnApplicationDataReceived(const WebRtc_Word32 id, } } -WebRtc_Word32 +int32_t Channel::OnInitializeDecoder( - const WebRtc_Word32 id, - const WebRtc_Word8 payloadType, + const int32_t id, + const int8_t payloadType, const char payloadName[RTP_PAYLOAD_NAME_SIZE], const int frequency, - const WebRtc_UWord8 channels, - const WebRtc_UWord32 rate) + const uint8_t channels, + const uint32_t rate) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::OnInitializeDecoder(id=%d, payloadType=%d, " @@ -477,7 +477,7 @@ Channel::OnInitializeDecoder( } void -Channel::OnPacketTimeout(const WebRtc_Word32 id) +Channel::OnPacketTimeout(const int32_t id) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::OnPacketTimeout(id=%d)", id); @@ -487,7 +487,7 @@ Channel::OnPacketTimeout(const WebRtc_Word32 id) { if (_receiving || _externalTransport) { - WebRtc_Word32 channel = VoEChannelId(id); + int32_t channel = VoEChannelId(id); assert(channel == _channelId); // Ensure that next 
OnReceivedPacket() callback will trigger // a VE_PACKET_RECEIPT_RESTARTED callback. @@ -504,7 +504,7 @@ Channel::OnPacketTimeout(const WebRtc_Word32 id) } void -Channel::OnReceivedPacket(const WebRtc_Word32 id, +Channel::OnReceivedPacket(const int32_t id, const RtpRtcpPacketType packetType) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -519,7 +519,7 @@ Channel::OnReceivedPacket(const WebRtc_Word32 id, CriticalSectionScoped cs(_callbackCritSectPtr); if (_voiceEngineObserverPtr) { - WebRtc_Word32 channel = VoEChannelId(id); + int32_t channel = VoEChannelId(id); assert(channel == _channelId); // Reset timeout mechanism _rtpPacketTimedOut = false; @@ -536,7 +536,7 @@ Channel::OnReceivedPacket(const WebRtc_Word32 id, } void -Channel::OnPeriodicDeadOrAlive(const WebRtc_Word32 id, +Channel::OnPeriodicDeadOrAlive(const int32_t id, const RTPAliveType alive) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -548,7 +548,7 @@ Channel::OnPeriodicDeadOrAlive(const WebRtc_Word32 id, return; } - WebRtc_Word32 channel = VoEChannelId(id); + int32_t channel = VoEChannelId(id); assert(channel == _channelId); // Use Alive as default to limit risk of false Dead detections @@ -588,9 +588,9 @@ Channel::OnPeriodicDeadOrAlive(const WebRtc_Word32 id, } } -WebRtc_Word32 -Channel::OnReceivedPayloadData(const WebRtc_UWord8* payloadData, - const WebRtc_UWord16 payloadSize, +int32_t +Channel::OnReceivedPayloadData(const uint8_t* payloadData, + const uint16_t payloadSize, const WebRtcRTPHeader* rtpHeader) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), @@ -632,8 +632,7 @@ Channel::OnReceivedPayloadData(const WebRtc_UWord8* payloadData, return 0; } -WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id, - AudioFrame& audioFrame) +int32_t Channel::GetAudioFrame(const int32_t id, AudioFrame& audioFrame) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::GetAudioFrame(id=%d)", id); @@ -715,7 +714,7 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id, _outputExternalMediaCallbackPtr->Process( _channelId, kPlaybackPerChannel, - (WebRtc_Word16*)audioFrame.data_, + (int16_t*)audioFrame.data_, audioFrame.samples_per_channel_, audioFrame.sample_rate_hz_, isStereo); @@ -738,8 +737,8 @@ WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id, return 0; } -WebRtc_Word32 -Channel::NeededFrequency(const WebRtc_Word32 id) +int32_t +Channel::NeededFrequency(const int32_t id) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::NeededFrequency(id=%d)", id); @@ -747,7 +746,7 @@ Channel::NeededFrequency(const WebRtc_Word32 id) int highestNeeded = 0; // Determine highest needed receive frequency - WebRtc_Word32 receiveFrequency = _audioCodingModule.ReceiveFrequency(); + int32_t receiveFrequency = _audioCodingModule.ReceiveFrequency(); // Return the bigger of playout and receive frequency in the ACM. 
if (_audioCodingModule.PlayoutFrequency() > receiveFrequency) @@ -778,10 +777,10 @@ Channel::NeededFrequency(const WebRtc_Word32 id) return(highestNeeded); } -WebRtc_Word32 +int32_t Channel::CreateChannel(Channel*& channel, - const WebRtc_Word32 channelId, - const WebRtc_UWord32 instanceId) + const int32_t channelId, + const uint32_t instanceId) { WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId,channelId), "Channel::CreateChannel(channelId=%d, instanceId=%d)", @@ -800,8 +799,7 @@ Channel::CreateChannel(Channel*& channel, } void -Channel::PlayNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs) +Channel::PlayNotification(const int32_t id, const uint32_t durationMs) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::PlayNotification(id=%d, durationMs=%d)", @@ -811,8 +809,7 @@ Channel::PlayNotification(const WebRtc_Word32 id, } void -Channel::RecordNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs) +Channel::RecordNotification(const int32_t id, const uint32_t durationMs) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::RecordNotification(id=%d, durationMs=%d)", @@ -822,7 +819,7 @@ Channel::RecordNotification(const WebRtc_Word32 id, } void -Channel::PlayFileEnded(const WebRtc_Word32 id) +Channel::PlayFileEnded(const int32_t id) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::PlayFileEnded(id=%d)", id); @@ -850,7 +847,7 @@ Channel::PlayFileEnded(const WebRtc_Word32 id) } void -Channel::RecordFileEnded(const WebRtc_Word32 id) +Channel::RecordFileEnded(const int32_t id) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::RecordFileEnded(id=%d)", id); @@ -866,8 +863,8 @@ Channel::RecordFileEnded(const WebRtc_Word32 id) " shutdown"); } -Channel::Channel(const WebRtc_Word32 channelId, - const WebRtc_UWord32 instanceId) : +Channel::Channel(const int32_t channelId, + const uint32_t instanceId) : _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()), _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()), _instanceId(instanceId), @@ -1072,7 +1069,7 @@ Channel::~Channel() delete &_fileCritSect; } -WebRtc_Word32 +int32_t Channel::Init() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1153,7 +1150,7 @@ Channel::Init() // RTP/RTCP module CodecInst codec; - const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs(); + const uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs(); for (int idx = 0; idx < nSupportedCodecs; idx++) { @@ -1299,7 +1296,7 @@ Channel::Init() return 0; } -WebRtc_Word32 +int32_t Channel::SetEngineInformation(Statistics& engineStatistics, OutputMixer& outputMixer, voe::TransmitMixer& transmitMixer, @@ -1320,7 +1317,7 @@ Channel::SetEngineInformation(Statistics& engineStatistics, return 0; } -WebRtc_Word32 +int32_t Channel::UpdateLocalTimeStamp() { @@ -1328,7 +1325,7 @@ Channel::UpdateLocalTimeStamp() return 0; } -WebRtc_Word32 +int32_t Channel::StartPlayout() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1357,7 +1354,7 @@ Channel::StartPlayout() return 0; } -WebRtc_Word32 +int32_t Channel::StopPlayout() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1384,7 +1381,7 @@ Channel::StopPlayout() return 0; } -WebRtc_Word32 +int32_t Channel::StartSend() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1414,7 +1411,7 @@ Channel::StartSend() return 0; } 
-WebRtc_Word32 +int32_t Channel::StopSend() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1444,7 +1441,7 @@ Channel::StopSend() return 0; } -WebRtc_Word32 +int32_t Channel::StartReceiving() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1458,7 +1455,7 @@ Channel::StartReceiving() return 0; } -WebRtc_Word32 +int32_t Channel::StopReceiving() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1469,7 +1466,7 @@ Channel::StopReceiving() } // Recover DTMF detection status. - WebRtc_Word32 ret = _rtpRtcpModule->SetTelephoneEventForwardToDecoder(true); + int32_t ret = _rtpRtcpModule->SetTelephoneEventForwardToDecoder(true); if (ret != 0) { _engineStatisticsPtr->SetLastError( VE_INVALID_OPERATION, kTraceWarning, @@ -1480,7 +1477,7 @@ Channel::StopReceiving() return 0; } -WebRtc_Word32 +int32_t Channel::SetNetEQPlayoutMode(NetEqModes mode) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1511,7 +1508,7 @@ Channel::SetNetEQPlayoutMode(NetEqModes mode) return 0; } -WebRtc_Word32 +int32_t Channel::GetNetEQPlayoutMode(NetEqModes& mode) { const AudioPlayoutMode playoutMode = _audioCodingModule.PlayoutMode(); @@ -1535,7 +1532,7 @@ Channel::GetNetEQPlayoutMode(NetEqModes& mode) return 0; } -WebRtc_Word32 +int32_t Channel::SetOnHoldStatus(bool enable, OnHoldModes mode) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1556,7 +1553,7 @@ Channel::SetOnHoldStatus(bool enable, OnHoldModes mode) return 0; } -WebRtc_Word32 +int32_t Channel::GetOnHoldStatus(bool& enabled, OnHoldModes& mode) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1580,7 +1577,7 @@ Channel::GetOnHoldStatus(bool& enabled, OnHoldModes& mode) return 0; } -WebRtc_Word32 +int32_t Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1598,7 +1595,7 @@ Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) return 0; } -WebRtc_Word32 +int32_t Channel::DeRegisterVoiceEngineObserver() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1616,19 +1613,19 @@ Channel::DeRegisterVoiceEngineObserver() return 0; } -WebRtc_Word32 +int32_t Channel::GetSendCodec(CodecInst& codec) { return (_audioCodingModule.SendCodec(&codec)); } -WebRtc_Word32 +int32_t Channel::GetRecCodec(CodecInst& codec) { return (_audioCodingModule.ReceiveCodec(&codec)); } -WebRtc_Word32 +int32_t Channel::SetSendCodec(const CodecInst& codec) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1664,7 +1661,7 @@ Channel::SetSendCodec(const CodecInst& codec) return 0; } -WebRtc_Word32 +int32_t Channel::SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1681,7 +1678,7 @@ Channel::SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX) return 0; } -WebRtc_Word32 +int32_t Channel::GetVADStatus(bool& enabledVAD, ACMVADMode& mode, bool& disabledDTX) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1697,7 +1694,7 @@ Channel::GetVADStatus(bool& enabledVAD, ACMVADMode& mode, bool& disabledDTX) return 0; } -WebRtc_Word32 +int32_t Channel::SetRecPayloadType(const CodecInst& codec) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1722,7 +1719,7 @@ Channel::SetRecPayloadType(const CodecInst& codec) { // De-register the selected codec (RTP/RTCP module and ACM) 
- WebRtc_Word8 pltype(-1); + int8_t pltype(-1); CodecInst rxCodec = codec; // Get payload type for the given codec @@ -1774,12 +1771,12 @@ Channel::SetRecPayloadType(const CodecInst& codec) return 0; } -WebRtc_Word32 +int32_t Channel::GetRecPayloadType(CodecInst& codec) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::GetRecPayloadType()"); - WebRtc_Word8 payloadType(-1); + int8_t payloadType(-1); if (_rtpRtcpModule->ReceivePayloadType(codec, &payloadType) != 0) { _engineStatisticsPtr->SetLastError( @@ -1793,7 +1790,7 @@ Channel::GetRecPayloadType(CodecInst& codec) return 0; } -WebRtc_Word32 +int32_t Channel::SetAMREncFormat(AmrMode mode) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1803,7 +1800,7 @@ Channel::SetAMREncFormat(AmrMode mode) return -1; } -WebRtc_Word32 +int32_t Channel::SetAMRDecFormat(AmrMode mode) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1813,7 +1810,7 @@ Channel::SetAMRDecFormat(AmrMode mode) return -1; } -WebRtc_Word32 +int32_t Channel::SetAMRWbEncFormat(AmrMode mode) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1824,7 +1821,7 @@ Channel::SetAMRWbEncFormat(AmrMode mode) } -WebRtc_Word32 +int32_t Channel::SetAMRWbDecFormat(AmrMode mode) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1834,14 +1831,14 @@ Channel::SetAMRWbDecFormat(AmrMode mode) return -1; } -WebRtc_Word32 +int32_t Channel::SetSendCNPayloadType(int type, PayloadFrequencies frequency) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::SetSendCNPayloadType()"); CodecInst codec; - WebRtc_Word32 samplingFreqHz(-1); + int32_t samplingFreqHz(-1); const int kMono = 1; if (frequency == kFreq32000Hz) samplingFreqHz = 32000; @@ -1883,7 +1880,7 @@ Channel::SetSendCNPayloadType(int type, PayloadFrequencies frequency) return 0; } -WebRtc_Word32 +int32_t Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -1909,7 +1906,7 @@ Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize) return -1; } - WebRtc_UWord8 initFrameSizeMsec(0); + uint8_t initFrameSizeMsec(0); if (16000 == sendCodec.plfreq) { // Note that 0 is a valid and corresponds to "use default @@ -1923,7 +1920,7 @@ Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize) return -1; } // 30 or 60ms - initFrameSizeMsec = (WebRtc_UWord8)(sendCodec.pacsize / 16); + initFrameSizeMsec = (uint8_t)(sendCodec.pacsize / 16); } else if (32000 == sendCodec.plfreq) { @@ -1936,7 +1933,7 @@ Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize) "SetISACInitTargetRate() invalid target rate - 2"); return -1; } - initFrameSizeMsec = (WebRtc_UWord8)(sendCodec.pacsize / 32); // 30ms + initFrameSizeMsec = (uint8_t)(sendCodec.pacsize / 32); // 30ms } if (_audioCodingModule.ConfigISACBandwidthEstimator( @@ -1951,7 +1948,7 @@ Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize) return 0; } -WebRtc_Word32 +int32_t Channel::SetISACMaxRate(int rateBps) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -2016,7 +2013,7 @@ Channel::SetISACMaxRate(int rateBps) return 0; } -WebRtc_Word32 +int32_t Channel::SetISACMaxPayloadSize(int sizeBytes) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -2076,7 +2073,7 @@ Channel::SetISACMaxPayloadSize(int sizeBytes) return 0; } -WebRtc_Word32 Channel::RegisterExternalTransport(Transport& 
transport) +int32_t Channel::RegisterExternalTransport(Transport& transport) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), "Channel::RegisterExternalTransport()"); @@ -2095,7 +2092,7 @@ WebRtc_Word32 Channel::RegisterExternalTransport(Transport& transport) return 0; } -WebRtc_Word32 +int32_t Channel::DeRegisterExternalTransport() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -2118,20 +2115,19 @@ Channel::DeRegisterExternalTransport() return 0; } -WebRtc_Word32 Channel::ReceivedRTPPacket(const WebRtc_Word8* data, - WebRtc_Word32 length) { +int32_t Channel::ReceivedRTPPacket(const int8_t* data, int32_t length) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::ReceivedRTPPacket()"); // Store playout timestamp for the received RTP packet - WebRtc_UWord32 playoutTimestamp(0); + uint32_t playoutTimestamp(0); if (GetPlayoutTimeStamp(playoutTimestamp) == 0) { _playoutTimeStampRTP = playoutTimestamp; } // Dump the RTP packet to a file (if RTP dump is enabled). - if (_rtpDumpIn.DumpPacket((const WebRtc_UWord8*)data, - (WebRtc_UWord16)length) == -1) { + if (_rtpDumpIn.DumpPacket((const uint8_t*)data, + (uint16_t)length) == -1) { WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::SendPacket() RTP dump to input file failed"); @@ -2140,8 +2136,8 @@ WebRtc_Word32 Channel::ReceivedRTPPacket(const WebRtc_Word8* data, // Deliver RTP packet to RTP/RTCP module for parsing // The packet will be pushed back to the channel thru the // OnReceivedPayloadData callback so we don't push it to the ACM here - if (_rtpRtcpModule->IncomingPacket((const WebRtc_UWord8*)data, - (WebRtc_UWord16)length) == -1) { + if (_rtpRtcpModule->IncomingPacket((const uint8_t*)data, + (uint16_t)length) == -1) { _engineStatisticsPtr->SetLastError( VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning, "Channel::IncomingRTPPacket() RTP packet is invalid"); @@ -2149,27 +2145,26 @@ WebRtc_Word32 Channel::ReceivedRTPPacket(const WebRtc_Word8* data, return 0; } -WebRtc_Word32 Channel::ReceivedRTCPPacket(const WebRtc_Word8* data, - WebRtc_Word32 length) { +int32_t Channel::ReceivedRTCPPacket(const int8_t* data, int32_t length) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::ReceivedRTCPPacket()"); // Store playout timestamp for the received RTCP packet - WebRtc_UWord32 playoutTimestamp(0); + uint32_t playoutTimestamp(0); if (GetPlayoutTimeStamp(playoutTimestamp) == 0) { _playoutTimeStampRTCP = playoutTimestamp; } // Dump the RTCP packet to a file (if RTP dump is enabled). 
- if (_rtpDumpIn.DumpPacket((const WebRtc_UWord8*)data, - (WebRtc_UWord16)length) == -1) { + if (_rtpDumpIn.DumpPacket((const uint8_t*)data, + (uint16_t)length) == -1) { WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::SendPacket() RTCP dump to input file failed"); } // Deliver RTCP packet to RTP/RTCP module for parsing - if (_rtpRtcpModule->IncomingPacket((const WebRtc_UWord8*)data, - (WebRtc_UWord16)length) == -1) { + if (_rtpRtcpModule->IncomingPacket((const uint8_t*)data, + (uint16_t)length) == -1) { _engineStatisticsPtr->SetLastError( VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning, "Channel::IncomingRTPPacket() RTCP packet is invalid"); @@ -2177,15 +2172,15 @@ WebRtc_Word32 Channel::ReceivedRTCPPacket(const WebRtc_Word8* data, return 0; } -WebRtc_Word32 +int32_t Channel::SetPacketTimeoutNotification(bool enable, int timeoutSeconds) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::SetPacketTimeoutNotification()"); if (enable) { - const WebRtc_UWord32 RTPtimeoutMS = 1000*timeoutSeconds; - const WebRtc_UWord32 RTCPtimeoutMS = 0; + const uint32_t RTPtimeoutMS = 1000*timeoutSeconds; + const uint32_t RTCPtimeoutMS = 0; _rtpRtcpModule->SetPacketTimeout(RTPtimeoutMS, RTCPtimeoutMS); _rtpPacketTimeOutIsEnabled = true; _rtpTimeOutSeconds = timeoutSeconds; @@ -2199,7 +2194,7 @@ Channel::SetPacketTimeoutNotification(bool enable, int timeoutSeconds) return 0; } -WebRtc_Word32 +int32_t Channel::GetPacketTimeoutNotification(bool& enabled, int& timeoutSeconds) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -2216,7 +2211,7 @@ Channel::GetPacketTimeoutNotification(bool& enabled, int& timeoutSeconds) return 0; } -WebRtc_Word32 +int32_t Channel::RegisterDeadOrAliveObserver(VoEConnectionObserver& observer) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -2236,7 +2231,7 @@ Channel::RegisterDeadOrAliveObserver(VoEConnectionObserver& observer) return 0; } -WebRtc_Word32 +int32_t Channel::DeRegisterDeadOrAliveObserver() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -2257,7 +2252,7 @@ Channel::DeRegisterDeadOrAliveObserver() return 0; } -WebRtc_Word32 +int32_t Channel::SetPeriodicDeadOrAliveStatus(bool enable, int sampleTimeSeconds) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), @@ -2273,12 +2268,12 @@ Channel::SetPeriodicDeadOrAliveStatus(bool enable, int sampleTimeSeconds) ResetDeadOrAliveCounters(); } bool enabled(false); - WebRtc_UWord8 currentSampleTimeSec(0); + uint8_t currentSampleTimeSec(0); // Store last state (will be used later if dead-or-alive is disabled). _rtpRtcpModule->PeriodicDeadOrAliveStatus(enabled, currentSampleTimeSec); // Update the dead-or-alive state. 
if (_rtpRtcpModule->SetPeriodicDeadOrAliveStatus( - enable, (WebRtc_UWord8)sampleTimeSeconds) != 0) + enable, (uint8_t)sampleTimeSeconds) != 0) { _engineStatisticsPtr->SetLastError( VE_RTP_RTCP_MODULE_ERROR, @@ -2299,12 +2294,12 @@ Channel::SetPeriodicDeadOrAliveStatus(bool enable, int sampleTimeSeconds) return 0; } -WebRtc_Word32 +int32_t Channel::GetPeriodicDeadOrAliveStatus(bool& enabled, int& sampleTimeSeconds) { _rtpRtcpModule->PeriodicDeadOrAliveStatus( enabled, - (WebRtc_UWord8&)sampleTimeSeconds); + (uint8_t&)sampleTimeSeconds); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1), "GetPeriodicDeadOrAliveStatus() => enabled=%d," " sampleTimeSeconds=%d", @@ -2355,7 +2350,7 @@ int Channel::StartPlayingFileLocally(const char* fileName, return -1; } - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); if (_outputFilePlayerPtr->StartPlayingFile( fileName, @@ -2437,7 +2432,7 @@ int Channel::StartPlayingFileLocally(InStream* stream, return -1; } - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); if (_outputFilePlayerPtr->StartPlayingFile(*stream, startPosition, volumeScaling, @@ -2510,7 +2505,7 @@ int Channel::IsPlayingFileLocally() const WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::IsPlayingFileLocally()"); - return (WebRtc_Word32)_outputFilePlaying; + return (int32_t)_outputFilePlaying; } int Channel::RegisterFilePlayingToMixer() @@ -2574,7 +2569,7 @@ int Channel::GetLocalPlayoutPosition(int& positionMs) WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::GetLocalPlayoutPosition(position=?)"); - WebRtc_UWord32 position; + uint32_t position; CriticalSectionScoped cs(&_fileCritSect); @@ -2642,7 +2637,7 @@ int Channel::StartPlayingFileAsMicrophone(const char* fileName, return -1; } - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); if (_inputFilePlayerPtr->StartPlayingFile( fileName, @@ -2717,7 +2712,7 @@ int Channel::StartPlayingFileAsMicrophone(InStream* stream, return -1; } - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); if (_inputFilePlayerPtr->StartPlayingFile(*stream, startPosition, volumeScaling, notificationTime, @@ -2816,7 +2811,7 @@ int Channel::StartRecordingPlayout(const char* fileName, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); // Not supported in VoE + const uint32_t notificationTime(0); // Not supported in VoE CodecInst dummyCodec={100,"L16",16000,320,1,320000}; if ((codecInst != NULL) && @@ -2894,7 +2889,7 @@ int Channel::StartRecordingPlayout(OutStream* stream, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); // Not supported in VoE + const uint32_t notificationTime(0); // Not supported in VoE CodecInst dummyCodec={100,"L16",16000,320,1,320000}; if (codecInst != NULL && codecInst->channels != 1) @@ -2995,10 +2990,10 @@ Channel::SetMixWithMicStatus(bool mix) } int -Channel::GetSpeechOutputLevel(WebRtc_UWord32& level) const +Channel::GetSpeechOutputLevel(uint32_t& level) const { - WebRtc_Word8 currentLevel = _outputAudioLevel.Level(); - level = static_cast<WebRtc_UWord32> (currentLevel); + int8_t currentLevel = _outputAudioLevel.Level(); + level = static_cast<uint32_t> (currentLevel); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId), "GetSpeechOutputLevel() => level=%u", level); @@ -3006,10 +3001,10 @@ Channel::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) const 
+Channel::GetSpeechOutputLevelFullRange(uint32_t& level) const { - WebRtc_Word16 currentLevel = _outputAudioLevel.LevelFullRange(); - level = static_cast<WebRtc_UWord32> (currentLevel); + int16_t currentLevel = _outputAudioLevel.LevelFullRange(); + level = static_cast<uint32_t> (currentLevel); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId), "GetSpeechOutputLevelFullRange() => level=%u", level); @@ -3664,12 +3659,12 @@ Channel::GetRemoteCSRCs(unsigned int arrCSRC[15]) "GetRemoteCSRCs() invalid array argument"); return -1; } - WebRtc_UWord32 arrOfCSRC[kRtpCsrcSize]; - WebRtc_Word32 CSRCs(0); + uint32_t arrOfCSRC[kRtpCsrcSize]; + int32_t CSRCs(0); CSRCs = _rtpRtcpModule->CSRCs(arrOfCSRC); if (CSRCs > 0) { - memcpy(arrCSRC, arrOfCSRC, CSRCs * sizeof(WebRtc_UWord32)); + memcpy(arrCSRC, arrOfCSRC, CSRCs * sizeof(uint32_t)); for (int i = 0; i < (int) CSRCs; i++) { WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, @@ -3789,7 +3784,7 @@ Channel::GetRemoteRTCP_CNAME(char cName[256]) return -1; } char cname[RTCP_CNAME_SIZE]; - const WebRtc_UWord32 remoteSSRC = _rtpRtcpModule->RemoteSSRC(); + const uint32_t remoteSSRC = _rtpRtcpModule->RemoteSSRC(); if (_rtpRtcpModule->RemoteCNAME(remoteSSRC, cname) != 0) { _engineStatisticsPtr->SetLastError( @@ -3864,7 +3859,7 @@ Channel::GetRemoteRTCPData( return -1; } - WebRtc_UWord32 remoteSSRC = _rtpRtcpModule->RemoteSSRC(); + uint32_t remoteSSRC = _rtpRtcpModule->RemoteSSRC(); std::vector<RTCPReportBlock>::const_iterator it = remote_stats.begin(); for (; it != remote_stats.end(); ++it) { if (it->remoteSSRC == remoteSSRC) @@ -3956,11 +3951,11 @@ Channel::GetRTPStatistics( unsigned int& maxJitterMs, unsigned int& discardedPackets) { - WebRtc_UWord8 fraction_lost(0); - WebRtc_UWord32 cum_lost(0); - WebRtc_UWord32 ext_max(0); - WebRtc_UWord32 jitter(0); - WebRtc_UWord32 max_jitter(0); + uint8_t fraction_lost(0); + uint32_t cum_lost(0); + uint32_t ext_max(0); + uint32_t jitter(0); + uint32_t max_jitter(0); // The jitter statistics is updated for each received RTP packet and is // based on received packets. @@ -3976,7 +3971,7 @@ "RTP/RTCP module"); } - const WebRtc_Word32 playoutFrequency = + const int32_t playoutFrequency = _audioCodingModule.PlayoutFrequency(); if (playoutFrequency > 0) { @@ -4058,11 +4053,11 @@ int Channel::GetRemoteRTCPReportBlocks( int Channel::GetRTPStatistics(CallStatistics& stats) { - WebRtc_UWord8 fraction_lost(0); - WebRtc_UWord32 cum_lost(0); - WebRtc_UWord32 ext_max(0); - WebRtc_UWord32 jitter(0); - WebRtc_UWord32 max_jitter(0); + uint8_t fraction_lost(0); + uint32_t cum_lost(0); + uint32_t ext_max(0); + uint32_t jitter(0); + uint32_t max_jitter(0); // --- Part one of the final structure (four values) @@ -4094,7 +4089,7 @@ Channel::GetRTPStatistics(CallStatistics& stats) // --- Part two of the final structure (one value) - WebRtc_UWord16 RTT(0); + uint16_t RTT(0); RTCPMethod method = _rtpRtcpModule->RTCP(); if (method == kRtcpOff) { @@ -4105,12 +4100,12 @@ } else { // The remote SSRC will be zero if no RTP packet has been received. 
- WebRtc_UWord32 remoteSSRC = _rtpRtcpModule->RemoteSSRC(); + uint32_t remoteSSRC = _rtpRtcpModule->RemoteSSRC(); if (remoteSSRC > 0) { - WebRtc_UWord16 avgRTT(0); - WebRtc_UWord16 maxRTT(0); - WebRtc_UWord16 minRTT(0); + uint16_t avgRTT(0); + uint16_t maxRTT(0); + uint16_t minRTT(0); if (_rtpRtcpModule->RTT(remoteSSRC, &RTT, &avgRTT, &minRTT, &maxRTT) != 0) @@ -4137,10 +4132,10 @@ Channel::GetRTPStatistics(CallStatistics& stats) // --- Part three of the final structure (four values) - WebRtc_UWord32 bytesSent(0); - WebRtc_UWord32 packetsSent(0); - WebRtc_UWord32 bytesReceived(0); - WebRtc_UWord32 packetsReceived(0); + uint32_t bytesSent(0); + uint32_t packetsSent(0); + uint32_t bytesReceived(0); + uint32_t packetsReceived(0); if (_rtpRtcpModule->DataCountersRTP(&bytesSent, &packetsSent, @@ -4203,7 +4198,7 @@ Channel::GetFECStatus(bool& enabled, int& redPayloadtype) enabled = _audioCodingModule.FECStatus(); if (enabled) { - WebRtc_Word8 payloadType(0); + int8_t payloadType(0); if (_rtpRtcpModule->SendREDPayloadType(payloadType) != 0) { _engineStatisticsPtr->SetLastError( @@ -4360,7 +4355,7 @@ Channel::InsertExtraRTPPacket(unsigned char payloadType, // received from the capture device as // undefined for voice for now. -1, - (const WebRtc_UWord8*) payloadData, + (const uint8_t*) payloadData, payloadSize) != 0) { _engineStatisticsPtr->SetLastError( @@ -4372,7 +4367,7 @@ Channel::InsertExtraRTPPacket(unsigned char payloadType, return 0; } -WebRtc_UWord32 +uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), @@ -4382,7 +4377,7 @@ Channel::Demultiplex(const AudioFrame& audioFrame) return 0; } -WebRtc_UWord32 +uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), @@ -4414,7 +4409,7 @@ Channel::PrepareEncodeAndSend(int mixingFrequency) _inputExternalMediaCallbackPtr->Process( _channelId, kRecordingPerChannel, - (WebRtc_Word16*)_audioFrame.data_, + (int16_t*)_audioFrame.data_, _audioFrame.samples_per_channel_, _audioFrame.sample_rate_hz_, isStereo); @@ -4460,7 +4455,7 @@ Channel::PrepareEncodeAndSend(int mixingFrequency) return 0; } -WebRtc_UWord32 +uint32_t Channel::EncodeAndSend() { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), @@ -4594,7 +4589,7 @@ Channel::ResetRTCPStatistics() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::ResetRTCPStatistics()"); - WebRtc_UWord32 remoteSSRC(0); + uint32_t remoteSSRC(0); remoteSSRC = _rtpRtcpModule->RemoteSSRC(); return _rtpRtcpModule->ResetRTT(remoteSSRC); } @@ -4618,11 +4613,11 @@ Channel::GetRoundTripTimeSummary(StatVal& delaysMs) const return 0; } - WebRtc_UWord32 remoteSSRC; - WebRtc_UWord16 RTT; - WebRtc_UWord16 avgRTT; - WebRtc_UWord16 maxRTT; - WebRtc_UWord16 minRTT; + uint32_t remoteSSRC; + uint16_t RTT; + uint16_t avgRTT; + uint16_t maxRTT; + uint16_t minRTT; // The remote SSRC will be zero if no RTP packet has been received. 
remoteSSRC = _rtpRtcpModule->RemoteSSRC(); if (remoteSSRC == 0) @@ -4724,7 +4719,7 @@ Channel::GetPlayoutTimestamp(unsigned int& timestamp) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::GetPlayoutTimestamp()"); - WebRtc_UWord32 playoutTimestamp(0); + uint32_t playoutTimestamp(0); if (GetPlayoutTimeStamp(playoutTimestamp) != 0) { _engineStatisticsPtr->SetLastError( @@ -4793,10 +4788,10 @@ Channel::GetRtpRtcp(RtpRtcp* &rtpRtcpModule) const // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use // a shared helper. -WebRtc_Word32 +int32_t Channel::MixOrReplaceAudioWithFile(const int mixingFrequency) { - scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]); + scoped_array<int16_t> fileBuffer(new int16_t[640]); int fileSamples(0); { @@ -4860,13 +4855,13 @@ Channel::MixOrReplaceAudioWithFile(const int mixingFrequency) return 0; } -WebRtc_Word32 +int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, const int mixingFrequency) { assert(mixingFrequency <= 32000); - scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]); + scoped_array<int16_t> fileBuffer(new int16_t[640]); int fileSamples(0); { @@ -4923,9 +4918,9 @@ Channel::InsertInbandDtmfTone() _inbandDtmfGenerator.DelaySinceLastTone() > kMinTelephoneEventSeparationMs) { - WebRtc_Word8 eventCode(0); - WebRtc_UWord16 lengthMs(0); - WebRtc_UWord8 attenuationDb(0); + int8_t eventCode(0); + uint16_t lengthMs(0); + uint8_t attenuationDb(0); eventCode = _inbandDtmfQueue.NextDtmf(&lengthMs, &attenuationDb); _inbandDtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb); @@ -4940,7 +4935,7 @@ Channel::InsertInbandDtmfTone() if (_inbandDtmfGenerator.IsAddingTone()) { - WebRtc_UWord16 frequency(0); + uint16_t frequency(0); _inbandDtmfGenerator.GetSampleRate(frequency); if (frequency != _audioFrame.sample_rate_hz_) @@ -4948,14 +4943,14 @@ Channel::InsertInbandDtmfTone() // Update sample rate of Dtmf tone since the mixing frequency // has changed. _inbandDtmfGenerator.SetSampleRate( - (WebRtc_UWord16) (_audioFrame.sample_rate_hz_)); + (uint16_t) (_audioFrame.sample_rate_hz_)); // Reset the tone to be added taking the new sample rate into // account. 
_inbandDtmfGenerator.ResetTone(); } - WebRtc_Word16 toneBuffer[320]; - WebRtc_UWord16 toneSamples(0); + int16_t toneBuffer[320]; + uint16_t toneSamples(0); // Get 10ms tone segment and set time since last tone to zero if (_inbandDtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1) { @@ -4988,10 +4983,10 @@ Channel::InsertInbandDtmfTone() return 0; } -WebRtc_Word32 -Channel::GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp) +int32_t +Channel::GetPlayoutTimeStamp(uint32_t& playoutTimestamp) { - WebRtc_UWord32 timestamp(0); + uint32_t timestamp(0); CodecInst currRecCodec; if (_audioCodingModule.PlayoutTimestamp(&timestamp) == -1) { @@ -5002,7 +4997,7 @@ Channel::GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp) return -1; } - WebRtc_UWord16 delayMS(0); + uint16_t delayMS(0); if (_audioDeviceModulePtr->PlayoutDelay(&delayMS) == -1) { WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId), @@ -5011,7 +5006,7 @@ return -1; } - WebRtc_Word32 playoutFrequency = _audioCodingModule.PlayoutFrequency(); + int32_t playoutFrequency = _audioCodingModule.PlayoutFrequency(); if (_audioCodingModule.ReceiveCodec(&currRecCodec) == 0) { if (STR_CASE_CMP("G722", currRecCodec.plname) == 0) { playoutFrequency = 8000; @@ -5049,7 +5044,7 @@ int Channel::GetDeadOrAliveCounters(int& countDead, int& countAlive) const { bool enabled; - WebRtc_UWord8 timeSec; + uint8_t timeSec; _rtpRtcpModule->PeriodicDeadOrAliveStatus(enabled, timeSec); if (!enabled) @@ -5060,7 +5055,7 @@ Channel::GetDeadOrAliveCounters(int& countDead, int& countAlive) const return 0; } -WebRtc_Word32 +int32_t Channel::SendPacketRaw(const void *data, int len, bool RTCP) { if (_transportPtr == NULL) @@ -5077,15 +5072,15 @@ Channel::SendPacketRaw(const void *data, int len, bool RTCP) } } -WebRtc_Word32 -Channel::UpdatePacketDelay(const WebRtc_UWord32 timestamp, - const WebRtc_UWord16 sequenceNumber) +int32_t +Channel::UpdatePacketDelay(const uint32_t timestamp, + const uint16_t sequenceNumber) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), "Channel::UpdatePacketDelay(timestamp=%lu, sequenceNumber=%u)", timestamp, sequenceNumber); - WebRtc_Word32 rtpReceiveFrequency(0); + int32_t rtpReceiveFrequency(0); // Get frequency of last received payload rtpReceiveFrequency = _audioCodingModule.ReceiveFrequency(); @@ -5107,23 +5102,23 @@ Channel::UpdatePacketDelay(const WebRtc_UWord32 timestamp, } } - const WebRtc_UWord32 timeStampDiff = timestamp - _playoutTimeStampRTP; - WebRtc_UWord32 timeStampDiffMs(0); + const uint32_t timeStampDiff = timestamp - _playoutTimeStampRTP; + uint32_t timeStampDiffMs(0); if (timeStampDiff > 0) { switch (rtpReceiveFrequency) { case 8000: - timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 3); + timeStampDiffMs = static_cast<uint32_t>(timeStampDiff >> 3); break; case 16000: - timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 4); + timeStampDiffMs = static_cast<uint32_t>(timeStampDiff >> 4); break; case 32000: - timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 5); + timeStampDiffMs = static_cast<uint32_t>(timeStampDiff >> 5); break; case 48000: - timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff / 48); + timeStampDiffMs = static_cast<uint32_t>(timeStampDiff / 48); break; default: WEBRTC_TRACE(kTraceWarning, kTraceVoice, @@ -5152,22 +5147,22 @@ Channel::UpdatePacketDelay(const WebRtc_UWord32 timestamp, if (sequenceNumber - _previousSequenceNumber == 1) { - WebRtc_UWord16 packetDelayMs = 0; + uint16_t packetDelayMs = 0; switch (rtpReceiveFrequency) { case 8000: - packetDelayMs = 
static_cast<WebRtc_UWord16>( + packetDelayMs = static_cast<uint16_t>( (timestamp - _previousTimestamp) >> 3); break; case 16000: - packetDelayMs = static_cast<WebRtc_UWord16>( + packetDelayMs = static_cast<uint16_t>( (timestamp - _previousTimestamp) >> 4); break; case 32000: - packetDelayMs = static_cast<WebRtc_UWord16>( + packetDelayMs = static_cast<uint16_t>( (timestamp - _previousTimestamp) >> 5); break; case 48000: - packetDelayMs = static_cast<WebRtc_UWord16>( + packetDelayMs = static_cast<uint16_t>( (timestamp - _previousTimestamp) / 48); break; } @@ -5191,7 +5186,7 @@ Channel::RegisterReceiveCodecsToRTPModule() CodecInst codec; - const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs(); + const uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs(); for (int idx = 0; idx < nSupportedCodecs; idx++) { diff --git a/webrtc/voice_engine/channel.h b/webrtc/voice_engine/channel.h index 1084b4330..972b5abc8 100644 --- a/webrtc/voice_engine/channel.h +++ b/webrtc/voice_engine/channel.h @@ -71,12 +71,12 @@ public: enum {KNumberOfSocketBuffers = 8}; public: virtual ~Channel(); - static WebRtc_Word32 CreateChannel(Channel*& channel, - const WebRtc_Word32 channelId, - const WebRtc_UWord32 instanceId); - Channel(const WebRtc_Word32 channelId, const WebRtc_UWord32 instanceId); - WebRtc_Word32 Init(); - WebRtc_Word32 SetEngineInformation( + static int32_t CreateChannel(Channel*& channel, + const int32_t channelId, + const uint32_t instanceId); + Channel(const int32_t channelId, const uint32_t instanceId); + int32_t Init(); + int32_t SetEngineInformation( Statistics& engineStatistics, OutputMixer& outputMixer, TransmitMixer& transmitMixer, @@ -84,44 +84,42 @@ public: AudioDeviceModule& audioDeviceModule, VoiceEngineObserver* voiceEngineObserver, CriticalSectionWrapper* callbackCritSect); - WebRtc_Word32 UpdateLocalTimeStamp(); + int32_t UpdateLocalTimeStamp(); public: // API methods // VoEBase - WebRtc_Word32 StartPlayout(); - WebRtc_Word32 StopPlayout(); - WebRtc_Word32 StartSend(); - WebRtc_Word32 StopSend(); - WebRtc_Word32 StartReceiving(); - WebRtc_Word32 StopReceiving(); + int32_t StartPlayout(); + int32_t StopPlayout(); + int32_t StartSend(); + int32_t StopSend(); + int32_t StartReceiving(); + int32_t StopReceiving(); - WebRtc_Word32 SetNetEQPlayoutMode(NetEqModes mode); - WebRtc_Word32 GetNetEQPlayoutMode(NetEqModes& mode); - WebRtc_Word32 SetOnHoldStatus(bool enable, OnHoldModes mode); - WebRtc_Word32 GetOnHoldStatus(bool& enabled, OnHoldModes& mode); - WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer); - WebRtc_Word32 DeRegisterVoiceEngineObserver(); + int32_t SetNetEQPlayoutMode(NetEqModes mode); + int32_t GetNetEQPlayoutMode(NetEqModes& mode); + int32_t SetOnHoldStatus(bool enable, OnHoldModes mode); + int32_t GetOnHoldStatus(bool& enabled, OnHoldModes& mode); + int32_t RegisterVoiceEngineObserver(VoiceEngineObserver& observer); + int32_t DeRegisterVoiceEngineObserver(); // VoECodec - WebRtc_Word32 GetSendCodec(CodecInst& codec); - WebRtc_Word32 GetRecCodec(CodecInst& codec); - WebRtc_Word32 SetSendCodec(const CodecInst& codec); - WebRtc_Word32 SetVADStatus(bool enableVAD, ACMVADMode mode, - bool disableDTX); - WebRtc_Word32 GetVADStatus(bool& enabledVAD, ACMVADMode& mode, - bool& disabledDTX); - WebRtc_Word32 SetRecPayloadType(const CodecInst& codec); - WebRtc_Word32 GetRecPayloadType(CodecInst& codec); - WebRtc_Word32 SetAMREncFormat(AmrMode mode); - WebRtc_Word32 SetAMRDecFormat(AmrMode mode); - WebRtc_Word32 SetAMRWbEncFormat(AmrMode mode); - WebRtc_Word32 SetAMRWbDecFormat(AmrMode mode); - WebRtc_Word32 SetSendCNPayloadType(int type, 
PayloadFrequencies frequency); - WebRtc_Word32 SetISACInitTargetRate(int rateBps, bool useFixedFrameSize); - WebRtc_Word32 SetISACMaxRate(int rateBps); - WebRtc_Word32 SetISACMaxPayloadSize(int sizeBytes); + int32_t GetSendCodec(CodecInst& codec); + int32_t GetRecCodec(CodecInst& codec); + int32_t SetSendCodec(const CodecInst& codec); + int32_t SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX); + int32_t GetVADStatus(bool& enabledVAD, ACMVADMode& mode, bool& disabledDTX); + int32_t SetRecPayloadType(const CodecInst& codec); + int32_t GetRecPayloadType(CodecInst& codec); + int32_t SetAMREncFormat(AmrMode mode); + int32_t SetAMRDecFormat(AmrMode mode); + int32_t SetAMRWbEncFormat(AmrMode mode); + int32_t SetAMRWbDecFormat(AmrMode mode); + int32_t SetSendCNPayloadType(int type, PayloadFrequencies frequency); + int32_t SetISACInitTargetRate(int rateBps, bool useFixedFrameSize); + int32_t SetISACMaxRate(int rateBps); + int32_t SetISACMaxPayloadSize(int sizeBytes); // VoE dual-streaming. int SetSecondarySendCodec(const CodecInst& codec, int red_payload_type); @@ -129,21 +127,16 @@ public: int GetSecondarySendCodec(CodecInst* codec); // VoENetwork - WebRtc_Word32 RegisterExternalTransport(Transport& transport); - WebRtc_Word32 DeRegisterExternalTransport(); - WebRtc_Word32 ReceivedRTPPacket(const WebRtc_Word8* data, - WebRtc_Word32 length); - WebRtc_Word32 ReceivedRTCPPacket(const WebRtc_Word8* data, - WebRtc_Word32 length); - WebRtc_Word32 SetPacketTimeoutNotification(bool enable, int timeoutSeconds); - WebRtc_Word32 GetPacketTimeoutNotification(bool& enabled, - int& timeoutSeconds); - WebRtc_Word32 RegisterDeadOrAliveObserver(VoEConnectionObserver& observer); - WebRtc_Word32 DeRegisterDeadOrAliveObserver(); - WebRtc_Word32 SetPeriodicDeadOrAliveStatus(bool enable, - int sampleTimeSeconds); - WebRtc_Word32 GetPeriodicDeadOrAliveStatus(bool& enabled, - int& sampleTimeSeconds); + int32_t RegisterExternalTransport(Transport& transport); + int32_t DeRegisterExternalTransport(); + int32_t ReceivedRTPPacket(const int8_t* data, int32_t length); + int32_t ReceivedRTCPPacket(const int8_t* data, int32_t length); + int32_t SetPacketTimeoutNotification(bool enable, int timeoutSeconds); + int32_t GetPacketTimeoutNotification(bool& enabled, int& timeoutSeconds); + int32_t RegisterDeadOrAliveObserver(VoEConnectionObserver& observer); + int32_t DeRegisterDeadOrAliveObserver(); + int32_t SetPeriodicDeadOrAliveStatus(bool enable, int sampleTimeSeconds); + int32_t GetPeriodicDeadOrAliveStatus(bool& enabled, int& sampleTimeSeconds); // VoEFile int StartPlayingFileLocally(const char* fileName, const bool loop, @@ -190,8 +183,8 @@ public: int SetExternalMixing(bool enabled); // VoEVolumeControl - int GetSpeechOutputLevel(WebRtc_UWord32& level) const; - int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) const; + int GetSpeechOutputLevel(uint32_t& level) const; + int GetSpeechOutputLevelFullRange(uint32_t& level) const; int SetMute(const bool enable); bool Mute() const; int SetOutputVolumePan(float left, float right); @@ -290,66 +283,65 @@ public: public: // From AudioPacketizationCallback in the ACM - WebRtc_Word32 SendData(FrameType frameType, - WebRtc_UWord8 payloadType, - WebRtc_UWord32 timeStamp, - const WebRtc_UWord8* payloadData, - WebRtc_UWord16 payloadSize, - const RTPFragmentationHeader* fragmentation); + int32_t SendData(FrameType frameType, + uint8_t payloadType, + uint32_t timeStamp, + const uint8_t* payloadData, + uint16_t payloadSize, + const RTPFragmentationHeader* fragmentation); // 
From ACMVADCallback in the ACM - WebRtc_Word32 InFrameType(WebRtc_Word16 frameType); + int32_t InFrameType(int16_t frameType); public: - WebRtc_Word32 OnRxVadDetected(const int vadDecision); + int32_t OnRxVadDetected(const int vadDecision); public: // From RtpData in the RTP/RTCP module - WebRtc_Word32 OnReceivedPayloadData(const WebRtc_UWord8* payloadData, - const WebRtc_UWord16 payloadSize, - const WebRtcRTPHeader* rtpHeader); + int32_t OnReceivedPayloadData(const uint8_t* payloadData, + const uint16_t payloadSize, + const WebRtcRTPHeader* rtpHeader); public: // From RtpFeedback in the RTP/RTCP module - WebRtc_Word32 OnInitializeDecoder( - const WebRtc_Word32 id, - const WebRtc_Word8 payloadType, + int32_t OnInitializeDecoder( + const int32_t id, + const int8_t payloadType, const char payloadName[RTP_PAYLOAD_NAME_SIZE], const int frequency, - const WebRtc_UWord8 channels, - const WebRtc_UWord32 rate); + const uint8_t channels, + const uint32_t rate); - void OnPacketTimeout(const WebRtc_Word32 id); + void OnPacketTimeout(const int32_t id); - void OnReceivedPacket(const WebRtc_Word32 id, - const RtpRtcpPacketType packetType); + void OnReceivedPacket(const int32_t id, const RtpRtcpPacketType packetType); - void OnPeriodicDeadOrAlive(const WebRtc_Word32 id, + void OnPeriodicDeadOrAlive(const int32_t id, const RTPAliveType alive); - void OnIncomingSSRCChanged(const WebRtc_Word32 id, - const WebRtc_UWord32 SSRC); + void OnIncomingSSRCChanged(const int32_t id, + const uint32_t SSRC); - void OnIncomingCSRCChanged(const WebRtc_Word32 id, - const WebRtc_UWord32 CSRC, const bool added); + void OnIncomingCSRCChanged(const int32_t id, + const uint32_t CSRC, const bool added); public: // From RtcpFeedback in the RTP/RTCP module - void OnApplicationDataReceived(const WebRtc_Word32 id, - const WebRtc_UWord8 subType, - const WebRtc_UWord32 name, - const WebRtc_UWord16 length, - const WebRtc_UWord8* data); + void OnApplicationDataReceived(const int32_t id, + const uint8_t subType, + const uint32_t name, + const uint16_t length, + const uint8_t* data); public: // From RtpAudioFeedback in the RTP/RTCP module - void OnReceivedTelephoneEvent(const WebRtc_Word32 id, - const WebRtc_UWord8 event, + void OnReceivedTelephoneEvent(const int32_t id, + const uint8_t event, const bool endOfEvent); - void OnPlayTelephoneEvent(const WebRtc_Word32 id, - const WebRtc_UWord8 event, - const WebRtc_UWord16 lengthMs, - const WebRtc_UWord8 volume); + void OnPlayTelephoneEvent(const int32_t id, + const uint8_t event, + const uint16_t lengthMs, + const uint8_t volume); public: // From Transport (called by the RTP/RTCP module) @@ -358,9 +350,8 @@ public: public: // From MixerParticipant - WebRtc_Word32 GetAudioFrame(const WebRtc_Word32 id, - AudioFrame& audioFrame); - WebRtc_Word32 NeededFrequency(const WebRtc_Word32 id); + int32_t GetAudioFrame(const int32_t id, AudioFrame& audioFrame); + int32_t NeededFrequency(const int32_t id); public: // From MonitorObserver @@ -368,19 +359,19 @@ public: public: // From FileCallback - void PlayNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs); - void RecordNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs); - void PlayFileEnded(const WebRtc_Word32 id); - void RecordFileEnded(const WebRtc_Word32 id); + void PlayNotification(const int32_t id, + const uint32_t durationMs); + void RecordNotification(const int32_t id, + const uint32_t durationMs); + void PlayFileEnded(const int32_t id); + void RecordFileEnded(const int32_t id); public: - WebRtc_UWord32 
InstanceId() const + uint32_t InstanceId() const { return _instanceId; } - WebRtc_Word32 ChannelId() const + int32_t ChannelId() const { return _channelId; } @@ -420,25 +411,24 @@ public: { return _rtpRtcpModule.get(); } - WebRtc_Word8 OutputEnergyLevel() const + int8_t OutputEnergyLevel() const { return _outputAudioLevel.Level(); } - WebRtc_UWord32 Demultiplex(const AudioFrame& audioFrame); - WebRtc_UWord32 PrepareEncodeAndSend(int mixingFrequency); - WebRtc_UWord32 EncodeAndSend(); + uint32_t Demultiplex(const AudioFrame& audioFrame); + uint32_t PrepareEncodeAndSend(int mixingFrequency); + uint32_t EncodeAndSend(); private: int InsertInbandDtmfTone(); - WebRtc_Word32 + int32_t MixOrReplaceAudioWithFile(const int mixingFrequency); - WebRtc_Word32 MixAudioWithFile(AudioFrame& audioFrame, - const int mixingFrequency); - WebRtc_Word32 GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp); + int32_t MixAudioWithFile(AudioFrame& audioFrame, const int mixingFrequency); + int32_t GetPlayoutTimeStamp(uint32_t& playoutTimestamp); void UpdateDeadOrAliveCounters(bool alive); - WebRtc_Word32 SendPacketRaw(const void *data, int len, bool RTCP); - WebRtc_Word32 UpdatePacketDelay(const WebRtc_UWord32 timestamp, - const WebRtc_UWord16 sequenceNumber); + int32_t SendPacketRaw(const void *data, int len, bool RTCP); + int32_t UpdatePacketDelay(const uint32_t timestamp, + const uint16_t sequenceNumber); void RegisterReceiveCodecsToRTPModule(); int ApmProcessRx(AudioFrame& audioFrame); @@ -446,8 +436,8 @@ private: CriticalSectionWrapper& _fileCritSect; CriticalSectionWrapper& _callbackCritSect; - WebRtc_UWord32 _instanceId; - WebRtc_Word32 _channelId; + uint32_t _instanceId; + int32_t _channelId; private: scoped_ptr<RtpRtcp> _rtpRtcpModule; @@ -458,7 +448,7 @@ private: AudioLevel _outputAudioLevel; bool _externalTransport; AudioFrame _audioFrame; - WebRtc_UWord8 _audioLevel_dBov; + uint8_t _audioLevel_dBov; FilePlayer* _inputFilePlayerPtr; FilePlayer* _outputFilePlayerPtr; FileRecorder* _outputFileRecorderPtr; @@ -474,15 +464,15 @@ private: bool _outputExternalMedia; VoEMediaProcess* _inputExternalMediaCallbackPtr; VoEMediaProcess* _outputExternalMediaCallbackPtr; - WebRtc_UWord8* _encryptionRTPBufferPtr; - WebRtc_UWord8* _decryptionRTPBufferPtr; - WebRtc_UWord8* _encryptionRTCPBufferPtr; - WebRtc_UWord8* _decryptionRTCPBufferPtr; - WebRtc_UWord32 _timeStamp; - WebRtc_UWord8 _sendTelephoneEventPayloadType; - WebRtc_UWord32 _playoutTimeStampRTP; - WebRtc_UWord32 _playoutTimeStampRTCP; - WebRtc_UWord32 _numberOfDiscardedPackets; + uint8_t* _encryptionRTPBufferPtr; + uint8_t* _decryptionRTPBufferPtr; + uint8_t* _encryptionRTCPBufferPtr; + uint8_t* _decryptionRTCPBufferPtr; + uint32_t _timeStamp; + uint8_t _sendTelephoneEventPayloadType; + uint32_t _playoutTimeStampRTP; + uint32_t _playoutTimeStampRTCP; + uint32_t _numberOfDiscardedPackets; private: // uses Statistics* _engineStatisticsPtr; @@ -497,8 +487,8 @@ private: scoped_ptr<AudioProcessing> _rtpAudioProc; AudioProcessing* _rxAudioProcessingModulePtr; // far end AudioProcessing VoERxVadCallback* _rxVadObserverPtr; - WebRtc_Word32 _oldVadDecision; - WebRtc_Word32 _sendFrameType; // Send data is voice, 1-voice, 0-otherwise + int32_t _oldVadDecision; + int32_t _sendFrameType; // Send data is voice, 1-voice, 0-otherwise VoERTPObserver* _rtpObserverPtr; VoERTCPObserver* _rtcpObserverPtr; private: @@ -525,27 +515,27 @@ private: bool _playOutbandDtmfEvent; bool _playInbandDtmfEvent; // VoeRTP_RTCP - WebRtc_UWord8 _extraPayloadType; + uint8_t _extraPayloadType; bool 
_insertExtraRTPPacket; bool _extraMarkerBit; - WebRtc_UWord32 _lastLocalTimeStamp; + uint32_t _lastLocalTimeStamp; uint32_t _lastRemoteTimeStamp; - WebRtc_Word8 _lastPayloadType; + int8_t _lastPayloadType; bool _includeAudioLevelIndication; // VoENetwork bool _rtpPacketTimedOut; bool _rtpPacketTimeOutIsEnabled; - WebRtc_UWord32 _rtpTimeOutSeconds; + uint32_t _rtpTimeOutSeconds; bool _connectionObserver; VoEConnectionObserver* _connectionObserverPtr; - WebRtc_UWord32 _countAliveDetections; - WebRtc_UWord32 _countDeadDetections; + uint32_t _countAliveDetections; + uint32_t _countDeadDetections; AudioFrame::SpeechType _outputSpeechType; // VoEVideoSync - WebRtc_UWord32 _averageDelayMs; - WebRtc_UWord16 _previousSequenceNumber; - WebRtc_UWord32 _previousTimestamp; - WebRtc_UWord16 _recPacketDelayMs; + uint32_t _averageDelayMs; + uint16_t _previousSequenceNumber; + uint32_t _previousTimestamp; + uint16_t _recPacketDelayMs; // VoEAudioProcessing bool _RxVadDetection; bool _rxApmIsEnabled; diff --git a/webrtc/voice_engine/channel_manager.cc b/webrtc/voice_engine/channel_manager.cc index 47cec4adf..72426e45f 100644 --- a/webrtc/voice_engine/channel_manager.cc +++ b/webrtc/voice_engine/channel_manager.cc @@ -17,7 +17,7 @@ namespace webrtc namespace voe { -ChannelManager::ChannelManager(const WebRtc_UWord32 instanceId) : +ChannelManager::ChannelManager(const uint32_t instanceId) : ChannelManagerBase(), _instanceId(instanceId) { @@ -28,12 +28,12 @@ ChannelManager::~ChannelManager() ChannelManagerBase::DestroyAllItems(); } -bool ChannelManager::CreateChannel(WebRtc_Word32& channelId) +bool ChannelManager::CreateChannel(int32_t& channelId) { return ChannelManagerBase::CreateItem(channelId); } -WebRtc_Word32 ChannelManager::DestroyChannel(const WebRtc_Word32 channelId) +int32_t ChannelManager::DestroyChannel(const int32_t channelId) { Channel* deleteChannel = static_cast<Channel*> (ChannelManagerBase::RemoveItem(channelId)); @@ -45,17 +45,17 @@ WebRtc_Word32 ChannelManager::DestroyChannel(const WebRtc_Word32 channelId) return 0; } -WebRtc_Word32 ChannelManager::NumOfChannels() const +int32_t ChannelManager::NumOfChannels() const { return ChannelManagerBase::NumOfItems(); } -WebRtc_Word32 ChannelManager::MaxNumOfChannels() const +int32_t ChannelManager::MaxNumOfChannels() const { return ChannelManagerBase::MaxNumOfItems(); } -void* ChannelManager::NewItem(WebRtc_Word32 itemID) +void* ChannelManager::NewItem(int32_t itemID) { Channel* channel; if (Channel::CreateChannel(channel, itemID, _instanceId) == -1) @@ -71,7 +71,7 @@ void ChannelManager::DeleteItem(void* item) delete deleteItem; } -Channel* ChannelManager::GetChannel(const WebRtc_Word32 channelId) const +Channel* ChannelManager::GetChannel(const int32_t channelId) const { return static_cast<Channel*> (ChannelManagerBase::GetItem(channelId)); } @@ -81,8 +81,8 @@ void ChannelManager::ReleaseChannel() ChannelManagerBase::ReleaseItem(); } -void ChannelManager::GetChannelIds(WebRtc_Word32* channelsArray, - WebRtc_Word32& numOfChannels) const +void ChannelManager::GetChannelIds(int32_t* channelsArray, + int32_t& numOfChannels) const { ChannelManagerBase::GetItemIds(channelsArray, numOfChannels); } @@ -104,7 +104,7 @@ ScopedChannel::ScopedChannel(ChannelManager& chManager) : } ScopedChannel::ScopedChannel(ChannelManager& chManager, - WebRtc_Word32 channelId) : + int32_t channelId) : _chManager(chManager), _channelPtr(NULL) { diff --git a/webrtc/voice_engine/channel_manager.h b/webrtc/voice_engine/channel_manager.h index 6c40ef117..defade1c7 100644 --- 
a/webrtc/voice_engine/channel_manager.h +++ b/webrtc/voice_engine/channel_manager.h @@ -28,18 +28,18 @@ class ChannelManager: private ChannelManagerBase friend class ScopedChannel; public: - bool CreateChannel(WebRtc_Word32& channelId); + bool CreateChannel(int32_t& channelId); - WebRtc_Word32 DestroyChannel(const WebRtc_Word32 channelId); + int32_t DestroyChannel(const int32_t channelId); - WebRtc_Word32 MaxNumOfChannels() const; + int32_t MaxNumOfChannels() const; - WebRtc_Word32 NumOfChannels() const; + int32_t NumOfChannels() const; - void GetChannelIds(WebRtc_Word32* channelsArray, - WebRtc_Word32& numOfChannels) const; + void GetChannelIds(int32_t* channelsArray, + int32_t& numOfChannels) const; - ChannelManager(const WebRtc_UWord32 instanceId); + ChannelManager(const uint32_t instanceId); ~ChannelManager(); @@ -48,17 +48,17 @@ private: ChannelManager& operator=(const ChannelManager&); - Channel* GetChannel(const WebRtc_Word32 channelId) const; + Channel* GetChannel(const int32_t channelId) const; void GetChannels(MapWrapper& channels) const; void ReleaseChannel(); - virtual void* NewItem(WebRtc_Word32 itemID); + virtual void* NewItem(int32_t itemID); virtual void DeleteItem(void* item); - WebRtc_UWord32 _instanceId; + uint32_t _instanceId; }; class ScopedChannel @@ -67,7 +67,7 @@ public: // Can only be created by the channel manager ScopedChannel(ChannelManager& chManager); - ScopedChannel(ChannelManager& chManager, WebRtc_Word32 channelId); + ScopedChannel(ChannelManager& chManager, int32_t channelId); Channel* ChannelPtr(); diff --git a/webrtc/voice_engine/channel_manager_base.cc b/webrtc/voice_engine/channel_manager_base.cc index 268766bf9..500101ce1 100644 --- a/webrtc/voice_engine/channel_manager_base.cc +++ b/webrtc/voice_engine/channel_manager_base.cc @@ -44,10 +44,10 @@ ChannelManagerBase::~ChannelManagerBase() } } -bool ChannelManagerBase::GetFreeItemId(WebRtc_Word32& itemId) +bool ChannelManagerBase::GetFreeItemId(int32_t& itemId) { CriticalSectionScoped cs(_itemsCritSectPtr); - WebRtc_Word32 i(0); + int32_t i(0); while (i < kVoiceEngineMaxNumChannels) { if (_freeItemIds[i]) @@ -61,7 +61,7 @@ bool ChannelManagerBase::GetFreeItemId(WebRtc_Word32& itemId) return false; } -void ChannelManagerBase::AddFreeItemId(WebRtc_Word32 itemId) +void ChannelManagerBase::AddFreeItemId(int32_t itemId) { assert(itemId < kVoiceEngineMaxNumChannels); _freeItemIds[itemId] = true; @@ -75,7 +75,7 @@ void ChannelManagerBase::RemoveFreeItemIds() } } -bool ChannelManagerBase::CreateItem(WebRtc_Word32& itemId) +bool ChannelManagerBase::CreateItem(int32_t& itemId) { _itemsCritSectPtr->Enter(); void* itemPtr; @@ -98,7 +98,7 @@ bool ChannelManagerBase::CreateItem(WebRtc_Word32& itemId) return true; } -void ChannelManagerBase::InsertItem(WebRtc_Word32 itemId, void* item) +void ChannelManagerBase::InsertItem(int32_t itemId, void* item) { CriticalSectionScoped cs(_itemsCritSectPtr); assert(!_items.Find(itemId)); @@ -106,7 +106,7 @@ void ChannelManagerBase::InsertItem(WebRtc_Word32 itemId, void* item) } void* -ChannelManagerBase::RemoveItem(WebRtc_Word32 itemId) +ChannelManagerBase::RemoveItem(int32_t itemId) { CriticalSectionScoped cs(_itemsCritSectPtr); WriteLockScoped wlock(*_itemsRWLockPtr); @@ -135,18 +135,18 @@ void ChannelManagerBase::DestroyAllItems() RemoveFreeItemIds(); } -WebRtc_Word32 ChannelManagerBase::NumOfItems() const +int32_t ChannelManagerBase::NumOfItems() const { return _items.Size(); } -WebRtc_Word32 ChannelManagerBase::MaxNumOfItems() const +int32_t 
ChannelManagerBase::MaxNumOfItems() const { - return static_cast (kVoiceEngineMaxNumChannels); + return static_cast (kVoiceEngineMaxNumChannels); } void* -ChannelManagerBase::GetItem(WebRtc_Word32 itemId) const +ChannelManagerBase::GetItem(int32_t itemId) const { CriticalSectionScoped cs(_itemsCritSectPtr); MapItem* it = _items.Find(itemId); @@ -195,8 +195,8 @@ void ChannelManagerBase::ReleaseItem() _itemsRWLockPtr->ReleaseLockShared(); } -void ChannelManagerBase::GetItemIds(WebRtc_Word32* channelsArray, - WebRtc_Word32& numOfChannels) const +void ChannelManagerBase::GetItemIds(int32_t* channelsArray, + int32_t& numOfChannels) const { MapItem* it = _items.First(); numOfChannels = (numOfChannels <= _items.Size()) ? diff --git a/webrtc/voice_engine/channel_manager_base.h b/webrtc/voice_engine/channel_manager_base.h index b74934d51..5bf0b7509 100644 --- a/webrtc/voice_engine/channel_manager_base.h +++ b/webrtc/voice_engine/channel_manager_base.h @@ -29,13 +29,13 @@ class Channel; class ChannelManagerBase { protected: - bool CreateItem(WebRtc_Word32& itemId); + bool CreateItem(int32_t& itemId); - void InsertItem(WebRtc_Word32 itemId, void* item); + void InsertItem(int32_t itemId, void* item); - void* RemoveItem(WebRtc_Word32 itemId); + void* RemoveItem(int32_t itemId); - void* GetItem(WebRtc_Word32 itemId) const; + void* GetItem(int32_t itemId) const; void* GetFirstItem(void*& iterator) const ; @@ -43,24 +43,24 @@ protected: void ReleaseItem(); - void AddFreeItemId(WebRtc_Word32 itemId); + void AddFreeItemId(int32_t itemId); - bool GetFreeItemId(WebRtc_Word32& itemId); + bool GetFreeItemId(int32_t& itemId); void RemoveFreeItemIds(); void DestroyAllItems(); - WebRtc_Word32 NumOfItems() const; + int32_t NumOfItems() const; - WebRtc_Word32 MaxNumOfItems() const; + int32_t MaxNumOfItems() const; - void GetItemIds(WebRtc_Word32* channelsArray, - WebRtc_Word32& numOfChannels) const; + void GetItemIds(int32_t* channelsArray, + int32_t& numOfChannels) const; void GetChannels(MapWrapper& channels) const; - virtual void* NewItem(WebRtc_Word32 itemId) = 0; + virtual void* NewItem(int32_t itemId) = 0; virtual void DeleteItem(void* item) = 0; diff --git a/webrtc/voice_engine/dtmf_inband.cc b/webrtc/voice_engine/dtmf_inband.cc index 689bc543d..c68970957 100644 --- a/webrtc/voice_engine/dtmf_inband.cc +++ b/webrtc/voice_engine/dtmf_inband.cc @@ -16,43 +16,43 @@ namespace webrtc { -const WebRtc_Word16 Dtmf_a_times2Tab8Khz[8]= +const int16_t Dtmf_a_times2Tab8Khz[8]= { 27978, 26956, 25701, 24219, 19073, 16325, 13085, 9314 }; -const WebRtc_Word16 Dtmf_a_times2Tab16Khz[8]= +const int16_t Dtmf_a_times2Tab16Khz[8]= { 31548, 31281, 30951, 30556, 29144, 28361, 27409, 26258 }; -const WebRtc_Word16 Dtmf_a_times2Tab32Khz[8]= +const int16_t Dtmf_a_times2Tab32Khz[8]= { 32462,32394, 32311, 32210, 31849, 31647, 31400, 31098 }; // Second table is sin(2*pi*f/fs) in Q14 -const WebRtc_Word16 Dtmf_ym2Tab8Khz[8]= +const int16_t Dtmf_ym2Tab8Khz[8]= { 8527, 9315, 10163, 11036, 13322, 14206, 15021, 15708 }; -const WebRtc_Word16 Dtmf_ym2Tab16Khz[8]= +const int16_t Dtmf_ym2Tab16Khz[8]= { 4429, 4879, 5380, 5918, 7490, 8207, 8979, 9801 }; -const WebRtc_Word16 Dtmf_ym2Tab32Khz[8]= +const int16_t Dtmf_ym2Tab32Khz[8]= { 2235, 2468, 2728, 3010, 3853, 4249, 4685, 5164 }; -const WebRtc_Word16 Dtmf_dBm0kHz[37]= +const int16_t Dtmf_dBm0kHz[37]= { 16141, 14386, 12821, 11427, 10184, 9077, 8090, 7210, 6426, 5727, 5104, 4549, @@ -64,7 +64,7 @@ const WebRtc_Word16 Dtmf_dBm0kHz[37]= }; -DtmfInband::DtmfInband(const WebRtc_Word32 id) : 
+DtmfInband::DtmfInband(const int32_t id) : _critSect(*CriticalSectionWrapper::CreateCriticalSection()), _id(id), _outputFrequencyHz(8000), @@ -87,7 +87,7 @@ DtmfInband::~DtmfInband() } int -DtmfInband::SetSampleRate(const WebRtc_UWord16 frequency) +DtmfInband::SetSampleRate(const uint16_t frequency) { if (frequency != 8000 && frequency != 16000 && @@ -102,7 +102,7 @@ DtmfInband::SetSampleRate(const WebRtc_UWord16 frequency) } int -DtmfInband::GetSampleRate(WebRtc_UWord16& frequency) +DtmfInband::GetSampleRate(uint16_t& frequency) { frequency = _outputFrequencyHz; return 0; @@ -125,9 +125,9 @@ DtmfInband::Init() } int -DtmfInband::AddTone(const WebRtc_UWord8 eventCode, - WebRtc_Word32 lengthMs, - WebRtc_Word32 attenuationDb) +DtmfInband::AddTone(const uint8_t eventCode, + int32_t lengthMs, + int32_t attenuationDb) { CriticalSectionScoped lock(&_critSect); @@ -145,10 +145,10 @@ DtmfInband::AddTone(const WebRtc_UWord8 eventCode, ReInit(); - _frameLengthSamples = static_cast (_outputFrequencyHz / 100); - _eventCode = static_cast (eventCode); - _attenuationDb = static_cast (attenuationDb); - _remainingSamples = static_cast + _frameLengthSamples = static_cast (_outputFrequencyHz / 100); + _eventCode = static_cast (eventCode); + _attenuationDb = static_cast (attenuationDb); + _remainingSamples = static_cast (lengthMs * (_outputFrequencyHz / 1000)); _lengthMs = lengthMs; @@ -162,16 +162,16 @@ DtmfInband::ResetTone() ReInit(); - _frameLengthSamples = static_cast (_outputFrequencyHz / 100); - _remainingSamples = static_cast + _frameLengthSamples = static_cast (_outputFrequencyHz / 100); + _remainingSamples = static_cast (_lengthMs * (_outputFrequencyHz / 1000)); return 0; } int -DtmfInband::StartTone(const WebRtc_UWord8 eventCode, - WebRtc_Word32 attenuationDb) +DtmfInband::StartTone(const uint8_t eventCode, + int32_t attenuationDb) { CriticalSectionScoped lock(&_critSect); @@ -188,9 +188,9 @@ DtmfInband::StartTone(const WebRtc_UWord8 eventCode, ReInit(); - _frameLengthSamples = static_cast (_outputFrequencyHz / 100); - _eventCode = static_cast (eventCode); - _attenuationDb = static_cast (attenuationDb); + _frameLengthSamples = static_cast (_outputFrequencyHz / 100); + _eventCode = static_cast (eventCode); + _attenuationDb = static_cast (attenuationDb); _playing = true; return 0; @@ -226,8 +226,8 @@ DtmfInband::IsAddingTone() } int -DtmfInband::Get10msTone(WebRtc_Word16 output[320], - WebRtc_UWord16& outputSizeInSamples) +DtmfInband::Get10msTone(int16_t output[320], + uint16_t& outputSizeInSamples) { CriticalSectionScoped lock(&_critSect); if (DtmfFix_generate(output, @@ -255,22 +255,22 @@ DtmfInband::UpdateDelaySinceLastTone() } } -WebRtc_UWord32 +uint32_t DtmfInband::DelaySinceLastTone() const { return _delaySinceLastToneMS; } -WebRtc_Word16 -DtmfInband::DtmfFix_generate(WebRtc_Word16 *decoded, - const WebRtc_Word16 value, - const WebRtc_Word16 volume, - const WebRtc_Word16 frameLen, - const WebRtc_Word16 fs) +int16_t +DtmfInband::DtmfFix_generate(int16_t *decoded, + const int16_t value, + const int16_t volume, + const int16_t frameLen, + const int16_t fs) { - const WebRtc_Word16 *a_times2Tbl; - const WebRtc_Word16 *y2_Table; - WebRtc_Word16 a1_times2 = 0, a2_times2 = 0; + const int16_t *a_times2Tbl; + const int16_t *y2_Table; + int16_t a1_times2 = 0, a2_times2 = 0; if (fs==8000) { a_times2Tbl=Dtmf_a_times2Tab8Khz; @@ -347,24 +347,24 @@ DtmfInband::DtmfFix_generate(WebRtc_Word16 *decoded, frameLen)); } -WebRtc_Word16 -DtmfInband::DtmfFix_generateSignal(const WebRtc_Word16 a1_times2, - const 
WebRtc_Word16 a2_times2, - const WebRtc_Word16 volume, - WebRtc_Word16 *signal, - const WebRtc_Word16 length) +int16_t +DtmfInband::DtmfFix_generateSignal(const int16_t a1_times2, + const int16_t a2_times2, + const int16_t volume, + int16_t *signal, + const int16_t length) { int i; /* Generate Signal */ for (i=0;i<length;i++) { - tempValLow = (WebRtc_Word16)(((( (WebRtc_Word32)(a1_times2 * + tempValLow = (int16_t)(((( (int32_t)(a1_times2 * _oldOutputLow[1])) + 8192) >> 14) - _oldOutputLow[0]); - tempValHigh = (WebRtc_Word16)(((( (WebRtc_Word32)(a2_times2 * + tempValHigh = (int16_t)(((( (int32_t)(a2_times2 * _oldOutputHigh[1])) + 8192) >> 14) - _oldOutputHigh[0]); /* Update memory */ @@ -373,14 +373,14 @@ DtmfInband::DtmfFix_generateSignal(const WebRtc_Word16 a1_times2, _oldOutputHigh[0]=_oldOutputHigh[1]; _oldOutputHigh[1]=tempValHigh; - tempVal = (WebRtc_Word32)(kDtmfAmpLow * tempValLow) + - (WebRtc_Word32)(kDtmfAmpHigh * tempValHigh); + tempVal = (int32_t)(kDtmfAmpLow * tempValLow) + + (int32_t)(kDtmfAmpHigh * tempValHigh); /* Norm the signal to Q14 */ tempVal=(tempVal+16384)>>15; /* Scale the signal to correct dbM0 value */ - signal[i]=(WebRtc_Word16)((tempVal*Dtmf_dBm0kHz[volume]+8192)>>14); + signal[i]=(int16_t)((tempVal*Dtmf_dBm0kHz[volume]+8192)>>14); } return(0); diff --git a/webrtc/voice_engine/dtmf_inband.h b/webrtc/voice_engine/dtmf_inband.h index 806fff06e..c0a431c4d 100644 --- a/webrtc/voice_engine/dtmf_inband.h +++ b/webrtc/voice_engine/dtmf_inband.h @@ -24,68 +24,66 @@ class CriticalSectionWrapper; class DtmfInband { public: - DtmfInband(const WebRtc_Word32 id); + DtmfInband(const int32_t id); virtual ~DtmfInband(); void Init(); - int SetSampleRate(const WebRtc_UWord16 frequency); + int SetSampleRate(const uint16_t frequency); - int GetSampleRate(WebRtc_UWord16& frequency); + int GetSampleRate(uint16_t& frequency); - int AddTone(const WebRtc_UWord8 eventCode, - WebRtc_Word32 lengthMs, - WebRtc_Word32 attenuationDb); + int AddTone(const uint8_t eventCode, + int32_t lengthMs, + int32_t attenuationDb); int ResetTone(); - int StartTone(const WebRtc_UWord8 eventCode, - WebRtc_Word32 attenuationDb); + int StartTone(const uint8_t eventCode, int32_t attenuationDb); int StopTone(); bool IsAddingTone(); - int Get10msTone(WebRtc_Word16 output[320], - WebRtc_UWord16& outputSizeInSamples); + int Get10msTone(int16_t output[320], uint16_t& outputSizeInSamples); - WebRtc_UWord32 DelaySinceLastTone() const; + uint32_t DelaySinceLastTone() const; void UpdateDelaySinceLastTone(); private: void ReInit(); - WebRtc_Word16 DtmfFix_generate(WebRtc_Word16* decoded, - const WebRtc_Word16 value, - const WebRtc_Word16 volume, - const WebRtc_Word16 frameLen, - const WebRtc_Word16 fs); + int16_t DtmfFix_generate(int16_t* decoded, + const int16_t value, + const int16_t volume, + const int16_t frameLen, + const int16_t fs); private: enum {kDtmfFrameSizeMs = 10}; enum {kDtmfAmpHigh = 32768}; enum {kDtmfAmpLow = 23171}; // 3 dB lower than the high frequency - WebRtc_Word16 DtmfFix_generateSignal(const WebRtc_Word16 a1_times2, - const WebRtc_Word16 a2_times2, - const WebRtc_Word16 volume, - WebRtc_Word16* signal, - const WebRtc_Word16 length); + int16_t DtmfFix_generateSignal(const int16_t a1_times2, + const int16_t a2_times2, + const int16_t volume, + int16_t* signal, + const int16_t length); private: CriticalSectionWrapper& _critSect; - WebRtc_Word32 _id; - WebRtc_UWord16 _outputFrequencyHz; // {8000, 16000, 32000} - WebRtc_Word16 _oldOutputLow[2]; // Data needed for oscillator model - WebRtc_Word16 _oldOutputHigh[2]; // Data needed for oscillator model - WebRtc_Word16 _frameLengthSamples; // {80, 160, 320} - WebRtc_Word32 _remainingSamples; - WebRtc_Word16
_eventCode; // [0, 15] - WebRtc_Word16 _attenuationDb; // [0, 36] - WebRtc_Word32 _lengthMs; + int32_t _id; + uint16_t _outputFrequencyHz; // {8000, 16000, 32000} + int16_t _oldOutputLow[2]; // Data needed for oscillator model + int16_t _oldOutputHigh[2]; // Data needed for oscillator model + int16_t _frameLengthSamples; // {80, 160, 320} + int32_t _remainingSamples; + int16_t _eventCode; // [0, 15] + int16_t _attenuationDb; // [0, 36] + int32_t _lengthMs; bool _reinit; // 'true' if the oscillator should be reinit for next event bool _playing; - WebRtc_UWord32 _delaySinceLastToneMS; // time since last generated tone [ms] + uint32_t _delaySinceLastToneMS; // time since last generated tone [ms] }; } // namespace webrtc diff --git a/webrtc/voice_engine/dtmf_inband_queue.cc b/webrtc/voice_engine/dtmf_inband_queue.cc index b81d8273c..fa1dce134 100644 --- a/webrtc/voice_engine/dtmf_inband_queue.cc +++ b/webrtc/voice_engine/dtmf_inband_queue.cc @@ -13,7 +13,7 @@ namespace webrtc { -DtmfInbandQueue::DtmfInbandQueue(const WebRtc_Word32 id): +DtmfInbandQueue::DtmfInbandQueue(const int32_t id): _id(id), _DtmfCritsect(*CriticalSectionWrapper::CreateCriticalSection()), _nextEmptyIndex(0) @@ -29,9 +29,7 @@ DtmfInbandQueue::~DtmfInbandQueue() } int -DtmfInbandQueue::AddDtmf(WebRtc_UWord8 key, - WebRtc_UWord16 len, - WebRtc_UWord8 level) +DtmfInbandQueue::AddDtmf(uint8_t key, uint16_t len, uint8_t level) { CriticalSectionScoped lock(&_DtmfCritsect); @@ -41,7 +39,7 @@ DtmfInbandQueue::AddDtmf(WebRtc_UWord8 key, "DtmfInbandQueue::AddDtmf() unable to add Dtmf tone"); return -1; } - WebRtc_Word32 index = _nextEmptyIndex; + int32_t index = _nextEmptyIndex; _DtmfKey[index] = key; _DtmfLen[index] = len; _DtmfLevel[index] = level; @@ -49,8 +47,8 @@ DtmfInbandQueue::AddDtmf(WebRtc_UWord8 key, return 0; } -WebRtc_Word8 -DtmfInbandQueue::NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level) +int8_t +DtmfInbandQueue::NextDtmf(uint16_t* len, uint8_t* level) { CriticalSectionScoped lock(&_DtmfCritsect); @@ -58,16 +56,16 @@ DtmfInbandQueue::NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level) { return -1; } - WebRtc_Word8 nextDtmf = _DtmfKey[0]; + int8_t nextDtmf = _DtmfKey[0]; *len=_DtmfLen[0]; *level=_DtmfLevel[0]; memmove(&(_DtmfKey[0]), &(_DtmfKey[1]), - _nextEmptyIndex*sizeof(WebRtc_UWord8)); + _nextEmptyIndex*sizeof(uint8_t)); memmove(&(_DtmfLen[0]), &(_DtmfLen[1]), - _nextEmptyIndex*sizeof(WebRtc_UWord16)); + _nextEmptyIndex*sizeof(uint16_t)); memmove(&(_DtmfLevel[0]), &(_DtmfLevel[1]), - _nextEmptyIndex*sizeof(WebRtc_UWord8)); + _nextEmptyIndex*sizeof(uint8_t)); _nextEmptyIndex--; return nextDtmf; diff --git a/webrtc/voice_engine/dtmf_inband_queue.h b/webrtc/voice_engine/dtmf_inband_queue.h index b3bd39e87..6a65c9e0a 100644 --- a/webrtc/voice_engine/dtmf_inband_queue.h +++ b/webrtc/voice_engine/dtmf_inband_queue.h @@ -22,15 +22,13 @@ class DtmfInbandQueue { public: - DtmfInbandQueue(const WebRtc_Word32 id); + DtmfInbandQueue(const int32_t id); virtual ~DtmfInbandQueue(); - int AddDtmf(WebRtc_UWord8 DtmfKey, - WebRtc_UWord16 len, - WebRtc_UWord8 level); + int AddDtmf(uint8_t DtmfKey, uint16_t len, uint8_t level); - WebRtc_Word8 NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level); + int8_t NextDtmf(uint16_t* len, uint8_t* level); bool PendingDtmf(); @@ -39,12 +37,12 @@ public: private: enum {kDtmfInbandMax = 20}; - WebRtc_Word32 _id; + int32_t _id; CriticalSectionWrapper& _DtmfCritsect; - WebRtc_UWord8 _nextEmptyIndex; - WebRtc_UWord8 _DtmfKey[kDtmfInbandMax]; - WebRtc_UWord16 _DtmfLen[kDtmfInbandMax]; - 
WebRtc_UWord8 _DtmfLevel[kDtmfInbandMax]; + uint8_t _nextEmptyIndex; + uint8_t _DtmfKey[kDtmfInbandMax]; + uint16_t _DtmfLen[kDtmfInbandMax]; + uint8_t _DtmfLevel[kDtmfInbandMax]; }; } // namespace webrtc diff --git a/webrtc/voice_engine/include/mock/fake_voe_external_media.h b/webrtc/voice_engine/include/mock/fake_voe_external_media.h index 7ee80c880..f45e1ba0f 100644 --- a/webrtc/voice_engine/include/mock/fake_voe_external_media.h +++ b/webrtc/voice_engine/include/mock/fake_voe_external_media.h @@ -38,10 +38,10 @@ class FakeVoEExternalMedia : public VoEExternalMedia { WEBRTC_STUB(SetExternalRecordingStatus, (bool enable)); WEBRTC_STUB(SetExternalPlayoutStatus, (bool enable)); WEBRTC_STUB(ExternalRecordingInsertData, - (const WebRtc_Word16 speechData10ms[], int lengthSamples, + (const int16_t speechData10ms[], int lengthSamples, int samplingFreqHz, int current_delay_ms)); WEBRTC_STUB(ExternalPlayoutGetData, - (WebRtc_Word16 speechData10ms[], int samplingFreqHz, + (int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples)); WEBRTC_STUB(GetAudioFrame, (int channel, int desired_sample_rate_hz, AudioFrame* frame)); diff --git a/webrtc/voice_engine/include/voe_external_media.h b/webrtc/voice_engine/include/voe_external_media.h index 5f576d41e..d4aa8f078 100644 --- a/webrtc/voice_engine/include/voe_external_media.h +++ b/webrtc/voice_engine/include/voe_external_media.h @@ -52,7 +52,7 @@ public: // If |isStereo| is true, audio10ms will contain 16-bit PCM data // samples in interleaved stereo format (L0,R0,L1,R1,...). virtual void Process(const int channel, const ProcessingTypes type, - WebRtc_Word16 audio10ms[], const int length, + int16_t audio10ms[], const int length, const int samplingFreq, const bool isStereo) = 0; protected: @@ -93,7 +93,7 @@ public: // this method should be called at as regular an interval as possible // with frames of corresponding size. virtual int ExternalRecordingInsertData( - const WebRtc_Word16 speechData10ms[], int lengthSamples, + const int16_t speechData10ms[], int lengthSamples, int samplingFreqHz, int current_delay_ms) = 0; // This function gets audio for an external playout sink. @@ -102,7 +102,7 @@ public: // be 160, 320, 440 or 480 samples (for 16, 32, 44 or 48 kHz sampling // rates respectively). virtual int ExternalPlayoutGetData( - WebRtc_Word16 speechData10ms[], int samplingFreqHz, + int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples) = 0; // Pulls an audio frame from the specified |channel| for external mixing. diff --git a/webrtc/voice_engine/level_indicator.cc b/webrtc/voice_engine/level_indicator.cc index a84cc3a3a..cc73cb065 100644 --- a/webrtc/voice_engine/level_indicator.cc +++ b/webrtc/voice_engine/level_indicator.cc @@ -20,7 +20,7 @@ namespace voe { // Number of bars on the indicator. 
// Note that the number of elements is specified because we are indexing it // in the range of 0-32 -const WebRtc_Word8 permutation[33] = +const int8_t permutation[33] = {0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9}; @@ -47,7 +47,7 @@ void AudioLevel::Clear() void AudioLevel::ComputeLevel(const AudioFrame& audioFrame) { - WebRtc_Word16 absValue(0); + int16_t absValue(0); // Check speech level (works for 2 channels as well) absValue = WebRtcSpl_MaxAbsValueW16( @@ -68,10 +68,10 @@ void AudioLevel::ComputeLevel(const AudioFrame& audioFrame) _count = 0; - // Highest value for a WebRtc_Word16 is 0x7fff = 32767 + // Highest value for a int16_t is 0x7fff = 32767 // Divide with 1000 to get in the range of 0-32 which is the range of // the permutation vector - WebRtc_Word32 position = _absMax/1000; + int32_t position = _absMax/1000; // Make it less likely that the bar stays at position 0. I.e. only if // its in the range 0-250 (instead of 0-1000) @@ -86,13 +86,13 @@ void AudioLevel::ComputeLevel(const AudioFrame& audioFrame) } } -WebRtc_Word8 AudioLevel::Level() const +int8_t AudioLevel::Level() const { CriticalSectionScoped cs(&_critSect); return _currentLevel; } -WebRtc_Word16 AudioLevel::LevelFullRange() const +int16_t AudioLevel::LevelFullRange() const { CriticalSectionScoped cs(&_critSect); return _currentLevelFullRange; diff --git a/webrtc/voice_engine/level_indicator.h b/webrtc/voice_engine/level_indicator.h index 2041b1bbb..76fd8d54b 100644 --- a/webrtc/voice_engine/level_indicator.h +++ b/webrtc/voice_engine/level_indicator.h @@ -28,8 +28,8 @@ public: // Called on "API thread(s)" from APIs like VoEBase::CreateChannel(), // VoEBase::StopSend(), VoEVolumeControl::GetSpeechOutputLevel(). - WebRtc_Word8 Level() const; - WebRtc_Word16 LevelFullRange() const; + int8_t Level() const; + int16_t LevelFullRange() const; void Clear(); // Called on a native capture audio thread (platform dependent) from the @@ -42,10 +42,10 @@ private: CriticalSectionWrapper& _critSect; - WebRtc_Word16 _absMax; - WebRtc_Word16 _count; - WebRtc_Word8 _currentLevel; - WebRtc_Word16 _currentLevelFullRange; + int16_t _absMax; + int16_t _count; + int8_t _currentLevel; + int16_t _currentLevelFullRange; }; } // namespace voe diff --git a/webrtc/voice_engine/monitor_module.cc b/webrtc/voice_engine/monitor_module.cc index a69c2b615..08fe58017 100644 --- a/webrtc/voice_engine/monitor_module.cc +++ b/webrtc/voice_engine/monitor_module.cc @@ -28,7 +28,7 @@ MonitorModule::~MonitorModule() delete &_callbackCritSect; } -WebRtc_Word32 +int32_t MonitorModule::RegisterObserver(MonitorObserver& observer) { CriticalSectionScoped lock(&_callbackCritSect); @@ -40,7 +40,7 @@ MonitorModule::RegisterObserver(MonitorObserver& observer) return 0; } -WebRtc_Word32 +int32_t MonitorModule::DeRegisterObserver() { CriticalSectionScoped lock(&_callbackCritSect); @@ -52,30 +52,30 @@ MonitorModule::DeRegisterObserver() return 0; } -WebRtc_Word32 +int32_t MonitorModule::Version(char* version, - WebRtc_UWord32& remainingBufferInBytes, - WebRtc_UWord32& position) const + uint32_t& remainingBufferInBytes, + uint32_t& position) const { return 0; } -WebRtc_Word32 -MonitorModule::ChangeUniqueId(const WebRtc_Word32 id) +int32_t +MonitorModule::ChangeUniqueId(const int32_t id) { return 0; } -WebRtc_Word32 +int32_t MonitorModule::TimeUntilNextProcess() { - WebRtc_UWord32 now = TickTime::MillisecondTimestamp(); - WebRtc_Word32 timeToNext = + uint32_t now = TickTime::MillisecondTimestamp(); + int32_t timeToNext = kAverageProcessUpdateTimeMs - 
(now - _lastProcessTime); return (timeToNext); } -WebRtc_Word32 +int32_t MonitorModule::Process() { _lastProcessTime = TickTime::MillisecondTimestamp(); diff --git a/webrtc/voice_engine/monitor_module.h b/webrtc/voice_engine/monitor_module.h index 7612c3c87..e381b7c1f 100644 --- a/webrtc/voice_engine/monitor_module.h +++ b/webrtc/voice_engine/monitor_module.h @@ -32,28 +32,28 @@ namespace voe { class MonitorModule : public Module { public: - WebRtc_Word32 RegisterObserver(MonitorObserver& observer); + int32_t RegisterObserver(MonitorObserver& observer); - WebRtc_Word32 DeRegisterObserver(); + int32_t DeRegisterObserver(); MonitorModule(); virtual ~MonitorModule(); public: // module - WebRtc_Word32 Version(char* version, - WebRtc_UWord32& remainingBufferInBytes, - WebRtc_UWord32& position) const; + int32_t Version(char* version, + uint32_t& remainingBufferInBytes, + uint32_t& position) const; - WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id); + int32_t ChangeUniqueId(const int32_t id); - WebRtc_Word32 TimeUntilNextProcess(); + int32_t TimeUntilNextProcess(); - WebRtc_Word32 Process(); + int32_t Process(); private: enum { kAverageProcessUpdateTimeMs = 1000 }; MonitorObserver* _observerPtr; CriticalSectionWrapper& _callbackCritSect; - WebRtc_Word32 _lastProcessTime; + int32_t _lastProcessTime; }; } // namespace voe diff --git a/webrtc/voice_engine/output_mixer.cc b/webrtc/voice_engine/output_mixer.cc index 36a9da734..a1245649f 100644 --- a/webrtc/voice_engine/output_mixer.cc +++ b/webrtc/voice_engine/output_mixer.cc @@ -24,10 +24,10 @@ namespace webrtc { namespace voe { void -OutputMixer::NewMixedAudio(const WebRtc_Word32 id, +OutputMixer::NewMixedAudio(const int32_t id, const AudioFrame& generalAudioFrame, const AudioFrame** uniqueAudioFrames, - const WebRtc_UWord32 size) + const uint32_t size) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size); @@ -37,33 +37,29 @@ OutputMixer::NewMixedAudio(const WebRtc_Word32 id, } void OutputMixer::MixedParticipants( - const WebRtc_Word32 id, + const int32_t id, const ParticipantStatistics* participantStatistics, - const WebRtc_UWord32 size) + const uint32_t size) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::MixedParticipants(id=%d, size=%u)", id, size); } -void OutputMixer::VADPositiveParticipants( - const WebRtc_Word32 id, - const ParticipantStatistics* participantStatistics, - const WebRtc_UWord32 size) +void OutputMixer::VADPositiveParticipants(const int32_t id, + const ParticipantStatistics* participantStatistics, const uint32_t size) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::VADPositiveParticipants(id=%d, size=%u)", id, size); } -void OutputMixer::MixedAudioLevel(const WebRtc_Word32 id, - const WebRtc_UWord32 level) +void OutputMixer::MixedAudioLevel(const int32_t id, const uint32_t level) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::MixedAudioLevel(id=%d, level=%u)", id, level); } -void OutputMixer::PlayNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs) +void OutputMixer::PlayNotification(const int32_t id, const uint32_t durationMs) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::PlayNotification(id=%d, durationMs=%d)", @@ -71,8 +67,8 @@ void OutputMixer::PlayNotification(const WebRtc_Word32 id, // Not implement yet } -void OutputMixer::RecordNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs) +void 
OutputMixer::RecordNotification(const int32_t id, + const uint32_t durationMs) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::RecordNotification(id=%d, durationMs=%d)", @@ -81,7 +77,7 @@ void OutputMixer::RecordNotification(const WebRtc_Word32 id, // Not implement yet } -void OutputMixer::PlayFileEnded(const WebRtc_Word32 id) +void OutputMixer::PlayFileEnded(const int32_t id) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::PlayFileEnded(id=%d)", id); @@ -89,7 +85,7 @@ void OutputMixer::PlayFileEnded(const WebRtc_Word32 id) // not needed } -void OutputMixer::RecordFileEnded(const WebRtc_Word32 id) +void OutputMixer::RecordFileEnded(const int32_t id) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::RecordFileEnded(id=%d)", id); @@ -102,8 +98,8 @@ void OutputMixer::RecordFileEnded(const WebRtc_Word32 id) "output file recorder module is shutdown"); } -WebRtc_Word32 -OutputMixer::Create(OutputMixer*& mixer, const WebRtc_UWord32 instanceId) +int32_t +OutputMixer::Create(OutputMixer*& mixer, const uint32_t instanceId) { WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId, "OutputMixer::Create(instanceId=%d)", instanceId); @@ -118,7 +114,7 @@ OutputMixer::Create(OutputMixer*& mixer, const WebRtc_UWord32 instanceId) return 0; } -OutputMixer::OutputMixer(const WebRtc_UWord32 instanceId) : +OutputMixer::OutputMixer(const uint32_t instanceId) : _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()), _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()), _mixerModule(*AudioConferenceMixer::Create(instanceId)), @@ -182,7 +178,7 @@ OutputMixer::~OutputMixer() delete &_fileCritSect; } -WebRtc_Word32 +int32_t OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1), @@ -191,9 +187,8 @@ OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics) return 0; } -WebRtc_Word32 -OutputMixer::SetAudioProcessingModule( - AudioProcessing* audioProcessingModule) +int32_t +OutputMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1), "OutputMixer::SetAudioProcessingModule(" @@ -227,7 +222,7 @@ int OutputMixer::DeRegisterExternalMediaProcessing() return 0; } -int OutputMixer::PlayDtmfTone(WebRtc_UWord8 eventCode, int lengthMs, +int OutputMixer::PlayDtmfTone(uint8_t eventCode, int lengthMs, int attenuationDb) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), @@ -242,7 +237,7 @@ int OutputMixer::PlayDtmfTone(WebRtc_UWord8 eventCode, int lengthMs, return 0; } -int OutputMixer::StartPlayingDtmfTone(WebRtc_UWord8 eventCode, +int OutputMixer::StartPlayingDtmfTone(uint8_t eventCode, int attenuationDb) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), @@ -265,41 +260,41 @@ int OutputMixer::StopPlayingDtmfTone() return (_dtmfGenerator.StopTone()); } -WebRtc_Word32 +int32_t OutputMixer::SetMixabilityStatus(MixerParticipant& participant, const bool mixable) { return _mixerModule.SetMixabilityStatus(participant, mixable); } -WebRtc_Word32 +int32_t OutputMixer::SetAnonymousMixabilityStatus(MixerParticipant& participant, const bool mixable) { return _mixerModule.SetAnonymousMixabilityStatus(participant,mixable); } -WebRtc_Word32 +int32_t OutputMixer::MixActiveChannels() { return _mixerModule.Process(); } int -OutputMixer::GetSpeechOutputLevel(WebRtc_UWord32& level) +OutputMixer::GetSpeechOutputLevel(uint32_t& level) { - WebRtc_Word8 
currentLevel = _audioLevel.Level(); - level = static_cast (currentLevel); + int8_t currentLevel = _audioLevel.Level(); + level = static_cast (currentLevel); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1), "GetSpeechOutputLevel() => level=%u", level); return 0; } int -OutputMixer::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) +OutputMixer::GetSpeechOutputLevelFullRange(uint32_t& level) { - WebRtc_Word16 currentLevel = _audioLevel.LevelFullRange(); - level = static_cast (currentLevel); + int16_t currentLevel = _audioLevel.LevelFullRange(); + level = static_cast (currentLevel); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1), "GetSpeechOutputLevelFullRange() => level=%u", level); return 0; @@ -340,7 +335,7 @@ int OutputMixer::StartRecordingPlayout(const char* fileName, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); CodecInst dummyCodec={100,"L16",16000,320,1,320000}; if ((codecInst != NULL) && @@ -421,7 +416,7 @@ int OutputMixer::StartRecordingPlayout(OutStream* stream, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); CodecInst dummyCodec={100,"L16",16000,320,1,320000}; if (codecInst != NULL && codecInst->channels != 1) @@ -536,7 +531,7 @@ int OutputMixer::GetMixedAudio(int sample_rate_hz, return RemixAndResample(_audioFrame, &_resampler, frame); } -WebRtc_Word32 +int32_t OutputMixer::DoOperationsOnCombinedSignal() { if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz) @@ -584,7 +579,7 @@ OutputMixer::DoOperationsOnCombinedSignal() _externalMediaCallbackPtr->Process( -1, kPlaybackAllChannelsMixed, - (WebRtc_Word16*)_audioFrame.data_, + (int16_t*)_audioFrame.data_, _audioFrame.samples_per_channel_, _audioFrame.sample_rate_hz_, isStereo); @@ -619,19 +614,19 @@ void OutputMixer::APMAnalyzeReverseStream() { int OutputMixer::InsertInbandDtmfTone() { - WebRtc_UWord16 sampleRate(0); + uint16_t sampleRate(0); _dtmfGenerator.GetSampleRate(sampleRate); if (sampleRate != _audioFrame.sample_rate_hz_) { // Update sample rate of Dtmf tone since the mixing frequency changed. _dtmfGenerator.SetSampleRate( - (WebRtc_UWord16)(_audioFrame.sample_rate_hz_)); + (uint16_t)(_audioFrame.sample_rate_hz_)); // Reset the tone to be added taking the new sample rate into account. 
_dtmfGenerator.ResetTone(); } - WebRtc_Word16 toneBuffer[320]; - WebRtc_UWord16 toneSamples(0); + int16_t toneBuffer[320]; + uint16_t toneSamples(0); if (_dtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1) { WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), @@ -644,7 +639,7 @@ OutputMixer::InsertInbandDtmfTone() if (_audioFrame.num_channels_ == 1) { // mono - memcpy(_audioFrame.data_, toneBuffer, sizeof(WebRtc_Word16) + memcpy(_audioFrame.data_, toneBuffer, sizeof(int16_t) * toneSamples); } else { diff --git a/webrtc/voice_engine/output_mixer.h b/webrtc/voice_engine/output_mixer.h index 29ca85848..e2ca366bd 100644 --- a/webrtc/voice_engine/output_mixer.h +++ b/webrtc/voice_engine/output_mixer.h @@ -36,14 +36,13 @@ class OutputMixer : public AudioMixerOutputReceiver, public FileCallback { public: - static WebRtc_Word32 Create(OutputMixer*& mixer, - const WebRtc_UWord32 instanceId); + static int32_t Create(OutputMixer*& mixer, const uint32_t instanceId); static void Destroy(OutputMixer*& mixer); - WebRtc_Word32 SetEngineInformation(Statistics& engineStatistics); + int32_t SetEngineInformation(Statistics& engineStatistics); - WebRtc_Word32 SetAudioProcessingModule( + int32_t SetAudioProcessingModule( AudioProcessing* audioProcessingModule); // VoEExternalMedia @@ -53,32 +52,29 @@ public: int DeRegisterExternalMediaProcessing(); // VoEDtmf - int PlayDtmfTone(WebRtc_UWord8 eventCode, - int lengthMs, - int attenuationDb); + int PlayDtmfTone(uint8_t eventCode, int lengthMs, int attenuationDb); - int StartPlayingDtmfTone(WebRtc_UWord8 eventCode, - int attenuationDb); + int StartPlayingDtmfTone(uint8_t eventCode, int attenuationDb); int StopPlayingDtmfTone(); - WebRtc_Word32 MixActiveChannels(); + int32_t MixActiveChannels(); - WebRtc_Word32 DoOperationsOnCombinedSignal(); + int32_t DoOperationsOnCombinedSignal(); - WebRtc_Word32 SetMixabilityStatus(MixerParticipant& participant, - const bool mixable); + int32_t SetMixabilityStatus(MixerParticipant& participant, + const bool mixable); - WebRtc_Word32 SetAnonymousMixabilityStatus(MixerParticipant& participant, - const bool mixable); + int32_t SetAnonymousMixabilityStatus(MixerParticipant& participant, + const bool mixable); int GetMixedAudio(int sample_rate_hz, int num_channels, AudioFrame* audioFrame); // VoEVolumeControl - int GetSpeechOutputLevel(WebRtc_UWord32& level); + int GetSpeechOutputLevel(uint32_t& level); - int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level); + int GetSpeechOutputLevelFullRange(uint32_t& level); int SetOutputVolumePan(float left, float right); @@ -96,37 +92,34 @@ public: // from AudioMixerOutputReceiver virtual void NewMixedAudio( - const WebRtc_Word32 id, + const int32_t id, const AudioFrame& generalAudioFrame, const AudioFrame** uniqueAudioFrames, - const WebRtc_UWord32 size); + const uint32_t size); // from AudioMixerStatusReceiver virtual void MixedParticipants( - const WebRtc_Word32 id, + const int32_t id, const ParticipantStatistics* participantStatistics, - const WebRtc_UWord32 size); + const uint32_t size); virtual void VADPositiveParticipants( - const WebRtc_Word32 id, + const int32_t id, const ParticipantStatistics* participantStatistics, - const WebRtc_UWord32 size); + const uint32_t size); - virtual void MixedAudioLevel(const WebRtc_Word32 id, - const WebRtc_UWord32 level); + virtual void MixedAudioLevel(const int32_t id, const uint32_t level); // For file recording - void PlayNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs); + void PlayNotification(const 
int32_t id, const uint32_t durationMs); - void RecordNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs); + void RecordNotification(const int32_t id, const uint32_t durationMs); - void PlayFileEnded(const WebRtc_Word32 id); - void RecordFileEnded(const WebRtc_Word32 id); + void PlayFileEnded(const int32_t id); + void RecordFileEnded(const int32_t id); private: - OutputMixer(const WebRtc_UWord32 instanceId); + OutputMixer(const uint32_t instanceId); void APMAnalyzeReverseStream(); int InsertInbandDtmfTone(); diff --git a/webrtc/voice_engine/shared_data.cc b/webrtc/voice_engine/shared_data.cc index 69ac8570b..1393eb33a 100644 --- a/webrtc/voice_engine/shared_data.cc +++ b/webrtc/voice_engine/shared_data.cc @@ -21,7 +21,7 @@ namespace webrtc { namespace voe { -static WebRtc_Word32 _gInstanceCounter = 0; +static int32_t _gInstanceCounter = 0; SharedData::SharedData() : _instanceId(++_gInstanceCounter), @@ -76,16 +76,16 @@ void SharedData::set_audio_processing(AudioProcessing* audioproc) { _outputMixerPtr->SetAudioProcessingModule(audioproc); } -WebRtc_UWord16 SharedData::NumOfSendingChannels() +uint16_t SharedData::NumOfSendingChannels() { - WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels(); + int32_t numOfChannels = _channelManager.NumOfChannels(); if (numOfChannels <= 0) { return 0; } - WebRtc_UWord16 nChannelsSending(0); - WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels]; + uint16_t nChannelsSending(0); + int32_t* channelsArray = new int32_t[numOfChannels]; _channelManager.GetChannelIds(channelsArray, numOfChannels); for (int i = 0; i < numOfChannels; i++) @@ -104,16 +104,16 @@ WebRtc_UWord16 SharedData::NumOfSendingChannels() return nChannelsSending; } -void SharedData::SetLastError(const WebRtc_Word32 error) const { +void SharedData::SetLastError(const int32_t error) const { _engineStatistics.SetLastError(error); } -void SharedData::SetLastError(const WebRtc_Word32 error, +void SharedData::SetLastError(const int32_t error, const TraceLevel level) const { _engineStatistics.SetLastError(error, level); } -void SharedData::SetLastError(const WebRtc_Word32 error, const TraceLevel level, +void SharedData::SetLastError(const int32_t error, const TraceLevel level, const char* msg) const { _engineStatistics.SetLastError(error, level, msg); } diff --git a/webrtc/voice_engine/shared_data.h b/webrtc/voice_engine/shared_data.h index cce636869..99cff04cc 100644 --- a/webrtc/voice_engine/shared_data.h +++ b/webrtc/voice_engine/shared_data.h @@ -33,7 +33,7 @@ class SharedData { public: // Public accessors. - WebRtc_UWord32 instance_id() const { return _instanceId; } + uint32_t instance_id() const { return _instanceId; } Statistics& statistics() { return _engineStatistics; } ChannelManager& channel_manager() { return _channelManager; } AudioDeviceModule* audio_device() { return _audioDevicePtr; } @@ -55,16 +55,16 @@ public: _audioDeviceLayer = layer; } - WebRtc_UWord16 NumOfSendingChannels(); + uint16_t NumOfSendingChannels(); // Convenience methods for calling statistics().SetLastError(). 
- void SetLastError(const WebRtc_Word32 error) const; - void SetLastError(const WebRtc_Word32 error, const TraceLevel level) const; - void SetLastError(const WebRtc_Word32 error, const TraceLevel level, + void SetLastError(const int32_t error) const; + void SetLastError(const int32_t error, const TraceLevel level) const; + void SetLastError(const int32_t error, const TraceLevel level, const char* msg) const; protected: - const WebRtc_UWord32 _instanceId; + const uint32_t _instanceId; CriticalSectionWrapper* _apiCritPtr; ChannelManager _channelManager; Statistics _engineStatistics; diff --git a/webrtc/voice_engine/statistics.cc b/webrtc/voice_engine/statistics.cc index 4f1bc7915..8003d6dbf 100644 --- a/webrtc/voice_engine/statistics.cc +++ b/webrtc/voice_engine/statistics.cc @@ -20,7 +20,7 @@ namespace webrtc { namespace voe { -Statistics::Statistics(const WebRtc_UWord32 instanceId) : +Statistics::Statistics(const uint32_t instanceId) : _critPtr(CriticalSectionWrapper::CreateCriticalSection()), _instanceId(instanceId), _lastError(0), @@ -37,13 +37,13 @@ Statistics::~Statistics() } } -WebRtc_Word32 Statistics::SetInitialized() +int32_t Statistics::SetInitialized() { _isInitialized = true; return 0; } -WebRtc_Word32 Statistics::SetUnInitialized() +int32_t Statistics::SetUnInitialized() { _isInitialized = false; return 0; @@ -54,15 +54,15 @@ bool Statistics::Initialized() const return _isInitialized; } -WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error) const +int32_t Statistics::SetLastError(const int32_t error) const { CriticalSectionScoped cs(_critPtr); _lastError = error; return 0; } -WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error, - const TraceLevel level) const +int32_t Statistics::SetLastError(const int32_t error, + const TraceLevel level) const { CriticalSectionScoped cs(_critPtr); _lastError = error; @@ -72,8 +72,8 @@ WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error, return 0; } -WebRtc_Word32 Statistics::SetLastError( - const WebRtc_Word32 error, +int32_t Statistics::SetLastError( + const int32_t error, const TraceLevel level, const char* msg) const { CriticalSectionScoped cs(_critPtr); @@ -86,7 +86,7 @@ WebRtc_Word32 Statistics::SetLastError( return 0; } -WebRtc_Word32 Statistics::LastError() const +int32_t Statistics::LastError() const { CriticalSectionScoped cs(_critPtr); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1), diff --git a/webrtc/voice_engine/statistics.h b/webrtc/voice_engine/statistics.h index fc0bf8c6c..057f26629 100644 --- a/webrtc/voice_engine/statistics.h +++ b/webrtc/voice_engine/statistics.h @@ -26,24 +26,23 @@ class Statistics public: enum {KTraceMaxMessageSize = 256}; public: - Statistics(const WebRtc_UWord32 instanceId); + Statistics(const uint32_t instanceId); ~Statistics(); - WebRtc_Word32 SetInitialized(); - WebRtc_Word32 SetUnInitialized(); + int32_t SetInitialized(); + int32_t SetUnInitialized(); bool Initialized() const; - WebRtc_Word32 SetLastError(const WebRtc_Word32 error) const; - WebRtc_Word32 SetLastError(const WebRtc_Word32 error, - const TraceLevel level) const; - WebRtc_Word32 SetLastError(const WebRtc_Word32 error, - const TraceLevel level, - const char* msg) const; - WebRtc_Word32 LastError() const; + int32_t SetLastError(const int32_t error) const; + int32_t SetLastError(const int32_t error, const TraceLevel level) const; + int32_t SetLastError(const int32_t error, + const TraceLevel level, + const char* msg) const; + int32_t LastError() const; private: CriticalSectionWrapper* 
_critPtr; - const WebRtc_UWord32 _instanceId; - mutable WebRtc_Word32 _lastError; + const uint32_t _instanceId; + mutable int32_t _lastError; bool _isInitialized; }; diff --git a/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h b/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h index 9c45129c0..edd80fc79 100644 --- a/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h +++ b/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h @@ -16,20 +16,20 @@ class FakeMediaProcess : public webrtc::VoEMediaProcess { public: virtual void Process(const int channel, const webrtc::ProcessingTypes type, - WebRtc_Word16 audio_10ms[], + int16_t audio_10ms[], const int length, const int sampling_freq_hz, const bool stereo) { for (int i = 0; i < length; i++) { if (!stereo) { - audio_10ms[i] = static_cast(audio_10ms[i] * + audio_10ms[i] = static_cast(audio_10ms[i] * sin(2.0 * 3.14 * frequency * 400.0 / sampling_freq_hz)); } else { // Interleaved stereo. - audio_10ms[2 * i] = static_cast ( + audio_10ms[2 * i] = static_cast ( audio_10ms[2 * i] * sin(2.0 * 3.14 * frequency * 400.0 / sampling_freq_hz)); - audio_10ms[2 * i + 1] = static_cast ( + audio_10ms[2 * i + 1] = static_cast ( audio_10ms[2 * i + 1] * sin(2.0 * 3.14 * frequency * 400.0 / sampling_freq_hz)); } diff --git a/webrtc/voice_engine/test/auto_test/voe_extended_test.cc b/webrtc/voice_engine/test/auto_test/voe_extended_test.cc index 84d7c2b38..209ff205b 100644 --- a/webrtc/voice_engine/test/auto_test/voe_extended_test.cc +++ b/webrtc/voice_engine/test/auto_test/voe_extended_test.cc @@ -2986,7 +2986,7 @@ int VoEExtendedTest::TestExternalMedia() { TEST_MUSTPASS(voe_base_->StartSend(0)); int getLen = 0; - WebRtc_Word16 vector[32000]; + int16_t vector[32000]; memset(vector, 0, 32000 * sizeof(short)); #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT diff --git a/webrtc/voice_engine/test/win_test/WinTestDlg.cc b/webrtc/voice_engine/test/win_test/WinTestDlg.cc index 0aaeb51a4..fee292065 100644 --- a/webrtc/voice_engine/test/win_test/WinTestDlg.cc +++ b/webrtc/voice_engine/test/win_test/WinTestDlg.cc @@ -186,7 +186,7 @@ public: MediaProcessImpl(); virtual void Process(const int channel, const ProcessingTypes type, - WebRtc_Word16 audio_10ms[], + int16_t audio_10ms[], const int length, const int samplingFreqHz, const bool stereo); @@ -198,7 +198,7 @@ MediaProcessImpl::MediaProcessImpl() void MediaProcessImpl::Process(const int channel, const ProcessingTypes type, - WebRtc_Word16 audio_10ms[], + int16_t audio_10ms[], const int length, const int samplingFreqHz, const bool stereo) diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc index 0e4717c27..4e6e6d679 100644 --- a/webrtc/voice_engine/transmit_mixer.cc +++ b/webrtc/voice_engine/transmit_mixer.cc @@ -81,8 +81,8 @@ TransmitMixer::OnPeriodicProcess() } -void TransmitMixer::PlayNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs) +void TransmitMixer::PlayNotification(const int32_t id, + const uint32_t durationMs) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), "TransmitMixer::PlayNotification(id=%d, durationMs=%d)", @@ -91,8 +91,8 @@ void TransmitMixer::PlayNotification(const WebRtc_Word32 id, // Not implement yet } -void TransmitMixer::RecordNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs) +void TransmitMixer::RecordNotification(const int32_t id, + const uint32_t durationMs) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1), "TransmitMixer::RecordNotification(id=%d, 
durationMs=%d)", @@ -101,7 +101,7 @@ void TransmitMixer::RecordNotification(const WebRtc_Word32 id, // Not implement yet } -void TransmitMixer::PlayFileEnded(const WebRtc_Word32 id) +void TransmitMixer::PlayFileEnded(const int32_t id) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), "TransmitMixer::PlayFileEnded(id=%d)", id); @@ -117,7 +117,7 @@ void TransmitMixer::PlayFileEnded(const WebRtc_Word32 id) } void -TransmitMixer::RecordFileEnded(const WebRtc_Word32 id) +TransmitMixer::RecordFileEnded(const int32_t id) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), "TransmitMixer::RecordFileEnded(id=%d)", id); @@ -139,8 +139,8 @@ TransmitMixer::RecordFileEnded(const WebRtc_Word32 id) } } -WebRtc_Word32 -TransmitMixer::Create(TransmitMixer*& mixer, const WebRtc_UWord32 instanceId) +int32_t +TransmitMixer::Create(TransmitMixer*& mixer, const uint32_t instanceId) { WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1), "TransmitMixer::Create(instanceId=%d)", instanceId); @@ -165,7 +165,7 @@ TransmitMixer::Destroy(TransmitMixer*& mixer) } } -TransmitMixer::TransmitMixer(const WebRtc_UWord32 instanceId) : +TransmitMixer::TransmitMixer(const uint32_t instanceId) : _engineStatisticsPtr(NULL), _channelManagerPtr(NULL), audioproc_(NULL), @@ -250,7 +250,7 @@ TransmitMixer::~TransmitMixer() delete &_callbackCritSect; } -WebRtc_Word32 +int32_t TransmitMixer::SetEngineInformation(ProcessThread& processThread, Statistics& engineStatistics, ChannelManager& channelManager) @@ -275,7 +275,7 @@ TransmitMixer::SetEngineInformation(ProcessThread& processThread, return 0; } -WebRtc_Word32 +int32_t TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), @@ -293,7 +293,7 @@ TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) return 0; } -WebRtc_Word32 +int32_t TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), @@ -325,14 +325,14 @@ void TransmitMixer::GetSendCodecInfo(int* max_sample_rate, int* max_channels) { } } -WebRtc_Word32 +int32_t TransmitMixer::PrepareDemux(const void* audioSamples, - const WebRtc_UWord32 nSamples, - const WebRtc_UWord8 nChannels, - const WebRtc_UWord32 samplesPerSec, - const WebRtc_UWord16 totalDelayMS, - const WebRtc_Word32 clockDrift, - const WebRtc_UWord16 currentMicLevel) + const uint32_t nSamples, + const uint8_t nChannels, + const uint32_t samplesPerSec, + const uint16_t totalDelayMS, + const int32_t clockDrift, + const uint16_t currentMicLevel) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), "TransmitMixer::PrepareDemux(nSamples=%u, nChannels=%u," @@ -341,7 +341,7 @@ TransmitMixer::PrepareDemux(const void* audioSamples, totalDelayMS, clockDrift, currentMicLevel); // --- Resample input audio and create/store the initial audio frame - if (GenerateAudioFrame(static_cast(audioSamples), + if (GenerateAudioFrame(static_cast(audioSamples), nSamples, nChannels, samplesPerSec) == -1) @@ -417,7 +417,7 @@ TransmitMixer::PrepareDemux(const void* audioSamples, return 0; } -WebRtc_Word32 +int32_t TransmitMixer::DemuxAndMix() { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), @@ -442,7 +442,7 @@ TransmitMixer::DemuxAndMix() return 0; } -WebRtc_Word32 +int32_t TransmitMixer::EncodeAndSend() { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), @@ -462,14 +462,14 @@ TransmitMixer::EncodeAndSend() return 0; } 
-WebRtc_UWord32 TransmitMixer::CaptureLevel() const +uint32_t TransmitMixer::CaptureLevel() const { CriticalSectionScoped cs(&_critSect); return _captureLevel; } void -TransmitMixer::UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs) +TransmitMixer::UpdateMuteMicrophoneTime(const uint32_t lengthMs) { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)", @@ -477,7 +477,7 @@ TransmitMixer::UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs) _remainingMuteMicTimeMs = lengthMs; } -WebRtc_Word32 +int32_t TransmitMixer::StopSend() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), @@ -531,7 +531,7 @@ int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName, return -1; } - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); if (_filePlayerPtr->StartPlayingFile( fileName, @@ -608,7 +608,7 @@ int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream, return -1; } - const WebRtc_UWord32 notificationTime(0); + const uint32_t notificationTime(0); if (_filePlayerPtr->StartPlayingFile( (InStream&) *stream, @@ -713,7 +713,7 @@ int TransmitMixer::StartRecordingMicrophone(const char* fileName, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); // Not supported in VoE + const uint32_t notificationTime(0); // Not supported in VoE CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 }; if (codecInst != NULL && @@ -792,7 +792,7 @@ int TransmitMixer::StartRecordingMicrophone(OutStream* stream, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); // Not supported in VoE + const uint32_t notificationTime(0); // Not supported in VoE CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 }; if (codecInst != NULL && codecInst->channels != 1) @@ -899,7 +899,7 @@ int TransmitMixer::StartRecordingCall(const char* fileName, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); // Not supported in VoE + const uint32_t notificationTime(0); // Not supported in VoE CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 }; if (codecInst != NULL && codecInst->channels != 1) @@ -977,7 +977,7 @@ int TransmitMixer::StartRecordingCall(OutStream* stream, } FileFormats format; - const WebRtc_UWord32 notificationTime(0); // Not supported in VoE + const uint32_t notificationTime(0); // Not supported in VoE CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 }; if (codecInst != NULL && codecInst->channels != 1) @@ -1128,13 +1128,13 @@ TransmitMixer::Mute() const return _mute; } -WebRtc_Word8 TransmitMixer::AudioLevel() const +int8_t TransmitMixer::AudioLevel() const { // Speech + file level [0,9] return _audioLevel.Level(); } -WebRtc_Word16 TransmitMixer::AudioLevelFullRange() const +int16_t TransmitMixer::AudioLevelFullRange() const { // Speech + file level [0,32767] return _audioLevel.LevelFullRange(); @@ -1211,8 +1211,8 @@ int TransmitMixer::GenerateAudioFrame(const int16_t audio[], return 0; } -WebRtc_Word32 TransmitMixer::RecordAudioToFile( - const WebRtc_UWord32 mixingFrequency) +int32_t TransmitMixer::RecordAudioToFile( + const uint32_t mixingFrequency) { CriticalSectionScoped cs(&_critSect); if (_fileRecorderPtr == NULL) @@ -1234,10 +1234,10 @@ WebRtc_Word32 TransmitMixer::RecordAudioToFile( return 0; } -WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile( +int32_t TransmitMixer::MixOrReplaceAudioWithFile( const int mixingFrequency) { - scoped_array fileBuffer(new WebRtc_Word16[640]); + scoped_array fileBuffer(new int16_t[640]); int 
fileSamples(0); { diff --git a/webrtc/voice_engine/transmit_mixer.h b/webrtc/voice_engine/transmit_mixer.h index 7863cd48e..f01362f5d 100644 --- a/webrtc/voice_engine/transmit_mixer.h +++ b/webrtc/voice_engine/transmit_mixer.h @@ -40,37 +40,36 @@ class TransmitMixer : public MonitorObserver, { public: - static WebRtc_Word32 Create(TransmitMixer*& mixer, - const WebRtc_UWord32 instanceId); + static int32_t Create(TransmitMixer*& mixer, const uint32_t instanceId); static void Destroy(TransmitMixer*& mixer); - WebRtc_Word32 SetEngineInformation(ProcessThread& processThread, - Statistics& engineStatistics, - ChannelManager& channelManager); + int32_t SetEngineInformation(ProcessThread& processThread, + Statistics& engineStatistics, + ChannelManager& channelManager); - WebRtc_Word32 SetAudioProcessingModule( + int32_t SetAudioProcessingModule( AudioProcessing* audioProcessingModule); - WebRtc_Word32 PrepareDemux(const void* audioSamples, - const WebRtc_UWord32 nSamples, - const WebRtc_UWord8 nChannels, - const WebRtc_UWord32 samplesPerSec, - const WebRtc_UWord16 totalDelayMS, - const WebRtc_Word32 clockDrift, - const WebRtc_UWord16 currentMicLevel); + int32_t PrepareDemux(const void* audioSamples, + const uint32_t nSamples, + const uint8_t nChannels, + const uint32_t samplesPerSec, + const uint16_t totalDelayMS, + const int32_t clockDrift, + const uint16_t currentMicLevel); - WebRtc_Word32 DemuxAndMix(); + int32_t DemuxAndMix(); - WebRtc_Word32 EncodeAndSend(); + int32_t EncodeAndSend(); - WebRtc_UWord32 CaptureLevel() const; + uint32_t CaptureLevel() const; - WebRtc_Word32 StopSend(); + int32_t StopSend(); // VoEDtmf - void UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs); + void UpdateMuteMicrophoneTime(const uint32_t lengthMs); // VoEExternalMedia int RegisterExternalMediaProcessing(VoEMediaProcess* object, @@ -84,9 +83,9 @@ public: bool Mute() const; - WebRtc_Word8 AudioLevel() const; + int8_t AudioLevel() const; - WebRtc_Word16 AudioLevelFullRange() const; + int16_t AudioLevelFullRange() const; bool IsRecordingCall(); @@ -129,7 +128,7 @@ public: void SetMixWithMicStatus(bool mix); - WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer); + int32_t RegisterVoiceEngineObserver(VoiceEngineObserver& observer); virtual ~TransmitMixer(); @@ -138,15 +137,15 @@ public: // FileCallback - void PlayNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs); + void PlayNotification(const int32_t id, + const uint32_t durationMs); - void RecordNotification(const WebRtc_Word32 id, - const WebRtc_UWord32 durationMs); + void RecordNotification(const int32_t id, + const uint32_t durationMs); - void PlayFileEnded(const WebRtc_Word32 id); + void PlayFileEnded(const int32_t id); - void RecordFileEnded(const WebRtc_Word32 id); + void RecordFileEnded(const int32_t id); #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION // Typing detection @@ -162,7 +161,7 @@ public: bool IsStereoChannelSwappingEnabled(); private: - TransmitMixer(const WebRtc_UWord32 instanceId); + TransmitMixer(const uint32_t instanceId); // Gets the maximum sample rate and number of channels over all currently // sending codecs. 
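The Utility::Mix* helpers changed in the hunks that follow all share one pattern: accumulate in int32_t, clamp to the int16_t range, then narrow. A compact sketch of that pattern, assuming WebRtcSpl_SatW32ToW16 (used by Utility::MixWithSat) behaves like the explicit clamps written out in MixSubtractWithSat and MixAndScaleWithSat:

    #include <stdint.h>

    // Saturating 32-bit -> 16-bit narrowing, mirroring the explicit
    // clamps in the Utility::Mix* helpers below.
    static inline int16_t SatW32ToW16(int32_t value) {
      if (value > 32767) return 32767;
      if (value < -32768) return -32768;
      return static_cast<int16_t>(value);
    }

    // Mix |source| into |target| with saturation, as the mono/mono
    // branch of Utility::MixWithSat does after the int16_t migration.
    static void MixWithSatMono(int16_t target[], const int16_t source[],
                               int len) {
      for (int i = 0; i < len; ++i) {
        const int32_t sum = static_cast<int32_t>(target[i]) + source[i];
        target[i] = SatW32ToW16(sum);
      }
    }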
@@ -172,9 +171,9 @@ private: int nSamples, int nChannels, int samplesPerSec); - WebRtc_Word32 RecordAudioToFile(const WebRtc_UWord32 mixingFrequency); + int32_t RecordAudioToFile(const uint32_t mixingFrequency); - WebRtc_Word32 MixOrReplaceAudioWithFile( + int32_t MixOrReplaceAudioWithFile( const int mixingFrequency); void ProcessAudio(int delay_ms, int clock_drift, int current_mic_level); @@ -209,9 +208,9 @@ private: CriticalSectionWrapper& _callbackCritSect; #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION - WebRtc_Word32 _timeActive; - WebRtc_Word32 _timeSinceLastTyping; - WebRtc_Word32 _penaltyCounter; + int32_t _timeActive; + int32_t _timeSinceLastTyping; + int32_t _penaltyCounter; bool _typingNoiseWarning; // Tunable treshold values @@ -226,11 +225,11 @@ private: int _instanceId; bool _mixFileWithMicrophone; - WebRtc_UWord32 _captureLevel; + uint32_t _captureLevel; VoEMediaProcess* external_postproc_ptr_; VoEMediaProcess* external_preproc_ptr_; bool _mute; - WebRtc_Word32 _remainingMuteMicTimeMs; + int32_t _remainingMuteMicTimeMs; bool stereo_codec_; bool swap_stereo_channels_; }; diff --git a/webrtc/voice_engine/utility.cc b/webrtc/voice_engine/utility.cc index 1ef108ea7..6615caf92 100644 --- a/webrtc/voice_engine/utility.cc +++ b/webrtc/voice_engine/utility.cc @@ -21,9 +21,9 @@ namespace voe { enum{kMaxTargetLen = 2*32*10}; // stereo 32KHz 10ms -void Utility::MixWithSat(WebRtc_Word16 target[], +void Utility::MixWithSat(int16_t target[], int target_channel, - const WebRtc_Word16 source[], + const int16_t source[], int source_channel, int source_len) { @@ -34,8 +34,8 @@ void Utility::MixWithSat(WebRtc_Word16 target[], if ((target_channel == 2) && (source_channel == 1)) { // Convert source from mono to stereo. - WebRtc_Word32 left = 0; - WebRtc_Word32 right = 0; + int32_t left = 0; + int32_t right = 0; for (int i = 0; i < source_len; ++i) { left = source[i] + target[i*2]; right = source[i] + target[i*2 + 1]; @@ -46,7 +46,7 @@ void Utility::MixWithSat(WebRtc_Word16 target[], else if ((target_channel == 1) && (source_channel == 2)) { // Convert source from stereo to mono. 
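
Note: the MixWithSat branches in this utility.cc hunk all follow the same pattern: widen both samples to 32 bits, add, and saturate back into the int16_t range (WebRtcSpl_SatW32ToW16 in the real code). A minimal standalone sketch of that pattern, with an illustrative clamp standing in for the SPL helper and a hypothetical function name:

    #include <stdint.h>

    // Illustrative stand-in for WebRtcSpl_SatW32ToW16: clamp a 32-bit sum
    // back into the 16-bit sample range.
    static int16_t SatW32ToW16(int32_t value) {
        if (value > 32767) return 32767;
        if (value < -32768) return -32768;
        return (int16_t)value;
    }

    // Mono-to-mono saturating mix, as in the final branch of MixWithSat.
    static void MixWithSatMono(int16_t target[], const int16_t source[], int len) {
        for (int i = 0; i < len; ++i) {
            int32_t sum = (int32_t)source[i] + target[i];  // widen before adding
            target[i] = SatW32ToW16(sum);
        }
    }
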
- WebRtc_Word32 temp = 0; + int32_t temp = 0; for (int i = 0; i < source_len/2; ++i) { temp = ((source[i*2] + source[i*2 + 1])>>1) + target[i]; target[i] = WebRtcSpl_SatW32ToW16(temp); @@ -54,7 +54,7 @@ void Utility::MixWithSat(WebRtc_Word16 target[], } else { - WebRtc_Word32 temp = 0; + int32_t temp = 0; for (int i = 0; i < source_len; ++i) { temp = source[i] + target[i]; target[i] = WebRtcSpl_SatW32ToW16(temp); @@ -62,11 +62,11 @@ void Utility::MixWithSat(WebRtc_Word16 target[], } } -void Utility::MixSubtractWithSat(WebRtc_Word16 target[], - const WebRtc_Word16 source[], - WebRtc_UWord16 len) +void Utility::MixSubtractWithSat(int16_t target[], + const int16_t source[], + uint16_t len) { - WebRtc_Word32 temp(0); + int32_t temp(0); for (int i = 0; i < len; i++) { temp = target[i] - source[i]; @@ -75,48 +75,48 @@ void Utility::MixSubtractWithSat(WebRtc_Word16 target[], else if (temp < -32768) target[i] = -32768; else - target[i] = (WebRtc_Word16) temp; + target[i] = (int16_t) temp; } } -void Utility::MixAndScaleWithSat(WebRtc_Word16 target[], - const WebRtc_Word16 source[], float scale, - WebRtc_UWord16 len) +void Utility::MixAndScaleWithSat(int16_t target[], + const int16_t source[], float scale, + uint16_t len) { - WebRtc_Word32 temp(0); + int32_t temp(0); for (int i = 0; i < len; i++) { - temp = (WebRtc_Word32) (target[i] + scale * source[i]); + temp = (int32_t) (target[i] + scale * source[i]); if (temp > 32767) target[i] = 32767; else if (temp < -32768) target[i] = -32768; else - target[i] = (WebRtc_Word16) temp; + target[i] = (int16_t) temp; } } -void Utility::Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len) +void Utility::Scale(int16_t vector[], float scale, uint16_t len) { for (int i = 0; i < len; i++) { - vector[i] = (WebRtc_Word16) (scale * vector[i]); + vector[i] = (int16_t) (scale * vector[i]); } } -void Utility::ScaleWithSat(WebRtc_Word16 vector[], float scale, - WebRtc_UWord16 len) +void Utility::ScaleWithSat(int16_t vector[], float scale, + uint16_t len) { - WebRtc_Word32 temp(0); + int32_t temp(0); for (int i = 0; i < len; i++) { - temp = (WebRtc_Word32) (scale * vector[i]); + temp = (int32_t) (scale * vector[i]); if (temp > 32767) vector[i] = 32767; else if (temp < -32768) vector[i] = -32768; else - vector[i] = (WebRtc_Word16) temp; + vector[i] = (int16_t) temp; } } diff --git a/webrtc/voice_engine/utility.h b/webrtc/voice_engine/utility.h index a8af8bdef..50172378f 100644 --- a/webrtc/voice_engine/utility.h +++ b/webrtc/voice_engine/utility.h @@ -29,26 +29,26 @@ namespace voe class Utility { public: - static void MixWithSat(WebRtc_Word16 target[], + static void MixWithSat(int16_t target[], int target_channel, - const WebRtc_Word16 source[], + const int16_t source[], int source_channel, int source_len); - static void MixSubtractWithSat(WebRtc_Word16 target[], - const WebRtc_Word16 source[], - WebRtc_UWord16 len); + static void MixSubtractWithSat(int16_t target[], + const int16_t source[], + uint16_t len); - static void MixAndScaleWithSat(WebRtc_Word16 target[], - const WebRtc_Word16 source[], + static void MixAndScaleWithSat(int16_t target[], + const int16_t source[], float scale, - WebRtc_UWord16 len); + uint16_t len); - static void Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len); + static void Scale(int16_t vector[], float scale, uint16_t len); - static void ScaleWithSat(WebRtc_Word16 vector[], + static void ScaleWithSat(int16_t vector[], float scale, - WebRtc_UWord16 len); + uint16_t len); }; } // namespace voe diff --git 
a/webrtc/voice_engine/voe_base_impl.cc b/webrtc/voice_engine/voe_base_impl.cc index ee9d78c4a..0aed543d2 100644 --- a/webrtc/voice_engine/voe_base_impl.cc +++ b/webrtc/voice_engine/voe_base_impl.cc @@ -124,16 +124,16 @@ void VoEBaseImpl::OnWarningIsReported(const WarningCode warning) } } -WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable( +int32_t VoEBaseImpl::RecordedDataIsAvailable( const void* audioSamples, - const WebRtc_UWord32 nSamples, - const WebRtc_UWord8 nBytesPerSample, - const WebRtc_UWord8 nChannels, - const WebRtc_UWord32 samplesPerSec, - const WebRtc_UWord32 totalDelayMS, - const WebRtc_Word32 clockDrift, - const WebRtc_UWord32 currentMicLevel, - WebRtc_UWord32& newMicLevel) + const uint32_t nSamples, + const uint8_t nBytesPerSample, + const uint8_t nChannels, + const uint32_t samplesPerSec, + const uint32_t totalDelayMS, + const int32_t clockDrift, + const uint32_t currentMicLevel, + uint32_t& newMicLevel) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEBaseImpl::RecordedDataIsAvailable(nSamples=%u, " @@ -146,9 +146,9 @@ WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable( assert(_shared->audio_device() != NULL); bool isAnalogAGC(false); - WebRtc_UWord32 maxVolume(0); - WebRtc_UWord16 currentVoEMicLevel(0); - WebRtc_UWord32 newVoEMicLevel(0); + uint32_t maxVolume(0); + uint16_t currentVoEMicLevel(0); + uint32_t newVoEMicLevel(0); if (_shared->audio_processing() && (_shared->audio_processing()->gain_control()->mode() @@ -165,7 +165,7 @@ WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable( { if (0 != maxVolume) { - currentVoEMicLevel = (WebRtc_UWord16) ((currentMicLevel + currentVoEMicLevel = (uint16_t) ((currentMicLevel * kMaxVolumeLevel + (int) (maxVolume / 2)) / (maxVolume)); } @@ -188,13 +188,13 @@ WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable( // issues with truncation introduced by the scaling. if (_oldMicLevel == currentMicLevel) { - currentVoEMicLevel = (WebRtc_UWord16) _oldVoEMicLevel; + currentVoEMicLevel = (uint16_t) _oldVoEMicLevel; } // Perform channel-independent operations // (APM, mix with file, record to file, mute, etc.) 
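
Note: the analog-AGC path above rescales the reported device volume into the VoE level range and, a little further down, maps the new level back again, rounding instead of truncating. A sketch of the two mappings, assuming kMaxVolumeLevel is the 255 used for VoE's [0,255] volume scale (helper names are illustrative):

    #include <stdint.h>

    static const uint32_t kMaxVolumeLevel = 255;  // assumed VoE range [0,255]

    // Device level [0, maxVolume] -> VoE level [0, kMaxVolumeLevel].
    static uint16_t DeviceToVoELevel(uint32_t deviceLevel, uint32_t maxVolume) {
        if (maxVolume == 0) return 0;
        // Adding maxVolume/2 before dividing rounds to nearest; the caller also
        // caches the previous level pair to avoid truncation drift on round trips.
        return (uint16_t)((deviceLevel * kMaxVolumeLevel + maxVolume / 2) / maxVolume);
    }

    // VoE level [0, kMaxVolumeLevel] -> device level [0, maxVolume].
    static uint32_t VoEToDeviceLevel(uint32_t voeLevel, uint32_t maxVolume) {
        return (voeLevel * maxVolume + kMaxVolumeLevel / 2) / kMaxVolumeLevel;
    }
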
_shared->transmit_mixer()->PrepareDemux(audioSamples, nSamples, nChannels, - samplesPerSec, static_cast<WebRtc_UWord16>(totalDelayMS), clockDrift, + samplesPerSec, static_cast<uint16_t>(totalDelayMS), clockDrift, currentVoEMicLevel); // Copy the audio frame to each sending channel and perform @@ -213,7 +213,7 @@ WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable( if (newVoEMicLevel != currentVoEMicLevel) { // Add (kMaxVolumeLevel/2) to round the value - newMicLevel = (WebRtc_UWord32) ((newVoEMicLevel * maxVolume + newMicLevel = (uint32_t) ((newVoEMicLevel * maxVolume + (int) (kMaxVolumeLevel / 2)) / (kMaxVolumeLevel)); } else @@ -230,13 +230,13 @@ WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable( return 0; } -WebRtc_Word32 VoEBaseImpl::NeedMorePlayData( - const WebRtc_UWord32 nSamples, - const WebRtc_UWord8 nBytesPerSample, - const WebRtc_UWord8 nChannels, - const WebRtc_UWord32 samplesPerSec, +int32_t VoEBaseImpl::NeedMorePlayData( + const uint32_t nSamples, + const uint8_t nBytesPerSample, + const uint8_t nChannels, + const uint32_t samplesPerSec, void* audioSamples, - WebRtc_UWord32& nSamplesOut) + uint32_t& nSamplesOut) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEBaseImpl::NeedMorePlayData(nSamples=%u, " @@ -259,13 +259,13 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData( assert(static_cast<int>(nSamples) == _audioFrame.samples_per_channel_); assert(samplesPerSec == - static_cast<WebRtc_UWord32>(_audioFrame.sample_rate_hz_)); + static_cast<uint32_t>(_audioFrame.sample_rate_hz_)); // Deliver audio (PCM) samples to the ADM memcpy( - (WebRtc_Word16*) audioSamples, - (const WebRtc_Word16*) _audioFrame.data_, - sizeof(WebRtc_Word16) * (_audioFrame.samples_per_channel_ + (int16_t*) audioSamples, + (const int16_t*) _audioFrame.data_, + sizeof(int16_t) * (_audioFrame.samples_per_channel_ * _audioFrame.num_channels_)); nSamplesOut = _audioFrame.samples_per_channel_; @@ -573,7 +573,7 @@ int VoEBaseImpl::MaxNumOfChannels() { WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "MaxNumOfChannels()"); - WebRtc_Word32 maxNumOfChannels = + int32_t maxNumOfChannels = _shared->channel_manager().MaxNumOfChannels(); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), @@ -593,7 +593,7 @@ int VoEBaseImpl::CreateChannel() return -1; } - WebRtc_Word32 channelId = -1; + int32_t channelId = -1; if (!_shared->channel_manager().CreateChannel(channelId)) { @@ -860,8 +860,8 @@ int VoEBaseImpl::GetVersion(char version[1024]) char versionBuf[kVoiceEngineVersionMaxMessageSize]; char* versionPtr = versionBuf; - WebRtc_Word32 len = 0; - WebRtc_Word32 accLen = 0; + int32_t len = 0; + int32_t accLen = 0; len = AddVoEVersion(versionPtr); if (len == -1) @@ -933,25 +933,25 @@ int VoEBaseImpl::GetVersion(char version[1024]) return 0; } -WebRtc_Word32 VoEBaseImpl::AddBuildInfo(char* str) const +int32_t VoEBaseImpl::AddBuildInfo(char* str) const { return sprintf(str, "Build: svn:%s %s\n", WEBRTC_SVNREVISION, BUILDINFO); } -WebRtc_Word32 VoEBaseImpl::AddVoEVersion(char* str) const +int32_t VoEBaseImpl::AddVoEVersion(char* str) const { return sprintf(str, "VoiceEngine 4.1.0\n"); } #ifdef WEBRTC_EXTERNAL_TRANSPORT -WebRtc_Word32 VoEBaseImpl::AddExternalTransportBuild(char* str) const +int32_t VoEBaseImpl::AddExternalTransportBuild(char* str) const { return sprintf(str, "External transport build\n"); } #endif #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT -WebRtc_Word32 VoEBaseImpl::AddExternalRecAndPlayoutBuild(char* str) const +int32_t VoEBaseImpl::AddExternalRecAndPlayoutBuild(char* str) const { return
sprintf(str, "External recording and playout build\n"); } @@ -1046,7 +1046,7 @@ int VoEBaseImpl::GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode) return channelPtr->GetOnHoldStatus(enabled, mode); } -WebRtc_Word32 VoEBaseImpl::StartPlayout() +int32_t VoEBaseImpl::StartPlayout() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEBaseImpl::StartPlayout()"); @@ -1074,19 +1074,19 @@ WebRtc_Word32 VoEBaseImpl::StartPlayout() return 0; } -WebRtc_Word32 VoEBaseImpl::StopPlayout() +int32_t VoEBaseImpl::StopPlayout() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEBaseImpl::StopPlayout()"); - WebRtc_Word32 numOfChannels = _shared->channel_manager().NumOfChannels(); + int32_t numOfChannels = _shared->channel_manager().NumOfChannels(); if (numOfChannels <= 0) { return 0; } - WebRtc_UWord16 nChannelsPlaying(0); - WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels]; + uint16_t nChannelsPlaying(0); + int32_t* channelsArray = new int32_t[numOfChannels]; // Get number of playing channels _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels); @@ -1117,7 +1117,7 @@ WebRtc_Word32 VoEBaseImpl::StopPlayout() return 0; } -WebRtc_Word32 VoEBaseImpl::StartSend() +int32_t VoEBaseImpl::StartSend() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEBaseImpl::StartSend()"); @@ -1146,7 +1146,7 @@ WebRtc_Word32 VoEBaseImpl::StartSend() return 0; } -WebRtc_Word32 VoEBaseImpl::StopSend() +int32_t VoEBaseImpl::StopSend() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEBaseImpl::StopSend()"); @@ -1167,16 +1167,16 @@ WebRtc_Word32 VoEBaseImpl::StopSend() return 0; } -WebRtc_Word32 VoEBaseImpl::TerminateInternal() +int32_t VoEBaseImpl::TerminateInternal() { WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEBaseImpl::TerminateInternal()"); // Delete any remaining channel objects - WebRtc_Word32 numOfChannels = _shared->channel_manager().NumOfChannels(); + int32_t numOfChannels = _shared->channel_manager().NumOfChannels(); if (numOfChannels > 0) { - WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels]; + int32_t* channelsArray = new int32_t[numOfChannels]; _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels); for (int i = 0; i < numOfChannels; i++) { diff --git a/webrtc/voice_engine/voe_base_impl.h b/webrtc/voice_engine/voe_base_impl.h index 11a4dbe5f..42369c317 100644 --- a/webrtc/voice_engine/voe_base_impl.h +++ b/webrtc/voice_engine/voe_base_impl.h @@ -71,23 +71,23 @@ public: virtual int LastError(); // AudioTransport - virtual WebRtc_Word32 + virtual int32_t RecordedDataIsAvailable(const void* audioSamples, - const WebRtc_UWord32 nSamples, - const WebRtc_UWord8 nBytesPerSample, - const WebRtc_UWord8 nChannels, - const WebRtc_UWord32 samplesPerSec, - const WebRtc_UWord32 totalDelayMS, - const WebRtc_Word32 clockDrift, - const WebRtc_UWord32 currentMicLevel, - WebRtc_UWord32& newMicLevel); + const uint32_t nSamples, + const uint8_t nBytesPerSample, + const uint8_t nChannels, + const uint32_t samplesPerSec, + const uint32_t totalDelayMS, + const int32_t clockDrift, + const uint32_t currentMicLevel, + uint32_t& newMicLevel); - virtual WebRtc_Word32 NeedMorePlayData(const WebRtc_UWord32 nSamples, - const WebRtc_UWord8 nBytesPerSample, - const WebRtc_UWord8 nChannels, - const WebRtc_UWord32 samplesPerSec, - void* audioSamples, - WebRtc_UWord32& nSamplesOut); + virtual int32_t NeedMorePlayData(const uint32_t 
nSamples, + const uint8_t nBytesPerSample, + const uint8_t nChannels, + const uint32_t samplesPerSec, + void* audioSamples, + uint32_t& nSamplesOut); // AudioDeviceObserver virtual void OnErrorIsReported(const ErrorCode error); @@ -98,26 +98,26 @@ protected: virtual ~VoEBaseImpl(); private: - WebRtc_Word32 StartPlayout(); - WebRtc_Word32 StopPlayout(); - WebRtc_Word32 StartSend(); - WebRtc_Word32 StopSend(); - WebRtc_Word32 TerminateInternal(); + int32_t StartPlayout(); + int32_t StopPlayout(); + int32_t StartSend(); + int32_t StopSend(); + int32_t TerminateInternal(); - WebRtc_Word32 AddBuildInfo(char* str) const; - WebRtc_Word32 AddVoEVersion(char* str) const; + int32_t AddBuildInfo(char* str) const; + int32_t AddVoEVersion(char* str) const; #ifdef WEBRTC_EXTERNAL_TRANSPORT - WebRtc_Word32 AddExternalTransportBuild(char* str) const; + int32_t AddExternalTransportBuild(char* str) const; #endif #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT - WebRtc_Word32 AddExternalRecAndPlayoutBuild(char* str) const; + int32_t AddExternalRecAndPlayoutBuild(char* str) const; #endif VoiceEngineObserver* _voiceEngineObserverPtr; CriticalSectionWrapper& _callbackCritSect; bool _voiceEngineObserver; - WebRtc_UWord32 _oldVoEMicLevel; - WebRtc_UWord32 _oldMicLevel; + uint32_t _oldVoEMicLevel; + uint32_t _oldMicLevel; AudioFrame _audioFrame; voe::SharedData* _shared; diff --git a/webrtc/voice_engine/voe_call_report_impl.cc b/webrtc/voice_engine/voe_call_report_impl.cc index c4a6bc919..8eaf725a9 100644 --- a/webrtc/voice_engine/voe_call_report_impl.cc +++ b/webrtc/voice_engine/voe_call_report_impl.cc @@ -100,13 +100,13 @@ int VoECallReportImpl::ResetCallReportStatistics(int channel) } else { - WebRtc_Word32 numOfChannels = + int32_t numOfChannels = _shared->channel_manager().NumOfChannels(); if (numOfChannels <= 0) { return 0; } - WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels]; + int32_t* channelsArray = new int32_t[numOfChannels]; _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels); for (int i = 0; i < numOfChannels; i++) { @@ -333,12 +333,12 @@ int VoECallReportImpl::WriteReportToFile(const char* fileNameUTF8) _file.WriteText("\nNetwork Packet Round Trip Time (RTT)\n"); _file.WriteText("------------------------------------\n\n"); - WebRtc_Word32 numOfChannels = _shared->channel_manager().NumOfChannels(); + int32_t numOfChannels = _shared->channel_manager().NumOfChannels(); if (numOfChannels <= 0) { return 0; } - WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels]; + int32_t* channelsArray = new int32_t[numOfChannels]; _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels); for (int ch = 0; ch < numOfChannels; ch++) { diff --git a/webrtc/voice_engine/voe_codec_impl.cc b/webrtc/voice_engine/voe_codec_impl.cc index 6efa89989..4df97c3ee 100644 --- a/webrtc/voice_engine/voe_codec_impl.cc +++ b/webrtc/voice_engine/voe_codec_impl.cc @@ -55,7 +55,7 @@ int VoECodecImpl::NumOfCodecs() "NumOfCodecs()"); // Number of supported codecs in the ACM - WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs(); + uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs(); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), diff --git a/webrtc/voice_engine/voe_external_media_impl.cc b/webrtc/voice_engine/voe_external_media_impl.cc index 4861c3eeb..324dfec52 100644 --- a/webrtc/voice_engine/voe_external_media_impl.cc +++ b/webrtc/voice_engine/voe_external_media_impl.cc @@ -162,7 +162,7 @@ int 
VoEExternalMediaImpl::SetExternalRecordingStatus(bool enable) } int VoEExternalMediaImpl::ExternalRecordingInsertData( - const WebRtc_Word16 speechData10ms[], + const int16_t speechData10ms[], int lengthSamples, int samplingFreqHz, int current_delay_ms) @@ -211,12 +211,12 @@ int VoEExternalMediaImpl::ExternalRecordingInsertData( return -1; } - WebRtc_UWord16 blockSize = samplingFreqHz / 100; - WebRtc_UWord32 nBlocks = lengthSamples / blockSize; - WebRtc_Word16 totalDelayMS = 0; - WebRtc_UWord16 playoutDelayMS = 0; + uint16_t blockSize = samplingFreqHz / 100; + uint32_t nBlocks = lengthSamples / blockSize; + int16_t totalDelayMS = 0; + uint16_t playoutDelayMS = 0; - for (WebRtc_UWord32 i = 0; i < nBlocks; i++) + for (uint32_t i = 0; i < nBlocks; i++) { if (!shared_->ext_playout()) { @@ -233,12 +233,12 @@ int VoEExternalMediaImpl::ExternalRecordingInsertData( // to ExternalPlayoutGetData. totalDelayMS = current_delay_ms + playout_delay_ms_; // Compensate for block sizes larger than 10ms - totalDelayMS -= (WebRtc_Word16)(i*10); + totalDelayMS -= (int16_t)(i*10); if (totalDelayMS < 0) totalDelayMS = 0; } shared_->transmit_mixer()->PrepareDemux( - (const WebRtc_Word8*)(&speechData10ms[i*blockSize]), + (const int8_t*)(&speechData10ms[i*blockSize]), blockSize, 1, samplingFreqHz, @@ -278,7 +278,7 @@ int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable) } int VoEExternalMediaImpl::ExternalPlayoutGetData( - WebRtc_Word16 speechData10ms[], + int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples) @@ -323,7 +323,7 @@ int VoEExternalMediaImpl::ExternalPlayoutGetData( // Deliver audio (PCM) samples to the external sink memcpy(speechData10ms, audioFrame.data_, - sizeof(WebRtc_Word16)*(audioFrame.samples_per_channel_)); + sizeof(int16_t)*(audioFrame.samples_per_channel_)); lengthSamples = audioFrame.samples_per_channel_; // Store current playout delay (to be used by ExternalRecordingInsertData). 
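
Note: ExternalRecordingInsertData above slices the caller's buffer into 10 ms blocks and shrinks the reported delay by 10 ms for each block already consumed. A condensed sketch of that loop (function name is illustrative; the real code hands each block to TransmitMixer::PrepareDemux):

    #include <stdint.h>

    static void InsertRecordedBlocks(const int16_t* speech, int lengthSamples,
                                     int samplingFreqHz, int currentDelayMs,
                                     int playoutDelayMs) {
        const uint16_t blockSize = (uint16_t)(samplingFreqHz / 100);  // samples per 10 ms
        if (blockSize == 0) return;
        const uint32_t nBlocks = (uint32_t)(lengthSamples / blockSize);

        for (uint32_t i = 0; i < nBlocks; ++i) {
            // Later blocks are 10 ms "older" by the time they are processed,
            // so the delay reported to the mixer shrinks accordingly.
            int16_t totalDelayMs =
                (int16_t)(currentDelayMs + playoutDelayMs - (int)(i * 10));
            if (totalDelayMs < 0) totalDelayMs = 0;
            const int16_t* block = &speech[i * blockSize];
            (void)block;         // would be passed to TransmitMixer::PrepareDemux()
            (void)totalDelayMs;  // together with blockSize samples and this delay
        }
    }
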
diff --git a/webrtc/voice_engine/voe_external_media_impl.h b/webrtc/voice_engine/voe_external_media_impl.h index 9f0027b3f..d2ce77986 100644 --- a/webrtc/voice_engine/voe_external_media_impl.h +++ b/webrtc/voice_engine/voe_external_media_impl.h @@ -34,12 +34,12 @@ public: virtual int SetExternalPlayoutStatus(bool enable); virtual int ExternalRecordingInsertData( - const WebRtc_Word16 speechData10ms[], + const int16_t speechData10ms[], int lengthSamples, int samplingFreqHz, int current_delay_ms); - virtual int ExternalPlayoutGetData(WebRtc_Word16 speechData10ms[], + virtual int ExternalPlayoutGetData(int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples); diff --git a/webrtc/voice_engine/voe_file_impl.cc b/webrtc/voice_engine/voe_file_impl.cc index 1dda43fda..a0690737c 100644 --- a/webrtc/voice_engine/voe_file_impl.cc +++ b/webrtc/voice_engine/voe_file_impl.cc @@ -205,8 +205,8 @@ int VoEFileImpl::StartPlayingFileAsMicrophone(int channel, return -1; } - const WebRtc_UWord32 startPointMs(0); - const WebRtc_UWord32 stopPointMs(0); + const uint32_t startPointMs(0); + const uint32_t stopPointMs(0); if (channel == -1) { @@ -282,8 +282,8 @@ int VoEFileImpl::StartPlayingFileAsMicrophone(int channel, return -1; } - const WebRtc_UWord32 startPointMs(0); - const WebRtc_UWord32 stopPointMs(0); + const uint32_t startPointMs(0); + const uint32_t stopPointMs(0); if (channel == -1) { @@ -692,9 +692,9 @@ int VoEFileImpl::ConvertPCMToWAV(const char* fileNameInUTF8, // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -705,7 +705,7 @@ int VoEFileImpl::ConvertPCMToWAV(const char* fileNameInUTF8, } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -775,9 +775,9 @@ int VoEFileImpl::ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut) // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -788,7 +788,7 @@ int VoEFileImpl::ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut) } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, frequency, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -855,9 +855,9 @@ int VoEFileImpl::ConvertWAVToPCM(const char* fileNameInUTF8, // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -868,7 +868,7 @@ int VoEFileImpl::ConvertWAVToPCM(const char* fileNameInUTF8, } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -940,9 +940,9 @@ int VoEFileImpl::ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut) // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t 
frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -953,7 +953,7 @@ int VoEFileImpl::ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut) } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, frequency, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -1019,9 +1019,9 @@ int VoEFileImpl::ConvertPCMToCompressed(const char* fileNameInUTF8, // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -1031,7 +1031,7 @@ int VoEFileImpl::ConvertPCMToCompressed(const char* fileNameInUTF8, break; } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -1103,9 +1103,9 @@ int VoEFileImpl::ConvertPCMToCompressed(InStream* streamIn, // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -1115,7 +1115,7 @@ int VoEFileImpl::ConvertPCMToCompressed(InStream* streamIn, break; } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -1185,9 +1185,9 @@ int VoEFileImpl::ConvertCompressedToPCM(const char* fileNameInUTF8, // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -1197,7 +1197,7 @@ int VoEFileImpl::ConvertCompressedToPCM(const char* fileNameInUTF8, break; } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -1274,9 +1274,9 @@ int VoEFileImpl::ConvertCompressedToPCM(InStream* streamIn, // Run throught the file AudioFrame audioFrame; - WebRtc_Word16 decodedData[160]; + int16_t decodedData[160]; int decLength=0; - const WebRtc_UWord32 frequency = 16000; + const uint32_t frequency = 16000; while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency)) { @@ -1286,7 +1286,7 @@ int VoEFileImpl::ConvertCompressedToPCM(InStream* streamIn, break; } audioFrame.UpdateFrame(-1, 0, decodedData, - (WebRtc_UWord16)decLength, + (uint16_t)decLength, frequency, AudioFrame::kNormalSpeech, AudioFrame::kVadActive); @@ -1322,7 +1322,7 @@ int VoEFileImpl::GetFileDuration(const char* fileNameUTF8, MediaFile * fileModule=MediaFile::CreateMediaFile(-1); // Temp container of the right format - WebRtc_UWord32 duration; + uint32_t duration; int res=fileModule->FileDurationMs(fileNameUTF8,duration,format); if (res) { diff --git a/webrtc/voice_engine/voe_hardware_impl.cc b/webrtc/voice_engine/voe_hardware_impl.cc index a374bc9a6..51b833e2c 100644 --- a/webrtc/voice_engine/voe_hardware_impl.cc +++ b/webrtc/voice_engine/voe_hardware_impl.cc @@ -211,7 +211,7 @@ int VoEHardwareImpl::GetRecordingDeviceName(int index, // Note that strGuidUTF8 is allowed to be NULL // Init len variable to length of supplied vectors - const WebRtc_UWord16 
strLen = 128; + const uint16_t strLen = 128; // Check if length has been changed in module assert(strLen == kAdmMaxDeviceNameSize); @@ -269,7 +269,7 @@ int VoEHardwareImpl::GetPlayoutDeviceName(int index, // Note that strGuidUTF8 is allowed to be NULL // Init len variable to length of supplied vectors - const WebRtc_UWord16 strLen = 128; + const uint16_t strLen = 128; // Check if length has been changed in module assert(strLen == kAdmMaxDeviceNameSize); @@ -361,9 +361,9 @@ int VoEHardwareImpl::SetRecordingDevice(int index, } // Map indices to unsigned since underlying functions need that - WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index); + uint16_t indexU = static_cast<uint16_t> (index); - WebRtc_Word32 res(0); + int32_t res(0); if (index == -1) { @@ -470,9 +470,9 @@ int VoEHardwareImpl::SetPlayoutDevice(int index) // We let the module do the index sanity // Map indices to unsigned since underlying functions need that - WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index); + uint16_t indexU = static_cast<uint16_t> (index); - WebRtc_Word32 res(0); + int32_t res(0); if (index == -1) { @@ -722,7 +722,7 @@ int VoEHardwareImpl::GetCPULoad(int& loadPercent) } // Get CPU load from ADM - WebRtc_UWord16 load(0); + uint16_t load(0); if (_shared->audio_device()->CPULoad(&load) != 0) { _shared->SetLastError(VE_CPU_INFO_ERROR, kTraceError, diff --git a/webrtc/voice_engine/voe_network_impl.cc b/webrtc/voice_engine/voe_network_impl.cc index f3722f0e9..11e268067 100644 --- a/webrtc/voice_engine/voe_network_impl.cc +++ b/webrtc/voice_engine/voe_network_impl.cc @@ -122,7 +122,7 @@ int VoENetworkImpl::ReceivedRTPPacket(int channel, "ReceivedRTPPacket() external transport is not enabled"); return -1; } - return channelPtr->ReceivedRTPPacket((const WebRtc_Word8*) data, length); + return channelPtr->ReceivedRTPPacket((const int8_t*) data, length); } int VoENetworkImpl::ReceivedRTCPPacket(int channel, const void* data, @@ -161,7 +161,7 @@ int VoENetworkImpl::ReceivedRTCPPacket(int channel, const void* data, "ReceivedRTCPPacket() external transport is not enabled"); return -1; } - return channelPtr->ReceivedRTCPPacket((const WebRtc_Word8*) data, length); + return channelPtr->ReceivedRTCPPacket((const int8_t*) data, length); } int VoENetworkImpl::SetPacketTimeoutNotification(int channel, diff --git a/webrtc/voice_engine/voe_video_sync_impl.cc b/webrtc/voice_engine/voe_video_sync_impl.cc index 59f5218b5..4ef1228ad 100644 --- a/webrtc/voice_engine/voe_video_sync_impl.cc +++ b/webrtc/voice_engine/voe_video_sync_impl.cc @@ -199,7 +199,7 @@ int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs) } AudioDeviceModule::BufferType type (AudioDeviceModule::kFixedBufferSize); - WebRtc_UWord16 sizeMS(0); + uint16_t sizeMS(0); if (_shared->audio_device()->PlayoutBuffer(&type, &sizeMS) != 0) { _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError, diff --git a/webrtc/voice_engine/voe_volume_control_impl.cc b/webrtc/voice_engine/voe_volume_control_impl.cc index 6f35a2c25..8104242f3 100644 --- a/webrtc/voice_engine/voe_volume_control_impl.cc +++ b/webrtc/voice_engine/voe_volume_control_impl.cc @@ -68,8 +68,8 @@ int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume) return -1; } - WebRtc_UWord32 maxVol(0); - WebRtc_UWord32 spkrVol(0); + uint32_t maxVol(0); + uint32_t spkrVol(0); // scale: [0,kMaxVolumeLevel] -> [0,MaxSpeakerVolume] if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) @@ -79,7 +79,7 @@ int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume) return -1; } // Round the value and avoid floating computation.
- spkrVol = (WebRtc_UWord32)((volume * maxVol + + spkrVol = (uint32_t)((volume * maxVol + (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel)); // set the actual volume using the audio mixer @@ -104,8 +104,8 @@ int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume) return -1; } - WebRtc_UWord32 spkrVol(0); - WebRtc_UWord32 maxVol(0); + uint32_t spkrVol(0); + uint32_t maxVol(0); if (_shared->audio_device()->SpeakerVolume(&spkrVol) != 0) { @@ -122,7 +122,7 @@ int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume) return -1; } // Round the value and avoid floating computation. - volume = (WebRtc_UWord32) ((spkrVol * kMaxVolumeLevel + + volume = (uint32_t) ((spkrVol * kMaxVolumeLevel + (int)(maxVol / 2)) / (maxVol)); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, @@ -194,8 +194,8 @@ int VoEVolumeControlImpl::SetMicVolume(unsigned int volume) return -1; } - WebRtc_UWord32 maxVol(0); - WebRtc_UWord32 micVol(0); + uint32_t maxVol(0); + uint32_t micVol(0); // scale: [0, kMaxVolumeLevel] -> [0,MaxMicrophoneVolume] if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) @@ -221,7 +221,7 @@ int VoEVolumeControlImpl::SetMicVolume(unsigned int volume) } // Round the value and avoid floating point computation. - micVol = (WebRtc_UWord32) ((volume * maxVol + + micVol = (uint32_t) ((volume * maxVol + (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel)); // set the actual volume using the audio mixer @@ -247,8 +247,8 @@ int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume) return -1; } - WebRtc_UWord32 micVol(0); - WebRtc_UWord32 maxVol(0); + uint32_t micVol(0); + uint32_t maxVol(0); if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) { @@ -266,7 +266,7 @@ int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume) } if (micVol < maxVol) { // Round the value and avoid floating point calculation. - volume = (WebRtc_UWord32) ((micVol * kMaxVolumeLevel + + volume = (uint32_t) ((micVol * kMaxVolumeLevel + (int)(maxVol / 2)) / (maxVol)); } else { // Truncate the value to the kMaxVolumeLevel. 
@@ -396,7 +396,7 @@ int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level) _shared->SetLastError(VE_NOT_INITED, kTraceError); return -1; } - WebRtc_Word8 currentLevel = _shared->transmit_mixer()->AudioLevel(); + int8_t currentLevel = _shared->transmit_mixer()->AudioLevel(); level = static_cast<unsigned int> (currentLevel); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), @@ -418,7 +418,7 @@ int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel, if (channel == -1) { return _shared->output_mixer()->GetSpeechOutputLevel( - (WebRtc_UWord32&)level); + (uint32_t&)level); } else { @@ -430,7 +430,7 @@ int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel, "GetSpeechOutputLevel() failed to locate channel"); return -1; } - channelPtr->GetSpeechOutputLevel((WebRtc_UWord32&)level); + channelPtr->GetSpeechOutputLevel((uint32_t&)level); } return 0; } @@ -445,7 +445,7 @@ int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level) _shared->SetLastError(VE_NOT_INITED, kTraceError); return -1; } - WebRtc_Word16 currentLevel = _shared->transmit_mixer()-> + int16_t currentLevel = _shared->transmit_mixer()-> AudioLevelFullRange(); level = static_cast<unsigned int> (currentLevel); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, @@ -468,7 +468,7 @@ int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel, if (channel == -1) { return _shared->output_mixer()->GetSpeechOutputLevelFullRange( - (WebRtc_UWord32&)level); + (uint32_t&)level); } else { @@ -480,7 +480,7 @@ int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel, "GetSpeechOutputLevelFullRange() failed to locate channel"); return -1; } - channelPtr->GetSpeechOutputLevelFullRange((WebRtc_UWord32&)level); + channelPtr->GetSpeechOutputLevelFullRange((uint32_t&)level); } return 0; } diff --git a/webrtc/voice_engine/voice_engine_impl.cc b/webrtc/voice_engine/voice_engine_impl.cc index affd1a8d6..fd8723a67 100644 --- a/webrtc/voice_engine/voice_engine_impl.cc +++ b/webrtc/voice_engine/voice_engine_impl.cc @@ -22,7 +22,7 @@ namespace webrtc // methods. It is not the nicest solution, especially not since we already // have a counter in VoEBaseImpl. In other words, there is room for // improvement here. -static WebRtc_Word32 gVoiceEngineInstanceCounter = 0; +static int32_t gVoiceEngineInstanceCounter = 0; extern "C" { @@ -87,11 +87,11 @@ int VoiceEngine::SetTraceFilter(const unsigned int filter) "SetTraceFilter(filter=0x%x)", filter); // Remember old filter - WebRtc_UWord32 oldFilter = 0; + uint32_t oldFilter = 0; Trace::LevelFilter(oldFilter); // Set new filter - WebRtc_Word32 ret = Trace::SetLevelFilter(filter); + int32_t ret = Trace::SetLevelFilter(filter); // If previous log was ignored, log again after changing filter if (kTraceNone == oldFilter)
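
Note: the (uint32_t&)level casts in GetSpeechOutputLevel and GetSpeechOutputLevelFullRange reinterpret an unsigned int reference as a uint32_t reference, which is only safe while the two types have the same size and representation on every supported platform. A one-line compile-time check expressing that assumption (illustrative only; not part of the patch):

    #include <stdint.h>

    // The VoE level out-parameters are declared as unsigned int but written
    // through uint32_t references; this holds on the usual 32/64-bit targets,
    // but it is worth asserting rather than assuming.
    static_assert(sizeof(unsigned int) == sizeof(uint32_t),
                  "VoE level out-parameters assume a 32-bit unsigned int");
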