(Auto)update libjingle 62063505 -> 62278774
git-svn-id: http://webrtc.googlecode.com/svn/trunk@5617 4adac7df-926f-26a2-2b94-8c16560cd09d
@@ -122,6 +122,8 @@ const char StatsReport::kStatsValueNameLocalCertificateId[] =
     "googLocalCertificateId";
 const char StatsReport::kStatsValueNameNacksReceived[] = "googNacksReceived";
 const char StatsReport::kStatsValueNameNacksSent[] = "googNacksSent";
+const char StatsReport::kStatsValueNamePlisReceived[] = "googPlisReceived";
+const char StatsReport::kStatsValueNamePlisSent[] = "googPlisSent";
 const char StatsReport::kStatsValueNamePacketsReceived[] = "packetsReceived";
 const char StatsReport::kStatsValueNamePacketsSent[] = "packetsSent";
 const char StatsReport::kStatsValueNamePacketsLost[] = "packetsLost";
@@ -284,6 +286,8 @@ void ExtractStats(const cricket::VideoReceiverInfo& info, StatsReport* report) {
 
   report->AddValue(StatsReport::kStatsValueNameFirsSent,
                    info.firs_sent);
+  report->AddValue(StatsReport::kStatsValueNamePlisSent,
+                   info.plis_sent);
   report->AddValue(StatsReport::kStatsValueNameNacksSent,
                    info.nacks_sent);
   report->AddValue(StatsReport::kStatsValueNameFrameWidthReceived,
@@ -321,6 +325,8 @@ void ExtractStats(const cricket::VideoSenderInfo& info, StatsReport* report) {
 
   report->AddValue(StatsReport::kStatsValueNameFirsReceived,
                    info.firs_rcvd);
+  report->AddValue(StatsReport::kStatsValueNamePlisReceived,
+                   info.plis_rcvd);
   report->AddValue(StatsReport::kStatsValueNameNacksReceived,
                    info.nacks_rcvd);
   report->AddValue(StatsReport::kStatsValueNameFrameWidthInput,
@@ -167,6 +167,8 @@ class StatsReport {
   static const char kStatsValueNameJitterReceived[];
   static const char kStatsValueNameNacksReceived[];
   static const char kStatsValueNameNacksSent[];
+  static const char kStatsValueNamePlisReceived[];
+  static const char kStatsValueNamePlisSent[];
   static const char kStatsValueNameRtt[];
   static const char kStatsValueNameAvailableSendBandwidth[];
   static const char kStatsValueNameAvailableReceiveBandwidth[];
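Illustrative usage (not part of the diff): the four hunks above plumb PLI (Picture Loss Indication) counts through the stats path alongside the existing FIR and NACK counts — new "googPlisSent"/"googPlisReceived" value names, the AddValue() calls in ExtractStats(), and the matching declarations in StatsReport. A minimal sketch of a consumer reading the new value, assuming a report already filled in by ExtractStats(); the FindValue() helper is hypothetical and only stands in for however the caller walks the report's name/value pairs:

    #include <string>
    #include "talk/app/webrtc/statstypes.h"

    // Hypothetical helper: returns the value stored under |name|, or "" if absent.
    std::string FindValue(const webrtc::StatsReport& report, const std::string& name);

    void LogVideoReceiverFeedback(const webrtc::StatsReport& report) {
      // kStatsValueNamePlisSent ("googPlisSent") is the name added by this change;
      // the FIR and NACK names already existed.
      std::string plis = FindValue(report, webrtc::StatsReport::kStatsValueNamePlisSent);
      std::string firs = FindValue(report, webrtc::StatsReport::kStatsValueNameFirsSent);
      std::string nacks = FindValue(report, webrtc::StatsReport::kStatsValueNameNacksSent);
      // ... log or export plis/firs/nacks ...
    }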
@@ -56,7 +56,6 @@ class FakePeriodicVideoCapturer : public cricket::FakeVideoCapturer {
   virtual cricket::CaptureState Start(const cricket::VideoFormat& format) {
     cricket::CaptureState state = FakeVideoCapturer::Start(format);
     if (state != cricket::CS_FAILED) {
-      set_enable_video_adapter(false);  // Simplify testing.
       talk_base::Thread::Current()->Post(this, MSG_CREATEFRAME);
     }
     return state;
@@ -2111,9 +2111,6 @@ bool ParseMediaDescription(const std::string& message,
           message, cricket::MEDIA_TYPE_AUDIO, mline_index, protocol,
           codec_preference, pos, &content_name,
           &transport, candidates, error));
-      MaybeCreateStaticPayloadAudioCodecs(
-          codec_preference,
-          static_cast<AudioContentDescription*>(content.get()));
     } else if (HasAttribute(line, kMediaTypeData)) {
       DataContentDescription* desc =
           ParseContentDescription<DataContentDescription>(
@@ -2366,6 +2363,11 @@ bool ParseContent(const std::string& message,
   ASSERT(content_name != NULL);
   ASSERT(transport != NULL);
 
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    MaybeCreateStaticPayloadAudioCodecs(
+        codec_preference, static_cast<AudioContentDescription*>(media_desc));
+  }
+
   // The media level "ice-ufrag" and "ice-pwd".
   // The candidates before update the media level "ice-pwd" and "ice-ufrag".
   Candidates candidates_orig;
@@ -299,6 +299,19 @@ static const char kSdpSctpDataChannelWithCandidatesString[] =
     "a=mid:data_content_name\r\n"
     "a=sctpmap:5000 webrtc-datachannel 1024\r\n";
 
+static const char kSdpConferenceString[] =
+    "v=0\r\n"
+    "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
+    "s=-\r\n"
+    "t=0 0\r\n"
+    "a=msid-semantic: WMS\r\n"
+    "m=audio 1 RTP/SAVPF 111 103 104\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=x-google-flag:conference\r\n"
+    "m=video 1 RTP/SAVPF 120\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=x-google-flag:conference\r\n";
+
 
 // One candidate reference string as per W3c spec.
 // candidate:<blah> not a=candidate:<blah>CRLF
@@ -1474,6 +1487,21 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithExtmap) {
   EXPECT_EQ(sdp_with_extmap, message);
 }
 
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBufferLatency) {
+  VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
+      GetFirstVideoContent(&desc_)->description);
+  vcd->set_buffered_mode_latency(128);
+
+  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
+                                jdesc_.session_id(),
+                                jdesc_.session_version()));
+  std::string message = webrtc::SdpSerialize(jdesc_);
+  std::string sdp_with_buffer_latency = kSdpFullString;
+  InjectAfter("a=rtpmap:120 VP8/90000\r\n",
+              "a=x-google-buffer-latency:128\r\n",
+              &sdp_with_buffer_latency);
+  EXPECT_EQ(sdp_with_buffer_latency, message);
+}
 
 
 TEST_F(WebRtcSdpTest, SerializeCandidates) {
   std::string message = webrtc::SdpSerializeCandidate(*jcandidate_);
@@ -1547,6 +1575,37 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutRtpmap) {
   EXPECT_EQ(ref_codecs, audio->codecs());
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutRtpmapButWithFmtp) {
+  static const char kSdpNoRtpmapString[] =
+      "v=0\r\n"
+      "o=- 11 22 IN IP4 127.0.0.1\r\n"
+      "s=-\r\n"
+      "t=0 0\r\n"
+      "m=audio 49232 RTP/AVP 18 103\r\n"
+      "a=fmtp:18 annexb=yes\r\n"
+      "a=rtpmap:103 ISAC/16000\r\n";
+
+  JsepSessionDescription jdesc(kDummyString);
+  EXPECT_TRUE(SdpDeserialize(kSdpNoRtpmapString, &jdesc));
+  cricket::AudioContentDescription* audio =
+      static_cast<AudioContentDescription*>(
+          jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
+
+  cricket::AudioCodec g729 = audio->codecs()[0];
+  EXPECT_EQ("G729", g729.name);
+  EXPECT_EQ(8000, g729.clockrate);
+  EXPECT_EQ(18, g729.id);
+  cricket::CodecParameterMap::iterator found =
+      g729.params.find("annexb");
+  ASSERT_TRUE(found != g729.params.end());
+  EXPECT_EQ(found->second, "yes");
+
+  cricket::AudioCodec isac = audio->codecs()[1];
+  EXPECT_EQ("ISAC", isac.name);
+  EXPECT_EQ(103, isac.id);
+  EXPECT_EQ(16000, isac.clockrate);
+}
+
 // Ensure that we can deserialize SDP with a=fingerprint properly.
 TEST_F(WebRtcSdpTest, DeserializeJsepSessionDescriptionWithFingerprint) {
   // Add a DTLS a=fingerprint attribute to our session description.
@@ -1654,6 +1713,23 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithUfragPwd) {
   EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_ufrag_pwd));
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithBufferLatency) {
+  JsepSessionDescription jdesc_with_buffer_latency(kDummyString);
+  std::string sdp_with_buffer_latency = kSdpFullString;
+  InjectAfter("a=rtpmap:120 VP8/90000\r\n",
+              "a=x-google-buffer-latency:128\r\n",
+              &sdp_with_buffer_latency);
+
+  EXPECT_TRUE(
+      SdpDeserialize(sdp_with_buffer_latency, &jdesc_with_buffer_latency));
+  VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
+      GetFirstVideoContent(&desc_)->description);
+  vcd->set_buffered_mode_latency(128);
+  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
+                                jdesc_.session_id(),
+                                jdesc_.session_version()));
+  EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_buffer_latency));
+}
 
 
 TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithRecvOnlyContent) {
   EXPECT_TRUE(TestDeserializeDirection(cricket::MD_RECVONLY));
@@ -1904,6 +1980,24 @@ TEST_F(WebRtcSdpTest, DeserializeCandidateOldFormat) {
   EXPECT_TRUE(jcandidate.candidate().IsEquivalent(ref_candidate));
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSdpWithConferenceFlag) {
+  JsepSessionDescription jdesc(kDummyString);
+
+  // Deserialize
+  EXPECT_TRUE(SdpDeserialize(kSdpConferenceString, &jdesc));
+
+  // Verify
+  cricket::AudioContentDescription* audio =
+      static_cast<AudioContentDescription*>(
+          jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
+  EXPECT_TRUE(audio->conference_mode());
+
+  cricket::VideoContentDescription* video =
+      static_cast<VideoContentDescription*>(
+          jdesc.description()->GetContentDescriptionByName(cricket::CN_VIDEO));
+  EXPECT_TRUE(video->conference_mode());
+}
+
 TEST_F(WebRtcSdpTest, DeserializeBrokenSdp) {
   const char kSdpDestroyer[] = "!@#$%^&";
   const char kSdpInvalidLine1[] = " =candidate";
@@ -62,7 +62,7 @@ bool BandwidthSmoother::Sample(uint32 sample_time, int bandwidth) {
   }
 
   // Replace bandwidth with the mean of sampled bandwidths.
-  const int mean_bandwidth = accumulator_.ComputeMean();
+  const int mean_bandwidth = static_cast<int>(accumulator_.ComputeMean());
 
   if (mean_bandwidth < bandwidth_estimation_) {
     time_at_last_change_ = sample_time;
talk/base/criticalsection_unittest.cc (new file, 163 lines)
@@ -0,0 +1,163 @@
+/*
+ * libjingle
+ * Copyright 2014, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <set>
+#include <vector>
+
+#include "talk/base/criticalsection.h"
+#include "talk/base/event.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scopedptrcollection.h"
+#include "talk/base/thread.h"
+
+namespace talk_base {
+
+namespace {
+
+const int kLongTime = 10000;  // 10 seconds
+const int kNumThreads = 16;
+const int kOperationsToRun = 10000;
+
+template <class T>
+class AtomicOpRunner : public MessageHandler {
+ public:
+  explicit AtomicOpRunner(int initial_value)
+      : value_(initial_value),
+        threads_active_(0),
+        start_event_(true, false),
+        done_event_(true, false) {}
+
+  int value() const { return value_; }
+
+  bool Run() {
+    // Signal all threads to start.
+    start_event_.Set();
+
+    // Wait for all threads to finish.
+    return done_event_.Wait(kLongTime);
+  }
+
+  void SetExpectedThreadCount(int count) {
+    threads_active_ = count;
+  }
+
+  virtual void OnMessage(Message* msg) {
+    std::vector<int> values;
+    values.reserve(kOperationsToRun);
+
+    // Wait to start.
+    ASSERT_TRUE(start_event_.Wait(kLongTime));
+
+    // Generate a bunch of values by updating value_ atomically.
+    for (int i = 0; i < kOperationsToRun; ++i) {
+      values.push_back(T::AtomicOp(&value_));
+    }
+
+    { // Add them all to the set.
+      CritScope cs(&all_values_crit_);
+      for (size_t i = 0; i < values.size(); ++i) {
+        std::pair<std::set<int>::iterator, bool> result =
+            all_values_.insert(values[i]);
+        // Each value should only be taken by one thread, so if this value
+        // has already been added, something went wrong.
+        EXPECT_TRUE(result.second)
+            << "Thread=" << Thread::Current() << " value=" << values[i];
+      }
+    }
+
+    // Signal that we're done.
+    if (AtomicOps::Decrement(&threads_active_) == 0) {
+      done_event_.Set();
+    }
+  }
+
+ private:
+  int value_;
+  int threads_active_;
+  CriticalSection all_values_crit_;
+  std::set<int> all_values_;
+  Event start_event_;
+  Event done_event_;
+};
+
+struct IncrementOp {
+  static int AtomicOp(int* i) { return AtomicOps::Increment(i); }
+};
+
+struct DecrementOp {
+  static int AtomicOp(int* i) { return AtomicOps::Decrement(i); }
+};
+
+void StartThreads(ScopedPtrCollection<Thread>* threads,
+                  MessageHandler* handler) {
+  for (int i = 0; i < kNumThreads; ++i) {
+    Thread* thread = new Thread();
+    thread->Start();
+    thread->Post(handler);
+    threads->PushBack(thread);
+  }
+}
+
+}  // namespace
+
+TEST(AtomicOpsTest, Simple) {
+  int value = 0;
+  EXPECT_EQ(1, AtomicOps::Increment(&value));
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(2, AtomicOps::Increment(&value));
+  EXPECT_EQ(2, value);
+  EXPECT_EQ(1, AtomicOps::Decrement(&value));
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(0, AtomicOps::Decrement(&value));
+  EXPECT_EQ(0, value);
+}
+
+TEST(AtomicOpsTest, Increment) {
+  // Create and start lots of threads.
+  AtomicOpRunner<IncrementOp> runner(0);
+  ScopedPtrCollection<Thread> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(kOperationsToRun * kNumThreads, runner.value());
+}
+
+TEST(AtomicOpsTest, Decrement) {
+  // Create and start lots of threads.
+  AtomicOpRunner<DecrementOp> runner(kOperationsToRun * kNumThreads);
+  ScopedPtrCollection<Thread> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(0, runner.value());
+}
+
+}  // namespace talk_base
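The new criticalsection_unittest.cc above stresses talk_base::AtomicOps::Increment/Decrement from many threads and checks that every intermediate value is observed exactly once. A minimal sketch of the kind of code those primitives guard, assuming only the AtomicOps interface exactly as the test exercises it; the RefCounted class itself is illustrative and not part of this change:

    #include "talk/base/criticalsection.h"  // same header the new test includes for AtomicOps

    class RefCounted {
     public:
      RefCounted() : ref_count_(1) {}
      ~RefCounted() {}
      void AddRef() { talk_base::AtomicOps::Increment(&ref_count_); }
      void Release() {
        // Decrement() returns the new value, so exactly one caller observes 0.
        if (talk_base::AtomicOps::Decrement(&ref_count_) == 0) {
          delete this;
        }
      }
     private:
      int ref_count_;
    };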
@@ -784,7 +784,13 @@ SECStatus NSSStreamAdapter::AuthCertificateHook(void *arg,
                                                 PRBool checksig,
                                                 PRBool isServer) {
   LOG(LS_INFO) << "NSSStreamAdapter::AuthCertificateHook";
-  NSSCertificate peer_cert(SSL_PeerCertificate(fd));
+  // SSL_PeerCertificate returns a pointer that is owned by the caller, and
+  // the NSSCertificate constructor copies its argument, so |raw_peer_cert|
+  // must be destroyed in this function.
+  CERTCertificate* raw_peer_cert = SSL_PeerCertificate(fd);
+  NSSCertificate peer_cert(raw_peer_cert);
+  CERT_DestroyCertificate(raw_peer_cert);
+
   NSSStreamAdapter *stream = reinterpret_cast<NSSStreamAdapter *>(arg);
   stream->cert_ok_ = false;
 
@@ -42,11 +42,8 @@ template<typename T>
 class RollingAccumulator {
  public:
   explicit RollingAccumulator(size_t max_count)
-      : count_(0),
-        next_index_(0),
-        sum_(0.0),
-        sum_2_(0.0),
-        samples_(max_count) {
+      : samples_(max_count) {
+    Reset();
   }
   ~RollingAccumulator() {
   }
@@ -59,12 +56,29 @@ class RollingAccumulator {
     return count_;
   }
 
+  void Reset() {
+    count_ = 0U;
+    next_index_ = 0U;
+    sum_ = 0.0;
+    sum_2_ = 0.0;
+    max_ = T();
+    max_stale_ = false;
+    min_ = T();
+    min_stale_ = false;
+  }
+
   void AddSample(T sample) {
     if (count_ == max_count()) {
       // Remove oldest sample.
       T sample_to_remove = samples_[next_index_];
       sum_ -= sample_to_remove;
       sum_2_ -= sample_to_remove * sample_to_remove;
+      if (sample_to_remove >= max_) {
+        max_stale_ = true;
+      }
+      if (sample_to_remove <= min_) {
+        min_stale_ = true;
+      }
     } else {
       // Increase count of samples.
       ++count_;
@@ -73,6 +87,14 @@ class RollingAccumulator {
     samples_[next_index_] = sample;
     sum_ += sample;
     sum_2_ += sample * sample;
+    if (count_ == 1 || sample >= max_) {
+      max_ = sample;
+      max_stale_ = false;
+    }
+    if (count_ == 1 || sample <= min_) {
+      min_ = sample;
+      min_stale_ = false;
+    }
     // Update next_index_.
     next_index_ = (next_index_ + 1) % max_count();
   }
@@ -81,17 +103,43 @@ class RollingAccumulator {
     return static_cast<T>(sum_);
   }
 
-  T ComputeMean() const {
+  double ComputeMean() const {
     if (count_ == 0) {
-      return static_cast<T>(0);
+      return 0.0;
     }
-    return static_cast<T>(sum_ / count_);
+    return sum_ / count_;
+  }
+
+  T ComputeMax() const {
+    if (max_stale_) {
+      ASSERT(count_ > 0 &&
+             "It shouldn't be possible for max_stale_ && count_ == 0");
+      max_ = samples_[next_index_];
+      for (size_t i = 1u; i < count_; i++) {
+        max_ = _max(max_, samples_[(next_index_ + i) % max_count()]);
+      }
+      max_stale_ = false;
+    }
+    return max_;
+  }
+
+  T ComputeMin() const {
+    if (min_stale_) {
+      ASSERT(count_ > 0 &&
+             "It shouldn't be possible for min_stale_ && count_ == 0");
+      min_ = samples_[next_index_];
+      for (size_t i = 1u; i < count_; i++) {
+        min_ = _min(min_, samples_[(next_index_ + i) % max_count()]);
+      }
+      min_stale_ = false;
+    }
+    return min_;
   }
 
   // O(n) time complexity.
   // Weights nth sample with weight (learning_rate)^n. Learning_rate should be
   // between (0.0, 1.0], otherwise the non-weighted mean is returned.
-  T ComputeWeightedMean(double learning_rate) const {
+  double ComputeWeightedMean(double learning_rate) const {
     if (count_ < 1 || learning_rate <= 0.0 || learning_rate >= 1.0) {
       return ComputeMean();
     }
@@ -106,27 +154,31 @@ class RollingAccumulator {
       size_t index = (next_index_ + max_size - i - 1) % max_size;
       weighted_mean += current_weight * samples_[index];
     }
-    return static_cast<T>(weighted_mean / weight_sum);
+    return weighted_mean / weight_sum;
   }
 
   // Compute estimated variance. Estimation is more accurate
   // as the number of samples grows.
-  T ComputeVariance() const {
+  double ComputeVariance() const {
     if (count_ == 0) {
-      return static_cast<T>(0);
+      return 0.0;
     }
     // Var = E[x^2] - (E[x])^2
     double count_inv = 1.0 / count_;
     double mean_2 = sum_2_ * count_inv;
     double mean = sum_ * count_inv;
-    return static_cast<T>(mean_2 - (mean * mean));
+    return mean_2 - (mean * mean);
   }
 
  private:
   size_t count_;
   size_t next_index_;
-  double sum_;  // Sum(x)
-  double sum_2_;  // Sum(x*x)
+  double sum_;  // Sum(x) - double to avoid overflow
+  double sum_2_;  // Sum(x*x) - double to avoid overflow
+  mutable T max_;
+  mutable bool max_stale_;
+  mutable T min_;
+  mutable bool min_stale_;
   std::vector<T> samples_;
 
   DISALLOW_COPY_AND_ASSIGN(RollingAccumulator);
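The rollingaccumulator.h hunks above add Reset(), ComputeMax(), and ComputeMin() — cached extremes that are lazily rescanned via the mutable *_stale_ flags when a rolled-off sample invalidates them — and change ComputeMean(), ComputeWeightedMean(), and ComputeVariance() to return double, which is also why bandwidthsmoother.cc above now casts the mean back to int. A minimal usage sketch, assuming only the interface shown in these hunks:

    #include "talk/base/rollingaccumulator.h"

    void Sketch() {
      talk_base::RollingAccumulator<int> accum(10);  // window of the 10 newest samples
      for (int i = 0; i < 25; ++i) {
        accum.AddSample(i);  // older samples roll off once the window is full
      }
      // Samples 15..24 remain in the window.
      double mean = accum.ComputeMean();          // 19.5, now a double for any T
      double variance = accum.ComputeVariance();  // also a double now
      int min = accum.ComputeMin();               // 15 (rescanned only if stale)
      int max = accum.ComputeMax();               // 24
      accum.Reset();                              // new: back to an empty window
    }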
@@ -40,8 +40,10 @@ TEST(RollingAccumulatorTest, ZeroSamples) {
   RollingAccumulator<int> accum(10);
 
   EXPECT_EQ(0U, accum.count());
-  EXPECT_EQ(0, accum.ComputeMean());
-  EXPECT_EQ(0, accum.ComputeVariance());
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeMean());
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeVariance());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(0, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, SomeSamples) {
@@ -52,9 +54,11 @@ TEST(RollingAccumulatorTest, SomeSamples) {
 
   EXPECT_EQ(4U, accum.count());
   EXPECT_EQ(6, accum.ComputeSum());
-  EXPECT_EQ(1, accum.ComputeMean());
-  EXPECT_EQ(2, accum.ComputeWeightedMean(kLearningRate));
-  EXPECT_EQ(1, accum.ComputeVariance());
+  EXPECT_DOUBLE_EQ(1.5, accum.ComputeMean());
+  EXPECT_NEAR(2.26666, accum.ComputeWeightedMean(kLearningRate), 0.01);
+  EXPECT_DOUBLE_EQ(1.25, accum.ComputeVariance());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(3, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, RollingSamples) {
@@ -65,9 +69,36 @@ TEST(RollingAccumulatorTest, RollingSamples) {
 
   EXPECT_EQ(10U, accum.count());
   EXPECT_EQ(65, accum.ComputeSum());
-  EXPECT_EQ(6, accum.ComputeMean());
-  EXPECT_EQ(10, accum.ComputeWeightedMean(kLearningRate));
-  EXPECT_NEAR(9, accum.ComputeVariance(), 1);
+  EXPECT_DOUBLE_EQ(6.5, accum.ComputeMean());
+  EXPECT_NEAR(10.0, accum.ComputeWeightedMean(kLearningRate), 0.01);
+  EXPECT_NEAR(9.0, accum.ComputeVariance(), 1.0);
+  EXPECT_EQ(2, accum.ComputeMin());
+  EXPECT_EQ(11, accum.ComputeMax());
+}
+
+TEST(RollingAccumulatorTest, ResetSamples) {
+  RollingAccumulator<int> accum(10);
+
+  for (int i = 0; i < 10; ++i) {
+    accum.AddSample(100);
+  }
+  EXPECT_EQ(10U, accum.count());
+  EXPECT_DOUBLE_EQ(100.0, accum.ComputeMean());
+  EXPECT_EQ(100, accum.ComputeMin());
+  EXPECT_EQ(100, accum.ComputeMax());
+
+  accum.Reset();
+  EXPECT_EQ(0U, accum.count());
+
+  for (int i = 0; i < 5; ++i) {
+    accum.AddSample(i);
+  }
+
+  EXPECT_EQ(5U, accum.count());
+  EXPECT_EQ(10, accum.ComputeSum());
+  EXPECT_DOUBLE_EQ(2.0, accum.ComputeMean());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(4, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, RollingSamplesDouble) {
@@ -81,22 +112,24 @@ TEST(RollingAccumulatorTest, RollingSamplesDouble) {
   EXPECT_DOUBLE_EQ(87.5, accum.ComputeMean());
   EXPECT_NEAR(105.049, accum.ComputeWeightedMean(kLearningRate), 0.1);
   EXPECT_NEAR(229.166667, accum.ComputeVariance(), 25);
+  EXPECT_DOUBLE_EQ(65.0, accum.ComputeMin());
+  EXPECT_DOUBLE_EQ(110.0, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, ComputeWeightedMeanCornerCases) {
   RollingAccumulator<int> accum(10);
-  EXPECT_EQ(0, accum.ComputeWeightedMean(kLearningRate));
-  EXPECT_EQ(0, accum.ComputeWeightedMean(0.0));
-  EXPECT_EQ(0, accum.ComputeWeightedMean(1.1));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(kLearningRate));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(0.0));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(1.1));
 
   for (int i = 0; i < 8; ++i) {
     accum.AddSample(i);
   }
 
-  EXPECT_EQ(3, accum.ComputeMean());
-  EXPECT_EQ(3, accum.ComputeWeightedMean(0));
-  EXPECT_EQ(3, accum.ComputeWeightedMean(1.1));
-  EXPECT_EQ(6, accum.ComputeWeightedMean(kLearningRate));
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeMean());
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeWeightedMean(0));
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeWeightedMean(1.1));
+  EXPECT_NEAR(6.0, accum.ComputeWeightedMean(kLearningRate), 0.1);
 }
 
 }  // namespace talk_base
@@ -544,6 +544,7 @@ talk.Unittest(env, name = "base",
                  "base/callback_unittest.cc",
                  "base/cpumonitor_unittest.cc",
                  "base/crc32_unittest.cc",
+                 "base/criticalsection_unittest.cc",
                  "base/event_unittest.cc",
                  "base/filelock_unittest.cc",
                  "base/fileutils_unittest.cc",
@@ -123,6 +123,7 @@
             'base/callback_unittest.cc',
             'base/cpumonitor_unittest.cc',
             'base/crc32_unittest.cc',
+            'base/criticalsection_unittest.cc',
            'base/event_unittest.cc',
            'base/filelock_unittest.cc',
            'base/fileutils_unittest.cc',
@@ -99,4 +99,8 @@ const char kComfortNoiseCodecName[] = "CN";
 const char kRtpAbsoluteSendTimeHeaderExtension[] =
     "http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time";
 
+const int kNumDefaultUnsignalledVideoRecvStreams = 0;
+
+
 }  // namespace cricket
@@ -120,6 +120,8 @@ extern const char kComfortNoiseCodecName[];
 // http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
 extern const char kRtpAbsoluteSendTimeHeaderExtension[];
 
+extern const int kNumDefaultUnsignalledVideoRecvStreams;
+
 }  // namespace cricket
 
 #endif  // TALK_MEDIA_BASE_CONSTANTS_H_
@@ -289,6 +289,7 @@ struct VideoOptions {
     process_adaptation_threshhold.Set(kProcessCpuThreshold);
     system_low_adaptation_threshhold.Set(kLowSystemCpuThreshold);
     system_high_adaptation_threshhold.Set(kHighSystemCpuThreshold);
+    unsignalled_recv_stream_limit.Set(kNumDefaultUnsignalledVideoRecvStreams);
   }
 
   void SetAll(const VideoOptions& change) {
@@ -317,6 +318,7 @@ struct VideoOptions {
     lower_min_bitrate.SetFrom(change.lower_min_bitrate);
     dscp.SetFrom(change.dscp);
     suspend_below_min_bitrate.SetFrom(change.suspend_below_min_bitrate);
+    unsignalled_recv_stream_limit.SetFrom(change.unsignalled_recv_stream_limit);
   }
 
   bool operator==(const VideoOptions& o) const {
@@ -342,7 +344,8 @@ struct VideoOptions {
            buffered_mode_latency == o.buffered_mode_latency &&
            lower_min_bitrate == o.lower_min_bitrate &&
            dscp == o.dscp &&
-           suspend_below_min_bitrate == o.suspend_below_min_bitrate;
+           suspend_below_min_bitrate == o.suspend_below_min_bitrate &&
+           unsignalled_recv_stream_limit == o.unsignalled_recv_stream_limit;
   }
 
   std::string ToString() const {
@@ -372,6 +375,8 @@ struct VideoOptions {
     ost << ToStringIfSet("dscp", dscp);
     ost << ToStringIfSet("suspend below min bitrate",
                          suspend_below_min_bitrate);
+    ost << ToStringIfSet("num channels for early receive",
+                         unsignalled_recv_stream_limit);
     ost << "}";
     return ost.str();
   }
@@ -421,6 +426,8 @@ struct VideoOptions {
   // Enable WebRTC suspension of video. No video frames will be sent when the
   // bitrate is below the configured minimum bitrate.
   Settable<bool> suspend_below_min_bitrate;
+  // Limit on the number of early receive channels that can be created.
+  Settable<int> unsignalled_recv_stream_limit;
 };
 
 // A class for playing out soundclips.
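The VideoOptions hunks above add unsignalled_recv_stream_limit as a regular Settable<int> option (defaulting to kNumDefaultUnsignalledVideoRecvStreams, i.e. 0, and wired through SetAll(), operator==, and ToString()). A hedged sketch of how calling code might set and read it — the Get() accessor is the usual Settable<T> one, and the surrounding functions are illustrative:

    #include "talk/media/base/mediachannel.h"

    void ConfigureUnsignalledRecv(cricket::VideoOptions* options) {
      // Allow up to two receive streams to be created for RTP packets that
      // arrive before the corresponding stream has been signalled.
      options->unsignalled_recv_stream_limit.Set(2);
    }

    int UnsignalledRecvLimit(const cricket::VideoOptions& options) {
      int limit = 0;  // matches kNumDefaultUnsignalledVideoRecvStreams
      options.unsignalled_recv_stream_limit.Get(&limit);
      return limit;
    }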
@@ -677,6 +684,20 @@ struct MediaSenderInfo {
   std::vector<SsrcReceiverInfo> remote_stats;
 };
 
+template<class T>
+struct VariableInfo {
+  VariableInfo()
+      : min_val(),
+        mean(0.0),
+        max_val(),
+        variance(0.0) {
+  }
+  T min_val;
+  double mean;
+  T max_val;
+  double variance;
+};
+
 struct MediaReceiverInfo {
   MediaReceiverInfo()
       : bytes_rcvd(0),
@@ -782,6 +803,7 @@ struct VideoSenderInfo : public MediaSenderInfo {
   VideoSenderInfo()
       : packets_cached(0),
         firs_rcvd(0),
+        plis_rcvd(0),
         nacks_rcvd(0),
         input_frame_width(0),
         input_frame_height(0),
@@ -801,6 +823,7 @@ struct VideoSenderInfo : public MediaSenderInfo {
   std::vector<SsrcGroup> ssrc_groups;
   int packets_cached;
   int firs_rcvd;
+  int plis_rcvd;
   int nacks_rcvd;
   int input_frame_width;
   int input_frame_height;
@@ -815,12 +838,16 @@ struct VideoSenderInfo : public MediaSenderInfo {
   int avg_encode_ms;
   int encode_usage_percent;
   int capture_queue_delay_ms_per_s;
+  VariableInfo<int> adapt_frame_drops;
+  VariableInfo<int> effects_frame_drops;
+  VariableInfo<double> capturer_frame_time;
 };
 
 struct VideoReceiverInfo : public MediaReceiverInfo {
   VideoReceiverInfo()
       : packets_concealed(0),
         firs_sent(0),
+        plis_sent(0),
         nacks_sent(0),
         frame_width(0),
         frame_height(0),
@@ -841,6 +868,7 @@ struct VideoReceiverInfo : public MediaReceiverInfo {
   std::vector<SsrcGroup> ssrc_groups;
   int packets_concealed;
   int firs_sent;
+  int plis_sent;
   int nacks_sent;
   int frame_width;
   int frame_height;
@@ -30,6 +30,7 @@
 #include "talk/base/logging.h"
 #include "talk/base/timeutils.h"
 #include "talk/media/base/constants.h"
+#include "talk/media/base/videocommon.h"
 #include "talk/media/base/videoframe.h"
 
 namespace cricket {
@@ -235,6 +236,10 @@ const VideoFormat& VideoAdapter::input_format() {
   return input_format_;
 }
 
+bool VideoAdapter::drops_all_frames() const {
+  return output_num_pixels_ == 0;
+}
+
 const VideoFormat& VideoAdapter::output_format() {
   talk_base::CritScope cs(&critical_section_);
   return output_format_;
@@ -308,7 +313,7 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
   }
 
   float scale = 1.f;
-  if (output_num_pixels_) {
+  if (output_num_pixels_ < input_format_.width * input_format_.height) {
     scale = VideoAdapter::FindClosestViewScale(
         static_cast<int>(in_frame->GetWidth()),
         static_cast<int>(in_frame->GetHeight()),
@@ -316,6 +321,9 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
     output_format_.width = static_cast<int>(in_frame->GetWidth() * scale + .5f);
     output_format_.height = static_cast<int>(in_frame->GetHeight() * scale +
                                              .5f);
+  } else {
+    output_format_.width = static_cast<int>(in_frame->GetWidth());
+    output_format_.height = static_cast<int>(in_frame->GetHeight());
   }
 
   if (!StretchToOutputFrame(in_frame)) {
@@ -51,6 +51,8 @@ class VideoAdapter {
   int GetOutputNumPixels() const;
 
   const VideoFormat& input_format();
+  // Returns true if the adapter is dropping frames in calls to AdaptFrame.
+  bool drops_all_frames() const;
   const VideoFormat& output_format();
   // If the parameter black is true, the adapted frames will be black.
   void SetBlackOutput(bool black);
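The videoadapter hunks above add a drops_all_frames() accessor (true when the requested output resolution is zero pixels) and stop up-scaling: when the requested pixel count is not smaller than the input, the output format is simply pinned to the input frame size. A trivial sketch of the accessor's intended use; the helper function is illustrative:

    #include "talk/media/base/videoadapter.h"

    // Skip per-frame work when the adapter is configured to drop every frame.
    bool ShouldProcessFrame(cricket::VideoAdapter* adapter) {
      return !adapter->drops_all_frames();
    }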
@@ -63,6 +63,10 @@ static const int kYU12Penalty = 16;  // Needs to be higher than MJPG index.
 static const int kDefaultScreencastFps = 5;
 typedef talk_base::TypedMessageData<CaptureState> StateChangeParams;
 
+// Limit stats data collections to ~20 seconds of 30fps data before dropping
+// old data in case stats aren't reset for long periods of time.
+static const size_t kMaxAccumulatorSize = 600;
+
 }  // namespace
 
 /////////////////////////////////////////////////////////////////////
@@ -92,11 +96,19 @@ bool CapturedFrame::GetDataSize(uint32* size) const {
 /////////////////////////////////////////////////////////////////////
 // Implementation of class VideoCapturer
 /////////////////////////////////////////////////////////////////////
-VideoCapturer::VideoCapturer() : thread_(talk_base::Thread::Current()) {
+VideoCapturer::VideoCapturer()
+    : thread_(talk_base::Thread::Current()),
+      adapt_frame_drops_data_(kMaxAccumulatorSize),
+      effect_frame_drops_data_(kMaxAccumulatorSize),
+      frame_time_data_(kMaxAccumulatorSize) {
   Construct();
 }
 
-VideoCapturer::VideoCapturer(talk_base::Thread* thread) : thread_(thread) {
+VideoCapturer::VideoCapturer(talk_base::Thread* thread)
+    : thread_(thread),
+      adapt_frame_drops_data_(kMaxAccumulatorSize),
+      effect_frame_drops_data_(kMaxAccumulatorSize),
+      frame_time_data_(kMaxAccumulatorSize) {
   Construct();
 }
 
@@ -112,6 +124,9 @@ void VideoCapturer::Construct() {
   muted_ = false;
   black_frame_count_down_ = kNumBlackFramesOnMute;
   enable_video_adapter_ = true;
+  adapt_frame_drops_ = 0;
+  effect_frame_drops_ = 0;
+  previous_frame_time_ = 0.0;
 }
 
 const std::vector<VideoFormat>* VideoCapturer::GetSupportedFormats() const {
@@ -119,6 +134,7 @@ const std::vector<VideoFormat>* VideoCapturer::GetSupportedFormats() const {
 }
 
 bool VideoCapturer::StartCapturing(const VideoFormat& capture_format) {
+  previous_frame_time_ = frame_length_time_reporter_.TimerNow();
   CaptureState result = Start(capture_format);
   const bool success = (result == CS_RUNNING) || (result == CS_STARTING);
   if (!success) {
@@ -306,6 +322,19 @@ std::string VideoCapturer::ToString(const CapturedFrame* captured_frame) const {
   return ss.str();
 }
 
+void VideoCapturer::GetStats(VariableInfo<int>* adapt_drops_stats,
+                             VariableInfo<int>* effect_drops_stats,
+                             VariableInfo<double>* frame_time_stats) {
+  talk_base::CritScope cs(&frame_stats_crit_);
+  GetVariableSnapshot(adapt_frame_drops_data_, adapt_drops_stats);
+  GetVariableSnapshot(effect_frame_drops_data_, effect_drops_stats);
+  GetVariableSnapshot(frame_time_data_, frame_time_stats);
+
+  adapt_frame_drops_data_.Reset();
+  effect_frame_drops_data_.Reset();
+  frame_time_data_.Reset();
+}
+
 void VideoCapturer::OnFrameCaptured(VideoCapturer*,
                                     const CapturedFrame* captured_frame) {
   if (muted_) {
@@ -482,19 +511,36 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
     VideoFrame* out_frame = NULL;
     video_adapter_.AdaptFrame(adapted_frame, &out_frame);
     if (!out_frame) {
-      return;  // VideoAdapter dropped the frame.
+      // VideoAdapter dropped the frame.
+      ++adapt_frame_drops_;
+      return;
     }
     adapted_frame = out_frame;
   }
 
   if (!muted_ && !ApplyProcessors(adapted_frame)) {
     // Processor dropped the frame.
+    ++effect_frame_drops_;
     return;
   }
   if (muted_) {
     adapted_frame->SetToBlack();
   }
   SignalVideoFrame(this, adapted_frame);
+
+  double time_now = frame_length_time_reporter_.TimerNow();
+  if (previous_frame_time_ != 0.0) {
+    // Update stats protected from jmi data fetches.
+    talk_base::CritScope cs(&frame_stats_crit_);
+
+    adapt_frame_drops_data_.AddSample(adapt_frame_drops_);
+    effect_frame_drops_data_.AddSample(effect_frame_drops_);
+    frame_time_data_.AddSample(time_now - previous_frame_time_);
+  }
+  previous_frame_time_ = time_now;
+  effect_frame_drops_ = 0;
+  adapt_frame_drops_ = 0;
+
 #endif  // VIDEO_FRAME_NAME
 }
 
@@ -669,4 +715,14 @@ bool VideoCapturer::ShouldFilterFormat(const VideoFormat& format) const {
          format.height > max_format_->height;
 }
 
+template<class T>
+void VideoCapturer::GetVariableSnapshot(
+    const talk_base::RollingAccumulator<T>& data,
+    VariableInfo<T>* stats) {
+  stats->max_val = data.ComputeMax();
+  stats->mean = data.ComputeMean();
+  stats->min_val = data.ComputeMin();
+  stats->variance = data.ComputeVariance();
+}
+
 }  // namespace cricket
@@ -34,9 +34,12 @@
 #include "talk/base/basictypes.h"
 #include "talk/base/criticalsection.h"
 #include "talk/base/messagehandler.h"
+#include "talk/base/rollingaccumulator.h"
 #include "talk/base/scoped_ptr.h"
 #include "talk/base/sigslot.h"
 #include "talk/base/thread.h"
+#include "talk/base/timing.h"
+#include "talk/media/base/mediachannel.h"
 #include "talk/media/base/videoadapter.h"
 #include "talk/media/base/videocommon.h"
 #include "talk/media/devices/devicemanager.h"
@@ -286,6 +289,13 @@ class VideoCapturer
     return &video_adapter_;
   }
 
+  // Gets statistics for tracked variables recorded since the last call to
+  // GetStats.  Note that calling GetStats resets any gathered data so it
+  // should be called only periodically to log statistics.
+  void GetStats(VariableInfo<int>* adapt_drop_stats,
+                VariableInfo<int>* effect_drop_stats,
+                VariableInfo<double>* frame_time_stats);
+
  protected:
   // Callback attached to SignalFrameCaptured where SignalVideoFrames is called.
   void OnFrameCaptured(VideoCapturer* video_capturer,
@@ -338,6 +348,13 @@ class VideoCapturer
   // Returns true if format doesn't fulfill all applied restrictions.
   bool ShouldFilterFormat(const VideoFormat& format) const;
 
+  // Helper function to save statistics on the current data from a
+  // RollingAccumulator into stats.
+  template<class T>
+  static void GetVariableSnapshot(
+      const talk_base::RollingAccumulator<T>& data,
+      VariableInfo<T>* stats);
+
   talk_base::Thread* thread_;
   std::string id_;
   CaptureState capture_state_;
@@ -359,6 +376,16 @@ class VideoCapturer
   bool enable_video_adapter_;
   CoordinatedVideoAdapter video_adapter_;
 
+  talk_base::Timing frame_length_time_reporter_;
+  talk_base::CriticalSection frame_stats_crit_;
+
+  int adapt_frame_drops_;
+  talk_base::RollingAccumulator<int> adapt_frame_drops_data_;
+  int effect_frame_drops_;
+  talk_base::RollingAccumulator<int> effect_frame_drops_data_;
+  double previous_frame_time_;
+  talk_base::RollingAccumulator<double> frame_time_data_;
+
   talk_base::CriticalSection crit_;
   VideoProcessors video_processors_;
 
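The videocapturer.cc/.h hunks above accumulate per-frame statistics (adapter drops, effect/processor drops, and inter-frame times) into RollingAccumulators guarded by frame_stats_crit_, and expose them through GetStats(), which snapshots the data into VariableInfo<T> and then resets the accumulators. A minimal sketch of a periodic caller, assuming only the GetStats() signature declared above; the logging itself is illustrative:

    #include "talk/base/logging.h"
    #include "talk/media/base/videocapturer.h"

    void LogCapturerStats(cricket::VideoCapturer* capturer) {
      cricket::VariableInfo<int> adapt_drops;
      cricket::VariableInfo<int> effect_drops;
      cricket::VariableInfo<double> frame_time;
      // GetStats() resets the accumulators, so call it only at the logging interval.
      capturer->GetStats(&adapt_drops, &effect_drops, &frame_time);
      LOG(LS_INFO) << "frame time: mean=" << frame_time.mean
                   << " var=" << frame_time.variance
                   << " min=" << frame_time.min_val
                   << " max=" << frame_time.max_val;
    }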
@@ -473,19 +473,30 @@ class VideoMediaChannelTest : public testing::Test,
                                 cricket::FOURCC_I420);
     EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(format));
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, format));
   }
+  // Utility method to setup an additional stream to send and receive video.
+  // Used to test send and recv between two streams.
   void SetUpSecondStream() {
-    EXPECT_TRUE(channel_->AddRecvStream(
-        cricket::StreamParams::CreateLegacy(kSsrc)));
+    SetUpSecondStreamWithNoRecv();
+    // Setup recv for second stream.
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    // Make the second renderer available for use by a new stream.
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+  }
+  // Setup an additional stream just to send video. Defer add recv stream.
+  // This is required if you want to test unsignalled recv of video rtp packets.
+  void SetUpSecondStreamWithNoRecv() {
     // SetUp() already added kSsrc make sure duplicate SSRCs cant be added.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc)));
     EXPECT_FALSE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(kSsrc)));
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    // We dont add recv for the second stream.
+
+    // Setup the receive and renderer for second stream after send.
     video_capturer_2_.reset(new cricket::FakeVideoCapturer());
     cricket::VideoFormat format(640, 480,
                                 cricket::VideoFormat::FpsToInterval(30),
@@ -493,9 +504,6 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(format));
 
     EXPECT_TRUE(channel_->SetCapturer(kSsrc + 2, video_capturer_2_.get()));
-    // Make the second renderer available for use by a new stream.
-    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc + 2, format));
   }
   virtual void TearDown() {
     channel_.reset();
@@ -718,7 +726,6 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
     EXPECT_FALSE(channel_->sending());
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->sending());
     EXPECT_TRUE(SendFrame());
@@ -755,7 +762,6 @@ class VideoMediaChannelTest : public testing::Test,
|
|||||||
// Tests that we can send and receive frames.
|
// Tests that we can send and receive frames.
|
||||||
void SendAndReceive(const cricket::VideoCodec& codec) {
|
void SendAndReceive(const cricket::VideoCodec& codec) {
|
||||||
EXPECT_TRUE(SetOneCodec(codec));
|
EXPECT_TRUE(SetOneCodec(codec));
|
||||||
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
|
|
||||||
EXPECT_TRUE(SetSend(true));
|
EXPECT_TRUE(SetSend(true));
|
||||||
EXPECT_TRUE(channel_->SetRender(true));
|
EXPECT_TRUE(channel_->SetRender(true));
|
||||||
EXPECT_EQ(0, renderer_.num_rendered_frames());
|
EXPECT_EQ(0, renderer_.num_rendered_frames());
|
||||||
@@ -768,7 +774,6 @@ class VideoMediaChannelTest : public testing::Test,
|
|||||||
void SendManyResizeOnce() {
|
void SendManyResizeOnce() {
|
||||||
cricket::VideoCodec codec(DefaultCodec());
|
cricket::VideoCodec codec(DefaultCodec());
|
||||||
EXPECT_TRUE(SetOneCodec(codec));
|
EXPECT_TRUE(SetOneCodec(codec));
|
||||||
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
|
|
||||||
EXPECT_TRUE(SetSend(true));
|
EXPECT_TRUE(SetSend(true));
|
||||||
EXPECT_TRUE(channel_->SetRender(true));
|
EXPECT_TRUE(channel_->SetRender(true));
|
||||||
EXPECT_EQ(0, renderer_.num_rendered_frames());
|
EXPECT_EQ(0, renderer_.num_rendered_frames());
|
||||||
@@ -783,7 +788,6 @@ class VideoMediaChannelTest : public testing::Test,
|
|||||||
codec.width /= 2;
|
codec.width /= 2;
|
||||||
codec.height /= 2;
|
codec.height /= 2;
|
||||||
EXPECT_TRUE(SetOneCodec(codec));
|
EXPECT_TRUE(SetOneCodec(codec));
|
||||||
EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
|
|
||||||
EXPECT_TRUE(WaitAndSendFrame(30));
|
EXPECT_TRUE(WaitAndSendFrame(30));
|
||||||
EXPECT_FRAME_WAIT(3, codec.width, codec.height, kTimeout);
|
EXPECT_FRAME_WAIT(3, codec.width, codec.height, kTimeout);
|
||||||
EXPECT_EQ(2, renderer_.num_set_sizes());
|
EXPECT_EQ(2, renderer_.num_set_sizes());
|
||||||
@@ -800,6 +804,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_EQ(NumRtpPackets(), info.senders[0].packets_sent);
     EXPECT_EQ(0.0, info.senders[0].fraction_lost);
     EXPECT_EQ(0, info.senders[0].firs_rcvd);
+    EXPECT_EQ(0, info.senders[0].plis_rcvd);
     EXPECT_EQ(0, info.senders[0].nacks_rcvd);
     EXPECT_EQ(DefaultCodec().width, info.senders[0].send_frame_width);
     EXPECT_EQ(DefaultCodec().height, info.senders[0].send_frame_height);
@@ -816,6 +821,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_EQ(0, info.receivers[0].packets_lost);
     EXPECT_EQ(0, info.receivers[0].packets_concealed);
     EXPECT_EQ(0, info.receivers[0].firs_sent);
+    EXPECT_EQ(0, info.receivers[0].plis_sent);
     EXPECT_EQ(0, info.receivers[0].nacks_sent);
     EXPECT_EQ(DefaultCodec().width, info.receivers[0].frame_width);
     EXPECT_EQ(DefaultCodec().height, info.receivers[0].frame_height);
@@ -858,6 +864,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_EQ(NumRtpPackets(), info.senders[0].packets_sent);
     EXPECT_EQ(0.0, info.senders[0].fraction_lost);
     EXPECT_EQ(0, info.senders[0].firs_rcvd);
+    EXPECT_EQ(0, info.senders[0].plis_rcvd);
     EXPECT_EQ(0, info.senders[0].nacks_rcvd);
     EXPECT_EQ(DefaultCodec().width, info.senders[0].send_frame_width);
     EXPECT_EQ(DefaultCodec().height, info.senders[0].send_frame_height);
@@ -874,6 +881,7 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_EQ(0, info.receivers[i].packets_lost);
     EXPECT_EQ(0, info.receivers[i].packets_concealed);
     EXPECT_EQ(0, info.receivers[i].firs_sent);
+    EXPECT_EQ(0, info.receivers[i].plis_sent);
     EXPECT_EQ(0, info.receivers[i].nacks_sent);
     EXPECT_EQ(DefaultCodec().width, info.receivers[i].frame_width);
     EXPECT_EQ(DefaultCodec().height, info.receivers[i].frame_height);
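
The plis_rcvd and plis_sent expectations added above count Picture Loss Indication messages, the RTCP payload-specific feedback (RFC 4585) a video receiver sends to ask the sender for a new key frame; the sender therefore reports PLIs received and the receiver reports PLIs sent, alongside the existing FIR and NACK counters. As a rough illustration of how a caller might read these fields off the media info structs, here is a minimal sketch; the PrintFeedbackCounters helper is hypothetical and only assumes the member names used in the expectations above.

    // Illustrative only: dump the new PLI counters next to FIR/NACK when
    // inspecting media info in a test or debug hook.
    #include <iostream>

    template <typename SenderInfo, typename ReceiverInfo>
    void PrintFeedbackCounters(const SenderInfo& sender,
                               const ReceiverInfo& receiver) {
      // Sender side: feedback received from the remote receiver.
      std::cout << "firs_rcvd=" << sender.firs_rcvd
                << " plis_rcvd=" << sender.plis_rcvd
                << " nacks_rcvd=" << sender.nacks_rcvd << "\n";
      // Receiver side: feedback this endpoint generated locally.
      std::cout << "firs_sent=" << receiver.firs_sent
                << " plis_sent=" << receiver.plis_sent
                << " nacks_sent=" << receiver.nacks_sent << "\n";
    }
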
@@ -893,7 +901,6 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(1234)));
     channel_->UpdateAspectRatio(640, 400);
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(SendFrame());
@@ -914,7 +921,6 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(5678)));
     EXPECT_TRUE(channel_->SetCapturer(5678, capturer.get()));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(5678, format));
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(5678)));
     EXPECT_TRUE(channel_->SetRenderer(5678, &renderer1));
@@ -997,7 +1003,6 @@ class VideoMediaChannelTest : public testing::Test,
     talk_base::SetBE32(packet1.data() + 8, kSsrc);
     channel_->SetRenderer(0, NULL);
     EXPECT_TRUE(SetDefaultCodec());
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1021,7 +1026,6 @@ class VideoMediaChannelTest : public testing::Test,
   // Tests setting up and configuring a send stream.
   void AddRemoveSendStreams() {
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(SendFrame());
@@ -1168,7 +1172,6 @@ class VideoMediaChannelTest : public testing::Test,
   void AddRemoveRecvStreamAndRender() {
     cricket::FakeVideoRenderer renderer1;
     EXPECT_TRUE(SetDefaultCodec());
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(channel_->AddRecvStream(
@@ -1213,7 +1216,6 @@ class VideoMediaChannelTest : public testing::Test,
     cricket::VideoOptions vmo;
     vmo.conference_mode.Set(true);
     EXPECT_TRUE(channel_->SetOptions(vmo));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(channel_->AddRecvStream(
@@ -1251,7 +1253,6 @@ class VideoMediaChannelTest : public testing::Test,
     codec.height = 240;
     const int time_between_send = TimeBetweenSend(codec);
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1273,7 +1274,6 @@ class VideoMediaChannelTest : public testing::Test,
     int captured_frames = 1;
     for (int iterations = 0; iterations < 2; ++iterations) {
       EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
-      EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
       talk_base::Thread::Current()->ProcessMessages(time_between_send);
       EXPECT_TRUE(capturer->CaptureCustomFrame(format.width, format.height,
                                                cricket::FOURCC_I420));
@@ -1313,7 +1313,6 @@ class VideoMediaChannelTest : public testing::Test,
   // added, the plugin shouldn't crash (and no black frame should be sent).
   void RemoveCapturerWithoutAdd() {
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1375,8 +1374,6 @@ class VideoMediaChannelTest : public testing::Test,
     // TODO(hellner): this seems like an unnecessary constraint, fix it.
     EXPECT_TRUE(channel_->SetCapturer(1, capturer1.get()));
     EXPECT_TRUE(channel_->SetCapturer(2, capturer2.get()));
-    EXPECT_TRUE(SetSendStreamFormat(1, DefaultCodec()));
-    EXPECT_TRUE(SetSendStreamFormat(2, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     // Test capturer associated with engine.
@@ -1409,7 +1406,6 @@ class VideoMediaChannelTest : public testing::Test,
 
     cricket::VideoCodec codec(DefaultCodec());
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
 
     cricket::FakeVideoRenderer renderer;
@@ -1435,7 +1431,6 @@ class VideoMediaChannelTest : public testing::Test,
     // Capture frame to not get same frame timestamps as previous capturer.
     capturer->CaptureFrame();
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, capture_format));
     EXPECT_TRUE(talk_base::Thread::Current()->ProcessMessages(30));
     EXPECT_TRUE(capturer->CaptureCustomFrame(kWidth, kHeight,
                                              cricket::FOURCC_ARGB));
@@ -1455,7 +1450,6 @@ class VideoMediaChannelTest : public testing::Test,
     codec.height /= 2;
     // Adapt the resolution.
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
   }
@@ -1469,7 +1463,6 @@ class VideoMediaChannelTest : public testing::Test,
     codec.height /= 2;
     // Adapt the resolution.
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
   }
@@ -1604,7 +1597,6 @@ class VideoMediaChannelTest : public testing::Test,
                                 cricket::VideoFormat::FpsToInterval(30),
                                 cricket::FOURCC_I420));
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, &video_capturer));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(frame_count, renderer_.num_rendered_frames());
@@ -1704,6 +1696,121 @@ class VideoMediaChannelTest : public testing::Test,
     EXPECT_TRUE(channel_->RemoveSendStream(0));
   }
 
+  // Tests that we can send and receive frames with early receive.
+  void TwoStreamsSendAndUnsignalledRecv(const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.conference_mode.Set(true);
+    vmo.unsignalled_recv_stream_limit.Set(1);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Test sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    // The first send is not expected to yield frames, because the ssrc
+    // is not signalled yet. With unsignalled recv enabled, we will drop frames
+    // instead of packets.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(10);
+    // Test sending and receiving on second stream.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+    SendFrame();
+    EXPECT_EQ_WAIT(2, renderer_.num_rendered_frames(), kTimeout);
+    EXPECT_EQ(4, NumRtpPackets());
+    // The second send is expected to yield a frame as the ssrc is signalled
+    // now. Decode should succeed here, though we received the key frame
+    // earlier. Without early recv, we would have dropped it and decoding
+    // would have failed.
+    EXPECT_EQ_WAIT(1, renderer2_.num_rendered_frames(), kTimeout);
+  }
+
+  // Tests that we cannot receive key frames with unsignalled recv disabled.
+  void TwoStreamsSendAndFailUnsignalledRecv(const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.unsignalled_recv_stream_limit.Set(0);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Test sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    EXPECT_EQ_WAIT(0, renderer2_.num_rendered_frames(), kTimeout);
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(10);
+    // Test sending and receiving on second stream.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+    SendFrame();
+    EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= 1, kTimeout);
+    EXPECT_EQ_WAIT(4, NumRtpPackets(), kTimeout);
+    // We don't expect any frames here, because the key frame would have been
+    // lost in the earlier packet. This is the case we want to solve with early
+    // receive.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+  }
+
+  // Tests that we drop key frames when conference mode is disabled and we
+  // receive rtp packets on unsignalled streams.
+  void TwoStreamsSendAndFailUnsignalledRecvInOneToOne(
+      const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.conference_mode.Set(false);
+    vmo.unsignalled_recv_stream_limit.Set(1);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Test sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    EXPECT_EQ_WAIT(0, renderer2_.num_rendered_frames(), kTimeout);
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(10);
+    // Test sending and receiving on second stream.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+    SendFrame();
+    EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= 1, kTimeout);
+    EXPECT_EQ_WAIT(4, NumRtpPackets(), kTimeout);
+    // We don't expect any frames here, because the key frame would have been
+    // lost in the earlier packet. This is the case we want to solve with early
+    // receive.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+  }
+
+  // Tests that we drop key frames when conference mode is enabled and we
+  // receive rtp packets on unsignalled streams. Removal of an unsignalled recv
+  // stream is successful.
+  void TwoStreamsAddAndRemoveUnsignalledRecv(
+      const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.conference_mode.Set(true);
+    vmo.unsignalled_recv_stream_limit.Set(1);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    // The first send is not expected to yield frames, because the ssrc
+    // is not signalled yet. With unsignalled recv enabled, we will drop frames
+    // instead of packets.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(100);
+    // Ensure that we can remove the unsignalled recv stream that was created
+    // when the first video packet with unsignalled recv ssrc is received.
+    EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc + 2));
+  }
+
   VideoEngineOverride<E> engine_;
   talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_;
   talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_2_;
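
The four helpers added above all follow the same outline: configure conference mode and the unsignalled-recv limit, send on a second, not-yet-signalled SSRC, and then check whether the key frame that arrived early is still decodable once the stream is signalled. A toy, self-contained sketch of the behavioural difference the tests assert; it assumes nothing about the real channel classes, and EarlyRecvDemo and its members are made up for illustration.

    #include <cstdint>
    #include <iostream>
    #include <map>

    struct EarlyRecvDemo {
      bool allow_unsignalled = true;
      std::map<uint32_t, int> packets_by_ssrc;  // stand-in for per-ssrc channels

      void OnPacket(uint32_t ssrc) {
        if (packets_by_ssrc.count(ssrc) || allow_unsignalled) {
          ++packets_by_ssrc[ssrc];  // kept; decodable once the ssrc is signalled
        }
        // else: handled by the default channel, i.e. lost for this ssrc.
      }

      int AddRecvStream(uint32_t ssrc) { return packets_by_ssrc[ssrc]; }
    };

    int main() {
      EarlyRecvDemo with_early, without_early;
      without_early.allow_unsignalled = false;
      with_early.OnPacket(1234);     // key frame arrives before signalling
      without_early.OnPacket(1234);
      std::cout << with_early.AddRecvStream(1234) << "\n";     // 1: still there
      std::cout << without_early.AddRecvStream(1234) << "\n";  // 0: lost
    }
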
@@ -209,8 +209,14 @@ bool FileVideoCapturer::Init(const Device& device) {
   std::vector<VideoFormat> supported;
   supported.push_back(format);
 
+  // TODO(thorcarpenter): Report the actual file video format as the supported
+  // format. Do not use kMinimumInterval as it conflicts with video adaptation.
   SetId(device.id);
   SetSupportedFormats(supported);
+
+  // TODO(wuwang): Design an E2E integration test for video adaptation,
+  // then remove the below call to disable the video adapter.
+  set_enable_video_adapter(false);
   return true;
 }
 
@@ -1044,6 +1044,10 @@ class FakeWebRtcVideoEngine
     channels_[channel]->transmission_smoothing_ = enable;
     return 0;
   }
+#ifdef USE_WEBRTC_DEV_BRANCH
+  WEBRTC_STUB_CONST(GetRtcpPacketTypeCounters, (int,
+      webrtc::RtcpPacketTypeCounter*, webrtc::RtcpPacketTypeCounter*));
+#endif
   WEBRTC_STUB_CONST(GetReceivedRTCPStatistics, (const int, unsigned short&,
       unsigned int&, unsigned int&, unsigned int&, int&));
   WEBRTC_STUB_CONST(GetSentRTCPStatistics, (const int, unsigned short&,
@@ -371,6 +371,13 @@ class FakeWebRtcVoiceEngine
   }
   WEBRTC_FUNC(SetSendCodec, (int channel, const webrtc::CodecInst& codec)) {
     WEBRTC_CHECK_CHANNEL(channel);
+    // To match the behavior of the real implementation.
+    if (_stricmp(codec.plname, "telephone-event") == 0 ||
+        _stricmp(codec.plname, "audio/telephone-event") == 0 ||
+        _stricmp(codec.plname, "CN") == 0 ||
+        _stricmp(codec.plname, "red") == 0) {
+      return -1;
+    }
     channels_[channel]->send_codec = codec;
     return 0;
   }
@@ -317,8 +317,7 @@ class WebRtcDecoderObserver : public webrtc::ViEDecoderObserver {
         target_delay_ms_(0),
         jitter_buffer_ms_(0),
         min_playout_delay_ms_(0),
-        render_delay_ms_(0),
-        firs_requested_(0) {
+        render_delay_ms_(0) {
   }
 
   // virtual functions from VieDecoderObserver.
@@ -350,16 +349,11 @@ class WebRtcDecoderObserver : public webrtc::ViEDecoderObserver {
     render_delay_ms_ = render_delay_ms;
   }
 
-  virtual void RequestNewKeyFrame(const int videoChannel) {
-    talk_base::CritScope cs(&crit_);
-    ASSERT(video_channel_ == videoChannel);
-    ++firs_requested_;
-  }
+  virtual void RequestNewKeyFrame(const int videoChannel) {}
 
   // Populate |rinfo| based on previously-set data in |*this|.
   void ExportTo(VideoReceiverInfo* rinfo) {
     talk_base::CritScope cs(&crit_);
-    rinfo->firs_sent = firs_requested_;
     rinfo->framerate_rcvd = framerate_;
     rinfo->decode_ms = decode_ms_;
     rinfo->max_decode_ms = max_decode_ms_;
@@ -382,7 +376,6 @@ class WebRtcDecoderObserver : public webrtc::ViEDecoderObserver {
   int jitter_buffer_ms_;
   int min_playout_delay_ms_;
   int render_delay_ms_;
-  int firs_requested_;
 };
 
 class WebRtcEncoderObserver : public webrtc::ViEEncoderObserver {
@@ -672,7 +665,6 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
     ASSERT(adapter && "Video adapter should not be null here.");
 
     UpdateAdapterCpuOptions();
-    adapter->OnOutputFormatRequest(video_format_);
 
     overuse_observer_.reset(new WebRtcOveruseObserver(adapter));
     // (Dis)connect the video adapter from the cpu monitor as appropriate.
@@ -1557,6 +1549,7 @@ WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
       remb_enabled_(false),
       render_started_(false),
       first_receive_ssrc_(0),
+      num_unsignalled_recv_channels_(0),
       send_rtx_type_(-1),
      send_red_type_(-1),
      send_fec_type_(-1),
@@ -1936,27 +1929,33 @@ bool WebRtcVideoMediaChannel::AddRecvStream(const StreamParams& sp) {
     return true;
   }
 
-  if (recv_channels_.find(sp.first_ssrc()) != recv_channels_.end() ||
-      first_receive_ssrc_ == sp.first_ssrc()) {
-    LOG(LS_ERROR) << "Stream already exists";
-    return false;
-  }
-
-  // TODO(perkj): Implement recv media from multiple media SSRCs per stream.
-  // NOTE: We have two SSRCs per stream when RTX is enabled.
-  if (!IsOneSsrcStream(sp)) {
-    LOG(LS_ERROR) << "WebRtcVideoMediaChannel supports one primary SSRC per"
-                  << " stream and one FID SSRC per primary SSRC.";
-    return false;
-  }
-
-  // Create a new channel for receiving video data.
-  // In order to get the bandwidth estimation work fine for
-  // receive only channels, we connect all receiving channels
-  // to our master send channel.
   int channel_id = -1;
-  if (!CreateChannel(sp.first_ssrc(), MD_RECV, &channel_id)) {
-    return false;
+  RecvChannelMap::iterator channel_iterator =
+      recv_channels_.find(sp.first_ssrc());
+  if (channel_iterator == recv_channels_.end() &&
+      first_receive_ssrc_ != sp.first_ssrc()) {
+    // TODO(perkj): Implement recv media from multiple media SSRCs per stream.
+    // NOTE: We have two SSRCs per stream when RTX is enabled.
+    if (!IsOneSsrcStream(sp)) {
+      LOG(LS_ERROR) << "WebRtcVideoMediaChannel supports one primary SSRC per"
+                    << " stream and one FID SSRC per primary SSRC.";
+      return false;
+    }
+
+    // Create a new channel for receiving video data.
+    // In order to get the bandwidth estimation work fine for
+    // receive only channels, we connect all receiving channels
+    // to our master send channel.
+    if (!CreateChannel(sp.first_ssrc(), MD_RECV, &channel_id)) {
+      return false;
+    }
+  } else {
+    // Already exists.
+    if (first_receive_ssrc_ == sp.first_ssrc()) {
+      return false;
+    }
+    // Early receive added channel.
+    channel_id = (*channel_iterator).second->channel_id();
   }
 
   // Set the corresponding RTX SSRC.
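
The rewritten AddRecvStream above no longer rejects an SSRC that already has a receive channel; if early receive created one for it, the signalled stream simply adopts that channel. A generic sketch of that find-or-adopt shape, with illustrative names rather than the libjingle types:

    #include <map>

    struct Chan { int id; };

    // Returns the channel id to use for |ssrc|, creating one only when needed.
    int FindOrAdoptRecvChannel(std::map<unsigned int, Chan>& recv_channels,
                               unsigned int ssrc,
                               int next_id) {
      std::map<unsigned int, Chan>::iterator it = recv_channels.find(ssrc);
      if (it == recv_channels.end()) {
        recv_channels[ssrc] = Chan{next_id};  // brand-new signalled stream
        return next_id;
      }
      return it->second.id;                   // adopt the early-receive channel
    }
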
@@ -2327,12 +2326,18 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
     sinfo.packets_cached = -1;
     sinfo.packets_lost = -1;
     sinfo.fraction_lost = -1;
-    sinfo.firs_rcvd = -1;
-    sinfo.nacks_rcvd = -1;
     sinfo.rtt_ms = -1;
     sinfo.input_frame_width = static_cast<int>(channel_stream_info->width());
     sinfo.input_frame_height =
         static_cast<int>(channel_stream_info->height());
+
+    VideoCapturer* video_capturer = send_channel->video_capturer();
+    if (video_capturer) {
+      video_capturer->GetStats(&sinfo.adapt_frame_drops,
+                               &sinfo.effects_frame_drops,
+                               &sinfo.capturer_frame_time);
+    }
+
     webrtc::VideoCodec vie_codec;
     if (engine()->vie()->codec()->GetSendCodec(channel_id, vie_codec) == 0) {
       sinfo.send_frame_width = vie_codec.width;
@@ -2368,6 +2373,26 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
       sinfo.capture_queue_delay_ms_per_s = capture_queue_delay_ms_per_s;
     }
 
+#ifdef USE_WEBRTC_DEV_BRANCH
+    webrtc::RtcpPacketTypeCounter rtcp_sent;
+    webrtc::RtcpPacketTypeCounter rtcp_received;
+    if (engine()->vie()->rtp()->GetRtcpPacketTypeCounters(
+        channel_id, &rtcp_sent, &rtcp_received) == 0) {
+      sinfo.firs_rcvd = rtcp_received.fir_packets;
+      sinfo.plis_rcvd = rtcp_received.pli_packets;
+      sinfo.nacks_rcvd = rtcp_received.nack_packets;
+    } else {
+      sinfo.firs_rcvd = -1;
+      sinfo.plis_rcvd = -1;
+      sinfo.nacks_rcvd = -1;
+      LOG_RTCERR1(GetRtcpPacketTypeCounters, channel_id);
+    }
+#else
+    sinfo.firs_rcvd = -1;
+    sinfo.plis_rcvd = -1;
+    sinfo.nacks_rcvd = -1;
+#endif
+
     // Get received RTCP statistics for the sender (reported by the remote
     // client in a RTCP packet), if available.
     // It's not a fatal error if we can't, since RTCP may not have arrived
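
Note the direction of the counters read above: FIR, PLI and NACK are feedback messages generated by the receiving side, so the local sender's numbers come from the rtcp_received counter set, while the per-receiver block later in this function reads rtcp_sent. A small sketch of that mapping, using a stand-in struct in place of webrtc::RtcpPacketTypeCounter and a hypothetical FromReceivedRtcp helper:

    // Only the fields used in the patch above.
    struct Counters {
      int fir_packets = 0;
      int pli_packets = 0;
      int nack_packets = 0;
    };

    struct SenderFeedbackStats { int firs_rcvd; int plis_rcvd; int nacks_rcvd; };

    SenderFeedbackStats FromReceivedRtcp(const Counters& rtcp_received) {
      // What the remote receiver asked of us: key frames (FIR/PLI) and resends.
      return {rtcp_received.fir_packets,
              rtcp_received.pli_packets,
              rtcp_received.nack_packets};
    }
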
@@ -2425,10 +2450,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
   unsigned int estimated_recv_bandwidth = 0;
   for (RecvChannelMap::const_iterator it = recv_channels_.begin();
        it != recv_channels_.end(); ++it) {
-    // Don't report receive statistics from the default channel if we have
-    // specified receive channels.
-    if (it->first == 0 && recv_channels_.size() > 1)
-      continue;
     WebRtcVideoChannelRecvInfo* channel = it->second;
 
     unsigned int ssrc;
@@ -2453,7 +2474,6 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
     rinfo.packets_lost = -1;
     rinfo.packets_concealed = -1;
     rinfo.fraction_lost = -1;  // from SentRTCP
-    rinfo.nacks_sent = -1;
     rinfo.frame_width = channel->render_adapter()->width();
     rinfo.frame_height = channel->render_adapter()->height();
     int fps = channel->render_adapter()->framerate();
@@ -2461,6 +2481,26 @@ bool WebRtcVideoMediaChannel::GetStats(const StatsOptions& options,
     rinfo.framerate_output = fps;
     channel->decoder_observer()->ExportTo(&rinfo);
 
+#ifdef USE_WEBRTC_DEV_BRANCH
+    webrtc::RtcpPacketTypeCounter rtcp_sent;
+    webrtc::RtcpPacketTypeCounter rtcp_received;
+    if (engine()->vie()->rtp()->GetRtcpPacketTypeCounters(
+        channel->channel_id(), &rtcp_sent, &rtcp_received) == 0) {
+      rinfo.firs_sent = rtcp_sent.fir_packets;
+      rinfo.plis_sent = rtcp_sent.pli_packets;
+      rinfo.nacks_sent = rtcp_sent.nack_packets;
+    } else {
+      rinfo.firs_sent = -1;
+      rinfo.plis_sent = -1;
+      rinfo.nacks_sent = -1;
+      LOG_RTCERR1(GetRtcpPacketTypeCounters, channel->channel_id());
+    }
+#else
+    rinfo.firs_sent = -1;
+    rinfo.plis_sent = -1;
+    rinfo.nacks_sent = -1;
+#endif
+
     // Get our locally created statistics of the received RTP stream.
     webrtc::RtcpStatistics incoming_stream_rtcp_stats;
     int incoming_stream_rtt_ms;
@@ -2558,13 +2598,18 @@ void WebRtcVideoMediaChannel::OnPacketReceived(
   uint32 ssrc = 0;
   if (!GetRtpSsrc(packet->data(), packet->length(), &ssrc))
     return;
-  int which_channel = GetRecvChannelNum(ssrc);
-  if (which_channel == -1) {
-    which_channel = video_channel();
+  int processing_channel = GetRecvChannelNum(ssrc);
+  if (processing_channel == -1) {
+    // Allocate an unsignalled recv channel for processing in conference mode.
+    if (!InConferenceMode() ||
+        !CreateUnsignalledRecvChannel(ssrc, &processing_channel)) {
+      // If we can't find or allocate one, use the default.
+      processing_channel = video_channel();
+    }
   }
 
   engine()->vie()->network()->ReceivedRTPPacket(
-      which_channel,
+      processing_channel,
       packet->data(),
      static_cast<int>(packet->length()),
      webrtc::PacketTime(packet_time.timestamp, packet_time.not_before));
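
The routing change in OnPacketReceived above is effectively a three-way choice. A condensed sketch of that decision, written as a hypothetical free function rather than the actual member function:

    // Returns the channel that should process an incoming RTP packet.
    int PickProcessingChannel(int known_channel,
                              bool in_conference_mode,
                              bool unsignalled_channel_created,
                              int unsignalled_channel,
                              int default_channel) {
      if (known_channel != -1)
        return known_channel;            // ssrc already has a recv channel
      if (in_conference_mode && unsignalled_channel_created)
        return unsignalled_channel;      // early-receive channel just created
      return default_channel;            // fall back to the default channel
    }
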
@@ -3101,6 +3146,22 @@ bool WebRtcVideoMediaChannel::CreateChannel(uint32 ssrc_key,
   return true;
 }
 
+bool WebRtcVideoMediaChannel::CreateUnsignalledRecvChannel(
+    uint32 ssrc_key, int* out_channel_id) {
+  int unsignalled_recv_channel_limit =
+      options_.unsignalled_recv_stream_limit.GetWithDefaultIfUnset(
+          kNumDefaultUnsignalledVideoRecvStreams);
+  if (num_unsignalled_recv_channels_ >= unsignalled_recv_channel_limit) {
+    return false;
+  }
+  if (!CreateChannel(ssrc_key, MD_RECV, out_channel_id)) {
+    return false;
+  }
+  // TODO(tvsriram): Support dynamic sizing of unsignalled recv channels.
+  num_unsignalled_recv_channels_++;
+  return true;
+}
+
 bool WebRtcVideoMediaChannel::ConfigureChannel(int channel_id,
                                                MediaDirection direction,
                                                uint32 ssrc_key) {
@@ -323,6 +323,7 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
   // returning false.
   bool CreateChannel(uint32 ssrc_key, MediaDirection direction,
                      int* channel_id);
+  bool CreateUnsignalledRecvChannel(uint32 ssrc_key, int* channel_id);
   bool ConfigureChannel(int channel_id, MediaDirection direction,
                         uint32 ssrc_key);
   bool ConfigureReceiving(int channel_id, uint32 remote_ssrc_key);
@@ -431,6 +432,7 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
   bool render_started_;
   uint32 first_receive_ssrc_;
   std::vector<RtpHeaderExtension> receive_extensions_;
+  int num_unsignalled_recv_channels_;
 
   // Global send side state.
   SendChannelMap send_channels_;
@@ -1292,7 +1292,6 @@ TEST_F(WebRtcVideoEngineTestFake, MultipleSendStreamsWithOneCapturer) {
         cricket::StreamParams::CreateLegacy(kSsrcs2[i])));
     // Register the capturer to the ssrc.
     EXPECT_TRUE(channel_->SetCapturer(kSsrcs2[i], &capturer));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrcs2[i], capture_format_vga));
   }
 
   const int channel0 = vie_.GetChannelFromLocalSsrc(kSsrcs2[0]);
@@ -1937,10 +1936,6 @@ TEST_F(WebRtcVideoMediaChannelTest, SendVp8HdAndReceiveAdaptedVp8Vga) {
   EXPECT_TRUE(SetOneCodec(codec));
   codec.width /= 2;
   codec.height /= 2;
-  EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, cricket::VideoFormat(
-      codec.width, codec.height,
-      cricket::VideoFormat::FpsToInterval(codec.framerate),
-      cricket::FOURCC_ANY)));
   EXPECT_TRUE(SetSend(true));
   EXPECT_TRUE(channel_->SetRender(true));
   EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -2099,3 +2094,26 @@ TEST_F(WebRtcVideoMediaChannelTest, TwoStreamsReUseFirstStream) {
   Base::TwoStreamsReUseFirstStream(cricket::VideoCodec(100, "VP8", 640, 400, 30,
                                                        0));
 }
+
+TEST_F(WebRtcVideoMediaChannelTest, TwoStreamsSendAndUnsignalledRecv) {
+  Base::TwoStreamsSendAndUnsignalledRecv(cricket::VideoCodec(100, "VP8", 640,
+                                                             400, 30, 0));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, TwoStreamsSendAndFailUnsignalledRecv) {
+  Base::TwoStreamsSendAndFailUnsignalledRecv(
+      cricket::VideoCodec(100, "VP8", 640, 400, 30, 0));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest,
+       TwoStreamsSendAndFailUnsignalledRecvInOneToOne) {
+  Base::TwoStreamsSendAndFailUnsignalledRecvInOneToOne(
+      cricket::VideoCodec(100, "VP8", 640, 400, 30, 0));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest,
+       TwoStreamsAddAndRemoveUnsignalledRecv) {
+  Base::TwoStreamsAddAndRemoveUnsignalledRecv(cricket::VideoCodec(100, "VP8",
+                                                                  640, 400, 30,
+                                                                  0));
+}
@@ -194,6 +194,18 @@ static bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
   return false;
 }
 
+static bool IsTelephoneEventCodec(const std::string& name) {
+  return _stricmp(name.c_str(), "telephone-event") == 0;
+}
+
+static bool IsCNCodec(const std::string& name) {
+  return _stricmp(name.c_str(), "CN") == 0;
+}
+
+static bool IsRedCodec(const std::string& name) {
+  return _stricmp(name.c_str(), "red") == 0;
+}
+
 static bool FindCodec(const std::vector<AudioCodec>& codecs,
                       const AudioCodec& codec,
                       AudioCodec* found_codec) {
@@ -1966,10 +1978,11 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
 
   // Scan through the list to figure out the codec to use for sending, along
   // with the proper configuration for VAD and DTMF.
-  bool first = true;
+  bool found_send_codec = false;
   webrtc::CodecInst send_codec;
   memset(&send_codec, 0, sizeof(send_codec));
 
+  // Set send codec (the first non-telephone-event/CN codec).
   for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
        it != codecs.end(); ++it) {
     // Ignore codecs we don't know about. The negotiation step should prevent
@@ -1980,6 +1993,11 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
       continue;
     }
 
+    if (IsTelephoneEventCodec(it->name) || IsCNCodec(it->name)) {
+      // Skip telephone-event/CN codec, which will be handled later.
+      continue;
+    }
+
     // If OPUS, change what we send according to the "stereo" codec
     // parameter, and not the "channels" parameter. We set
     // voe_codec.channels to 2 if "stereo=1" and 1 otherwise. If
@@ -2015,21 +2033,73 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
       }
     }
 
+    // We'll use the first codec in the list to actually send audio data.
+    // Be sure to use the payload type requested by the remote side.
+    // "red", for FEC audio, is a special case where the actual codec to be
+    // used is specified in params.
+    if (IsRedCodec(it->name)) {
+      // Parse out the RED parameters. If we fail, just ignore RED;
+      // we don't support all possible params/usage scenarios.
+      if (!GetRedSendCodec(*it, codecs, &send_codec)) {
+        continue;
+      }
+
+      // Enable redundant encoding of the specified codec. Treat any
+      // failure as a fatal internal error.
+      LOG(LS_INFO) << "Enabling FEC";
+      if (engine()->voe()->rtp()->SetFECStatus(channel, true, it->id) == -1) {
+        LOG_RTCERR3(SetFECStatus, channel, true, it->id);
+        return false;
+      }
+    } else {
+      send_codec = voe_codec;
+      nack_enabled_ = IsNackEnabled(*it);
+      SetNack(channel, nack_enabled_);
+    }
+    found_send_codec = true;
+    break;
+  }
+
+  if (!found_send_codec) {
+    LOG(LS_WARNING) << "Received empty list of codecs.";
+    return false;
+  }
+
+  // Set the codec immediately, since SetVADStatus() depends on whether
+  // the current codec is mono or stereo.
+  if (!SetSendCodec(channel, send_codec))
+    return false;
+
+  // Always update the |send_codec_| to the currently set send codec.
+  send_codec_.reset(new webrtc::CodecInst(send_codec));
+
+  if (send_bw_setting_) {
+    SetSendBandwidthInternal(send_bw_bps_);
+  }
+
+  // Loop through the codecs list again to config the telephone-event/CN codec.
+  for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
+       it != codecs.end(); ++it) {
+    // Ignore codecs we don't know about. The negotiation step should prevent
+    // this, but double-check to be sure.
+    webrtc::CodecInst voe_codec;
+    if (!engine()->FindWebRtcCodec(*it, &voe_codec)) {
+      LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
+      continue;
+    }
 
     // Find the DTMF telephone event "codec" and tell VoiceEngine channels
     // about it.
-    if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
-        _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
+    if (IsTelephoneEventCodec(it->name)) {
       if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
               channel, it->id) == -1) {
         LOG_RTCERR2(SetSendTelephoneEventPayloadType, channel, it->id);
         return false;
       }
-    }
-      // Turn voice activity detection/comfort noise on if supported.
-      // Set the wideband CN payload type appropriately.
-      // (narrowband always uses the static payload type 13).
-    if (_stricmp(it->name.c_str(), "CN") == 0) {
+    } else if (IsCNCodec(it->name)) {
+      // Turn voice activity detection/comfort noise on if supported.
+      // Set the wideband CN payload type appropriately.
+      // (narrowband always uses the static payload type 13).
       webrtc::PayloadFrequencies cn_freq;
       switch (it->clockrate) {
         case 8000:
@@ -2062,7 +2132,6 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
           // send the offer.
       }
     }
-
     // Only turn on VAD if we have a CN payload type that matches the
     // clockrate for the codec we are going to use.
     if (it->clockrate == send_codec.plfreq) {
@@ -2073,54 +2142,6 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
       }
     }
   }
-
-  // We'll use the first codec in the list to actually send audio data.
-  // Be sure to use the payload type requested by the remote side.
-  // "red", for FEC audio, is a special case where the actual codec to be
-  // used is specified in params.
-  if (first) {
-    if (_stricmp(it->name.c_str(), "red") == 0) {
-      // Parse out the RED parameters. If we fail, just ignore RED;
-      // we don't support all possible params/usage scenarios.
-      if (!GetRedSendCodec(*it, codecs, &send_codec)) {
-        continue;
-      }
-
-      // Enable redundant encoding of the specified codec. Treat any
-      // failure as a fatal internal error.
-      LOG(LS_INFO) << "Enabling FEC";
-      if (engine()->voe()->rtp()->SetFECStatus(channel, true, it->id) == -1) {
-        LOG_RTCERR3(SetFECStatus, channel, true, it->id);
-        return false;
-      }
-    } else {
-      send_codec = voe_codec;
-      nack_enabled_ = IsNackEnabled(*it);
-      SetNack(channel, nack_enabled_);
-    }
-    first = false;
-    // Set the codec immediately, since SetVADStatus() depends on whether
-    // the current codec is mono or stereo.
-    if (!SetSendCodec(channel, send_codec))
-      return false;
-  }
-  }
-
-  // If we're being asked to set an empty list of codecs, due to a buggy client,
-  // choose the most common format: PCMU
-  if (first) {
-    LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
-    AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
-    engine()->FindWebRtcCodec(codec, &send_codec);
-    if (!SetSendCodec(channel, send_codec))
-      return false;
-  }
-
-  // Always update the |send_codec_| to the currently set send codec.
-  send_codec_.reset(new webrtc::CodecInst(send_codec));
-
-  if (send_bw_setting_) {
-    SetSendBandwidthInternal(send_bw_bps_);
-  }
 
   return true;
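
The SetSendCodecs rework above replaces the single pass guarded by a `first` flag with two passes: the first pass picks the send codec (skipping telephone-event and CN, resolving "red" to its wrapped codec, and failing outright on an empty or unusable list instead of silently falling back to PCMU), and the second pass configures DTMF and CN/VAD. A generic sketch of the first pass only, with made-up Codec and predicate types standing in for the real ones:

    #include <string>
    #include <vector>

    struct Codec { std::string name; int id; };

    static bool IsSignalingCodec(const std::string& name) {
      return name == "telephone-event" || name == "CN";
    }

    // Pass 1: choose the codec used to actually send audio.
    bool PickSendCodec(const std::vector<Codec>& codecs, Codec* send_codec) {
      for (const Codec& c : codecs) {
        if (IsSignalingCodec(c.name))
          continue;               // handled by the second pass
        *send_codec = c;          // "red"/FEC resolution elided for brevity
        return true;
      }
      return false;               // empty or unusable list now fails instead of
                                  // silently falling back to PCMU
    }
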
@@ -680,93 +680,67 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecs) {
|
|||||||
EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
|
EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(pthatcher): Change failure behavior to returning false rather
|
// Test that if clockrate is not 48000 for opus, we fail.
|
||||||
// than defaulting to PCMU.
|
|
||||||
// Test that if clockrate is not 48000 for opus, we fail by fallback to PCMU.
|
|
||||||
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
|
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
|
||||||
EXPECT_TRUE(SetupEngine());
|
EXPECT_TRUE(SetupEngine());
|
||||||
int channel_num = voe_.GetLastChannel();
|
|
||||||
std::vector<cricket::AudioCodec> codecs;
|
std::vector<cricket::AudioCodec> codecs;
|
||||||
codecs.push_back(kOpusCodec);
|
codecs.push_back(kOpusCodec);
|
||||||
codecs[0].bitrate = 0;
|
codecs[0].bitrate = 0;
|
||||||
codecs[0].clockrate = 50000;
|
codecs[0].clockrate = 50000;
|
||||||
EXPECT_TRUE(channel_->SetSendCodecs(codecs));
|
EXPECT_FALSE(channel_->SetSendCodecs(codecs));
|
||||||
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
 }

-// Test that if channels=0 for opus, we fail by falling back to PCMU.
+// Test that if channels=0 for opus, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0ChannelsNoStereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 0;
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }

-// Test that if channels=0 for opus, we fail by falling back to PCMU.
+// Test that if channels=0 for opus, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0Channels1Stereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 0;
   codecs[0].params["stereo"] = "1";
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }

 // Test that if channel is 1 for opus and there's no stereo, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpus1ChannelNoStereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 1;
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }

 // Test that if channel is 1 for opus and stereo=0, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel0Stereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 1;
   codecs[0].params["stereo"] = "0";
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }

 // Test that if channel is 1 for opus and stereo=1, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel1Stereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 1;
   codecs[0].params["stereo"] = "1";
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }

 // Test that with bitrate=0 and no stereo,
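The Opus tests above all moved from expecting a silent fallback to PCMU to expecting SetSendCodecs() to fail outright when the channel count is 0 or 1, with or without a "stereo" parameter. A minimal sketch of the kind of parameter check those new expectations imply is below; the struct and helper names are assumptions for illustration, not the engine's actual implementation.

#include <map>
#include <string>

// Hypothetical illustration: Opus is conventionally declared with 2 channels
// in SDP, and the optional "stereo" fmtp parameter (0/1) only selects mono
// vs. stereo encoding. Anything else is rejected instead of silently
// falling back to another codec.
struct AudioCodecDesc {
  int channels;                               // 0 and 1 are rejected for Opus.
  std::map<std::string, std::string> params;  // e.g. params["stereo"] = "1".
};

static bool IsValidOpusSendCodec(const AudioCodecDesc& codec) {
  return codec.channels == 2;
}

With a rule like this, all five cases exercised above (channels=0 with and without stereo, channels=1 with stereo absent, "0", or "1") make SetSendCodecs() return false.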
@@ -1087,11 +1061,11 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCelt) {
   int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kCeltCodec);
-  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kIsacCodec);
   codecs[0].id = 96;
   codecs[0].channels = 2;
   codecs[0].bitrate = 96000;
-  codecs[1].bitrate = 96000;
+  codecs[1].bitrate = 64000;
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   webrtc::CodecInst gcodec;
   EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
@@ -1103,10 +1077,10 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCelt) {
   codecs[0].channels = 1;
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_EQ(103, gcodec.pltype);
   EXPECT_EQ(1, gcodec.channels);
   EXPECT_EQ(64000, gcodec.rate);
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_STREQ("ISAC", gcodec.plname);
 }

 // Test that we can switch back and forth between CELT and ISAC with CN.
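In the reworked SetSendCodecsCelt expectations the secondary codec is now ISAC (payload type 103, 64000 bps) rather than PCMU, and it is what the channel ends up sending once the CELT entry is ruled out (here by forcing channels = 1). A rough sketch of that "first codec the engine can actually send" selection pattern, with hypothetical names and a simplified supported flag standing in for the real capability check, is:

#include <cstddef>
#include <vector>

// Hypothetical illustration only: walk the offered list in order and pick the
// first entry the engine can send; if none qualifies, the whole call fails.
struct Codec { int id; int channels; int bitrate; bool supported; };

static bool ChooseFirstSupported(const std::vector<Codec>& offered,
                                 Codec* chosen) {
  for (size_t i = 0; i < offered.size(); ++i) {
    if (offered[i].supported) {  // e.g. mono CELT not supported -> skipped.
      *chosen = offered[i];
      return true;               // ISAC (103) is chosen when CELT drops out.
    }
  }
  return false;                  // nothing usable: SetSendCodecs() fails.
}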
@@ -1186,21 +1160,49 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) {
   EXPECT_EQ(32000, gcodec.rate);
 }

-// Test that we fall back to PCMU if no codecs are specified.
+// Test that we fail if no codecs are specified.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
+}
+
+// Test that we can set send codecs even with telephone-event codec as the first
+// one on the list.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
   EXPECT_TRUE(SetupEngine());
   int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kTelephoneEventCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 98;  // DTMF
+  codecs[1].id = 96;
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   webrtc::CodecInst gcodec;
   EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_EQ(0, gcodec.pltype);
-  EXPECT_STREQ("PCMU", gcodec.plname);
-  EXPECT_FALSE(voe_.GetVAD(channel_num));
-  EXPECT_FALSE(voe_.GetFEC(channel_num));
-  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
-  EXPECT_EQ(105, voe_.GetSendCNPayloadType(channel_num, true));
-  EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we can set send codecs even with CN codec as the first
+// one on the list.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kCn16000Codec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 98;  // wideband CN
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_EQ(98, voe_.GetSendCNPayloadType(channel_num, true));
 }

 // Test that we set VAD and DTMF types correctly as caller.
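The two new tests, SetSendCodecsDTMFOnTop and SetSendCodecsCNOnTop, pin down that a telephone-event or CN entry at the head of the list must not become the send codec: ISAC (96) is still selected, while payload type 98 is recorded as the DTMF payload type or the wideband CN payload type respectively. A hedged sketch of that skip-the-pseudo-codecs behaviour, with invented type names and none of the real engine's plumbing, looks like this:

#include <cstddef>
#include <string>
#include <vector>

// Hypothetical illustration only: remember CN and telephone-event payload
// types as they go by, and pick the first "real" voice codec for sending.
struct OfferedCodec { std::string name; int id; };

struct SendCodecChoice {
  SendCodecChoice()
      : send_payload_type(-1), dtmf_payload_type(-1), cn_payload_type(-1) {}
  int send_payload_type;  // first non-pseudo codec, e.g. ISAC = 96.
  int dtmf_payload_type;  // from "telephone-event", e.g. 98.
  int cn_payload_type;    // from "CN", e.g. 98 for wideband CN.
};

static bool SelectSendCodec(const std::vector<OfferedCodec>& codecs,
                            SendCodecChoice* out) {
  for (size_t i = 0; i < codecs.size(); ++i) {
    const OfferedCodec& c = codecs[i];
    if (c.name == "telephone-event") {
      out->dtmf_payload_type = c.id;
    } else if (c.name == "CN") {
      out->cn_payload_type = c.id;
    } else if (out->send_payload_type == -1) {
      out->send_payload_type = c.id;  // first real codec wins.
    }
  }
  return out->send_payload_type != -1;  // empty or pseudo-only lists fail.
}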
@@ -760,9 +760,9 @@ void BaseSession::OnTransportCandidatesAllocationDone(Transport* transport) {
   // Transport, since this removes the need to manually iterate over all
   // the transports, as is needed to make sure signals are handled properly
   // when BUNDLEing.
-#if 0
-  ASSERT(!IsCandidateAllocationDone());
-#endif
+  // TODO(juberti): Per b/7998978, devs and QA are hitting this assert in ways
+  // that make it prohibitively difficult to run dbg builds. Disabled for now.
+  //ASSERT(!IsCandidateAllocationDone());
   for (TransportMap::iterator iter = transports_.begin();
        iter != transports_.end(); ++iter) {
     if (iter->second->impl() == transport) {
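The assert is no longer compiled out with #if 0; it stays in the source as a commented-out line with a TODO pointing at b/7998978. If some signal were still wanted in debug builds without making them unusable, one softer option (an assumption about an alternative, not what this commit does) is to log the condition instead, using the logging macros this file already relies on:

  // Hypothetical fragment for the same spot in
  // BaseSession::OnTransportCandidatesAllocationDone(): log rather than
  // assert, so dbg builds keep running when the condition is hit.
  if (IsCandidateAllocationDone()) {
    LOG(LS_WARNING) << "Candidate allocation already done for this session; "
                    << "ignoring duplicate allocation-done signal.";
  }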
@@ -163,6 +163,8 @@ static const RtpHeaderExtension kVideoRtpExtensionAnswer[] = {
   RtpHeaderExtension("urn:ietf:params:rtp-hdrext:toffset", 14),
 };

+static const uint32 kSimulcastParamsSsrc[] = {10, 11, 20, 21, 30, 31};
+static const uint32 kSimSsrc[] = {10, 20, 30};
 static const uint32 kFec1Ssrc[] = {10, 11};
 static const uint32 kFec2Ssrc[] = {20, 21};
 static const uint32 kFec3Ssrc[] = {30, 31};
@@ -192,6 +194,32 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
     tdf2_.set_identity(&id2_);
   }

+  // Create a video StreamParamsVec object with:
+  // - one video stream with 3 simulcast streams and FEC,
+  StreamParamsVec CreateComplexVideoStreamParamsVec() {
+    SsrcGroup sim_group("SIM", MAKE_VECTOR(kSimSsrc));
+    SsrcGroup fec_group1("FEC", MAKE_VECTOR(kFec1Ssrc));
+    SsrcGroup fec_group2("FEC", MAKE_VECTOR(kFec2Ssrc));
+    SsrcGroup fec_group3("FEC", MAKE_VECTOR(kFec3Ssrc));
+
+    std::vector<SsrcGroup> ssrc_groups;
+    ssrc_groups.push_back(sim_group);
+    ssrc_groups.push_back(fec_group1);
+    ssrc_groups.push_back(fec_group2);
+    ssrc_groups.push_back(fec_group3);
+
+    StreamParams simulcast_params;
+    simulcast_params.id = kVideoTrack1;
+    simulcast_params.ssrcs = MAKE_VECTOR(kSimulcastParamsSsrc);
+    simulcast_params.ssrc_groups = ssrc_groups;
+    simulcast_params.cname = "Video_SIM_FEC";
+    simulcast_params.sync_label = kMediaStream1;
+
+    StreamParamsVec video_streams;
+    video_streams.push_back(simulcast_params);
+
+    return video_streams;
+  }
+
   bool CompareCryptoParams(const CryptoParamsVec& c1,
                            const CryptoParamsVec& c2) {
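CreateComplexVideoStreamParamsVec() builds one video StreamParams whose six SSRCs (10, 11, 20, 21, 30, 31) are grouped as a SIM group of three primary SSRCs (10, 20, 30) plus three FEC pairs (10/11, 20/21, 30/31). A small hedged sketch of how a test might consume it follows; the semantics/ssrcs member names mirror the SsrcGroup constructor arguments above but are assumptions about its layout rather than something this diff shows.

// Hypothetical helper for a test: count simulcast layers by locating the
// "SIM" ssrc-group inside a StreamParams built by the factory above.
static size_t CountSimulcastLayers(const StreamParams& sp) {
  for (size_t i = 0; i < sp.ssrc_groups.size(); ++i) {
    if (sp.ssrc_groups[i].semantics == "SIM")
      return sp.ssrc_groups[i].ssrcs.size();  // 3 layers: ssrcs 10, 20, 30.
  }
  return sp.ssrcs.empty() ? 0 : 1;            // no SIM group: single layer.
}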
 1010  talk/session/media/planarfunctions_unittest.cc  (new file; diff suppressed because it is too large)
  615  talk/session/media/yuvscaler_unittest.cc  (new file)
@@ -0,0 +1,615 @@
/*
 * libjingle
 * Copyright 2010 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sstream>

#include "libyuv/cpu_id.h"
#include "libyuv/scale.h"
#include "talk/base/basictypes.h"
#include "talk/base/flags.h"
#include "talk/base/gunit.h"
#include "talk/base/scoped_ptr.h"
#include "talk/media/base/testutils.h"

#if defined(_MSC_VER)
#define ALIGN16(var) __declspec(align(16)) var
#else
#define ALIGN16(var) var __attribute__((aligned(16)))
#endif

using cricket::LoadPlanarYuvTestImage;
using cricket::DumpPlanarYuvTestImage;
using talk_base::scoped_ptr;

DEFINE_bool(yuvscaler_dump, false,
            "whether to write out scaled images for inspection");
DEFINE_int(yuvscaler_repeat, 1,
           "how many times to perform each scaling operation (for perf testing)");

static const int kAlignment = 16;

// TEST_UNCACHED flushes cache to test real memory performance.
// TEST_RSTSC uses cpu cycles for more accurate benchmark of the scale function.
#ifndef __arm__
// #define TEST_UNCACHED 1
// #define TEST_RSTSC 1
#endif

#if defined(TEST_UNCACHED) || defined(TEST_RSTSC)
#ifdef _MSC_VER
#include <emmintrin.h>  // NOLINT
#endif

#if defined(__GNUC__) && defined(__i386__)
static inline uint64 __rdtsc(void) {
  uint32_t a, d;
  __asm__ volatile("rdtsc" : "=a" (a), "=d" (d));
  return (reinterpret_cast<uint64>(d) << 32) + a;
}

static inline void _mm_clflush(volatile void *__p) {
  asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}
#endif

static void FlushCache(uint8* dst, int count) {
  while (count >= 32) {
    _mm_clflush(dst);
    dst += 32;
    count -= 32;
  }
}
#endif

class YuvScalerTest : public testing::Test {
 protected:
  virtual void SetUp() {
    dump_ = *FlagList::Lookup("yuvscaler_dump")->bool_variable();
    repeat_ = *FlagList::Lookup("yuvscaler_repeat")->int_variable();
  }

  // Scale an image and compare against a Lanczos-filtered test image.
  // Lanczos is considered to be the "ideal" image resampling method, so we try
  // to get as close to that as possible, while being as fast as possible.
  bool TestScale(int iw, int ih, int ow, int oh, int offset, bool usefile,
                 bool optimize, int cpuflags, bool interpolate,
                 int memoffset, double* error) {
    *error = 0.;
    size_t isize = I420_SIZE(iw, ih);
    size_t osize = I420_SIZE(ow, oh);
    scoped_ptr<uint8[]> ibuffer(new uint8[isize + kAlignment + memoffset]());
    scoped_ptr<uint8[]> obuffer(new uint8[osize + kAlignment + memoffset]());
    scoped_ptr<uint8[]> xbuffer(new uint8[osize + kAlignment + memoffset]());

    uint8 *ibuf = ALIGNP(ibuffer.get(), kAlignment) + memoffset;
    uint8 *obuf = ALIGNP(obuffer.get(), kAlignment) + memoffset;
    uint8 *xbuf = ALIGNP(xbuffer.get(), kAlignment) + memoffset;

    if (usefile) {
      if (!LoadPlanarYuvTestImage("faces", iw, ih, ibuf) ||
          !LoadPlanarYuvTestImage("faces", ow, oh, xbuf)) {
        LOG(LS_ERROR) << "Failed to load image";
        return false;
      }
    } else {
      // These are used to test huge images.
      memset(ibuf, 213, isize);  // Input is constant color.
      memset(obuf, 100, osize);  // Output set to something wrong for now.
      memset(xbuf, 213, osize);  // Expected result.
    }

#ifdef TEST_UNCACHED
    FlushCache(ibuf, isize);
    FlushCache(obuf, osize);
    FlushCache(xbuf, osize);
#endif

    // Scale down.
    // If cpu true, disable cpu optimizations. Else allow auto detect
    // TODO(fbarchard): set flags for libyuv
    libyuv::MaskCpuFlags(cpuflags);
#ifdef TEST_RSTSC
    uint64 t = 0;
#endif
    for (int i = 0; i < repeat_; ++i) {
#ifdef TEST_UNCACHED
      FlushCache(ibuf, isize);
      FlushCache(obuf, osize);
#endif
#ifdef TEST_RSTSC
      uint64 t1 = __rdtsc();
#endif
      EXPECT_EQ(0, libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh,
                                       offset, interpolate));
#ifdef TEST_RSTSC
      uint64 t2 = __rdtsc();
      t += t2 - t1;
#endif
    }

#ifdef TEST_RSTSC
    LOG(LS_INFO) << "Time: " << std::setw(9) << t;
#endif

    if (dump_) {
      const testing::TestInfo* const test_info =
          testing::UnitTest::GetInstance()->current_test_info();
      std::string test_name(test_info->name());
      DumpPlanarYuvTestImage(test_name, obuf, ow, oh);
    }

    double sse = cricket::ComputeSumSquareError(obuf, xbuf, osize);
    *error = sse / osize;  // Mean Squared Error.
    double PSNR = cricket::ComputePSNR(sse, osize);
    LOG(LS_INFO) << "Image MSE: " <<
        std::setw(6) << std::setprecision(4) << *error <<
        " Image PSNR: " << PSNR;
    return true;
  }

  // Returns the index of the first differing byte. Easier to debug than memcmp.
  static int FindDiff(const uint8* buf1, const uint8* buf2, int len) {
    int i = 0;
    while (i < len && buf1[i] == buf2[i]) {
      i++;
    }
    return (i < len) ? i : -1;
  }

 protected:
  bool dump_;
  int repeat_;
};

// Tests straight copy of data.
TEST_F(YuvScalerTest, TestCopy) {
  const int iw = 640, ih = 360;
  const int ow = 640, oh = 360;
  ALIGN16(uint8 ibuf[I420_SIZE(iw, ih)]);
  ALIGN16(uint8 obuf[I420_SIZE(ow, oh)]);

  // Load the frame, scale it, check it.
  ASSERT_TRUE(LoadPlanarYuvTestImage("faces", iw, ih, ibuf));
  for (int i = 0; i < repeat_; ++i) {
    libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh, 0, false);
  }
  if (dump_) DumpPlanarYuvTestImage("TestCopy", obuf, ow, oh);
  EXPECT_EQ(-1, FindDiff(obuf, ibuf, sizeof(ibuf)));
}

// Tests copy from 4:3 to 16:9.
TEST_F(YuvScalerTest, TestOffset16_10Copy) {
  const int iw = 640, ih = 360;
  const int ow = 640, oh = 480;
  const int offset = (480 - 360) / 2;
  scoped_ptr<uint8[]> ibuffer(new uint8[I420_SIZE(iw, ih) + kAlignment]);
  scoped_ptr<uint8[]> obuffer(new uint8[I420_SIZE(ow, oh) + kAlignment]);

  uint8 *ibuf = ALIGNP(ibuffer.get(), kAlignment);
  uint8 *obuf = ALIGNP(obuffer.get(), kAlignment);

  // Load the frame, scale it, check it.
  ASSERT_TRUE(LoadPlanarYuvTestImage("faces", iw, ih, ibuf));

  // Clear to black, which is Y = 0 and U and V = 128
  memset(obuf, 0, ow * oh);
  memset(obuf + ow * oh, 128, ow * oh / 2);
  for (int i = 0; i < repeat_; ++i) {
    libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh, offset, false);
  }
  if (dump_) DumpPlanarYuvTestImage("TestOffsetCopy16_9", obuf, ow, oh);
  EXPECT_EQ(-1, FindDiff(obuf + ow * offset,
                         ibuf,
                         iw * ih));
  EXPECT_EQ(-1, FindDiff(obuf + ow * oh + ow * offset / 4,
                         ibuf + iw * ih,
                         iw * ih / 4));
  EXPECT_EQ(-1, FindDiff(obuf + ow * oh * 5 / 4 + ow * offset / 4,
                         ibuf + iw * ih * 5 / 4,
                         iw * ih / 4));
}

// The following are 'cpu' flag values:
// Allow all SIMD optimizations
#define ALLFLAGS -1
// Disable SSSE3 but allow other forms of SIMD (SSE2)
#define NOSSSE3 ~libyuv::kCpuHasSSSE3
// Disable SSE2 and SSSE3
#define NOSSE ~libyuv::kCpuHasSSE2 & ~libyuv::kCpuHasSSSE3

// TEST_M scale factor with variations of opt, align, int
#define TEST_M(name, iwidth, iheight, owidth, oheight, mse) \
TEST_F(YuvScalerTest, name##Ref) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, false, ALLFLAGS, false, 0, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##OptAligned) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, ALLFLAGS, false, 0, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##OptUnaligned) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, ALLFLAGS, false, 1, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##OptSSE2) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, NOSSSE3, false, 0, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##OptC) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, NOSSE, false, 0, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##IntRef) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, false, ALLFLAGS, true, 0, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##IntOptAligned) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, ALLFLAGS, true, 0, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##IntOptUnaligned) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, ALLFLAGS, true, 1, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##IntOptSSE2) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, NOSSSE3, true, 0, &error)); \
  EXPECT_LE(error, mse); \
} \
TEST_F(YuvScalerTest, name##IntOptC) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, true, true, NOSSE, true, 0, &error)); \
  EXPECT_LE(error, mse); \
}

#define TEST_H(name, iwidth, iheight, owidth, oheight, opt, cpu, intr, mse) \
TEST_F(YuvScalerTest, name) { \
  double error; \
  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                        0, false, opt, cpu, intr, 0, &error)); \
  EXPECT_LE(error, mse); \
}

// Test 4x3 aspect ratio scaling

// Tests 1/1x scale down.
TEST_M(TestScale4by3Down11, 640, 480, 640, 480, 0)

// Tests 3/4x scale down.
TEST_M(TestScale4by3Down34, 640, 480, 480, 360, 60)

// Tests 1/2x scale down.
TEST_M(TestScale4by3Down12, 640, 480, 320, 240, 60)

// Tests 3/8x scale down.
TEST_M(TestScale4by3Down38, 640, 480, 240, 180, 60)

// Tests 1/4x scale down..
TEST_M(TestScale4by3Down14, 640, 480, 160, 120, 60)

// Tests 3/16x scale down.
TEST_M(TestScale4by3Down316, 640, 480, 120, 90, 120)

// Tests 1/8x scale down.
TEST_M(TestScale4by3Down18, 640, 480, 80, 60, 150)

// Tests 2/3x scale down.
TEST_M(TestScale4by3Down23, 480, 360, 320, 240, 60)

// Tests 4/3x scale up.
TEST_M(TestScale4by3Up43, 480, 360, 640, 480, 60)

// Tests 2/1x scale up.
TEST_M(TestScale4by3Up21, 320, 240, 640, 480, 60)

// Tests 4/1x scale up.
TEST_M(TestScale4by3Up41, 160, 120, 640, 480, 80)

// Test 16x10 aspect ratio scaling

// Tests 1/1x scale down.
TEST_M(TestScale16by10Down11, 640, 400, 640, 400, 0)

// Tests 3/4x scale down.
TEST_M(TestScale16by10Down34, 640, 400, 480, 300, 60)

// Tests 1/2x scale down.
TEST_M(TestScale16by10Down12, 640, 400, 320, 200, 60)

// Tests 3/8x scale down.
TEST_M(TestScale16by10Down38, 640, 400, 240, 150, 60)

// Tests 1/4x scale down..
TEST_M(TestScale16by10Down14, 640, 400, 160, 100, 60)

// Tests 3/16x scale down.
TEST_M(TestScale16by10Down316, 640, 400, 120, 75, 120)

// Tests 1/8x scale down.
TEST_M(TestScale16by10Down18, 640, 400, 80, 50, 150)

// Tests 2/3x scale down.
TEST_M(TestScale16by10Down23, 480, 300, 320, 200, 60)

// Tests 4/3x scale up.
TEST_M(TestScale16by10Up43, 480, 300, 640, 400, 60)

// Tests 2/1x scale up.
TEST_M(TestScale16by10Up21, 320, 200, 640, 400, 60)

// Tests 4/1x scale up.
TEST_M(TestScale16by10Up41, 160, 100, 640, 400, 80)

// Test 16x9 aspect ratio scaling

// Tests 1/1x scale down.
TEST_M(TestScaleDown11, 640, 360, 640, 360, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleDown34, 640, 360, 480, 270, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleDown12, 640, 360, 320, 180, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleDown38, 640, 360, 240, 135, 60)

// Tests 1/4x scale down..
TEST_M(TestScaleDown14, 640, 360, 160, 90, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleDown316, 640, 360, 120, 68, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleDown18, 640, 360, 80, 45, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleDown23, 480, 270, 320, 180, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleUp43, 480, 270, 640, 360, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleUp21, 320, 180, 640, 360, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleUp41, 160, 90, 640, 360, 80)

// Test HD 4x3 aspect ratio scaling

// Tests 1/1x scale down.
TEST_M(TestScaleHD4x3Down11, 1280, 960, 1280, 960, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleHD4x3Down34, 1280, 960, 960, 720, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleHD4x3Down12, 1280, 960, 640, 480, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleHD4x3Down38, 1280, 960, 480, 360, 60)

// Tests 1/4x scale down..
TEST_M(TestScaleHD4x3Down14, 1280, 960, 320, 240, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleHD4x3Down316, 1280, 960, 240, 180, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleHD4x3Down18, 1280, 960, 160, 120, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleHD4x3Down23, 960, 720, 640, 480, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleHD4x3Up43, 960, 720, 1280, 960, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleHD4x3Up21, 640, 480, 1280, 960, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleHD4x3Up41, 320, 240, 1280, 960, 80)

// Test HD 16x10 aspect ratio scaling

// Tests 1/1x scale down.
TEST_M(TestScaleHD16x10Down11, 1280, 800, 1280, 800, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleHD16x10Down34, 1280, 800, 960, 600, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleHD16x10Down12, 1280, 800, 640, 400, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleHD16x10Down38, 1280, 800, 480, 300, 60)

// Tests 1/4x scale down..
TEST_M(TestScaleHD16x10Down14, 1280, 800, 320, 200, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleHD16x10Down316, 1280, 800, 240, 150, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleHD16x10Down18, 1280, 800, 160, 100, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleHD16x10Down23, 960, 600, 640, 400, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleHD16x10Up43, 960, 600, 1280, 800, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleHD16x10Up21, 640, 400, 1280, 800, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleHD16x10Up41, 320, 200, 1280, 800, 80)

// Test HD 16x9 aspect ratio scaling

// Tests 1/1x scale down.
TEST_M(TestScaleHDDown11, 1280, 720, 1280, 720, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleHDDown34, 1280, 720, 960, 540, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleHDDown12, 1280, 720, 640, 360, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleHDDown38, 1280, 720, 480, 270, 60)

// Tests 1/4x scale down..
TEST_M(TestScaleHDDown14, 1280, 720, 320, 180, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleHDDown316, 1280, 720, 240, 135, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleHDDown18, 1280, 720, 160, 90, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleHDDown23, 960, 540, 640, 360, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleHDUp43, 960, 540, 1280, 720, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleHDUp21, 640, 360, 1280, 720, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleHDUp41, 320, 180, 1280, 720, 80)

// Tests 1366x768 resolution for comparison to chromium scaler_bench
TEST_M(TestScaleHDUp1366, 1280, 720, 1366, 768, 10)

// Tests odd source/dest sizes. 3 less to make chroma odd as well.
TEST_M(TestScaleHDUp1363, 1277, 717, 1363, 765, 10)

// Tests 1/2x scale down, using optimized algorithm.
TEST_M(TestScaleOddDown12, 180, 100, 90, 50, 50)

// Tests bilinear scale down
TEST_M(TestScaleOddDownBilin, 160, 100, 90, 50, 120)

// Test huge buffer scales that are expected to use a different code path
// that avoids stack overflow but still work using point sampling.
// Max output size is 640 wide.

// Tests interpolated 1/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown18HDOptInt, 6144, 48, 768, 6, true, ALLFLAGS, true, 1)

// Tests interpolated 1/8x scale down, using c_only optimized algorithm.
TEST_H(TestScaleDown18HDCOnlyOptInt, 6144, 48, 768, 6, true, NOSSE, true, 1)

// Tests interpolated 3/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown38HDOptInt, 2048, 16, 768, 6, true, ALLFLAGS, true, 1)

// Tests interpolated 3/8x scale down, using no SSSE3 optimized algorithm.
TEST_H(TestScaleDown38HDNoSSSE3OptInt, 2048, 16, 768, 6, true, NOSSSE3, true, 1)

// Tests interpolated 3/8x scale down, using c_only optimized algorithm.
TEST_H(TestScaleDown38HDCOnlyOptInt, 2048, 16, 768, 6, true, NOSSE, true, 1)

// Tests interpolated 3/16x scale down, using optimized algorithm.
TEST_H(TestScaleDown316HDOptInt, 4096, 32, 768, 6, true, ALLFLAGS, true, 1)

// Tests interpolated 3/16x scale down, using no SSSE3 optimized algorithm.
TEST_H(TestScaleDown316HDNoSSSE3OptInt, 4096, 32, 768, 6, true, NOSSSE3, true,
       1)

// Tests interpolated 3/16x scale down, using c_only optimized algorithm.
TEST_H(TestScaleDown316HDCOnlyOptInt, 4096, 32, 768, 6, true, NOSSE, true, 1)

// Test special sizes dont crash
// Tests scaling down to 1 pixel width
TEST_H(TestScaleDown1x6OptInt, 3, 24, 1, 6, true, ALLFLAGS, true, 4)

// Tests scaling down to 1 pixel height
TEST_H(TestScaleDown6x1OptInt, 24, 3, 6, 1, true, ALLFLAGS, true, 4)

// Tests scaling up from 1 pixel width
TEST_H(TestScaleUp1x6OptInt, 1, 6, 3, 24, true, ALLFLAGS, true, 4)

// Tests scaling up from 1 pixel height
TEST_H(TestScaleUp6x1OptInt, 6, 1, 24, 3, true, ALLFLAGS, true, 4)

// Test performance of a range of box filter scale sizes

// Tests interpolated 1/2x scale down, using optimized algorithm.
TEST_H(TestScaleDown2xHDOptInt, 1280, 720, 1280 / 2, 720 / 2, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/3x scale down, using optimized algorithm.
TEST_H(TestScaleDown3xHDOptInt, 1280, 720, 1280 / 3, 720 / 3, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/4x scale down, using optimized algorithm.
TEST_H(TestScaleDown4xHDOptInt, 1280, 720, 1280 / 4, 720 / 4, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/5x scale down, using optimized algorithm.
TEST_H(TestScaleDown5xHDOptInt, 1280, 720, 1280 / 5, 720 / 5, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/6x scale down, using optimized algorithm.
TEST_H(TestScaleDown6xHDOptInt, 1280, 720, 1280 / 6, 720 / 6, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/7x scale down, using optimized algorithm.
TEST_H(TestScaleDown7xHDOptInt, 1280, 720, 1280 / 7, 720 / 7, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown8xHDOptInt, 1280, 720, 1280 / 8, 720 / 8, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown9xHDOptInt, 1280, 720, 1280 / 9, 720 / 9, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown10xHDOptInt, 1280, 720, 1280 / 10, 720 / 10, true, ALLFLAGS,
       true, 1)
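The TestScale() helper in the new file reports mean squared error (sse / osize) and a PSNR value computed by cricket::ComputePSNR(), whose definition is not part of this diff. For 8-bit samples the conventional relationship, shown here as a standalone sketch rather than the library's exact formula, is PSNR = 10 * log10(255^2 / MSE), with some cap applied when the images are identical:

#include <cmath>
#include <cstddef>

// Standalone sketch of the usual 8-bit PSNR formula; cricket::ComputePSNR()
// may differ in details such as the value returned for a perfect match.
static double PsnrFromSse(double sse, size_t sample_count) {
  if (sample_count == 0) return 0.0;
  const double mse = sse / static_cast<double>(sample_count);
  if (mse <= 0.0) return 128.0;  // arbitrary cap for identical images.
  return 10.0 * std::log10(255.0 * 255.0 / mse);
}

Under this reading, the per-test mse thresholds passed to TEST_M/TEST_H (0 for straight copies, 10 to 150 for real rescales) bound how far the scaler may drift from the Lanczos-filtered reference image.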