webrtc/talk/p2p/client/fakeportallocator.h
guoweis@webrtc.org 40c2aa36f2 Implemented Network::GetBestIP() selection logic as follows:
1) Return the first global, temporary, non-deprecated address.
2) If #1 is not available, return a global address.
3) If #2 is not available, use a ULA IPv6 address as a last resort.

ULA stands for unique local address. ULAs are only useful in a private
WebRTC deployment. More details: http://en.wikipedia.org/wiki/Unique_local_address

BUG=3808

At this point, rule #3 won't actually apply in the current implementation,
because ULA addresses (those starting with 0xfc or 0xfd) are grouped into
their own Network. As a result, WebRTC has one extra Network from which to
generate candidates, but the missing rule #3 shouldn't prevent turning on
IPv6, since ULAs should only be tried in a closed deployment anyway.

R=jiayl@webrtc.org

Committed: https://code.google.com/p/webrtc/source/detail?r=7200

Review URL: https://webrtc-codereview.appspot.com/31369004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7201 4adac7df-926f-26a2-2b94-8c16560cd09d
2014-09-16 20:29:41 +00:00

// Copyright 2010 Google Inc. All Rights Reserved.
//
// Author: Justin Uberti (juberti@google.com)

#ifndef TALK_P2P_CLIENT_FAKEPORTALLOCATOR_H_
#define TALK_P2P_CLIENT_FAKEPORTALLOCATOR_H_

#include <string>

#include "talk/p2p/base/basicpacketsocketfactory.h"
#include "talk/p2p/base/portallocator.h"
#include "talk/p2p/base/udpport.h"
#include "webrtc/base/scoped_ptr.h"

namespace rtc {
class SocketFactory;
class Thread;
}

namespace cricket {

class FakePortAllocatorSession : public PortAllocatorSession {
 public:
  FakePortAllocatorSession(rtc::Thread* worker_thread,
                           rtc::PacketSocketFactory* factory,
                           const std::string& content_name,
                           int component,
                           const std::string& ice_ufrag,
                           const std::string& ice_pwd)
      : PortAllocatorSession(content_name, component, ice_ufrag, ice_pwd,
                             cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG),
        worker_thread_(worker_thread),
        factory_(factory),
        network_("network", "unittest",
                 rtc::IPAddress(INADDR_LOOPBACK), 8),
        port_(),
        running_(false),
        port_config_count_(0) {
    network_.AddIP(rtc::IPAddress(INADDR_LOOPBACK));
  }

  virtual void StartGettingPorts() {
    if (!port_) {
      port_.reset(cricket::UDPPort::Create(worker_thread_, factory_,
                                           &network_, network_.ip(), 0, 0,
                                           username(), password()));
      AddPort(port_.get());
    }
    ++port_config_count_;
    running_ = true;
  }

  virtual void StopGettingPorts() { running_ = false; }
  virtual bool IsGettingPorts() { return running_; }
  int port_config_count() { return port_config_count_; }

  void AddPort(cricket::Port* port) {
    port->set_component(component_);
    port->set_generation(0);
    port->SignalPortComplete.connect(
        this, &FakePortAllocatorSession::OnPortComplete);
    port->PrepareAddress();
    SignalPortReady(this, port);
  }

  void OnPortComplete(cricket::Port* port) {
    SignalCandidatesReady(this, port->Candidates());
    SignalCandidatesAllocationDone(this);
  }

 private:
  rtc::Thread* worker_thread_;
  rtc::PacketSocketFactory* factory_;
  rtc::Network network_;
  rtc::scoped_ptr<cricket::Port> port_;
  bool running_;
  int port_config_count_;
};

class FakePortAllocator : public cricket::PortAllocator {
 public:
  FakePortAllocator(rtc::Thread* worker_thread,
                    rtc::PacketSocketFactory* factory)
      : worker_thread_(worker_thread), factory_(factory) {
    if (factory_ == NULL) {
      owned_factory_.reset(new rtc::BasicPacketSocketFactory(worker_thread_));
      factory_ = owned_factory_.get();
    }
  }

  virtual cricket::PortAllocatorSession* CreateSessionInternal(
      const std::string& content_name,
      int component,
      const std::string& ice_ufrag,
      const std::string& ice_pwd) {
    return new FakePortAllocatorSession(
        worker_thread_, factory_, content_name, component, ice_ufrag, ice_pwd);
  }

 private:
  rtc::Thread* worker_thread_;
  rtc::PacketSocketFactory* factory_;
  rtc::scoped_ptr<rtc::BasicPacketSocketFactory> owned_factory_;
};

}  // namespace cricket

#endif  // TALK_P2P_CLIENT_FAKEPORTALLOCATOR_H_