Mirror of https://github.com/zeromq/libzmq.git, synced 2025-11-08 14:56:14 +01:00
Problem: inconsistent naming style for private data members, which conflicts with the naming of local variables and member functions

Solution: apply and check the _lower_case naming style for private data members
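
The effect of the convention: a private data member that used to share its spelling with locals and accessors (e.g. `tag` vs. `check_tag ()`) now carries a leading underscore (`_tag`). A minimal before/after sketch of the convention (an illustrative fragment using names from the diff below, not code taken verbatim from the commit):

    // Before: the member `tag` reads like a local variable and
    // collides with the member function name `check_tag`.
    class ctx_t
    {
      public:
        bool check_tag () { return tag == ZMQ_CTX_TAG_VALUE_GOOD; }

      private:
        uint32_t tag;
    };

    // After: `_tag` is unmistakably a private data member.
    class ctx_t
    {
      public:
        bool check_tag () { return _tag == ZMQ_CTX_TAG_VALUE_GOOD; }

      private:
        uint32_t _tag;
    };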
src/ctx.cpp (340 changed lines)

--- a/src/ctx.cpp
+++ b/src/ctx.cpp
@@ -66,25 +66,25 @@ int clipped_maxsocket (int max_requested_)
 }
 
 zmq::ctx_t::ctx_t () :
-    tag (ZMQ_CTX_TAG_VALUE_GOOD),
-    starting (true),
-    terminating (false),
-    reaper (NULL),
-    slot_count (0),
-    slots (NULL),
-    max_sockets (clipped_maxsocket (ZMQ_MAX_SOCKETS_DFLT)),
-    max_msgsz (INT_MAX),
-    io_thread_count (ZMQ_IO_THREADS_DFLT),
-    blocky (true),
-    ipv6 (false),
-    zero_copy (true)
+    _tag (ZMQ_CTX_TAG_VALUE_GOOD),
+    _starting (true),
+    _terminating (false),
+    _reaper (NULL),
+    _slot_count (0),
+    _slots (NULL),
+    _max_sockets (clipped_maxsocket (ZMQ_MAX_SOCKETS_DFLT)),
+    _max_msgsz (INT_MAX),
+    _io_thread_count (ZMQ_IO_THREADS_DFLT),
+    _blocky (true),
+    _ipv6 (false),
+    _zero_copy (true)
 {
 #ifdef HAVE_FORK
-    pid = getpid ();
+    _pid = getpid ();
 #endif
 #ifdef ZMQ_HAVE_VMCI
-    vmci_fd = -1;
-    vmci_family = -1;
+    _vmci_fd = -1;
+    _vmci_family = -1;
 #endif
 
     // Initialise crypto library, if needed.
@@ -93,54 +93,54 @@ zmq::ctx_t::ctx_t () :
 
 bool zmq::ctx_t::check_tag ()
 {
-    return tag == ZMQ_CTX_TAG_VALUE_GOOD;
+    return _tag == ZMQ_CTX_TAG_VALUE_GOOD;
 }
 
 zmq::ctx_t::~ctx_t ()
 {
-    // Check that there are no remaining sockets.
-    zmq_assert (sockets.empty ());
+    // Check that there are no remaining _sockets.
+    zmq_assert (_sockets.empty ());
 
     // Ask I/O threads to terminate. If stop signal wasn't sent to I/O
     // thread subsequent invocation of destructor would hang-up.
-    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
-        io_threads[i]->stop ();
+    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
+        _io_threads[i]->stop ();
     }
 
     // Wait till I/O threads actually terminate.
-    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
-        LIBZMQ_DELETE (io_threads[i]);
+    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
+        LIBZMQ_DELETE (_io_threads[i]);
     }
 
     // Deallocate the reaper thread object.
-    LIBZMQ_DELETE (reaper);
+    LIBZMQ_DELETE (_reaper);
 
     // Deallocate the array of mailboxes. No special work is
     // needed as mailboxes themselves were deallocated with their
     // corresponding io_thread/socket objects.
-    free (slots);
+    free (_slots);
 
     // De-initialise crypto library, if needed.
     zmq::random_close ();
 
     // Remove the tag, so that the object is considered dead.
-    tag = ZMQ_CTX_TAG_VALUE_BAD;
+    _tag = ZMQ_CTX_TAG_VALUE_BAD;
 }
 
 bool zmq::ctx_t::valid () const
 {
-    return term_mailbox.valid ();
+    return _term_mailbox.valid ();
 }
 
 int zmq::ctx_t::terminate ()
 {
-    slot_sync.lock ();
+    _slot_sync.lock ();
 
-    bool save_terminating = terminating;
-    terminating = false;
+    bool save_terminating = _terminating;
+    _terminating = false;
 
     // Connect up any pending inproc connections, otherwise we will hang
-    pending_connections_t copy = pending_connections;
+    pending_connections_t copy = _pending_connections;
     for (pending_connections_t::iterator p = copy.begin (); p != copy.end ();
          ++p) {
         zmq::socket_base_t *s = create_socket (ZMQ_PAIR);
@@ -149,57 +149,57 @@ int zmq::ctx_t::terminate ()
         s->bind (p->first.c_str ());
         s->close ();
     }
-    terminating = save_terminating;
+    _terminating = save_terminating;
 
-    if (!starting) {
+    if (!_starting) {
 #ifdef HAVE_FORK
-        if (pid != getpid ()) {
+        if (_pid != getpid ()) {
             // we are a forked child process. Close all file descriptors
             // inherited from the parent.
-            for (sockets_t::size_type i = 0; i != sockets.size (); i++)
-                sockets[i]->get_mailbox ()->forked ();
+            for (sockets_t::size_type i = 0; i != _sockets.size (); i++)
+                _sockets[i]->get_mailbox ()->forked ();
 
-            term_mailbox.forked ();
+            _term_mailbox.forked ();
         }
 #endif
 
         // Check whether termination was already underway, but interrupted and now
         // restarted.
-        bool restarted = terminating;
-        terminating = true;
+        bool restarted = _terminating;
+        _terminating = true;
 
         // First attempt to terminate the context.
        if (!restarted) {
             // First send stop command to sockets so that any blocking calls
             // can be interrupted. If there are no sockets we can ask reaper
             // thread to stop.
-            for (sockets_t::size_type i = 0; i != sockets.size (); i++)
-                sockets[i]->stop ();
-            if (sockets.empty ())
-                reaper->stop ();
+            for (sockets_t::size_type i = 0; i != _sockets.size (); i++)
+                _sockets[i]->stop ();
+            if (_sockets.empty ())
+                _reaper->stop ();
         }
-        slot_sync.unlock ();
+        _slot_sync.unlock ();
 
         // Wait till reaper thread closes all the sockets.
         command_t cmd;
-        int rc = term_mailbox.recv (&cmd, -1);
+        int rc = _term_mailbox.recv (&cmd, -1);
         if (rc == -1 && errno == EINTR)
             return -1;
         errno_assert (rc == 0);
         zmq_assert (cmd.type == command_t::done);
-        slot_sync.lock ();
-        zmq_assert (sockets.empty ());
+        _slot_sync.lock ();
+        zmq_assert (_sockets.empty ());
     }
-    slot_sync.unlock ();
+    _slot_sync.unlock ();
 
 #ifdef ZMQ_HAVE_VMCI
-    vmci_sync.lock ();
+    _vmci_sync.lock ();
 
-    VMCISock_ReleaseAFValueFd (vmci_fd);
-    vmci_family = -1;
-    vmci_fd = -1;
+    VMCISock_ReleaseAFValueFd (_vmci_fd);
+    _vmci_family = -1;
+    _vmci_fd = -1;
 
-    vmci_sync.unlock ();
+    _vmci_sync.unlock ();
 #endif
 
     // Deallocate the resources.
@@ -210,18 +210,18 @@ int zmq::ctx_t::terminate ()
 
 int zmq::ctx_t::shutdown ()
 {
-    scoped_lock_t locker (slot_sync);
+    scoped_lock_t locker (_slot_sync);
 
-    if (!starting && !terminating) {
-        terminating = true;
+    if (!_starting && !_terminating) {
+        _terminating = true;
 
         // Send stop command to sockets so that any blocking calls
         // can be interrupted. If there are no sockets we can ask reaper
         // thread to stop.
-        for (sockets_t::size_type i = 0; i != sockets.size (); i++)
-            sockets[i]->stop ();
-        if (sockets.empty ())
-            reaper->stop ();
+        for (sockets_t::size_type i = 0; i != _sockets.size (); i++)
+            _sockets[i]->stop ();
+        if (_sockets.empty ())
+            _reaper->stop ();
     }
 
     return 0;
@@ -232,23 +232,23 @@ int zmq::ctx_t::set (int option_, int optval_)
     int rc = 0;
     if (option_ == ZMQ_MAX_SOCKETS && optval_ >= 1
         && optval_ == clipped_maxsocket (optval_)) {
-        scoped_lock_t locker (opt_sync);
-        max_sockets = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _max_sockets = optval_;
     } else if (option_ == ZMQ_IO_THREADS && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        io_thread_count = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _io_thread_count = optval_;
     } else if (option_ == ZMQ_IPV6 && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        ipv6 = (optval_ != 0);
+        scoped_lock_t locker (_opt_sync);
+        _ipv6 = (optval_ != 0);
     } else if (option_ == ZMQ_BLOCKY && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        blocky = (optval_ != 0);
+        scoped_lock_t locker (_opt_sync);
+        _blocky = (optval_ != 0);
     } else if (option_ == ZMQ_MAX_MSGSZ && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        max_msgsz = optval_ < INT_MAX ? optval_ : INT_MAX;
+        scoped_lock_t locker (_opt_sync);
+        _max_msgsz = optval_ < INT_MAX ? optval_ : INT_MAX;
     } else if (option_ == ZMQ_ZERO_COPY_RECV && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        zero_copy = (optval_ != 0);
+        scoped_lock_t locker (_opt_sync);
+        _zero_copy = (optval_ != 0);
     } else {
         rc = thread_ctx_t::set (option_, optval_);
     }
@@ -259,21 +259,21 @@ int zmq::ctx_t::get (int option_)
 {
     int rc = 0;
     if (option_ == ZMQ_MAX_SOCKETS)
-        rc = max_sockets;
+        rc = _max_sockets;
     else if (option_ == ZMQ_SOCKET_LIMIT)
         rc = clipped_maxsocket (65535);
     else if (option_ == ZMQ_IO_THREADS)
-        rc = io_thread_count;
+        rc = _io_thread_count;
     else if (option_ == ZMQ_IPV6)
-        rc = ipv6;
+        rc = _ipv6;
     else if (option_ == ZMQ_BLOCKY)
-        rc = blocky;
+        rc = _blocky;
     else if (option_ == ZMQ_MAX_MSGSZ)
-        rc = max_msgsz;
+        rc = _max_msgsz;
     else if (option_ == ZMQ_MSG_T_SIZE)
         rc = sizeof (zmq_msg_t);
     else if (option_ == ZMQ_ZERO_COPY_RECV) {
-        rc = zero_copy;
+        rc = _zero_copy;
     } else {
         errno = EINVAL;
         rc = -1;
@@ -285,36 +285,36 @@ bool zmq::ctx_t::start ()
 {
     // Initialise the array of mailboxes. Additional three slots are for
     // zmq_ctx_term thread and reaper thread.
-    opt_sync.lock ();
-    int mazmq = max_sockets;
-    int ios = io_thread_count;
-    opt_sync.unlock ();
-    slot_count = mazmq + ios + 2;
-    slots =
-      static_cast<i_mailbox **> (malloc (sizeof (i_mailbox *) * slot_count));
-    if (!slots) {
+    _opt_sync.lock ();
+    int mazmq = _max_sockets;
+    int ios = _io_thread_count;
+    _opt_sync.unlock ();
+    _slot_count = mazmq + ios + 2;
+    _slots =
+      static_cast<i_mailbox **> (malloc (sizeof (i_mailbox *) * _slot_count));
+    if (!_slots) {
         errno = ENOMEM;
         goto fail;
     }
 
     // Initialise the infrastructure for zmq_ctx_term thread.
-    slots[term_tid] = &term_mailbox;
+    _slots[term_tid] = &_term_mailbox;
 
     // Create the reaper thread.
-    reaper = new (std::nothrow) reaper_t (this, reaper_tid);
-    if (!reaper) {
+    _reaper = new (std::nothrow) reaper_t (this, reaper_tid);
+    if (!_reaper) {
         errno = ENOMEM;
         goto fail_cleanup_slots;
     }
-    if (!reaper->get_mailbox ()->valid ())
+    if (!_reaper->get_mailbox ()->valid ())
         goto fail_cleanup_reaper;
-    slots[reaper_tid] = reaper->get_mailbox ();
-    reaper->start ();
+    _slots[reaper_tid] = _reaper->get_mailbox ();
+    _reaper->start ();
 
     // Create I/O thread objects and launch them.
-    for (int32_t i = static_cast<int32_t> (slot_count) - 1;
+    for (int32_t i = static_cast<int32_t> (_slot_count) - 1;
          i >= static_cast<int32_t> (2); i--) {
-        slots[i] = NULL;
+        _slots[i] = NULL;
     }
 
     for (int i = 2; i != ios + 2; i++) {
@@ -327,28 +327,28 @@ bool zmq::ctx_t::start ()
             delete io_thread;
             goto fail_cleanup_reaper;
         }
-        io_threads.push_back (io_thread);
-        slots[i] = io_thread->get_mailbox ();
+        _io_threads.push_back (io_thread);
+        _slots[i] = io_thread->get_mailbox ();
         io_thread->start ();
     }
 
     // In the unused part of the slot array, create a list of empty slots.
-    for (int32_t i = static_cast<int32_t> (slot_count) - 1;
+    for (int32_t i = static_cast<int32_t> (_slot_count) - 1;
          i >= static_cast<int32_t> (ios) + 2; i--) {
-        empty_slots.push_back (i);
+        _empty_slots.push_back (i);
     }
 
-    starting = false;
+    _starting = false;
     return true;
 
 fail_cleanup_reaper:
-    reaper->stop ();
-    delete reaper;
-    reaper = NULL;
+    _reaper->stop ();
+    delete _reaper;
+    _reaper = NULL;
 
 fail_cleanup_slots:
-    free (slots);
-    slots = NULL;
+    free (_slots);
+    _slots = NULL;
 
 fail:
     return false;
@@ -356,28 +356,28 @@ fail:
 
 zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
 {
-    scoped_lock_t locker (slot_sync);
+    scoped_lock_t locker (_slot_sync);
 
-    if (unlikely (starting)) {
+    if (unlikely (_starting)) {
         if (!start ())
             return NULL;
     }
 
     // Once zmq_ctx_term() was called, we can't create new sockets.
-    if (terminating) {
+    if (_terminating) {
         errno = ETERM;
         return NULL;
     }
 
     // If max_sockets limit was reached, return error.
-    if (empty_slots.empty ()) {
+    if (_empty_slots.empty ()) {
         errno = EMFILE;
         return NULL;
     }
 
     // Choose a slot for the socket.
-    uint32_t slot = empty_slots.back ();
-    empty_slots.pop_back ();
+    uint32_t slot = _empty_slots.back ();
+    _empty_slots.pop_back ();
 
     // Generate new unique socket ID.
     int sid = (static_cast<int> (max_socket_id.add (1))) + 1;
@@ -385,41 +385,41 @@ zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
     // Create the socket and register its mailbox.
     socket_base_t *s = socket_base_t::create (type_, this, slot, sid);
     if (!s) {
-        empty_slots.push_back (slot);
+        _empty_slots.push_back (slot);
         return NULL;
     }
-    sockets.push_back (s);
-    slots[slot] = s->get_mailbox ();
+    _sockets.push_back (s);
+    _slots[slot] = s->get_mailbox ();
 
     return s;
 }
 
 void zmq::ctx_t::destroy_socket (class socket_base_t *socket_)
 {
-    scoped_lock_t locker (slot_sync);
+    scoped_lock_t locker (_slot_sync);
 
     // Free the associated thread slot.
     uint32_t tid = socket_->get_tid ();
-    empty_slots.push_back (tid);
-    slots[tid] = NULL;
+    _empty_slots.push_back (tid);
+    _slots[tid] = NULL;
 
     // Remove the socket from the list of sockets.
-    sockets.erase (socket_);
+    _sockets.erase (socket_);
 
     // If zmq_ctx_term() was already called and there are no more socket
     // we can ask reaper thread to terminate.
-    if (terminating && sockets.empty ())
-        reaper->stop ();
+    if (_terminating && _sockets.empty ())
+        _reaper->stop ();
 }
 
 zmq::object_t *zmq::ctx_t::get_reaper ()
 {
-    return reaper;
+    return _reaper;
 }
 
 zmq::thread_ctx_t::thread_ctx_t () :
-    thread_priority (ZMQ_THREAD_PRIORITY_DFLT),
-    thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT)
+    _thread_priority (ZMQ_THREAD_PRIORITY_DFLT),
+    _thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT)
 {
 }
@@ -429,13 +429,13 @@ void zmq::thread_ctx_t::start_thread (thread_t &thread_,
 {
     static unsigned int nthreads_started = 0;
 
-    thread_.setSchedulingParameters (thread_priority, thread_sched_policy,
-                                     thread_affinity_cpus);
+    thread_.setSchedulingParameters (_thread_priority, _thread_sched_policy,
+                                     _thread_affinity_cpus);
     thread_.start (tfn_, arg_);
 #ifndef ZMQ_HAVE_ANDROID
     std::ostringstream s;
-    if (!thread_name_prefix.empty ())
-        s << thread_name_prefix << "/";
+    if (!_thread_name_prefix.empty ())
+        s << _thread_name_prefix << "/";
     s << "ZMQbg/" << nthreads_started;
     thread_.setThreadName (s.str ().c_str ());
 #endif
@@ -446,16 +446,16 @@ int zmq::thread_ctx_t::set (int option_, int optval_)
 {
     int rc = 0;
     if (option_ == ZMQ_THREAD_SCHED_POLICY && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        thread_sched_policy = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _thread_sched_policy = optval_;
     } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_ADD && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        thread_affinity_cpus.insert (optval_);
+        scoped_lock_t locker (_opt_sync);
+        _thread_affinity_cpus.insert (optval_);
     } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_REMOVE && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        std::set<int>::iterator it = thread_affinity_cpus.find (optval_);
-        if (it != thread_affinity_cpus.end ()) {
-            thread_affinity_cpus.erase (it);
+        scoped_lock_t locker (_opt_sync);
+        std::set<int>::iterator it = _thread_affinity_cpus.find (optval_);
+        if (it != _thread_affinity_cpus.end ()) {
+            _thread_affinity_cpus.erase (it);
         } else {
             errno = EINVAL;
             rc = -1;
@@ -463,11 +463,11 @@ int zmq::thread_ctx_t::set (int option_, int optval_)
     } else if (option_ == ZMQ_THREAD_NAME_PREFIX && optval_ >= 0) {
         std::ostringstream s;
         s << optval_;
-        scoped_lock_t locker (opt_sync);
-        thread_name_prefix = s.str ();
+        scoped_lock_t locker (_opt_sync);
+        _thread_name_prefix = s.str ();
     } else if (option_ == ZMQ_THREAD_PRIORITY && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        thread_priority = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _thread_priority = optval_;
     } else {
         errno = EINVAL;
         rc = -1;
@@ -477,23 +477,23 @@ int zmq::thread_ctx_t::set (int option_, int optval_)
 
 void zmq::ctx_t::send_command (uint32_t tid_, const command_t &command_)
 {
-    slots[tid_]->send (command_);
+    _slots[tid_]->send (command_);
 }
 
 zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
 {
-    if (io_threads.empty ())
+    if (_io_threads.empty ())
         return NULL;
 
     // Find the I/O thread with minimum load.
     int min_load = -1;
     io_thread_t *selected_io_thread = NULL;
-    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
+    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
         if (!affinity_ || (affinity_ & (uint64_t (1) << i))) {
-            int load = io_threads[i]->get_load ();
+            int load = _io_threads[i]->get_load ();
             if (selected_io_thread == NULL || load < min_load) {
                 min_load = load;
-                selected_io_thread = io_threads[i];
+                selected_io_thread = _io_threads[i];
             }
         }
     }
@@ -503,10 +503,10 @@ zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
 int zmq::ctx_t::register_endpoint (const char *addr_,
                                    const endpoint_t &endpoint_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);
 
     const bool inserted =
-      endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), endpoint_)
+      _endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), endpoint_)
         .second;
     if (!inserted) {
         errno = EADDRINUSE;
@@ -518,30 +518,30 @@ int zmq::ctx_t::register_endpoint (const char *addr_,
 int zmq::ctx_t::unregister_endpoint (const std::string &addr_,
                                      socket_base_t *socket_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);
 
-    const endpoints_t::iterator it = endpoints.find (addr_);
-    if (it == endpoints.end () || it->second.socket != socket_) {
+    const endpoints_t::iterator it = _endpoints.find (addr_);
+    if (it == _endpoints.end () || it->second.socket != socket_) {
         errno = ENOENT;
         return -1;
     }
 
     // Remove endpoint.
-    endpoints.erase (it);
+    _endpoints.erase (it);
 
     return 0;
 }
 
 void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);
 
-    endpoints_t::iterator it = endpoints.begin ();
-    while (it != endpoints.end ()) {
+    endpoints_t::iterator it = _endpoints.begin ();
+    while (it != _endpoints.end ()) {
         if (it->second.socket == socket_) {
             endpoints_t::iterator to_erase = it;
             ++it;
-            endpoints.erase (to_erase);
+            _endpoints.erase (to_erase);
             continue;
         }
         ++it;
@@ -550,10 +550,10 @@ void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
 
 zmq::endpoint_t zmq::ctx_t::find_endpoint (const char *addr_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);
 
-    endpoints_t::iterator it = endpoints.find (addr_);
-    if (it == endpoints.end ()) {
+    endpoints_t::iterator it = _endpoints.find (addr_);
+    if (it == _endpoints.end ()) {
         errno = ECONNREFUSED;
         endpoint_t empty = {NULL, options_t ()};
         return empty;
@@ -573,17 +573,17 @@ void zmq::ctx_t::pend_connection (const std::string &addr_,
                                  const endpoint_t &endpoint_,
                                  pipe_t **pipes_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);
 
     const pending_connection_t pending_connection = {endpoint_, pipes_[0],
                                                      pipes_[1]};
 
-    endpoints_t::iterator it = endpoints.find (addr_);
-    if (it == endpoints.end ()) {
+    endpoints_t::iterator it = _endpoints.find (addr_);
+    if (it == _endpoints.end ()) {
         // Still no bind.
         endpoint_.socket->inc_seqnum ();
-        pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_,
-                                                       pending_connection);
+        _pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_,
+                                                        pending_connection);
     } else {
         // Bind has happened in the mean time, connect directly
         connect_inproc_sockets (it->second.socket, it->second.options,
@@ -594,16 +594,16 @@ void zmq::ctx_t::pend_connection (const std::string &addr_,
 void zmq::ctx_t::connect_pending (const char *addr_,
                                  zmq::socket_base_t *bind_socket_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);
 
     std::pair<pending_connections_t::iterator, pending_connections_t::iterator>
-      pending = pending_connections.equal_range (addr_);
+      pending = _pending_connections.equal_range (addr_);
     for (pending_connections_t::iterator p = pending.first; p != pending.second;
          ++p)
-        connect_inproc_sockets (bind_socket_, endpoints[addr_].options,
+        connect_inproc_sockets (bind_socket_, _endpoints[addr_].options,
                                 p->second, bind_side);
 
-    pending_connections.erase (pending.first, pending.second);
+    _pending_connections.erase (pending.first, pending.second);
 }
 
 void zmq::ctx_t::connect_inproc_sockets (
@@ -682,20 +682,20 @@ void zmq::ctx_t::connect_inproc_sockets (
 
 int zmq::ctx_t::get_vmci_socket_family ()
 {
-    zmq::scoped_lock_t locker (vmci_sync);
+    zmq::scoped_lock_t locker (_vmci_sync);
 
-    if (vmci_fd == -1) {
-        vmci_family = VMCISock_GetAFValueFd (&vmci_fd);
+    if (_vmci_fd == -1) {
+        _vmci_family = VMCISock_GetAFValueFd (&_vmci_fd);
 
-        if (vmci_fd != -1) {
+        if (_vmci_fd != -1) {
 #ifdef FD_CLOEXEC
-            int rc = fcntl (vmci_fd, F_SETFD, FD_CLOEXEC);
+            int rc = fcntl (_vmci_fd, F_SETFD, FD_CLOEXEC);
             errno_assert (rc != -1);
 #endif
         }
     }
 
-    return vmci_family;
+    return _vmci_family;
 }
 
 #endif
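
The "check" half of the commit message implies an automated style gate in addition to the rename itself. One way such a rule can be enforced is clang-tidy's readability-identifier-naming check; the following .clang-tidy fragment is a sketch of such a configuration (an assumption for illustration, not necessarily the exact check this commit added to the repository):

    # Hypothetical .clang-tidy fragment enforcing _lower_case private members.
    Checks: 'readability-identifier-naming'
    CheckOptions:
      - key:   readability-identifier-naming.PrivateMemberCase
        value: lower_case
      - key:   readability-identifier-naming.PrivateMemberPrefix
        value: '_'

With a configuration like this, `clang-tidy src/ctx.cpp` would flag any private data member that does not match the `_lower_case` pattern applied throughout the diff above.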