2009-07-29 12:07:54 +02:00
|
|
|
/*
|
2017-07-27 15:43:14 +02:00
|
|
|
Copyright (c) 2007-2017 Contributors as noted in the AUTHORS file
|
2009-07-29 12:07:54 +02:00
|
|
|
|
2015-06-02 22:33:55 +02:00
|
|
|
This file is part of libzmq, the ZeroMQ core engine in C++.
|
2009-07-29 12:07:54 +02:00
|
|
|
|
2015-06-02 22:33:55 +02:00
|
|
|
libzmq is free software; you can redistribute it and/or modify it under
|
|
|
|
the terms of the GNU Lesser General Public License (LGPL) as published
|
|
|
|
by the Free Software Foundation; either version 3 of the License, or
|
2009-07-29 12:07:54 +02:00
|
|
|
(at your option) any later version.
|
|
|
|
|
2015-06-02 22:33:55 +02:00
|
|
|
As a special exception, the Contributors give you permission to link
|
|
|
|
this library with independent modules to produce an executable,
|
|
|
|
regardless of the license terms of these independent modules, and to
|
|
|
|
copy and distribute the resulting executable under terms of your choice,
|
|
|
|
provided that you also meet, for each linked independent module, the
|
|
|
|
terms and conditions of the license of that module. An independent
|
|
|
|
module is a module which is not derived from or based on this library.
|
|
|
|
If you modify this library, you must extend this exception to your
|
|
|
|
version of the library.
|
|
|
|
|
|
|
|
libzmq is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
|
|
|
|
License for more details.
|
2009-07-29 12:07:54 +02:00
|
|
|
|
2010-10-30 15:08:28 +02:00
|
|
|
You should have received a copy of the GNU Lesser General Public License
|
2009-07-29 12:07:54 +02:00
|
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
2016-02-18 17:56:52 +01:00
|
|
|
#include "precompiled.hpp"
|
2015-08-22 01:12:22 +02:00
|
|
|
#include "macros.hpp"
|
2016-05-14 05:41:26 +02:00
|
|
|
#ifndef ZMQ_HAVE_WINDOWS
|
2011-06-18 20:44:03 +02:00
|
|
|
#include <unistd.h>
|
|
|
|
#endif
|
|
|
|
|
2014-02-13 15:54:06 +01:00
|
|
|
#include <limits>
|
2016-02-09 10:51:51 +01:00
|
|
|
#include <climits>
|
2009-12-15 23:49:55 +01:00
|
|
|
#include <new>
|
2017-10-17 13:06:50 +02:00
|
|
|
#include <sstream>
|
2010-05-04 10:22:16 +02:00
|
|
|
#include <string.h>
|
2009-12-15 23:49:55 +01:00
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
#include "ctx.hpp"
|
2009-11-21 20:59:55 +01:00
|
|
|
#include "socket_base.hpp"
|
2009-07-29 12:07:54 +02:00
|
|
|
#include "io_thread.hpp"
|
2011-02-09 15:32:15 +01:00
|
|
|
#include "reaper.hpp"
|
2009-07-29 12:07:54 +02:00
|
|
|
#include "pipe.hpp"
|
2011-04-21 22:27:48 +02:00
|
|
|
#include "err.hpp"
|
|
|
|
#include "msg.hpp"
|
2017-07-27 15:43:14 +02:00
|
|
|
#include "random.hpp"
|
2014-08-31 19:30:44 +02:00
|
|
|
|
2015-12-07 13:19:45 +01:00
|
|
|
#ifdef ZMQ_HAVE_VMCI
|
|
|
|
#include <vmci_sockets.h>
|
|
|
|
#endif
|
|
|
|
|
2013-06-20 18:43:32 +02:00
|
|
|
#define ZMQ_CTX_TAG_VALUE_GOOD 0xabadcafe
|
2018-02-01 11:46:09 +01:00
|
|
|
#define ZMQ_CTX_TAG_VALUE_BAD 0xdeadbeef
|
2013-06-20 18:43:32 +02:00
|
|
|
|
2016-02-11 13:32:01 +01:00
|
|
|
int clipped_maxsocket (int max_requested)
|
2013-11-07 18:46:19 +01:00
|
|
|
{
|
2018-02-01 11:46:09 +01:00
|
|
|
if (max_requested >= zmq::poller_t::max_fds ()
|
|
|
|
&& zmq::poller_t::max_fds () != -1)
|
2014-05-16 15:54:14 +02:00
|
|
|
// -1 because we need room for the reaper mailbox.
|
|
|
|
max_requested = zmq::poller_t::max_fds () - 1;
|
2014-07-09 13:49:40 +02:00
|
|
|
|
2013-11-07 18:46:19 +01:00
|
|
|
return max_requested;
|
|
|
|
}
|
|
|
|
|
2012-03-20 01:41:20 +01:00
|
|
|
//  Context constructor. Only establishes default option values and opens
//  the crypto library; the slot array, reaper and I/O threads are created
//  lazily by start () when the first socket is created.
zmq::ctx_t::ctx_t () :
    tag (ZMQ_CTX_TAG_VALUE_GOOD),
    starting (true),
    terminating (false),
    reaper (NULL),
    slot_count (0),
    slots (NULL),
    max_sockets (clipped_maxsocket (ZMQ_MAX_SOCKETS_DFLT)),
    max_msgsz (INT_MAX),
    io_thread_count (ZMQ_IO_THREADS_DFLT),
    blocky (true),
    ipv6 (false),
    zero_copy (true)
{
#ifdef HAVE_FORK
    //  Remember the creating process so terminate () can detect that it is
    //  running in a forked child.
    pid = getpid ();
#endif
#ifdef ZMQ_HAVE_VMCI
    //  -1 marks the VMCI address family as not yet resolved; see
    //  get_vmci_socket_family ().
    vmci_fd = -1;
    vmci_family = -1;
#endif

    //  Initialise crypto library, if needed.
    zmq::random_open ();
}
|
|
|
|
|
2011-04-09 09:35:34 +02:00
|
|
|
bool zmq::ctx_t::check_tag ()
|
|
|
|
{
|
2013-06-20 18:43:32 +02:00
|
|
|
return tag == ZMQ_CTX_TAG_VALUE_GOOD;
|
2011-04-09 09:35:34 +02:00
|
|
|
}
|
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
//  Context destructor. Reached only via "delete this" at the end of
//  terminate (), i.e. after the reaper has closed every socket.
zmq::ctx_t::~ctx_t ()
{
    //  Check that there are no remaining sockets.
    zmq_assert (sockets.empty ());

    //  Ask I/O threads to terminate. If stop signal wasn't sent to I/O
    //  thread subsequent invocation of destructor would hang-up.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        io_threads[i]->stop ();
    }

    //  Wait till I/O threads actually terminate.
    //  (LIBZMQ_DELETE joins the thread via the io_thread_t destructor
    //  before freeing it, and NULLs the slot.)
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        LIBZMQ_DELETE (io_threads[i]);
    }

    //  Deallocate the reaper thread object.
    LIBZMQ_DELETE (reaper);

    //  Deallocate the array of mailboxes. No special work is
    //  needed as mailboxes themselves were deallocated with their
    //  corresponding io_thread/socket objects.
    free (slots);

    //  De-initialise crypto library, if needed.
    zmq::random_close ();

    //  Remove the tag, so that the object is considered dead.
    tag = ZMQ_CTX_TAG_VALUE_BAD;
}
|
|
|
|
|
2018-01-31 17:03:29 +01:00
|
|
|
bool zmq::ctx_t::valid () const
|
|
|
|
{
|
|
|
|
return term_mailbox.valid ();
|
|
|
|
}
|
|
|
|
|
2010-08-12 08:16:18 +02:00
|
|
|
//  Implements zmq_ctx_term (). Flushes pending inproc connections, asks
//  every socket (and finally the reaper) to stop, then blocks on
//  term_mailbox until the reaper reports that all sockets are closed.
//  Returns -1 with errno == EINTR if interrupted by a signal (the caller
//  may call terminate () again); on success deallocates the context
//  ("delete this") and returns 0.
int zmq::ctx_t::terminate ()
{
    slot_sync.lock ();

    //  Temporarily clear the terminating flag so that create_socket ()
    //  below does not fail with ETERM; restored afterwards.
    bool saveTerminating = terminating;
    terminating = false;

    //  Connect up any pending inproc connections, otherwise we will hang
    //  (each pending connect is satisfied by a throwaway PAIR bind).
    pending_connections_t copy = pending_connections;
    for (pending_connections_t::iterator p = copy.begin (); p != copy.end ();
         ++p) {
        zmq::socket_base_t *s = create_socket (ZMQ_PAIR);
        //  create_socket might fail eg: out of memory/sockets limit reached
        zmq_assert (s);
        s->bind (p->first.c_str ());
        s->close ();
    }
    terminating = saveTerminating;

    if (!starting) {
#ifdef HAVE_FORK
        if (pid != getpid ()) {
            //  we are a forked child process. Close all file descriptors
            //  inherited from the parent.
            for (sockets_t::size_type i = 0; i != sockets.size (); i++)
                sockets[i]->get_mailbox ()->forked ();

            term_mailbox.forked ();
        }
#endif

        //  Check whether termination was already underway, but interrupted and now
        //  restarted.
        bool restarted = terminating;
        terminating = true;

        //  First attempt to terminate the context.
        if (!restarted) {
            //  First send stop command to sockets so that any blocking calls
            //  can be interrupted. If there are no sockets we can ask reaper
            //  thread to stop.
            for (sockets_t::size_type i = 0; i != sockets.size (); i++)
                sockets[i]->stop ();
            if (sockets.empty ())
                reaper->stop ();
        }
        slot_sync.unlock ();

        //  Wait till reaper thread closes all the sockets.
        command_t cmd;
        int rc = term_mailbox.recv (&cmd, -1);
        if (rc == -1 && errno == EINTR)
            return -1;
        errno_assert (rc == 0);
        zmq_assert (cmd.type == command_t::done);
        slot_sync.lock ();
        zmq_assert (sockets.empty ());
    }
    slot_sync.unlock ();

#ifdef ZMQ_HAVE_VMCI
    //  Release the cached VMCI address-family descriptor, if any.
    vmci_sync.lock ();

    VMCISock_ReleaseAFValueFd (vmci_fd);
    vmci_family = -1;
    vmci_fd = -1;

    vmci_sync.unlock ();
#endif

    //  Deallocate the resources.
    delete this;

    return 0;
}
|
2010-05-05 13:03:26 +02:00
|
|
|
|
2013-09-10 14:30:00 +02:00
|
|
|
//  Implements zmq_ctx_shutdown (): interrupts blocking socket calls and
//  marks the context as terminating, but does not wait for sockets to
//  close nor deallocate anything — that is left to a later terminate ().
//  Idempotent: a second call (or a call before start) is a no-op.
int zmq::ctx_t::shutdown ()
{
    scoped_lock_t locker (slot_sync);

    if (!starting && !terminating) {
        terminating = true;

        //  Send stop command to sockets so that any blocking calls
        //  can be interrupted. If there are no sockets we can ask reaper
        //  thread to stop.
        for (sockets_t::size_type i = 0; i != sockets.size (); i++)
            sockets[i]->stop ();
        if (sockets.empty ())
            reaper->stop ();
    }

    return 0;
}
|
|
|
|
|
2012-03-20 01:41:20 +01:00
|
|
|
//  Implements zmq_ctx_set (). Each recognised (option, valid value) pair
//  updates the corresponding member under opt_sync. Anything not handled
//  here — including a recognised option with an out-of-range value — is
//  delegated to thread_ctx_t::set (), which ultimately yields EINVAL for
//  unknown options. Returns 0 on success, -1 with errno set on failure.
int zmq::ctx_t::set (int option_, int optval_)
{
    int rc = 0;
    if (option_ == ZMQ_MAX_SOCKETS && optval_ >= 1
        && optval_ == clipped_maxsocket (optval_)) {
        //  The requested limit must already fit the poller capacity.
        scoped_lock_t locker (opt_sync);
        max_sockets = optval_;
    } else if (option_ == ZMQ_IO_THREADS && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        io_thread_count = optval_;
    } else if (option_ == ZMQ_IPV6 && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        ipv6 = (optval_ != 0);
    } else if (option_ == ZMQ_BLOCKY && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        blocky = (optval_ != 0);
    } else if (option_ == ZMQ_MAX_MSGSZ && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        max_msgsz = optval_ < INT_MAX ? optval_ : INT_MAX;
    } else if (option_ == ZMQ_ZERO_COPY_RECV && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        zero_copy = (optval_ != 0);
    } else {
        rc = thread_ctx_t::set (option_, optval_);
    }
    return rc;
}
|
|
|
|
|
|
|
|
int zmq::ctx_t::get (int option_)
|
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
if (option_ == ZMQ_MAX_SOCKETS)
|
|
|
|
rc = max_sockets;
|
2018-02-01 11:46:09 +01:00
|
|
|
else if (option_ == ZMQ_SOCKET_LIMIT)
|
2014-06-11 18:24:39 +02:00
|
|
|
rc = clipped_maxsocket (65535);
|
2018-02-01 11:46:09 +01:00
|
|
|
else if (option_ == ZMQ_IO_THREADS)
|
2012-03-20 01:41:20 +01:00
|
|
|
rc = io_thread_count;
|
2018-02-01 11:46:09 +01:00
|
|
|
else if (option_ == ZMQ_IPV6)
|
2013-01-31 21:52:30 +01:00
|
|
|
rc = ipv6;
|
2018-02-01 11:46:09 +01:00
|
|
|
else if (option_ == ZMQ_BLOCKY)
|
2014-11-17 11:56:59 +01:00
|
|
|
rc = blocky;
|
2018-02-01 11:46:09 +01:00
|
|
|
else if (option_ == ZMQ_MAX_MSGSZ)
|
2016-02-09 10:51:51 +01:00
|
|
|
rc = max_msgsz;
|
2018-02-01 11:46:09 +01:00
|
|
|
else if (option_ == ZMQ_MSG_T_SIZE)
|
2016-11-20 13:24:03 +01:00
|
|
|
rc = sizeof (zmq_msg_t);
|
2018-03-05 13:19:20 +01:00
|
|
|
else if (option_ == ZMQ_ZERO_COPY_RECV) {
|
|
|
|
rc = zero_copy;
|
|
|
|
} else {
|
2012-03-20 01:41:20 +01:00
|
|
|
errno = EINVAL;
|
|
|
|
rc = -1;
|
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2018-01-31 17:03:29 +01:00
|
|
|
//  One-time lazy initialisation, called under slot_sync from
//  create_socket () on the first socket. Allocates the slot/mailbox
//  array, launches the reaper and the configured number of I/O threads.
//  Returns false (with errno set) on failure, leaving the context in its
//  pre-start state.
bool zmq::ctx_t::start ()
{
    //  Initialise the array of mailboxes. Additional two slots are for
    //  zmq_ctx_term thread and reaper thread.
    //  (Option values are snapshotted under opt_sync; they are frozen from
    //  this point on as far as sizing is concerned.)
    opt_sync.lock ();
    int mazmq = max_sockets;
    int ios = io_thread_count;
    opt_sync.unlock ();
    slot_count = mazmq + ios + 2;
    slots = (i_mailbox **) malloc (sizeof (i_mailbox *) * slot_count);
    if (!slots) {
        errno = ENOMEM;
        goto fail;
    }

    //  Initialise the infrastructure for zmq_ctx_term thread.
    slots[term_tid] = &term_mailbox;

    //  Create the reaper thread.
    reaper = new (std::nothrow) reaper_t (this, reaper_tid);
    if (!reaper) {
        errno = ENOMEM;
        goto fail_cleanup_slots;
    }
    if (!reaper->get_mailbox ()->valid ())
        goto fail_cleanup_reaper;
    slots[reaper_tid] = reaper->get_mailbox ();
    reaper->start ();

    //  Create I/O thread objects and launch them.
    //  First NULL out all slots past the reaper's.
    for (int32_t i = (int32_t) slot_count - 1; i >= (int32_t) 2; i--) {
        slots[i] = NULL;
    }

    for (int i = 2; i != ios + 2; i++) {
        io_thread_t *io_thread = new (std::nothrow) io_thread_t (this, i);
        if (!io_thread) {
            errno = ENOMEM;
            goto fail_cleanup_reaper;
        }
        if (!io_thread->get_mailbox ()->valid ()) {
            delete io_thread;
            //  NOTE(review): io_threads already pushed and started in
            //  earlier iterations are not stopped/freed on this path —
            //  confirm whether the leak on partial start failure is
            //  intentional.
            goto fail_cleanup_reaper;
        }
        io_threads.push_back (io_thread);
        slots[i] = io_thread->get_mailbox ();
        io_thread->start ();
    }

    //  In the unused part of the slot array, create a list of empty slots.
    for (int32_t i = (int32_t) slot_count - 1; i >= (int32_t) ios + 2; i--) {
        empty_slots.push_back (i);
    }

    starting = false;
    return true;

fail_cleanup_reaper:
    reaper->stop ();
    delete reaper;
    reaper = NULL;

fail_cleanup_slots:
    free (slots);
    slots = NULL;

fail:
    return false;
}
|
|
|
|
|
|
|
|
//  Implements zmq_socket (). Lazily starts the context on first use,
//  allocates a slot and a globally unique socket id, and registers the
//  new socket's mailbox. Returns NULL with errno set (ETERM after
//  shutdown/terminate, EMFILE when the socket limit is reached, or
//  whatever start ()/socket_base_t::create () reported).
zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
{
    scoped_lock_t locker (slot_sync);

    if (unlikely (starting)) {
        if (!start ())
            return NULL;
    }

    //  Once zmq_ctx_term() was called, we can't create new sockets.
    if (terminating) {
        errno = ETERM;
        return NULL;
    }

    //  If max_sockets limit was reached, return error.
    if (empty_slots.empty ()) {
        errno = EMFILE;
        return NULL;
    }

    //  Choose a slot for the socket.
    uint32_t slot = empty_slots.back ();
    empty_slots.pop_back ();

    //  Generate new unique socket ID.
    //  (max_socket_id is a process-wide counter, so ids are unique even
    //  across contexts.)
    int sid = ((int) max_socket_id.add (1)) + 1;

    //  Create the socket and register its mailbox.
    socket_base_t *s = socket_base_t::create (type_, this, slot, sid);
    if (!s) {
        //  Creation failed: return the slot to the free list.
        empty_slots.push_back (slot);
        return NULL;
    }
    sockets.push_back (s);
    slots[slot] = s->get_mailbox ();

    return s;
}
|
|
|
|
|
2011-02-09 15:32:15 +01:00
|
|
|
void zmq::ctx_t::destroy_socket (class socket_base_t *socket_)
|
2009-09-04 16:02:41 +02:00
|
|
|
{
|
2018-02-01 11:46:09 +01:00
|
|
|
scoped_lock_t locker (slot_sync);
|
2010-08-06 17:49:37 +02:00
|
|
|
|
2012-03-22 07:06:17 +01:00
|
|
|
// Free the associated thread slot.
|
2011-02-09 15:32:15 +01:00
|
|
|
uint32_t tid = socket_->get_tid ();
|
|
|
|
empty_slots.push_back (tid);
|
2018-02-01 11:46:09 +01:00
|
|
|
slots[tid] = NULL;
|
2010-08-06 17:49:37 +02:00
|
|
|
|
2011-02-09 15:32:15 +01:00
|
|
|
// Remove the socket from the list of sockets.
|
|
|
|
sockets.erase (socket_);
|
|
|
|
|
2013-01-31 21:52:30 +01:00
|
|
|
// If zmq_ctx_term() was already called and there are no more socket
|
2011-02-09 15:32:15 +01:00
|
|
|
// we can ask reaper thread to terminate.
|
|
|
|
if (terminating && sockets.empty ())
|
|
|
|
reaper->stop ();
|
2010-02-08 18:37:48 +01:00
|
|
|
}
|
|
|
|
|
2011-02-09 15:32:15 +01:00
|
|
|
//  Accessor for the reaper thread object (NULL until start () has run).
zmq::object_t *zmq::ctx_t::get_reaper ()
{
    object_t *const r = reaper;
    return r;
}
|
|
|
|
|
2018-02-08 17:47:13 +01:00
|
|
|
//  Thread-context constructor: seeds the scheduling parameters applied
//  to every background thread spawned via start_thread ().
zmq::thread_ctx_t::thread_ctx_t () :
    thread_priority (ZMQ_THREAD_PRIORITY_DFLT),
    thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT)
{
}
|
|
|
|
|
|
|
|
//  Launches a background thread with the context-configured scheduling
//  parameters and CPU affinity, then gives it a human-readable name of
//  the form "[<prefix>/]ZMQbg/<n>" (except on Android, where renaming is
//  skipped).
void zmq::thread_ctx_t::start_thread (thread_t &thread_,
                                      thread_fn *tfn_,
                                      void *arg_) const
{
    //  Process-wide counter used only to build distinct thread names.
    //  NOTE(review): not synchronized — assumes thread creation is
    //  serialized by the callers; confirm before relying on unique names.
    static unsigned int nthreads_started = 0;

    thread_.setSchedulingParameters (thread_priority, thread_sched_policy,
                                     thread_affinity_cpus);
    thread_.start (tfn_, arg_);
#ifndef ZMQ_HAVE_ANDROID
    std::ostringstream s;
    if (!thread_name_prefix.empty ())
        s << thread_name_prefix << "/";
    s << "ZMQbg/" << nthreads_started;
    thread_.setThreadName (s.str ().c_str ());
#endif
    nthreads_started++;
}
|
|
|
|
|
2018-02-08 17:47:13 +01:00
|
|
|
//  Handles the thread-related subset of zmq_ctx_set () options (reached
//  via ctx_t::set () fallthrough). Returns 0 on success, -1 with errno
//  set to EINVAL for unknown options or invalid values.
int zmq::thread_ctx_t::set (int option_, int optval_)
{
    int rc = 0;
    if (option_ == ZMQ_THREAD_SCHED_POLICY && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        thread_sched_policy = optval_;
    } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_ADD && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        thread_affinity_cpus.insert (optval_);
    } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_REMOVE && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        std::set<int>::iterator it = thread_affinity_cpus.find (optval_);
        if (it != thread_affinity_cpus.end ()) {
            thread_affinity_cpus.erase (it);
        } else {
            //  Removing a CPU that was never added is an error.
            errno = EINVAL;
            rc = -1;
        }
    } else if (option_ == ZMQ_THREAD_NAME_PREFIX && optval_ >= 0) {
        //  The prefix is the decimal rendering of the integer option value
        //  (the int-only zmq_ctx_set API cannot carry a string).
        std::ostringstream s;
        s << optval_;
        scoped_lock_t locker (opt_sync);
        thread_name_prefix = s.str ();
    } else if (option_ == ZMQ_THREAD_PRIORITY && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        thread_priority = optval_;
    } else {
        errno = EINVAL;
        rc = -1;
    }
    return rc;
}
|
|
|
|
|
2010-11-05 16:38:52 +01:00
|
|
|
//  Routes a command to the mailbox registered in the destination
//  thread's slot. No bounds or NULL check is performed: the caller must
//  pass a tid_ that is backed by a live mailbox.
void zmq::ctx_t::send_command (uint32_t tid_, const command_t &command_)
{
    slots[tid_]->send (command_);
}
|
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
//  Selects the least-loaded I/O thread among those allowed by the
//  affinity bitmask (bit i enables thread i; a zero mask means "any").
//  Returns NULL when the context has no I/O threads.
zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
{
    if (io_threads.empty ())
        return NULL;

    io_thread_t *best = NULL;
    int best_load = -1;
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        const bool eligible = !affinity_ || (affinity_ & (uint64_t (1) << i));
        if (!eligible)
            continue;
        const int load = io_threads[i]->get_load ();
        if (best == NULL || load < best_load) {
            best_load = load;
            best = io_threads[i];
        }
    }
    return best;
}
|
2009-08-28 16:51:46 +02:00
|
|
|
|
2014-05-23 10:53:58 +02:00
|
|
|
//  Registers an inproc endpoint under the given address so that later
//  connects can find it. Returns -1 with errno == EADDRINUSE if the
//  address is already bound.
int zmq::ctx_t::register_endpoint (const char *addr_,
                                   const endpoint_t &endpoint_)
{
    scoped_lock_t locker (endpoints_sync);

    const bool inserted =
      endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (addr_, endpoint_).second;
    if (!inserted) {
        errno = EADDRINUSE;
        return -1;
    }
    return 0;
}
|
|
|
|
|
2018-02-01 11:46:09 +01:00
|
|
|
//  Removes a single inproc endpoint, but only if it is owned by the
//  given socket (so one socket cannot unbind another's address).
//  Returns -1 with errno == ENOENT if the address is unknown or owned
//  by a different socket.
int zmq::ctx_t::unregister_endpoint (const std::string &addr_,
                                     socket_base_t *socket_)
{
    scoped_lock_t locker (endpoints_sync);

    const endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end () || it->second.socket != socket_) {
        errno = ENOENT;
        return -1;
    }

    //  Remove endpoint.
    endpoints.erase (it);

    return 0;
}
|
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
//  Removes every inproc endpoint registered by the given socket (used
//  when a socket is closed). Iteration advances past each entry before
//  erasing it, so the iterator is never invalidated.
void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
{
    scoped_lock_t locker (endpoints_sync);

    endpoints_t::iterator cur = endpoints.begin ();
    while (cur != endpoints.end ()) {
        if (cur->second.socket != socket_) {
            ++cur;
            continue;
        }
        endpoints_t::iterator doomed = cur;
        ++cur;
        endpoints.erase (doomed);
    }
}
|
|
|
|
|
2011-01-10 13:53:30 +01:00
|
|
|
//  Looks up a registered inproc endpoint by address. On miss returns an
//  endpoint with a NULL socket and sets errno to ECONNREFUSED. On hit,
//  the bound socket's seqnum is incremented so it stays alive until the
//  caller issues the "bind" command.
zmq::endpoint_t zmq::ctx_t::find_endpoint (const char *addr_)
{
    scoped_lock_t locker (endpoints_sync);

    endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end ()) {
        errno = ECONNREFUSED;
        endpoint_t empty = {NULL, options_t ()};
        return empty;
    }
    endpoint_t endpoint = it->second;

    //  Increment the command sequence number of the peer so that it won't
    //  get deallocated until "bind" command is issued by the caller.
    //  The subsequent 'bind' has to be called with inc_seqnum parameter
    //  set to false, so that the seqnum isn't incremented twice.
    endpoint.socket->inc_seqnum ();

    return endpoint;
}
|
2012-03-20 01:41:20 +01:00
|
|
|
|
2014-05-21 13:05:56 +02:00
|
|
|
//  Records an inproc connect that arrived before the matching bind.
//  pipes_[0] is the connecting side's pipe, pipes_[1] the (future)
//  bind side's. If the bind has happened in the meantime, the two
//  sockets are wired up immediately instead.
void zmq::ctx_t::pend_connection (const std::string &addr_,
                                  const endpoint_t &endpoint_,
                                  pipe_t **pipes_)
{
    scoped_lock_t locker (endpoints_sync);

    const pending_connection_t pending_connection = {endpoint_, pipes_[0],
                                                     pipes_[1]};

    endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end ()) {
        //  Still no bind.
        //  Keep the connecting socket alive until the bind arrives.
        endpoint_.socket->inc_seqnum ();
        pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_,
                                                       pending_connection);
    } else {
        //  Bind has happened in the mean time, connect directly
        connect_inproc_sockets (it->second.socket, it->second.options,
                                pending_connection, connect_side);
    }
}
|
|
|
|
|
2018-02-01 11:46:09 +01:00
|
|
|
//  Called when an inproc bind completes: wires up every connect that was
//  queued for this address by pend_connection () and drops the queue
//  entries. pending_connections is a multimap, hence the equal_range.
void zmq::ctx_t::connect_pending (const char *addr_,
                                  zmq::socket_base_t *bind_socket_)
{
    scoped_lock_t locker (endpoints_sync);

    std::pair<pending_connections_t::iterator, pending_connections_t::iterator>
      pending = pending_connections.equal_range (addr_);
    for (pending_connections_t::iterator p = pending.first; p != pending.second;
         ++p)
        connect_inproc_sockets (bind_socket_, endpoints[addr_].options,
                                p->second, bind_side);

    pending_connections.erase (pending.first, pending.second);
}
|
|
|
|
|
2018-02-01 11:46:09 +01:00
|
|
|
//  Completes an inproc connection between a bound socket and one pending
//  connector: assigns the bind-side pipe, fixes up high-water marks
//  (mirrored between the two sides; disabled entirely when conflation
//  applies), delivers the "bind" to whichever side is being activated,
//  and finally pushes the bind socket's routing id to the peer if it
//  asked for one.
void zmq::ctx_t::connect_inproc_sockets (
  zmq::socket_base_t *bind_socket_,
  options_t &bind_options,
  const pending_connection_t &pending_connection_,
  side side_)
{
    bind_socket_->inc_seqnum ();
    pending_connection_.bind_pipe->set_tid (bind_socket_->get_tid ());

    if (!bind_options.recv_routing_id) {
        //  The connector pre-wrote its routing id; the bind side does not
        //  want it, so consume and discard that first message.
        msg_t msg;
        const bool ok = pending_connection_.bind_pipe->read (&msg);
        zmq_assert (ok);
        const int rc = msg.close ();
        errno_assert (rc == 0);
    }

    //  Conflation only applies to these socket types.
    bool conflate =
      pending_connection_.endpoint.options.conflate
      && (pending_connection_.endpoint.options.type == ZMQ_DEALER
          || pending_connection_.endpoint.options.type == ZMQ_PULL
          || pending_connection_.endpoint.options.type == ZMQ_PUSH
          || pending_connection_.endpoint.options.type == ZMQ_PUB
          || pending_connection_.endpoint.options.type == ZMQ_SUB);

    if (!conflate) {
        //  Note the mirroring: each pipe's inbound HWM is the peer's
        //  outbound HWM and vice versa.
        pending_connection_.connect_pipe->set_hwms_boost (bind_options.sndhwm,
                                                          bind_options.rcvhwm);
        pending_connection_.bind_pipe->set_hwms_boost (
          pending_connection_.endpoint.options.sndhwm,
          pending_connection_.endpoint.options.rcvhwm);

        pending_connection_.connect_pipe->set_hwms (
          pending_connection_.endpoint.options.rcvhwm,
          pending_connection_.endpoint.options.sndhwm);
        pending_connection_.bind_pipe->set_hwms (bind_options.rcvhwm,
                                                 bind_options.sndhwm);
    } else {
        //  Conflating pipes keep only the latest message: no HWM.
        pending_connection_.connect_pipe->set_hwms (-1, -1);
        pending_connection_.bind_pipe->set_hwms (-1, -1);
    }

    if (side_ == bind_side) {
        //  We are running on the bind side: process the bind in place.
        command_t cmd;
        cmd.type = command_t::bind;
        cmd.args.bind.pipe = pending_connection_.bind_pipe;
        bind_socket_->process_command (cmd);
        bind_socket_->send_inproc_connected (
          pending_connection_.endpoint.socket);
    } else
        //  Connect side: deliver the bind to the bind socket via its pipe.
        pending_connection_.connect_pipe->send_bind (
          bind_socket_, pending_connection_.bind_pipe, false);

    //  When a ctx is terminated all pending inproc connection will be
    //  connected, but the socket will already be closed and the pipe will be
    //  in waiting_for_delimiter state, which means no more writes can be done
    //  and the routing id write fails and causes an assert. Check if the socket
    //  is open before sending.
    if (pending_connection_.endpoint.options.recv_routing_id
        && pending_connection_.endpoint.socket->check_tag ()) {
        msg_t routing_id;
        const int rc = routing_id.init_size (bind_options.routing_id_size);
        errno_assert (rc == 0);
        memcpy (routing_id.data (), bind_options.routing_id,
                bind_options.routing_id_size);
        routing_id.set_flags (msg_t::routing_id);
        const bool written = pending_connection_.bind_pipe->write (&routing_id);
        zmq_assert (written);
        pending_connection_.bind_pipe->flush ();
    }
}
|
|
|
|
|
2015-12-07 13:19:45 +01:00
|
|
|
#ifdef ZMQ_HAVE_VMCI
|
|
|
|
|
|
|
|
//  Lazily resolves (and caches) the VMCI address family, keeping the
//  descriptor returned by VMCISock_GetAFValueFd open for the context's
//  lifetime (released in terminate ()). Returns -1 when VMCI is
//  unavailable.
int zmq::ctx_t::get_vmci_socket_family ()
{
    zmq::scoped_lock_t locker (vmci_sync);

    if (vmci_fd == -1) {
        vmci_family = VMCISock_GetAFValueFd (&vmci_fd);

        if (vmci_fd != -1) {
#ifdef FD_CLOEXEC
            //  Do not leak the cached fd into child processes.
            int rc = fcntl (vmci_fd, F_SETFD, FD_CLOEXEC);
            errno_assert (rc != -1);
#endif
        }
    }

    return vmci_family;
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2012-03-20 01:41:20 +01:00
|
|
|
// The last used socket ID, or 0 if no socket was used so far. Note that this
|
|
|
|
// is a global variable. Thus, even sockets created in different contexts have
|
|
|
|
// unique IDs.
|
|
|
|
zmq::atomic_counter_t zmq::ctx_t::max_socket_id;
|