/*
    Copyright (c) 2007-2010 iMatix Corporation

    This file is part of 0MQ.

    0MQ is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    0MQ is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include <new>
#include <string.h>

#include "ctx.hpp"
#include "socket_base.hpp"
#include "io_thread.hpp"
#include "platform.hpp"
#include "err.hpp"
#include "pipe.hpp"

#if defined ZMQ_HAVE_WINDOWS
#include "windows.h"
#else
#include "unistd.h"
#endif

zmq::ctx_t::ctx_t (uint32_t io_threads_) :
    no_sockets_notify (false)
{
    int rc;

#ifdef ZMQ_HAVE_WINDOWS
    // Initialise Windows sockets. Note that WSAStartup can be called multiple
    // times given that WSACleanup will be called for each WSAStartup.
    WORD version_requested = MAKEWORD (2, 2);
    WSADATA wsa_data;
    rc = WSAStartup (version_requested, &wsa_data);
    zmq_assert (rc == 0);
    zmq_assert (LOBYTE (wsa_data.wVersion) == 2 &&
        HIBYTE (wsa_data.wVersion) == 2);
#endif

    // Initialise the array of mailboxes. +1 accounts for the internal log socket.
    slot_count = max_sockets + io_threads_ + 1;
    slots = (mailbox_t**) malloc (sizeof (mailbox_t*) * slot_count);
    zmq_assert (slots);

    // Create I/O thread objects and launch them.
    for (uint32_t i = 0; i != io_threads_; i++) {
        io_thread_t *io_thread = new (std::nothrow) io_thread_t (this, i);
        zmq_assert (io_thread);
        io_threads.push_back (io_thread);
        slots [i] = io_thread->get_mailbox ();
        io_thread->start ();
    }

    // In the unused part of the slot array, create a list of empty slots.
    for (int32_t i = (int32_t) slot_count - 1;
          i >= (int32_t) io_threads_; i--) {
        empty_slots.push_back (i);
        slots [i] = NULL;
    }

    // Create the logging infrastructure.
    log_socket = create_socket (ZMQ_PUB);
    zmq_assert (log_socket);
    rc = log_socket->bind ("sys://log");
    zmq_assert (rc == 0);
}

zmq::ctx_t::~ctx_t ()
{
    // Check that there are no remaining open or zombie sockets.
    zmq_assert (sockets.empty ());
    zmq_assert (zombies.empty ());

    // Ask I/O threads to terminate. If the stop signal wasn't sent to an I/O
    // thread, the subsequent invocation of its destructor would hang.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++)
        io_threads [i]->stop ();

    // Wait till I/O threads actually terminate.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++)
        delete io_threads [i];

    // Deallocate the array of mailboxes. No special work is
    // needed as the mailboxes themselves were deallocated with their
    // corresponding io_thread/socket objects.
    free (slots);

#ifdef ZMQ_HAVE_WINDOWS
    // On Windows, uninitialise the socket layer.
    int rc = WSACleanup ();
    wsa_assert (rc != SOCKET_ERROR);
#endif
}

int zmq::ctx_t::terminate ()
{
    // Close the logging infrastructure.
    log_sync.lock ();
    int rc = log_socket->close ();
    zmq_assert (rc == 0);
    log_socket = NULL;
    log_sync.unlock ();

    // First send the stop command to sockets so that any
    // blocking calls are interrupted.
    slot_sync.lock ();
    for (sockets_t::size_type i = 0; i != sockets.size (); i++)
        sockets [i]->stop ();
    if (!sockets.empty ())
        no_sockets_notify = true;
    slot_sync.unlock ();

    // Find out whether there are any open sockets to care about.
    // If there are open sockets, sleep till they are closed. Note that we can
    // use no_sockets_notify safely out of the critical section as once set
    // its value is never changed again.
    if (no_sockets_notify)
        no_sockets_sync.wait ();

    // Note that the lock won't block anyone here. There's no one else holding
    // open sockets anyway. The only purpose of the lock is to double-check that
    // all the CPU caches have been synchronised.
    slot_sync.lock ();

    // At this point there should be no active sockets. What we have is a set
    // of zombies waiting to be dezombified.
    zmq_assert (sockets.empty ());

    // Get rid of remaining zombie sockets.
    while (!zombies.empty ()) {
        dezombify ();

        // Sleep for 1ms so as not to end up busy-looping in case the I/O
        // threads are still busy sending data. We could add a grand poll here
        // (polling for the fds associated with all the zombie sockets), but
        // it's probably not worth implementing.
#if defined ZMQ_HAVE_WINDOWS
        Sleep (1);
#else
        usleep (1000);
#endif
    }
    slot_sync.unlock ();

    // Deallocate the resources.
    delete this;

    return 0;
}

zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
{
    slot_sync.lock ();

    // Free the slots, if possible.
    dezombify ();

    // If max_sockets limit was reached, return error.
    if (empty_slots.empty ()) {
        slot_sync.unlock ();
        errno = EMFILE;
        return NULL;
    }

    // Choose a slot for the socket.
    uint32_t slot = empty_slots.back ();
    empty_slots.pop_back ();

    // Create the socket and register its mailbox.
    socket_base_t *s = socket_base_t::create (type_, this, slot);
    if (!s) {
        empty_slots.push_back (slot);
        slot_sync.unlock ();
        return NULL;
    }
    sockets.push_back (s);
    slots [slot] = s->get_mailbox ();

    slot_sync.unlock ();

    return s;
}

void zmq::ctx_t::zombify_socket (socket_base_t *socket_)
{
    // Zombification of a socket basically means that its ownership is
    // transferred from the application that created it to the context.

    // Note that the lock provides the memory barrier needed to migrate the
    // zombie-to-be socket from its native thread to the shared data area
    // synchronised by slot_sync.
    slot_sync.lock ();
    sockets.erase (socket_);
    zombies.push_back (socket_);

    // Try to get rid of at least some zombie sockets at this point.
    dezombify ();

    // If the shutdown thread is interested in a notification that there are
    // no more open sockets, notify it now.
    if (sockets.empty () && no_sockets_notify)
        no_sockets_sync.post ();

    slot_sync.unlock ();
}

void zmq::ctx_t::send_command (uint32_t tid_, const command_t &command_)
{
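    // Deliver the command to the mailbox registered in the slot of the
    // destination thread (either a socket or an I/O thread).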
    slots [tid_]->send (command_);
}

zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
{
    if (io_threads.empty ())
        return NULL;

    // Find the I/O thread with minimum load.
    int min_load = -1;
    io_threads_t::size_type result = 0;
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
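        // The affinity argument is a bitmask: a zero mask means any I/O
        // thread may be used, otherwise only threads whose bit is set
        // are considered.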
        if (!affinity_ || (affinity_ & (uint64_t (1) << i))) {
            int load = io_threads [i]->get_load ();
            if (min_load == -1 || load < min_load) {
                min_load = load;
                result = i;
            }
        }
    }
    zmq_assert (min_load != -1);
    return io_threads [result];
}

int zmq::ctx_t::register_endpoint (const char *addr_, endpoint_t &endpoint_)
{
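    // Make the endpoint available to connecting peers (see find_endpoint).
    // Registration fails with EADDRINUSE if the address is already taken.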
    endpoints_sync.lock ();

    bool inserted = endpoints.insert (endpoints_t::value_type (
        std::string (addr_), endpoint_)).second;
    if (!inserted) {
        errno = EADDRINUSE;
        endpoints_sync.unlock ();
        return -1;
    }

    endpoints_sync.unlock ();
    return 0;
}

void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
{
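    // Remove all endpoints registered by the given socket.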
    endpoints_sync.lock ();

    endpoints_t::iterator it = endpoints.begin ();
    while (it != endpoints.end ()) {
        if (it->second.socket == socket_) {
            endpoints_t::iterator to_erase = it;
            ++it;
            endpoints.erase (to_erase);
            continue;
        }
        ++it;
    }

    endpoints_sync.unlock ();
}

zmq::endpoint_t zmq::ctx_t::find_endpoint (const char *addr_)
{
    endpoints_sync.lock ();

    endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end ()) {
        endpoints_sync.unlock ();
        errno = ECONNREFUSED;
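        // Return an empty endpoint (NULL socket) to indicate the failure.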
        endpoint_t empty = {NULL, options_t()};
        return empty;
    }
    endpoint_t *endpoint = &it->second;

    // Increment the command sequence number of the peer so that it won't
    // get deallocated until the "bind" command is issued by the caller.
    // The subsequent 'bind' has to be called with the inc_seqnum parameter
    // set to false, so that the seqnum isn't incremented twice.
    endpoint->socket->inc_seqnum ();

    endpoints_sync.unlock ();
    return *endpoint;
}

void zmq::ctx_t::log (zmq_msg_t *msg_)
{
    // At this point we migrate the log socket to the current thread.
    // We rely on the mutex for executing the memory barrier.
    log_sync.lock ();
    if (log_socket)
        log_socket->send (msg_, 0);
    log_sync.unlock ();
}

void zmq::ctx_t::dezombify ()
{
    // Try to dezombify each zombie in the list. Note that the caller is
    // responsible for calling this method in the slot_sync critical section.
    for (zombies_t::iterator it = zombies.begin (); it != zombies.end ();) {
        uint32_t tid = (*it)->get_tid ();
        if ((*it)->dezombify ()) {
#if defined _MSC_VER
            // HP implementation of STL requires doing it this way...
            it = zombies.erase (it);
#else
            zombies.erase (it);
#endif
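            // Return the freed slot to the pool of empty slots.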
            empty_slots.push_back (tid);
            slots [tid] = NULL;
        }
        else
            ++it;
    }
}