/*
    Copyright (c) 2009-2011 250bpm s.r.o.
    Copyright (c) 2007-2011 iMatix Corporation
    Copyright (c) 2007-2011 Other contributors as noted in the AUTHORS file

    This file is part of 0MQ.

    0MQ is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    0MQ is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "platform.hpp"
|
|
|
|
#if defined ZMQ_HAVE_WINDOWS
|
|
|
|
#include "windows.hpp"
|
|
|
|
#else
|
|
|
|
#include <unistd.h>
|
|
|
|
#endif
|
|
|
|
|
2009-12-15 23:49:55 +01:00
|
|
|
#include <new>
|
2010-05-04 10:22:16 +02:00
|
|
|
#include <string.h>
|
2009-12-15 23:49:55 +01:00
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
#include "ctx.hpp"
|
2009-11-21 20:59:55 +01:00
|
|
|
#include "socket_base.hpp"
|
2009-07-29 12:07:54 +02:00
|
|
|
#include "io_thread.hpp"
|
2011-02-09 15:32:15 +01:00
|
|
|
#include "reaper.hpp"
|
2009-07-29 12:07:54 +02:00
|
|
|
#include "pipe.hpp"
|
2011-04-21 22:27:48 +02:00
|
|
|
#include "err.hpp"
|
|
|
|
#include "msg.hpp"
|
2009-07-29 12:07:54 +02:00
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
//  Create the context. io_threads_ is the number of I/O threads to launch;
//  it may be zero, in which case no I/O thread objects are created and
//  choose_io_thread () will return NULL.
zmq::ctx_t::ctx_t (uint32_t io_threads_) :
    tag (0xbadcafe0),
    terminating (false)
{
    //  Initialise the array of mailboxes. Additional three slots are for
    //  the internal log socket, the zmq_term thread and the reaper thread.
    slot_count = max_sockets + io_threads_ + 3;
    slots = (mailbox_t**) malloc (sizeof (mailbox_t*) * slot_count);
    alloc_assert (slots);

    //  Initialise the infrastructure for zmq_term thread. terminate ()
    //  blocks on term_mailbox waiting for the reaper's 'done' command.
    slots [term_tid] = &term_mailbox;

    //  Create the reaper thread. It is responsible for closing sockets
    //  asynchronously after they are handed over by destroy_socket ().
    reaper = new (std::nothrow) reaper_t (this, reaper_tid);
    alloc_assert (reaper);
    slots [reaper_tid] = reaper->get_mailbox ();
    reaper->start ();

    //  Create I/O thread objects and launch them. Thread IDs 0 and 1 are
    //  taken by the zmq_term thread and the reaper, so I/O threads start
    //  at ID 2.
    for (uint32_t i = 2; i != io_threads_ + 2; i++) {
        io_thread_t *io_thread = new (std::nothrow) io_thread_t (this, i);
        alloc_assert (io_thread);
        io_threads.push_back (io_thread);
        slots [i] = io_thread->get_mailbox ();
        io_thread->start ();
    }

    //  In the unused part of the slot array, create a list of empty slots.
    //  create_socket () hands these out (from the back) to new sockets.
    for (int32_t i = (int32_t) slot_count - 1;
          i >= (int32_t) io_threads_ + 2; i--) {
        empty_slots.push_back (i);
        slots [i] = NULL;
    }
}
|
|
|
|
|
2011-04-09 09:35:34 +02:00
|
|
|
bool zmq::ctx_t::check_tag ()
|
|
|
|
{
|
|
|
|
return tag == 0xbadcafe0;
|
|
|
|
}
|
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
//  Destroy the context. Invoked only via terminate ()'s 'delete this',
//  i.e. after the reaper has confirmed all sockets are closed.
zmq::ctx_t::~ctx_t ()
{
    //  Check that there are no remaining sockets.
    zmq_assert (sockets.empty ());

    //  Ask I/O threads to terminate. If stop signal wasn't sent to I/O
    //  thread subsequent invocation of destructor would hang-up.
    //  (Two passes: signal all first, then join, so threads wind down
    //  in parallel rather than one by one.)
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++)
        io_threads [i]->stop ();

    //  Wait till I/O threads actually terminate.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++)
        delete io_threads [i];

    //  Deallocate the reaper thread object.
    delete reaper;

    //  Deallocate the array of mailboxes. No special work is
    //  needed as mailboxes themselves were deallocated with their
    //  corresponding io_thread/socket objects.
    free (slots);

    //  Remove the tag, so that the object is considered dead.
    tag = 0xdeadbeef;
}
|
|
|
|
|
2010-08-12 08:16:18 +02:00
|
|
|
//  Terminate the context (implementation of zmq_term). Blocks until the
//  reaper thread has closed all sockets, then deallocates the context.
//  Returns 0 on success, or -1 with errno set to EINTR if the wait was
//  interrupted by a signal (in which case the caller may retry).
int zmq::ctx_t::terminate ()
{
    //  Check whether termination was already underway, but interrupted and now
    //  restarted.
    slot_sync.lock ();
    bool restarted = terminating;
    slot_sync.unlock ();

    //  First attempt to terminate the context.
    if (!restarted) {

        //  First send stop command to sockets so that any blocking calls can be
        //  interrupted. If there are no sockets we can ask reaper thread to stop.
        slot_sync.lock ();
        terminating = true;
        for (sockets_t::size_type i = 0; i != sockets.size (); i++)
            sockets [i]->stop ();
        if (sockets.empty ())
            reaper->stop ();
        slot_sync.unlock ();
    }

    //  Wait till reaper thread closes all the sockets. The reaper posts
    //  a 'done' command to term_mailbox once the last socket is gone.
    command_t cmd;
    int rc = term_mailbox.recv (&cmd, -1);
    if (rc == -1 && errno == EINTR)
        return -1;
    zmq_assert (rc == 0);
    zmq_assert (cmd.type == command_t::done);
    slot_sync.lock ();
    zmq_assert (sockets.empty ());
    slot_sync.unlock ();

    //  Deallocate the resources. Note: 'delete this' — the context object
    //  must not be touched after this point.
    delete this;

    return 0;
}
|
2010-05-05 13:03:26 +02:00
|
|
|
|
2010-08-06 17:49:37 +02:00
|
|
|
//  Create a new socket of the given type (implementation of zmq_socket).
//  Returns NULL with errno set to ETERM if the context is terminating,
//  or to EMFILE if the max_sockets limit has been reached, or passes
//  through a failure from socket_base_t::create.
zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
{
    slot_sync.lock ();

    //  Once zmq_term() was called, we can't create new sockets.
    if (terminating) {
        slot_sync.unlock ();
        errno = ETERM;
        return NULL;
    }

    //  If max_sockets limit was reached, return error.
    if (empty_slots.empty ()) {
        slot_sync.unlock ();
        errno = EMFILE;
        return NULL;
    }

    //  Choose a slot for the socket. The slot index doubles as the
    //  socket's thread ID (tid) for command routing.
    uint32_t slot = empty_slots.back ();
    empty_slots.pop_back ();

    //  Create the socket and register its mailbox.
    //  On failure, return the slot to the free list before bailing out.
    socket_base_t *s = socket_base_t::create (type_, this, slot);
    if (!s) {
        empty_slots.push_back (slot);
        slot_sync.unlock ();
        return NULL;
    }
    sockets.push_back (s);
    slots [slot] = s->get_mailbox ();

    slot_sync.unlock ();

    return s;
}
|
|
|
|
|
2011-02-09 15:32:15 +01:00
|
|
|
//  Remove a socket from the context's bookkeeping and recycle its slot.
//  Called when the socket is being deallocated; may trigger reaper
//  shutdown if this was the last socket of a terminating context.
void zmq::ctx_t::destroy_socket (class socket_base_t *socket_)
{
    slot_sync.lock ();

    //  Free the associated thread slot.
    uint32_t tid = socket_->get_tid ();
    empty_slots.push_back (tid);
    slots [tid] = NULL;

    //  Remove the socket from the list of sockets.
    sockets.erase (socket_);

    //  If zmq_term() was already called and there are no more sockets
    //  we can ask reaper thread to terminate.
    if (terminating && sockets.empty ())
        reaper->stop ();

    slot_sync.unlock ();
}
|
|
|
|
|
2011-02-09 15:32:15 +01:00
|
|
|
//  Expose the reaper thread as a generic object_t so that sockets can
//  hand themselves over to it for asynchronous deallocation.
zmq::object_t *zmq::ctx_t::get_reaper ()
{
    object_t *reaper_object = reaper;
    return reaper_object;
}
|
|
|
|
|
2010-11-05 16:38:52 +01:00
|
|
|
void zmq::ctx_t::send_command (uint32_t tid_, const command_t &command_)
|
2010-02-07 09:14:43 +01:00
|
|
|
{
|
2010-11-05 16:38:52 +01:00
|
|
|
slots [tid_]->send (command_);
|
2010-02-07 09:14:43 +01:00
|
|
|
}
|
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
//  Select an I/O thread for a new engine. affinity_ is a bitmap of
//  permitted thread indices; zero means any thread is acceptable.
//  Among the permitted threads the least loaded one is chosen.
//  Returns NULL if the context was created with zero I/O threads.
zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
{
    if (io_threads.empty ())
        return NULL;

    //  Scan all I/O threads, tracking the least loaded eligible one.
    int lowest_load = -1;
    io_threads_t::size_type selected = 0;
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {

        //  Skip threads excluded by the affinity bitmap.
        bool eligible = !affinity_ || (affinity_ & (uint64_t (1) << i)) != 0;
        if (!eligible)
            continue;

        int current_load = io_threads [i]->get_load ();
        if (lowest_load == -1 || current_load < lowest_load) {
            lowest_load = current_load;
            selected = i;
        }
    }

    //  At least one thread must have matched the affinity mask.
    zmq_assert (lowest_load != -1);
    return io_threads [selected];
}
|
2009-08-28 16:51:46 +02:00
|
|
|
|
2011-01-10 13:53:30 +01:00
|
|
|
int zmq::ctx_t::register_endpoint (const char *addr_, endpoint_t &endpoint_)
|
2009-11-21 20:59:55 +01:00
|
|
|
{
|
|
|
|
endpoints_sync.lock ();
|
|
|
|
|
2010-10-14 12:13:52 +02:00
|
|
|
bool inserted = endpoints.insert (endpoints_t::value_type (
|
2011-01-10 13:53:30 +01:00
|
|
|
std::string (addr_), endpoint_)).second;
|
2009-11-21 20:59:55 +01:00
|
|
|
if (!inserted) {
|
|
|
|
errno = EADDRINUSE;
|
|
|
|
endpoints_sync.unlock ();
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
endpoints_sync.unlock ();
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-05-05 14:24:54 +02:00
|
|
|
//  Remove every inproc endpoint that was registered by the given socket.
//  Called when the socket shuts down.
void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
{
    endpoints_sync.lock ();

    //  Walk the whole map, erasing entries owned by socket_. The iterator
    //  is advanced before erasure so it is never invalidated.
    for (endpoints_t::iterator it = endpoints.begin ();
          it != endpoints.end ();) {
        if (it->second.socket == socket_)
            endpoints.erase (it++);
        else
            ++it;
    }

    endpoints_sync.unlock ();
}
|
|
|
|
|
2011-01-10 13:53:30 +01:00
|
|
|
//  Look up an inproc endpoint by address. On success, returns the
//  endpoint with the peer's seqnum already incremented (see below).
//  On failure, returns an endpoint with a NULL socket and sets errno
//  to ECONNREFUSED.
zmq::endpoint_t zmq::ctx_t::find_endpoint (const char *addr_)
{
    endpoints_sync.lock ();

    endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end ()) {
        endpoints_sync.unlock ();
        errno = ECONNREFUSED;
        endpoint_t empty = {NULL, options_t()};
        return empty;
    }
    endpoint_t *endpoint = &it->second;

    //  Increment the command sequence number of the peer so that it won't
    //  get deallocated until "bind" command is issued by the caller.
    //  The subsequent 'bind' has to be called with inc_seqnum parameter
    //  set to false, so that the seqnum isn't incremented twice.
    endpoint->socket->inc_seqnum ();

    endpoints_sync.unlock ();
    //  Return by value — the map entry may be erased by the owner after
    //  the lock is released.
    return *endpoint;
}
|