libzmq/src/socket_base.cpp


/*
Copyright (c) 2007-2014 Contributors as noted in the AUTHORS file
This file is part of 0MQ.
0MQ is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
0MQ is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <new>
#include <string>
#include <algorithm>
#include "platform.hpp"
#if defined ZMQ_HAVE_WINDOWS
#include "windows.hpp"
#if defined _MSC_VER
#if defined _WIN32_WCE
#include <cmnintrin.h>
#else
#include <intrin.h>
#endif
#endif
#else
#include <unistd.h>
#endif
#include "socket_base.hpp"
#include "tcp_listener.hpp"
#include "ipc_listener.hpp"
#include "tipc_listener.hpp"
#include "tcp_connecter.hpp"
#include "io_thread.hpp"
#include "session_base.hpp"
#include "config.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "ctx.hpp"
#include "likely.hpp"
#include "msg.hpp"
#include "address.hpp"
#include "ipc_address.hpp"
#include "tcp_address.hpp"
#include "tipc_address.hpp"
#ifdef ZMQ_HAVE_OPENPGM
#include "pgm_socket.hpp"
#endif
#include "pair.hpp"
#include "pub.hpp"
#include "sub.hpp"
#include "req.hpp"
#include "rep.hpp"
#include "pull.hpp"
#include "push.hpp"
#include "dealer.hpp"
#include "router.hpp"
#include "xpub.hpp"
#include "xsub.hpp"
#include "stream.hpp"
bool zmq::socket_base_t::check_tag ()
{
return tag == 0xbaddecaf;
}
zmq::socket_base_t *zmq::socket_base_t::create (int type_, class ctx_t *parent_,
uint32_t tid_, int sid_)
{
socket_base_t *s = NULL;
switch (type_) {
case ZMQ_PAIR:
s = new (std::nothrow) pair_t (parent_, tid_, sid_);
break;
case ZMQ_PUB:
s = new (std::nothrow) pub_t (parent_, tid_, sid_);
break;
case ZMQ_SUB:
s = new (std::nothrow) sub_t (parent_, tid_, sid_);
break;
case ZMQ_REQ:
s = new (std::nothrow) req_t (parent_, tid_, sid_);
break;
case ZMQ_REP:
s = new (std::nothrow) rep_t (parent_, tid_, sid_);
break;
case ZMQ_DEALER:
s = new (std::nothrow) dealer_t (parent_, tid_, sid_);
break;
case ZMQ_ROUTER:
s = new (std::nothrow) router_t (parent_, tid_, sid_);
break;
case ZMQ_PULL:
s = new (std::nothrow) pull_t (parent_, tid_, sid_);
break;
case ZMQ_PUSH:
s = new (std::nothrow) push_t (parent_, tid_, sid_);
break;
case ZMQ_XPUB:
s = new (std::nothrow) xpub_t (parent_, tid_, sid_);
break;
case ZMQ_XSUB:
s = new (std::nothrow) xsub_t (parent_, tid_, sid_);
break;
case ZMQ_STREAM:
s = new (std::nothrow) stream_t (parent_, tid_, sid_);
break;
default:
errno = EINVAL;
return NULL;
}
alloc_assert (s);
if (s->mailbox.get_fd () == retired_fd)
return NULL;
return s;
}
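
// Usage sketch (illustrative only, not part of this file): the public
// zmq_socket() call routes through ctx_t::create_socket() and ends up in
// create() above, where the type constant selects the pattern class.
//
//     void *ctx = zmq_ctx_new ();
//     void *pub = zmq_socket (ctx, ZMQ_PUB);   //  allocates a pub_t
//     zmq_close (pub);
//     zmq_ctx_term (ctx);
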
zmq::socket_base_t::socket_base_t (ctx_t *parent_, uint32_t tid_, int sid_) :
own_t (parent_, tid_),
tag (0xbaddecaf),
ctx_terminated (false),
destroyed (false),
last_tsc (0),
ticks (0),
rcvmore (false),
file_desc(-1),
monitor_socket (NULL),
monitor_events (0)
{
options.socket_id = sid_;
options.ipv6 = (parent_->get (ZMQ_IPV6) != 0);
}
zmq::socket_base_t::~socket_base_t ()
{
stop_monitor ();
zmq_assert (destroyed);
}
zmq::mailbox_t *zmq::socket_base_t::get_mailbox ()
{
return &mailbox;
}
void zmq::socket_base_t::stop ()
{
// Called by ctx when it is terminated (zmq_term).
// The 'stop' command is sent from the thread that called zmq_term to
// the thread owning the socket. This way, a blocking call in the
// owner thread can be interrupted.
send_stop ();
}
int zmq::socket_base_t::parse_uri (const char *uri_,
std::string &protocol_, std::string &address_)
{
zmq_assert (uri_ != NULL);
std::string uri (uri_);
std::string::size_type pos = uri.find ("://");
if (pos == std::string::npos) {
errno = EINVAL;
return -1;
}
protocol_ = uri.substr (0, pos);
address_ = uri.substr (pos + 3);
if (protocol_.empty () || address_.empty ()) {
errno = EINVAL;
return -1;
}
return 0;
}
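
// For example (illustrative): parse_uri ("tcp://127.0.0.1:5555", p, a)
// yields p == "tcp" and a == "127.0.0.1:5555". An input without "://",
// or with an empty protocol or address part, fails with EINVAL.
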
int zmq::socket_base_t::check_protocol (const std::string &protocol_)
{
// First check whether the protocol is something we are aware of.
if (protocol_ != "inproc" && protocol_ != "ipc" && protocol_ != "tcp" &&
protocol_ != "pgm" && protocol_ != "epgm" && protocol_ != "tipc" &&
protocol_ != "norm") {
errno = EPROTONOSUPPORT;
return -1;
}
// If 0MQ is not compiled with OpenPGM, pgm and epgm transports
// are not available.
#if !defined ZMQ_HAVE_OPENPGM
if (protocol_ == "pgm" || protocol_ == "epgm") {
errno = EPROTONOSUPPORT;
return -1;
}
#endif
#if !defined ZMQ_HAVE_NORM
if (protocol_ == "norm") {
errno = EPROTONOSUPPORT;
return -1;
}
#endif // !ZMQ_HAVE_NORM
// IPC transport is not available on Windows and OpenVMS.
#if defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
if (protocol_ == "ipc") {
errno = EPROTONOSUPPORT;
return -1;
}
#endif
// TIPC transport is only available on Linux.
#if !defined ZMQ_HAVE_TIPC
if (protocol_ == "tipc") {
errno = EPROTONOSUPPORT;
return -1;
}
#endif
// Check whether socket type and transport protocol match.
// Specifically, multicast protocols can't be combined with
// bi-directional messaging patterns (socket types).
if ((protocol_ == "pgm" || protocol_ == "epgm" || protocol_ == "norm") &&
options.type != ZMQ_PUB && options.type != ZMQ_SUB &&
options.type != ZMQ_XPUB && options.type != ZMQ_XSUB) {
errno = ENOCOMPATPROTO;
return -1;
}
// Protocol is available.
return 0;
}
void zmq::socket_base_t::attach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
{
// First, register the pipe so that we can terminate it later on.
pipe_->set_event_sink (this);
pipes.push_back (pipe_);
// Let the derived socket type know about the new pipe.
xattach_pipe (pipe_, subscribe_to_all_);
// If the socket is already being closed, ask any new pipes to terminate
// straight away.
if (is_terminating ()) {
register_term_acks (1);
pipe_->terminate (false);
}
}
int zmq::socket_base_t::setsockopt (int option_, const void *optval_,
size_t optvallen_)
{
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// First, check whether specific socket type overloads the option.
int rc = xsetsockopt (option_, optval_, optvallen_);
if (rc == 0 || errno != EINVAL)
return rc;
// If the socket type doesn't support the option, pass it to
// the generic option parser.
return options.setsockopt (option_, optval_, optvallen_);
}
int zmq::socket_base_t::getsockopt (int option_, void *optval_,
size_t *optvallen_)
{
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// First, check whether specific socket type overloads the option.
int rc = xgetsockopt (option_, optval_, optvallen_);
if (rc == 0 || errno != EINVAL)
return rc;
if (option_ == ZMQ_RCVMORE) {
if (*optvallen_ < sizeof (int)) {
errno = EINVAL;
return -1;
}
*((int*) optval_) = rcvmore ? 1 : 0;
*optvallen_ = sizeof (int);
return 0;
}
if (option_ == ZMQ_FD) {
if (*optvallen_ < sizeof (fd_t)) {
errno = EINVAL;
return -1;
}
*((fd_t*) optval_) = mailbox.get_fd ();
*optvallen_ = sizeof (fd_t);
return 0;
}
if (option_ == ZMQ_EVENTS) {
if (*optvallen_ < sizeof (int)) {
errno = EINVAL;
return -1;
}
int rc = process_commands (0, false);
if (rc != 0 && (errno == EINTR || errno == ETERM))
return -1;
errno_assert (rc == 0);
*((int*) optval_) = 0;
if (has_out ())
*((int*) optval_) |= ZMQ_POLLOUT;
if (has_in ())
*((int*) optval_) |= ZMQ_POLLIN;
*optvallen_ = sizeof (int);
return 0;
}
if (option_ == ZMQ_LAST_ENDPOINT) {
if (*optvallen_ < last_endpoint.size () + 1) {
errno = EINVAL;
return -1;
}
strcpy (static_cast <char *> (optval_), last_endpoint.c_str ());
*optvallen_ = last_endpoint.size () + 1;
return 0;
}
return options.getsockopt (option_, optval_, optvallen_);
}
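
// Usage sketch (illustrative only; assumes a POSIX build where fd_t is an
// int): the descriptor returned by ZMQ_FD signals command activity rather
// than message readiness, so after it becomes readable the caller must
// consult ZMQ_EVENTS to learn what can actually be done on the socket.
//
//     int fd; size_t fd_size = sizeof (fd);
//     zmq_getsockopt (s, ZMQ_FD, &fd, &fd_size);
//     //  ... wait for fd to become readable via poll()/select() ...
//     int events; size_t events_size = sizeof (events);
//     zmq_getsockopt (s, ZMQ_EVENTS, &events, &events_size);
//     if (events & ZMQ_POLLIN)
//         ;   //  a message can be received without blocking
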
int zmq::socket_base_t::bind (const char *addr_)
{
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Process pending commands, if any.
int rc = process_commands (0, false);
if (unlikely (rc != 0))
return -1;
// Parse addr_ string.
std::string protocol;
std::string address;
rc = parse_uri (addr_, protocol, address);
if (rc != 0)
return -1;
rc = check_protocol (protocol);
if (rc != 0)
return -1;
if (protocol == "inproc") {
endpoint_t endpoint = {this, options};
int rc = register_endpoint (addr_, endpoint);
if (rc == 0) {
connect_pending(addr_, this);
last_endpoint.assign (addr_);
}
return rc;
}
if (protocol == "pgm" || protocol == "epgm" || protocol == "norm") {
// For convenience's sake, bind can be used interchangeably with
// connect for PGM, EPGM and NORM transports.
return connect (addr_);
}
// The remaining transports have to be run in an I/O thread, so at this
// point we'll choose one.
io_thread_t *io_thread = choose_io_thread (options.affinity);
if (!io_thread) {
errno = EMTHREAD;
return -1;
}
if (protocol == "tcp") {
tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
io_thread, this, options);
alloc_assert (listener);
int rc = listener->set_address (address.c_str ());
if (rc != 0) {
delete listener;
event_bind_failed (address, zmq_errno());
return -1;
}
// Save last endpoint URI
listener->get_address (last_endpoint);
add_endpoint (addr_, (own_t *) listener, NULL);
return 0;
}
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
if (protocol == "ipc") {
ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
io_thread, this, options);
alloc_assert (listener);
int rc = listener->set_address (address.c_str ());
if (rc != 0) {
delete listener;
event_bind_failed (address, zmq_errno());
return -1;
}
// Save last endpoint URI
listener->get_address (last_endpoint);
add_endpoint (addr_, (own_t *) listener, NULL);
return 0;
}
#endif
#if defined ZMQ_HAVE_TIPC
if (protocol == "tipc") {
tipc_listener_t *listener = new (std::nothrow) tipc_listener_t (
io_thread, this, options);
alloc_assert (listener);
int rc = listener->set_address (address.c_str ());
if (rc != 0) {
delete listener;
event_bind_failed (address, zmq_errno());
return -1;
}
// Save last endpoint URI
listener->get_address (last_endpoint);
add_endpoint (addr_, (own_t *) listener, NULL);
return 0;
}
#endif
zmq_assert (false);
return -1;
}
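
// Usage sketch (illustrative; assumes a build with TCP wildcard-port
// support): when binding to an ephemeral port, the endpoint actually
// chosen by the OS is stored in last_endpoint above and can be read back
// through ZMQ_LAST_ENDPOINT.
//
//     char endpoint [256]; size_t size = sizeof (endpoint);
//     zmq_bind (s, "tcp://127.0.0.1:*");
//     zmq_getsockopt (s, ZMQ_LAST_ENDPOINT, endpoint, &size);
//     //  endpoint now holds e.g. "tcp://127.0.0.1:53742"
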
int zmq::socket_base_t::connect (const char *addr_)
{
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Process pending commands, if any.
int rc = process_commands (0, false);
if (unlikely (rc != 0))
return -1;
// Parse addr_ string.
std::string protocol;
std::string address;
rc = parse_uri (addr_, protocol, address);
if (rc != 0)
return -1;
rc = check_protocol (protocol);
if (rc != 0)
return -1;
if (protocol == "inproc") {
// TODO: inproc connect is specific with respect to creating pipes
// as there's no 'reconnect' functionality implemented. Once that
// is in place we should follow generic pipe creation algorithm.
// Find the peer endpoint.
endpoint_t peer = find_endpoint (addr_);
// The total HWM for an inproc connection should be the sum of
// the binder's HWM and the connector's HWM.
int sndhwm = 0;
if (peer.socket == NULL)
sndhwm = options.sndhwm;
else if (options.sndhwm != 0 && peer.options.rcvhwm != 0)
sndhwm = options.sndhwm + peer.options.rcvhwm;
int rcvhwm = 0;
if (peer.socket == NULL)
rcvhwm = options.rcvhwm;
else if (options.rcvhwm != 0 && peer.options.sndhwm != 0)
rcvhwm = options.rcvhwm + peer.options.sndhwm;
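
// Worked example (illustrative): with the connector's SNDHWM at 100 and
// the binder's RCVHWM at 50, the effective limit of the shared pipe is
// 150 messages. A 0 on either side means "unlimited", in which case the
// sum is skipped and the pipe's HWM stays 0 (unlimited).
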
// Create a bi-directional pipe to connect the peers.
object_t *parents [2] = {this, peer.socket == NULL ? this : peer.socket};
pipe_t *new_pipes [2] = {NULL, NULL};
bool conflate = options.conflate &&
(options.type == ZMQ_DEALER ||
options.type == ZMQ_PULL ||
options.type == ZMQ_PUSH ||
options.type == ZMQ_PUB ||
options.type == ZMQ_SUB);
int hwms [2] = {conflate? -1 : sndhwm, conflate? -1 : rcvhwm};
bool conflates [2] = {conflate, conflate};
int rc = pipepair (parents, new_pipes, hwms, conflates);
errno_assert (rc == 0);
// Attach local end of the pipe to this socket object.
attach_pipe (new_pipes [0]);
if (!peer.socket) {
// The peer doesn't exist yet so we don't know whether
// to send the identity message or not. To resolve this,
// we always send our identity and drop it later if
// the peer doesn't expect it.
msg_t id;
rc = id.init_size (options.identity_size);
errno_assert (rc == 0);
memcpy (id.data (), options.identity, options.identity_size);
id.set_flags (msg_t::identity);
bool written = new_pipes [0]->write (&id);
zmq_assert (written);
new_pipes [0]->flush ();
endpoint_t endpoint = {this, options};
pending_connection_t pending_connection = {endpoint, new_pipes [0], new_pipes [1]};
pend_connection (addr_, pending_connection);
}
else
{
// If required, send the identity of the local socket to the peer.
if (peer.options.recv_identity) {
msg_t id;
rc = id.init_size (options.identity_size);
errno_assert (rc == 0);
memcpy (id.data (), options.identity, options.identity_size);
id.set_flags (msg_t::identity);
bool written = new_pipes [0]->write (&id);
zmq_assert (written);
new_pipes [0]->flush ();
}
// If required, send the identity of the peer to the local socket.
if (options.recv_identity) {
msg_t id;
rc = id.init_size (peer.options.identity_size);
errno_assert (rc == 0);
memcpy (id.data (), peer.options.identity, peer.options.identity_size);
id.set_flags (msg_t::identity);
bool written = new_pipes [1]->write (&id);
zmq_assert (written);
new_pipes [1]->flush ();
}
// Attach remote end of the pipe to the peer socket. Note that the
// peer's seqnum was already incremented in find_endpoint, so we don't
// need to increment it here.
send_bind (peer.socket, new_pipes [1], false);
}
// Save last endpoint URI
last_endpoint.assign (addr_);
// remember inproc connections for disconnect
inprocs.insert (inprocs_t::value_type (std::string (addr_), new_pipes[0]));
return 0;
}
bool is_single_connect = (options.type == ZMQ_DEALER ||
options.type == ZMQ_SUB ||
options.type == ZMQ_REQ);
if (unlikely (is_single_connect)) {
endpoints_t::iterator it = endpoints.find (addr_);
if (it != endpoints.end ()) {
// There is no valid use for multiple connects for SUB-PUB nor
// DEALER-ROUTER nor REQ-REP. Multiple connects produce
// nonsensical results.
return 0;
}
}
// Choose the I/O thread to run the session in.
io_thread_t *io_thread = choose_io_thread (options.affinity);
if (!io_thread) {
errno = EMTHREAD;
return -1;
}
address_t *paddr = new (std::nothrow) address_t (protocol, address);
alloc_assert (paddr);
// Resolve address (if needed by the protocol)
if (protocol == "tcp") {
// Defer resolution until a socket is opened
paddr->resolved.tcp_addr = NULL;
}
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
else
if (protocol == "ipc") {
paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
alloc_assert (paddr->resolved.ipc_addr);
int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
if (rc != 0) {
delete paddr;
return -1;
}
}
#endif
// TBD - Should we check address for ZMQ_HAVE_NORM???
#ifdef ZMQ_HAVE_OPENPGM
if (protocol == "pgm" || protocol == "epgm") {
struct pgm_addrinfo_t *res = NULL;
uint16_t port_number = 0;
int rc = pgm_socket_t::init_address(address.c_str(), &res, &port_number);
if (res != NULL)
pgm_freeaddrinfo (res);
if (rc != 0 || port_number == 0) {
delete paddr;
return -1;
}
}
#endif
#if defined ZMQ_HAVE_TIPC
else
if (protocol == "tipc") {
paddr->resolved.tipc_addr = new (std::nothrow) tipc_address_t ();
alloc_assert (paddr->resolved.tipc_addr);
int rc = paddr->resolved.tipc_addr->resolve (address.c_str());
if (rc != 0) {
delete paddr;
return -1;
}
}
#endif
// Create session.
session_base_t *session = session_base_t::create (io_thread, true, this,
options, paddr);
errno_assert (session);
// PGM does not support subscription forwarding; ask for all data to be
// sent to this pipe. (same for NORM, currently?)
bool subscribe_to_all = protocol == "pgm" || protocol == "epgm" || protocol == "norm";
pipe_t *newpipe = NULL;
if (options.immediate != 1 || subscribe_to_all) {
// Create a bi-directional pipe.
object_t *parents [2] = {this, session};
pipe_t *new_pipes [2] = {NULL, NULL};
bool conflate = options.conflate &&
(options.type == ZMQ_DEALER ||
options.type == ZMQ_PULL ||
options.type == ZMQ_PUSH ||
options.type == ZMQ_PUB ||
options.type == ZMQ_SUB);
int hwms [2] = {conflate? -1 : options.sndhwm,
conflate? -1 : options.rcvhwm};
bool conflates [2] = {conflate, conflate};
rc = pipepair (parents, new_pipes, hwms, conflates);
errno_assert (rc == 0);
// Attach local end of the pipe to the socket object.
attach_pipe (new_pipes [0], subscribe_to_all);
newpipe = new_pipes [0];
// Attach remote end of the pipe to the session object later on.
session->attach_pipe (new_pipes [1]);
}
// Save last endpoint URI
paddr->to_string (last_endpoint);
add_endpoint (addr_, (own_t *) session, newpipe);
return 0;
}
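
// Usage sketch (illustrative): thanks to the pending-connection handling
// above, an inproc connect may precede the matching bind; the pipe is
// handed over when the binder registers the endpoint.
//
//     void *push = zmq_socket (ctx, ZMQ_PUSH);
//     zmq_connect (push, "inproc://example");   //  no binder exists yet
//     void *pull = zmq_socket (ctx, ZMQ_PULL);
//     zmq_bind (pull, "inproc://example");      //  pending pipe attached here
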
void zmq::socket_base_t::add_endpoint (const char *addr_, own_t *endpoint_, pipe_t *pipe)
{
// Activate the session. Make it a child of this socket.
launch_child (endpoint_);
endpoints.insert (endpoints_t::value_type (std::string (addr_), endpoint_pipe_t(endpoint_, pipe)));
}
int zmq::socket_base_t::term_endpoint (const char *addr_)
{
// Check whether the library hasn't been shut down yet.
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Check whether the endpoint address passed to the function is valid.
if (unlikely (!addr_)) {
errno = EINVAL;
return -1;
}
// Process pending commands, if any. There could still be unprocessed
// process_own() commands (from launch_child(), for example) for the very
// endpoint we are now asked to terminate.
int rc = process_commands (0, false);
if (unlikely (rc != 0))
return -1;
// Parse addr_ string.
std::string protocol;
std::string address;
rc = parse_uri (addr_, protocol, address);
if (rc != 0)
return -1;
rc = check_protocol (protocol);
if (rc != 0)
return -1;
// Disconnect an inproc socket
if (protocol == "inproc") {
std::pair <inprocs_t::iterator, inprocs_t::iterator> range = inprocs.equal_range (std::string (addr_));
if (range.first == range.second) {
errno = ENOENT;
return -1;
}
for (inprocs_t::iterator it = range.first; it != range.second; ++it)
it->second->terminate(true);
inprocs.erase (range.first, range.second);
return 0;
}
// Find the endpoints range (if any) corresponding to the addr_ string.
std::pair <endpoints_t::iterator, endpoints_t::iterator> range = endpoints.equal_range (std::string (addr_));
if (range.first == range.second) {
errno = ENOENT;
return -1;
}
for (endpoints_t::iterator it = range.first; it != range.second; ++it) {
// If we have an associated pipe, terminate it.
if (it->second.second != NULL)
it->second.second->terminate(false);
term_child (it->second.first);
}
endpoints.erase (range.first, range.second);
return 0;
}
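
// Usage sketch (illustrative): both zmq_unbind() and zmq_disconnect() land
// in term_endpoint() and must be given the exact endpoint string used
// earlier (or the one reported by ZMQ_LAST_ENDPOINT when a wildcard was
// bound); otherwise the lookup above fails with ENOENT.
//
//     zmq_connect (s, "tcp://127.0.0.1:5555");
//     //  ...
//     zmq_disconnect (s, "tcp://127.0.0.1:5555");
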
int zmq::socket_base_t::send (msg_t *msg_, int flags_)
{
// Check whether the library hasn't been shut down yet.
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Check whether message passed to the function is valid.
if (unlikely (!msg_ || !msg_->check ())) {
errno = EFAULT;
return -1;
}
// Process pending commands, if any.
int rc = process_commands (0, true);
if (unlikely (rc != 0))
return -1;
// Clear any user-visible flags that are set on the message.
msg_->reset_flags (msg_t::more);
// At this point we impose the flags on the message.
if (flags_ & ZMQ_SNDMORE)
msg_->set_flags (msg_t::more);
// Try to send the message.
rc = xsend (msg_);
if (rc == 0)
return 0;
if (unlikely (errno != EAGAIN))
return -1;
// In case of non-blocking send we'll simply propagate
// the error - including EAGAIN - up the stack.
if (flags_ & ZMQ_DONTWAIT || options.sndtimeo == 0)
return -1;
// Compute the time when the timeout should occur.
// If the timeout is infinite, don't care.
int timeout = options.sndtimeo;
uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);
// Oops, we couldn't send the message. Wait for the next
// command, process it and try to send the message again.
// If timeout is reached in the meantime, return EAGAIN.
while (true) {
if (unlikely (process_commands (timeout, false) != 0))
return -1;
rc = xsend (msg_);
if (rc == 0)
break;
if (unlikely (errno != EAGAIN))
return -1;
if (timeout > 0) {
timeout = (int) (end - clock.now_ms ());
if (timeout <= 0) {
errno = EAGAIN;
return -1;
}
}
}
return 0;
}
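
// Usage sketch (illustrative): a two-frame message. The 'more' flag is
// re-imposed from ZMQ_SNDMORE on every call, as described above, and with
// ZMQ_DONTWAIT a full pipe surfaces as -1 with errno == EAGAIN instead of
// blocking.
//
//     zmq_send (s, "topic", 5, ZMQ_SNDMORE);
//     zmq_send (s, "body", 4, 0);
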
int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
{
// Check whether the library hasn't been shut down yet.
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Check whether message passed to the function is valid.
if (unlikely (!msg_ || !msg_->check ())) {
errno = EFAULT;
return -1;
}
// Once every inbound_poll_rate messages check for signals and process
// incoming commands. This happens only if we are not polling altogether
// because there are messages available all the time. If poll occurs,
// ticks is set to zero and thus we avoid this code.
//
// Note that 'recv' uses a different command throttling algorithm (the one
// described above) from the one used by 'send'. This is because counting
// ticks is more efficient than doing RDTSC all the time.
if (++ticks == inbound_poll_rate) {
if (unlikely (process_commands (0, false) != 0))
return -1;
ticks = 0;
}
// Get the message.
int rc = xrecv (msg_);
if (unlikely (rc != 0 && errno != EAGAIN))
return -1;
// If we have the message, return immediately.
if (rc == 0) {
if (file_desc != retired_fd)
msg_->set_fd(file_desc);
extract_flags (msg_);
return 0;
}
// If the message cannot be fetched immediately, there are two scenarios.
// For non-blocking recv, commands are processed in case there's an
// activate_reader command already waiting in a command pipe.
// If there's none, return EAGAIN.
if (flags_ & ZMQ_DONTWAIT || options.rcvtimeo == 0) {
if (unlikely (process_commands (0, false) != 0))
return -1;
ticks = 0;
rc = xrecv (msg_);
if (rc < 0)
return rc;
if (file_desc != retired_fd)
msg_->set_fd(file_desc);
extract_flags (msg_);
return 0;
}
// Compute the time when the timeout should occur.
// If the timeout is infinite, don't care.
int timeout = options.rcvtimeo;
uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);
// In blocking scenario, commands are processed over and over again until
// we are able to fetch a message.
bool block = (ticks != 0);
while (true) {
if (unlikely (process_commands (block ? timeout : 0, false) != 0))
return -1;
rc = xrecv (msg_);
if (rc == 0) {
ticks = 0;
break;
}
if (unlikely (errno != EAGAIN))
return -1;
block = true;
if (timeout > 0) {
timeout = (int) (end - clock.now_ms ());
if (timeout <= 0) {
errno = EAGAIN;
return -1;
}
}
}
if (file_desc != retired_fd)
msg_->set_fd(file_desc);
extract_flags (msg_);
return 0;
}
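
// Usage sketch (illustrative): receiving a multipart message by checking
// the rcvmore flag (exposed as ZMQ_RCVMORE) after each frame.
//
//     int more = 0; size_t more_size = sizeof (more);
//     do {
//         zmq_msg_t frame;
//         zmq_msg_init (&frame);
//         zmq_msg_recv (&frame, s, 0);
//         //  ... use zmq_msg_data (&frame) / zmq_msg_size (&frame) ...
//         zmq_msg_close (&frame);
//         zmq_getsockopt (s, ZMQ_RCVMORE, &more, &more_size);
//     } while (more);
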
int zmq::socket_base_t::close ()
{
// Mark the socket as dead
tag = 0xdeadbeef;
// Transfer the ownership of the socket from this application thread
// to the reaper thread which will take care of the rest of shutdown
// process.
send_reap (this);
return 0;
}
bool zmq::socket_base_t::has_in ()
{
return xhas_in ();
}
bool zmq::socket_base_t::has_out ()
{
return xhas_out ();
}
void zmq::socket_base_t::start_reaping (poller_t *poller_)
{
// Plug the socket to the reaper thread.
poller = poller_;
handle = poller->add_fd (mailbox.get_fd (), this);
poller->set_pollin (handle);
// Initialise the termination and check whether it can be deallocated
// immediately.
terminate ();
check_destroy ();
}
int zmq::socket_base_t::process_commands (int timeout_, bool throttle_)
{
int rc;
command_t cmd;
if (timeout_ != 0) {
// If we are asked to wait, simply ask mailbox to wait.
rc = mailbox.recv (&cmd, timeout_);
}
else {
// If we are asked not to wait, check whether we haven't processed
// commands recently, so that we can throttle the new commands.
// Get the CPU's tick counter. If 0, the counter is not available.
uint64_t tsc = zmq::clock_t::rdtsc ();
// Optimised version of command processing - it doesn't have to check
// for incoming commands each time. It does so only if certain time
// elapsed since last command processing. Command delay varies
// depending on CPU speed: It's ~1ms on 3GHz CPU, ~2ms on 1.5GHz CPU
// etc. The optimisation makes sense only on platforms where getting
// a timestamp is a very cheap operation (tens of nanoseconds).
if (tsc && throttle_) {
// Check whether the TSC hasn't jumped backwards (in case of migration
// between CPU cores) and whether enough time has elapsed since the last
// command processing. If not, do nothing.
if (tsc >= last_tsc && tsc - last_tsc <= max_command_delay)
return 0;
last_tsc = tsc;
}
// Check whether there are any commands pending for this thread.
rc = mailbox.recv (&cmd, 0);
}
// Process all available commands.
while (rc == 0) {
cmd.destination->process_command (cmd);
rc = mailbox.recv (&cmd, 0);
}
if (errno == EINTR)
return -1;
zmq_assert (errno == EAGAIN);
if (ctx_terminated) {
errno = ETERM;
return -1;
}
return 0;
}
void zmq::socket_base_t::process_stop ()
{
// Here, someone has called zmq_term while the socket was still alive.
// We'll remember the fact so that any blocking call is interrupted and any
// further attempt to use the socket will return ETERM. The user is still
// responsible for calling zmq_close on the socket though!
stop_monitor ();
ctx_terminated = true;
}
void zmq::socket_base_t::process_bind (pipe_t *pipe_)
{
attach_pipe (pipe_);
}
void zmq::socket_base_t::process_term (int linger_)
{
// Unregister all inproc endpoints associated with this socket.
// Doing this we make sure that no new pipes from other sockets (inproc)
// will be initiated.
unregister_endpoints (this);
// Ask all attached pipes to terminate.
for (pipes_t::size_type i = 0; i != pipes.size (); ++i)
pipes [i]->terminate (false);
register_term_acks ((int) pipes.size ());
// Continue the termination process immediately.
own_t::process_term (linger_);
}
void zmq::socket_base_t::process_destroy ()
{
destroyed = true;
}
int zmq::socket_base_t::xsetsockopt (int, const void *, size_t)
{
errno = EINVAL;
return -1;
}
int zmq::socket_base_t::xgetsockopt (int, void *, size_t *)
{
errno = EINVAL;
return -1;
}
bool zmq::socket_base_t::xhas_out ()
{
return false;
}
int zmq::socket_base_t::xsend (msg_t *)
{
errno = ENOTSUP;
return -1;
}
bool zmq::socket_base_t::xhas_in ()
{
return false;
}
int zmq::socket_base_t::xrecv (msg_t *)
{
errno = ENOTSUP;
return -1;
}
zmq::blob_t zmq::socket_base_t::get_credential () const
{
return blob_t ();
}
void zmq::socket_base_t::xread_activated (pipe_t *)
{
zmq_assert (false);
}
void zmq::socket_base_t::xwrite_activated (pipe_t *)
{
zmq_assert (false);
}
void zmq::socket_base_t::xhiccuped (pipe_t *)
{
zmq_assert (false);
}
void zmq::socket_base_t::in_event ()
{
// This function is invoked only once the socket is running in the context
// of the reaper thread. Process any commands from other threads/sockets
// that may be available at the moment. Ultimately, the socket will
// be destroyed.
process_commands (0, false);
check_destroy ();
}
void zmq::socket_base_t::out_event ()
{
zmq_assert (false);
}
void zmq::socket_base_t::timer_event (int)
{
zmq_assert (false);
}
void zmq::socket_base_t::check_destroy ()
{
// If the object was already marked as destroyed, finish the deallocation.
if (destroyed) {
// Remove the socket from the reaper's poller.
poller->rm_fd (handle);
// Remove the socket from the context.
destroy_socket (this);
// Notify the reaper about the fact.
send_reaped ();
// Deallocate.
own_t::process_destroy ();
}
}
void zmq::socket_base_t::read_activated (pipe_t *pipe_)
{
xread_activated (pipe_);
}
void zmq::socket_base_t::write_activated (pipe_t *pipe_)
{
xwrite_activated (pipe_);
}
void zmq::socket_base_t::hiccuped (pipe_t *pipe_)
{
if (options.immediate == 1)
pipe_->terminate (false);
else
// Notify derived sockets of the hiccup
xhiccuped (pipe_);
}
void zmq::socket_base_t::pipe_terminated (pipe_t *pipe_)
{
// Notify the specific socket type about the pipe termination.
xpipe_terminated (pipe_);
// Remove pipe from inproc pipes
for (inprocs_t::iterator it = inprocs.begin(); it != inprocs.end(); ++it) {
if (it->second == pipe_) {
inprocs.erase(it);
break;
}
}
// Remove the pipe from the list of attached pipes and confirm its
// termination if we are already shutting down.
pipes.erase (pipe_);
if (is_terminating ())
unregister_term_ack ();
}
void zmq::socket_base_t::extract_flags (msg_t *msg_)
{
// Test whether IDENTITY flag is valid for this socket type.
if (unlikely (msg_->flags () & msg_t::identity))
zmq_assert (options.recv_identity);
// Remove MORE flag.
rcvmore = msg_->flags () & msg_t::more ? true : false;
}
int zmq::socket_base_t::monitor (const char *addr_, int events_)
{
if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Support deregistering monitoring endpoints as well
if (addr_ == NULL) {
stop_monitor ();
return 0;
}
// Parse addr_ string.
std::string protocol;
std::string address;
int rc = parse_uri (addr_, protocol, address);
if (rc != 0)
return -1;
rc = check_protocol (protocol);
if (rc != 0)
return -1;
// Event notification only supported over inproc://
if (protocol != "inproc") {
errno = EPROTONOSUPPORT;
return -1;
}
// Register events to monitor
monitor_events = events_;
monitor_socket = zmq_socket (get_ctx (), ZMQ_PAIR);
if (monitor_socket == NULL)
return -1;
// Never block context termination on pending event messages
int linger = 0;
rc = zmq_setsockopt (monitor_socket, ZMQ_LINGER, &linger, sizeof (linger));
if (rc == -1)
stop_monitor ();
// Spawn the monitor socket endpoint
rc = zmq_bind (monitor_socket, addr_);
if (rc == -1)
stop_monitor ();
return rc;
}
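
// Usage sketch (illustrative): monitoring is normally set up through the
// public zmq_socket_monitor() wrapper, with a PAIR socket connected to the
// chosen inproc endpoint to read the events.
//
//     zmq_socket_monitor (s, "inproc://monitor.s", ZMQ_EVENT_ALL);
//     void *mon = zmq_socket (ctx, ZMQ_PAIR);
//     zmq_connect (mon, "inproc://monitor.s");
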
void zmq::socket_base_t::set_fd(zmq::fd_t fd_)
{
file_desc = fd_;
}
zmq::fd_t zmq::socket_base_t::fd()
{
return file_desc;
}
void zmq::socket_base_t::event_connected (std::string &addr_, int fd_)
{
if (monitor_events & ZMQ_EVENT_CONNECTED)
monitor_event (ZMQ_EVENT_CONNECTED, fd_, addr_);
}
void zmq::socket_base_t::event_connect_delayed (std::string &addr_, int err_)
{
if (monitor_events & ZMQ_EVENT_CONNECT_DELAYED)
monitor_event (ZMQ_EVENT_CONNECT_DELAYED, err_, addr_);
}
void zmq::socket_base_t::event_connect_retried (std::string &addr_, int interval_)
{
if (monitor_events & ZMQ_EVENT_CONNECT_RETRIED)
monitor_event (ZMQ_EVENT_CONNECT_RETRIED, interval_, addr_);
}
void zmq::socket_base_t::event_listening (std::string &addr_, int fd_)
{
if (monitor_events & ZMQ_EVENT_LISTENING)
monitor_event (ZMQ_EVENT_LISTENING, fd_, addr_);
}
void zmq::socket_base_t::event_bind_failed (std::string &addr_, int err_)
{
if (monitor_events & ZMQ_EVENT_BIND_FAILED)
monitor_event (ZMQ_EVENT_BIND_FAILED, err_, addr_);
}
void zmq::socket_base_t::event_accepted (std::string &addr_, int fd_)
{
if (monitor_events & ZMQ_EVENT_ACCEPTED)
monitor_event (ZMQ_EVENT_ACCEPTED, fd_, addr_);
}
void zmq::socket_base_t::event_accept_failed (std::string &addr_, int err_)
{
if (monitor_events & ZMQ_EVENT_ACCEPT_FAILED)
monitor_event (ZMQ_EVENT_ACCEPT_FAILED, err_, addr_);
}
void zmq::socket_base_t::event_closed (std::string &addr_, int fd_)
{
if (monitor_events & ZMQ_EVENT_CLOSED)
monitor_event (ZMQ_EVENT_CLOSED, fd_, addr_);
}
void zmq::socket_base_t::event_close_failed (std::string &addr_, int err_)
{
if (monitor_events & ZMQ_EVENT_CLOSE_FAILED)
monitor_event (ZMQ_EVENT_CLOSE_FAILED, err_, addr_);
}
void zmq::socket_base_t::event_disconnected (std::string &addr_, int fd_)
{
if (monitor_events & ZMQ_EVENT_DISCONNECTED)
monitor_event (ZMQ_EVENT_DISCONNECTED, fd_, addr_);
}
// Send a monitor event
void zmq::socket_base_t::monitor_event (int event_, int value_, const std::string &addr_)
{
if (monitor_socket) {
// Send event in first frame
zmq_msg_t msg;
zmq_msg_init_size (&msg, 6);
uint8_t *data = (uint8_t *) zmq_msg_data (&msg);
*(uint16_t *) (data + 0) = (uint16_t) event_;
*(uint32_t *) (data + 2) = (uint32_t) value_;
zmq_sendmsg (monitor_socket, &msg, ZMQ_SNDMORE);
// Send address in second frame
zmq_msg_init_size (&msg, addr_.size());
memcpy (zmq_msg_data (&msg), addr_.c_str (), addr_.size ());
zmq_sendmsg (monitor_socket, &msg, 0);
}
}
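
// Reader-side sketch (illustrative; assumes 'mon' is a PAIR socket
// connected to the monitor endpoint) matching the two-frame layout above:
// frame 1 carries a 16-bit event id followed by a 32-bit value, frame 2
// carries the endpoint address as raw, unterminated bytes.
//
//     zmq_msg_t frame;
//     zmq_msg_init (&frame);
//     zmq_msg_recv (&frame, mon, 0);
//     uint8_t *data = (uint8_t *) zmq_msg_data (&frame);
//     uint16_t event = *(uint16_t *) (data + 0);
//     uint32_t value = *(uint32_t *) (data + 2);
//     zmq_msg_close (&frame);
//     zmq_msg_init (&frame);
//     zmq_msg_recv (&frame, mon, 0);   //  address frame
//     zmq_msg_close (&frame);
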
void zmq::socket_base_t::stop_monitor (void)
{
if (monitor_socket) {
if (monitor_events & ZMQ_EVENT_MONITOR_STOPPED)
monitor_event (ZMQ_EVENT_MONITOR_STOPPED, 0, "");
zmq_close (monitor_socket);
monitor_socket = NULL;
monitor_events = 0;
}
}