/*
    Copyright (c) 2007-2010 iMatix Corporation

    This file is part of 0MQ.

    0MQ is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    0MQ is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <new>
#include <string>
#include <algorithm>

#include "../include/zmq.h"

#include "platform.hpp"

#if defined ZMQ_HAVE_WINDOWS
#include "windows.hpp"
#if defined _MSC_VER
#include <intrin.h>
#endif
#else
#include <unistd.h>
#endif

#include "socket_base.hpp"
#include "zmq_listener.hpp"
#include "zmq_connecter.hpp"
#include "io_thread.hpp"
#include "session.hpp"
#include "config.hpp"
#include "owned.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "ctx.hpp"
#include "pgm_sender.hpp"
#include "pgm_receiver.hpp"
#include "likely.hpp"
#include "pair.hpp"
#include "pub.hpp"
#include "sub.hpp"
#include "req.hpp"
#include "rep.hpp"
#include "pull.hpp"
#include "push.hpp"
#include "xreq.hpp"
#include "xrep.hpp"
#include "uuid.hpp"

//  If RDTSC is available we use it to prevent excessive
//  polling for commands. The nice thing here is that it will work on any
//  system with x86 architecture and gcc or MSVC compiler.
#if (defined __GNUC__ && (defined __i386__ || defined __x86_64__)) ||\
    (defined _MSC_VER && (defined _M_IX86 || defined _M_X64))
#define ZMQ_DELAY_COMMANDS
#endif

zmq::socket_base_t *zmq::socket_base_t::create (int type_, class ctx_t *parent_,
    uint32_t slot_)
{
    socket_base_t *s = NULL;
    switch (type_) {

    case ZMQ_PAIR:
        s = new (std::nothrow) pair_t (parent_, slot_);
        break;
    case ZMQ_PUB:
        s = new (std::nothrow) pub_t (parent_, slot_);
        break;
    case ZMQ_SUB:
        s = new (std::nothrow) sub_t (parent_, slot_);
        break;
    case ZMQ_REQ:
        s = new (std::nothrow) req_t (parent_, slot_);
        break;
    case ZMQ_REP:
        s = new (std::nothrow) rep_t (parent_, slot_);
        break;
    case ZMQ_XREQ:
        s = new (std::nothrow) xreq_t (parent_, slot_);
        break;
    case ZMQ_XREP:
        s = new (std::nothrow) xrep_t (parent_, slot_);
        break;
    case ZMQ_PULL:
        s = new (std::nothrow) pull_t (parent_, slot_);
        break;
    case ZMQ_PUSH:
        s = new (std::nothrow) push_t (parent_, slot_);
        break;
    default:
        errno = EINVAL;
        return NULL;
    }
    zmq_assert (s);
    return s;
}
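
//  Usage sketch (illustrative, not part of the library): this factory is
//  invoked by the context when the application calls zmq_socket. The 'ctx'
//  and 'slot' values below are hypothetical placeholders.
//
//      socket_base_t *s = socket_base_t::create (ZMQ_REP, ctx, slot);
//      if (!s)
//          return NULL;   //  Unknown socket type; errno is set to EINVAL.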

zmq::socket_base_t::socket_base_t (ctx_t *parent_, uint32_t slot_) :
    object_t (parent_, slot_),
    zombie (false),
    last_processing_time (0),
    pending_term_acks (0),
    ticks (0),
    rcvmore (false),
    sent_seqnum (0),
    processed_seqnum (0),
    next_ordinal (1)
{
}

zmq::socket_base_t::~socket_base_t ()
{
}

zmq::signaler_t *zmq::socket_base_t::get_signaler ()
{
    return &signaler;
}

void zmq::socket_base_t::stop ()
{
    //  Called by ctx when it is terminated (zmq_term).
    //  The 'stop' command is sent from the threads that called zmq_term to
    //  the thread owning the socket. This way, a blocking call in the
    //  owner thread can be interrupted.
    send_stop ();
}

void zmq::socket_base_t::attach_pipes (class reader_t *inpipe_,
    class writer_t *outpipe_, const blob_t &peer_identity_)
{
    //  If the peer hasn't specified its identity, generate one.
    if (peer_identity_.size ()) {
        xattach_pipes (inpipe_, outpipe_, peer_identity_);
    }
    else {
        blob_t identity (1, 0);
        identity.append (uuid_t ().to_blob (), uuid_t::uuid_blob_len);
        xattach_pipes (inpipe_, outpipe_, identity);
    }
}

int zmq::socket_base_t::setsockopt (int option_, const void *optval_,
    size_t optvallen_)
{
    if (unlikely (zombie)) {
        errno = ETERM;
        return -1;
    }

    //  First, check whether the specific socket type overloads the option.
    int rc = xsetsockopt (option_, optval_, optvallen_);
    if (rc == 0 || errno != EINVAL)
        return rc;

    //  If the socket type doesn't support the option, pass it to
    //  the generic option parser.
    return options.setsockopt (option_, optval_, optvallen_);
}

int zmq::socket_base_t::getsockopt (int option_, void *optval_,
    size_t *optvallen_)
{
    if (unlikely (zombie)) {
        errno = ETERM;
        return -1;
    }

    if (option_ == ZMQ_RCVMORE) {
        if (*optvallen_ < sizeof (int64_t)) {
            errno = EINVAL;
            return -1;
        }
        *((int64_t*) optval_) = rcvmore ? 1 : 0;
        *optvallen_ = sizeof (int64_t);
        return 0;
    }

    if (option_ == ZMQ_FD) {
        if (*optvallen_ < sizeof (fd_t)) {
            errno = EINVAL;
            return -1;
        }
        *((fd_t*) optval_) = signaler.get_fd ();
        *optvallen_ = sizeof (fd_t);
        return 0;
    }

    if (option_ == ZMQ_EVENTS) {
        if (*optvallen_ < sizeof (uint32_t)) {
            errno = EINVAL;
            return -1;
        }
        process_commands (false, false);
        *((uint32_t*) optval_) = 0;
        if (has_out ())
            *((uint32_t*) optval_) |= ZMQ_POLLOUT;
        if (has_in ())
            *((uint32_t*) optval_) |= ZMQ_POLLIN;
        *optvallen_ = sizeof (uint32_t);
        return 0;
    }

    return options.getsockopt (option_, optval_, optvallen_);
}
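
//  Usage sketch (illustrative only): integrating the socket into an external
//  event loop via ZMQ_FD and ZMQ_EVENTS, using the public C API from zmq.h
//  on a POSIX system (where fd_t is a plain int). The descriptor only signals
//  that commands are pending, so ZMQ_EVENTS must be re-checked after every
//  wakeup. 's' is assumed to be a valid socket.
//
//      int fd;
//      size_t fd_size = sizeof (fd);
//      zmq_getsockopt (s, ZMQ_FD, &fd, &fd_size);
//
//      struct pollfd pfd = {fd, POLLIN, 0};
//      poll (&pfd, 1, -1);
//
//      uint32_t events;
//      size_t events_size = sizeof (events);
//      zmq_getsockopt (s, ZMQ_EVENTS, &events, &events_size);
//      if (events & ZMQ_POLLIN)
//          ;   //  A message can be received without blocking.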

int zmq::socket_base_t::bind (const char *addr_)
{
    if (unlikely (zombie)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string addr_type;
    std::string addr_args;

    std::string addr (addr_);
    std::string::size_type pos = addr.find ("://");

    if (pos == std::string::npos) {
        errno = EINVAL;
        return -1;
    }

    addr_type = addr.substr (0, pos);
    addr_args = addr.substr (pos + 3);

    if (addr_type == "inproc")
        return register_endpoint (addr_args.c_str (), this);

    if (addr_type == "tcp" || addr_type == "ipc") {

#if defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
        if (addr_type == "ipc") {
            errno = EPROTONOSUPPORT;
            return -1;
        }
#endif

        zmq_listener_t *listener = new (std::nothrow) zmq_listener_t (
            choose_io_thread (options.affinity), this, options);
        zmq_assert (listener);
        int rc = listener->set_address (addr_type.c_str (), addr_args.c_str ());
        if (rc != 0) {
            delete listener;
            return -1;
        }

        send_plug (listener);
        send_own (this, listener);
        return 0;
    }

#if defined ZMQ_HAVE_OPENPGM
    if (addr_type == "pgm" || addr_type == "epgm") {
        //  In the case of PGM, bind behaves the same as connect.
        return connect (addr_);
    }
#endif

    //  Unknown protocol.
    errno = EPROTONOSUPPORT;
    return -1;
}
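
//  Usage sketch (illustrative only): the accepted endpoint syntax is
//  "<transport>://<arguments>". Via the public C API, for example:
//
//      zmq_bind (s, "tcp://127.0.0.1:5555");   //  TCP listener.
//      zmq_bind (s, "ipc:///tmp/feeds.ipc");   //  UNIX domain socket (POSIX).
//      zmq_bind (s, "inproc://my-endpoint");   //  In-process endpoint.
//      zmq_bind (s, "epgm://eth0;239.192.1.1:5555");   //  PGM over UDP
//                                              //  (OpenPGM builds only).
//
//  A missing "://" yields EINVAL; an unknown transport EPROTONOSUPPORT.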

int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (zombie)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string addr_type;
    std::string addr_args;

    std::string addr (addr_);
    std::string::size_type pos = addr.find ("://");

    if (pos == std::string::npos) {
        errno = EINVAL;
        return -1;
    }

    addr_type = addr.substr (0, pos);
    addr_args = addr.substr (pos + 3);

    if (addr_type == "inproc") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow the generic pipe creation algorithm.

        //  Find the peer socket.
        socket_base_t *peer = find_endpoint (addr_args.c_str ());
        if (!peer)
            return -1;

        reader_t *inpipe_reader = NULL;
        writer_t *inpipe_writer = NULL;
        reader_t *outpipe_reader = NULL;
        writer_t *outpipe_writer = NULL;

        //  Create inbound pipe, if required.
        if (options.requires_in)
            create_pipe (this, peer, options.hwm, options.swap,
                &inpipe_reader, &inpipe_writer);

        //  Create outbound pipe, if required.
        if (options.requires_out)
            create_pipe (peer, this, options.hwm, options.swap,
                &outpipe_reader, &outpipe_writer);

        //  Attach the pipes to this socket object.
        attach_pipes (inpipe_reader, outpipe_writer, blob_t ());

        //  Attach the pipes to the peer socket. Note that the peer's seqnum
        //  was incremented in the find_endpoint function. The callee is
        //  notified about that fact via the last parameter.
        send_bind (peer, outpipe_reader, inpipe_writer,
            options.identity, false);

        return 0;
    }

    //  Create unnamed session.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    session_t *session = new (std::nothrow) session_t (io_thread,
        this, options);
    zmq_assert (session);

    //  If the 'immediate connect' feature is required, we'll create the pipes
    //  to the session straight away. Otherwise, they'll be created by the
    //  session once the connection is established.
    if (options.immediate_connect) {

        reader_t *inpipe_reader = NULL;
        writer_t *inpipe_writer = NULL;
        reader_t *outpipe_reader = NULL;
        writer_t *outpipe_writer = NULL;

        //  Create inbound pipe, if required.
        if (options.requires_in)
            create_pipe (this, session, options.hwm, options.swap,
                &inpipe_reader, &inpipe_writer);

        //  Create outbound pipe, if required.
        if (options.requires_out)
            create_pipe (session, this, options.hwm, options.swap,
                &outpipe_reader, &outpipe_writer);

        //  Attach the pipes to the socket object.
        attach_pipes (inpipe_reader, outpipe_writer, blob_t ());

        //  Attach the pipes to the session object.
        session->attach_pipes (outpipe_reader, inpipe_writer, blob_t ());
    }

    //  Activate the session.
    send_plug (session);
    send_own (this, session);

    if (addr_type == "tcp" || addr_type == "ipc") {

#if defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
        //  Windows named pipes are not compatible with Winsock API.
        //  There's no UNIX domain socket implementation on OpenVMS.
        if (addr_type == "ipc") {
            errno = EPROTONOSUPPORT;
            return -1;
        }
#endif

        //  Create the connecter object. Supply it with the session name
        //  so that it can bind the new connection to the session once
        //  it is established.
        zmq_connecter_t *connecter = new (std::nothrow) zmq_connecter_t (
            choose_io_thread (options.affinity), this, options,
            session->get_ordinal (), false);
        zmq_assert (connecter);
        int rc = connecter->set_address (addr_type.c_str (), addr_args.c_str ());
        if (rc != 0) {
            delete connecter;
            return -1;
        }
        send_plug (connecter);
        send_own (this, connecter);

        return 0;
    }

#if defined ZMQ_HAVE_OPENPGM
    if (addr_type == "pgm" || addr_type == "epgm") {

        //  If the socket type requires bi-directional communication,
        //  multicast is not an option (it is uni-directional).
        if (options.requires_in && options.requires_out) {
            errno = ENOCOMPATPROTO;
            return -1;
        }

        //  For epgm, the pgm transport with UDP encapsulation is used.
        bool udp_encapsulation = (addr_type == "epgm");

        //  At this point we'll create message pipes to the session straight
        //  away. There's no point in delaying it as no concept of 'connect'
        //  exists with PGM anyway.
        if (options.requires_out) {

            //  PGM sender.
            pgm_sender_t *pgm_sender = new (std::nothrow) pgm_sender_t (
                choose_io_thread (options.affinity), options);
            zmq_assert (pgm_sender);

            int rc = pgm_sender->init (udp_encapsulation, addr_args.c_str ());
            if (rc != 0) {
                delete pgm_sender;
                return -1;
            }

            send_attach (session, pgm_sender, blob_t ());
        }
        else if (options.requires_in) {

            //  PGM receiver.
            pgm_receiver_t *pgm_receiver = new (std::nothrow) pgm_receiver_t (
                choose_io_thread (options.affinity), options);
            zmq_assert (pgm_receiver);

            int rc = pgm_receiver->init (udp_encapsulation, addr_args.c_str ());
            if (rc != 0) {
                delete pgm_receiver;
                return -1;
            }

            send_attach (session, pgm_receiver, blob_t ());
        }
        else
            zmq_assert (false);

        return 0;
    }
#endif

    //  Unknown protocol.
    errno = EPROTONOSUPPORT;
    return -1;
}
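
//  Usage sketch (illustrative only): with the inproc transport the endpoint
//  must already be bound when connect is called, since find_endpoint looks
//  the peer up in the context's endpoint table:
//
//      void *a = zmq_socket (ctx, ZMQ_PAIR);
//      void *b = zmq_socket (ctx, ZMQ_PAIR);
//      zmq_bind (a, "inproc://ep");      //  Registers the endpoint.
//      zmq_connect (b, "inproc://ep");   //  Fails if called before the bind.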

int zmq::socket_base_t::send (::zmq_msg_t *msg_, int flags_)
{
    if (unlikely (zombie)) {
        errno = ETERM;
        return -1;
    }

    //  Process pending commands, if any.
    process_commands (false, true);
    if (unlikely (zombie)) {
        errno = ETERM;
        return -1;
    }

    //  At this point we impose the MORE flag on the message.
    if (flags_ & ZMQ_SNDMORE)
        msg_->flags |= ZMQ_MSG_MORE;

    //  Try to send the message.
    int rc = xsend (msg_, flags_);
    if (rc == 0)
        return 0;

    //  In case of non-blocking send we'll simply propagate
    //  the error - including EAGAIN - upwards.
    if (flags_ & ZMQ_NOBLOCK)
        return -1;

    //  Oops, we couldn't send the message. Wait for the next
    //  command, process it and try to send the message again.
    while (rc != 0) {
        if (errno != EAGAIN)
            return -1;
        process_commands (true, false);
        if (unlikely (zombie)) {
            errno = ETERM;
            return -1;
        }
        rc = xsend (msg_, flags_);
    }
    return 0;
}
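
//  Usage sketch (illustrative only): ZMQ_SNDMORE is how the application marks
//  message parts; internally it becomes the ZMQ_MSG_MORE flag set above. This
//  uses the 0MQ 2.x C API, where zmq_send takes a zmq_msg_t:
//
//      zmq_msg_t part;
//      zmq_msg_init_size (&part, 5);
//      memcpy (zmq_msg_data (&part), "hello", 5);
//      zmq_send (s, &part, ZMQ_SNDMORE);   //  More parts follow.
//      zmq_msg_init_size (&part, 5);
//      memcpy (zmq_msg_data (&part), "world", 5);
//      zmq_send (s, &part, 0);             //  Final part.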

int zmq::socket_base_t::recv (::zmq_msg_t *msg_, int flags_)
{
    if (unlikely (zombie)) {
        errno = ETERM;
        return -1;
    }

    //  Get the message.
    int rc = xrecv (msg_, flags_);
    int err = errno;

    //  Once every inbound_poll_rate messages check for signals and process
    //  incoming commands. This happens only if we are not polling altogether
    //  because there are messages available all the time. If poll occurs,
    //  ticks is set to zero and thus we avoid this code.
    //
    //  Note that 'recv' uses a different command throttling algorithm (the
    //  one described above) from the one used by 'send'. This is because
    //  counting ticks is more efficient than doing rdtsc all the time.
    if (++ticks == inbound_poll_rate) {
        process_commands (false, false);
        if (unlikely (zombie)) {
            errno = ETERM;
            return -1;
        }
        ticks = 0;
    }

    //  If we have the message, return immediately.
    if (rc == 0) {
        rcvmore = msg_->flags & ZMQ_MSG_MORE;
        if (rcvmore)
            msg_->flags &= ~ZMQ_MSG_MORE;
        return 0;
    }

    //  If we don't have the message, restore the original cause of the problem.
    errno = err;

    //  If the message cannot be fetched immediately, there are two scenarios.
    //  For non-blocking recv, commands are processed in case there's a revive
    //  command already waiting in a command pipe. If there is none, return
    //  EAGAIN.
    if (flags_ & ZMQ_NOBLOCK) {
        if (errno != EAGAIN)
            return -1;
        process_commands (false, false);
        if (unlikely (zombie)) {
            errno = ETERM;
            return -1;
        }
        ticks = 0;

        rc = xrecv (msg_, flags_);
        if (rc == 0) {
            rcvmore = msg_->flags & ZMQ_MSG_MORE;
            if (rcvmore)
                msg_->flags &= ~ZMQ_MSG_MORE;
        }
        return rc;
    }

    //  In the blocking scenario, commands are processed over and over again
    //  until we are able to fetch a message.
    while (rc != 0) {
        if (errno != EAGAIN)
            return -1;
        process_commands (true, false);
        if (unlikely (zombie)) {
            errno = ETERM;
            return -1;
        }
        rc = xrecv (msg_, flags_);
        ticks = 0;
    }

    rcvmore = msg_->flags & ZMQ_MSG_MORE;
    if (rcvmore)
        msg_->flags &= ~ZMQ_MSG_MORE;
    return 0;
}
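
//  Usage sketch (illustrative only): the rcvmore flag cleared from the message
//  above is surfaced to applications through the ZMQ_RCVMORE socket option,
//  so a whole multipart message can be drained like this:
//
//      int64_t more = 1;
//      size_t more_size = sizeof (more);
//      while (more) {
//          zmq_msg_t part;
//          zmq_msg_init (&part);
//          zmq_recv (s, &part, 0);
//          //  ... use the part ...
//          zmq_msg_close (&part);
//          zmq_getsockopt (s, ZMQ_RCVMORE, &more, &more_size);
//      }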

int zmq::socket_base_t::close ()
{
    //  Socket becomes a zombie. From now on all newly arrived pipes (bind
    //  command) and I/O objects (own command) are immediately terminated.
    //  Also, any further requests for I/O object termination are ignored
    //  (we are going to shut them down anyway -- this way we assure that
    //  we do so once only).
    zombie = true;

    //  Unregister all inproc endpoints associated with this socket.
    //  Doing this we make sure that no new pipes from other sockets (inproc)
    //  will be initiated. However, there may be some inproc pipes already
    //  on the fly, but not yet received by this socket. To get finished
    //  with them we wait for the on-the-fly commands below. This should
    //  happen very quickly. There's no way to block here for an extensive
    //  period of time.
    unregister_endpoints (this);
    while (processed_seqnum != sent_seqnum.get ())
        process_commands (true, false);
    //  TODO: My feeling is that the above has to be done in the dezombification
    //  loop, otherwise we may end up with the number of I/O objects dropping to
    //  zero even though there are more I/O objects on the way.

    //  The above process ensures that only pipes that will arrive from now on
    //  are those initiated by sessions. These in turn have a nice property of
    //  not arriving totally asynchronously. When a session -- being an I/O
    //  object -- acknowledges its termination we are 100% sure that we'll get
    //  no new pipe from it.

    //  Start termination of all the pipes presently associated with the socket.
    xterm_pipes ();

    //  Send a termination request to all associated I/O objects and
    //  start waiting for the acks. Note that the actual waiting is not done
    //  in this function. Rather it is done in a delayed manner as the socket
    //  is being dezombified. The reason is that I/O object shutdown can take
    //  a considerable amount of time in case there's still a lot of data to
    //  push to the network.
    for (io_objects_t::iterator it = io_objects.begin ();
          it != io_objects.end (); it++)
        send_term (*it);
    pending_term_acks += io_objects.size ();
    io_objects.clear ();

    //  Note that new I/O objects may arrive even in zombie state (say a new
    //  session initiated by a listener object); however, in such a case the
    //  number of pending acks never drops to zero. Here's the scenario: We
    //  have a pending ack for the listener object. Then an 'own' command
    //  arrives from the listener notifying the socket about a new session. It
    //  immediately triggers a termination request and the number of pending
    //  acks is incremented. Then a term_ack arrives from the listener and the
    //  number of pending acks is decremented. Later on, the session itself
    //  will ack its termination. During the process, the number of pending
    //  acks never dropped to zero and thus the socket remains safely in the
    //  zombie state.

    //  Transfer the ownership of the socket from this application thread
    //  to the context which will take care of the rest of the shutdown process.
    zombify (this);

    return 0;
}
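
//  Usage sketch (illustrative only): zmq_close only starts this asynchronous
//  shutdown; the context finishes reaping the zombie. The usual teardown
//  order from application code is therefore:
//
//      zmq_close (s);    //  Socket becomes a zombie, owned by the context.
//      zmq_term (ctx);   //  Finishes dezombification of remaining zombies.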

void zmq::socket_base_t::inc_seqnum ()
{
    //  Be aware: This function may be called from a different thread!
    sent_seqnum.add (1);
}

bool zmq::socket_base_t::has_in ()
{
    return xhas_in ();
}

bool zmq::socket_base_t::has_out ()
{
    return xhas_out ();
}

bool zmq::socket_base_t::register_session (const blob_t &peer_identity_,
    session_t *session_)
{
    sessions_sync.lock ();
    bool registered = named_sessions.insert (
        std::make_pair (peer_identity_, session_)).second;
    sessions_sync.unlock ();
    return registered;
}

void zmq::socket_base_t::unregister_session (const blob_t &peer_identity_)
{
    sessions_sync.lock ();
    named_sessions_t::iterator it = named_sessions.find (peer_identity_);
    zmq_assert (it != named_sessions.end ());
    named_sessions.erase (it);
    sessions_sync.unlock ();
}

zmq::session_t *zmq::socket_base_t::find_session (const blob_t &peer_identity_)
{
    sessions_sync.lock ();
    named_sessions_t::iterator it = named_sessions.find (peer_identity_);
    if (it == named_sessions.end ()) {
        sessions_sync.unlock ();
        return NULL;
    }
    session_t *session = it->second;

    //  Prepare the session for subsequent attach command.
    session->inc_seqnum ();

    sessions_sync.unlock ();
    return session;
}

uint64_t zmq::socket_base_t::register_session (session_t *session_)
{
    sessions_sync.lock ();
    uint64_t ordinal = next_ordinal;
    next_ordinal++;
    unnamed_sessions.insert (std::make_pair (ordinal, session_));
    sessions_sync.unlock ();
    return ordinal;
}

void zmq::socket_base_t::unregister_session (uint64_t ordinal_)
{
    sessions_sync.lock ();
    unnamed_sessions_t::iterator it = unnamed_sessions.find (ordinal_);
    zmq_assert (it != unnamed_sessions.end ());
    unnamed_sessions.erase (it);
    sessions_sync.unlock ();
}

zmq::session_t *zmq::socket_base_t::find_session (uint64_t ordinal_)
{
    sessions_sync.lock ();

    unnamed_sessions_t::iterator it = unnamed_sessions.find (ordinal_);
    if (it == unnamed_sessions.end ()) {
        sessions_sync.unlock ();
        return NULL;
    }
    session_t *session = it->second;

    //  Prepare the session for subsequent attach command.
    session->inc_seqnum ();

    sessions_sync.unlock ();
    return session;
}

bool zmq::socket_base_t::dezombify ()
{
    zmq_assert (zombie);

    //  Process any commands from other threads/sockets that may be available
    //  at the moment.
    process_commands (false, false);

    //  If there are no more pipes attached and there are no more I/O objects
    //  owned by the socket, we can kill the zombie.
    if (!pending_term_acks && !xhas_pipes ()) {

        //  If all objects have acknowledged their termination there should
        //  definitely be no I/O object remaining in the list.
        zmq_assert (io_objects.empty ());

        //  Check whether there are no session leaks.
        sessions_sync.lock ();
        zmq_assert (named_sessions.empty ());
        zmq_assert (unnamed_sessions.empty ());
        sessions_sync.unlock ();

        //  Deallocate all the resources tied to this socket.
        delete this;

        //  Notify the caller about the fact that the zombie is finally dead.
        return true;
    }

    //  The zombie remains undead.
    return false;
}

void zmq::socket_base_t::process_commands (bool block_, bool throttle_)
{
    bool received;
    command_t cmd;
    if (block_) {
        received = signaler.recv (&cmd, true);
        zmq_assert (received);
    }
    else {

#if defined ZMQ_DELAY_COMMANDS
        //  Optimised version of command processing - it doesn't have to check
        //  for incoming commands each time. It does so only if a certain time
        //  has elapsed since the last command processing. The command delay
        //  varies depending on CPU speed: it's ~1ms on a 3GHz CPU, ~2ms on a
        //  1.5GHz CPU etc. The optimisation makes sense only on platforms
        //  where getting a timestamp is a very cheap operation (tens of
        //  nanoseconds).
        if (throttle_) {

            //  Get timestamp counter.
#if defined __GNUC__
            uint32_t low;
            uint32_t high;
            __asm__ volatile ("rdtsc" : "=a" (low), "=d" (high));
            uint64_t current_time = (uint64_t) high << 32 | low;
#elif defined _MSC_VER
            uint64_t current_time = __rdtsc ();
#else
#error
#endif

            //  Check whether the command delay has elapsed since the last
            //  command processing.
            if (current_time - last_processing_time <= max_command_delay)
                return;
            last_processing_time = current_time;
        }
#endif

        //  Check whether there are any commands pending for this thread.
        received = signaler.recv (&cmd, false);
    }

    //  Process all the commands available at the moment.
    while (received) {
        cmd.destination->process_command (cmd);
        received = signaler.recv (&cmd, false);
    }
}
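
//  Worked example (assuming max_command_delay = 3000000 TSC ticks, consistent
//  with the figures in the comment above): the real-time delay is
//  max_command_delay / clock_rate, i.e. 3000000 / 3e9 Hz = 1 ms on a 3GHz CPU
//  and 3000000 / 1.5e9 Hz = 2 ms on a 1.5GHz one.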

void zmq::socket_base_t::process_stop ()
{
    //  Here, someone has called zmq_term while the socket was still alive.
    //  We'll zombify it so that any blocking call is interrupted and any
    //  further attempt to use the socket will return ETERM. The user is still
    //  responsible for calling zmq_close on the socket though!
    zombie = true;
}

void zmq::socket_base_t::process_own (owned_t *object_)
{
    //  If the socket is already being shut down, new owned objects are
    //  immediately asked to terminate.
    if (zombie) {
        send_term (object_);
        pending_term_acks++;
        return;
    }

    io_objects.insert (object_);
}

void zmq::socket_base_t::process_bind (reader_t *in_pipe_, writer_t *out_pipe_,
    const blob_t &peer_identity_)
{
    //  If the socket is already being shut down, the termination process on
    //  the new pipes is started immediately. However, they are still attached
    //  so as to let the process finish in a decent manner.
    if (unlikely (zombie)) {
        if (in_pipe_)
            in_pipe_->terminate ();
        if (out_pipe_)
            out_pipe_->terminate ();
    }

    attach_pipes (in_pipe_, out_pipe_, peer_identity_);
}

void zmq::socket_base_t::process_term_req (owned_t *object_)
{
    //  When shutting down we can ignore termination requests from owned
    //  objects. The termination request was already sent to the object.
    if (zombie)
        return;

    //  If the I/O object is well and alive, ask it to terminate.
    io_objects_t::iterator it = std::find (io_objects.begin (),
        io_objects.end (), object_);

    //  If not found, we assume that the termination request was already sent
    //  to the object, so we can safely ignore the request.
    if (it == io_objects.end ())
        return;

    pending_term_acks++;
    io_objects.erase (it);
    send_term (object_);
}

void zmq::socket_base_t::process_term_ack ()
{
    zmq_assert (pending_term_acks);
    pending_term_acks--;
}

void zmq::socket_base_t::process_seqnum ()
{
    processed_seqnum++;
}

int zmq::socket_base_t::xsetsockopt (int option_, const void *optval_,
    size_t optvallen_)
{
    errno = EINVAL;
    return -1;
}

bool zmq::socket_base_t::xhas_out ()
{
    return false;
}

int zmq::socket_base_t::xsend (zmq_msg_t *msg_, int options_)
{
    errno = ENOTSUP;
    return -1;
}

bool zmq::socket_base_t::xhas_in ()
{
    return false;
}

int zmq::socket_base_t::xrecv (zmq_msg_t *msg_, int options_)
{
    errno = ENOTSUP;
    return -1;
}