/*
    Copyright (c) 2009-2011 250bpm s.r.o.
    Copyright (c) 2011 iMatix Corporation
    Copyright (c) 2011 VMware, Inc.
    Copyright (c) 2007-2011 Other contributors as noted in the AUTHORS file

    This file is part of 0MQ.

    0MQ is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    0MQ is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "xrep.hpp"
|
2010-02-16 18:30:38 +01:00
|
|
|
#include "pipe.hpp"
|
2011-06-22 11:02:16 +02:00
|
|
|
#include "wire.hpp"
|
|
|
|
#include "random.hpp"
|
2011-09-28 08:03:14 +02:00
|
|
|
#include "likely.hpp"
|
2011-04-21 22:27:48 +02:00
|
|
|
#include "err.hpp"
|
2009-12-13 14:45:23 +01:00
|
|
|
|
2010-11-05 16:38:52 +01:00
|
|
|
//  XREP socket constructor. XREP routes outbound messages by a peer-ID
//  prefix and tags inbound messages with the originating peer's ID.
zmq::xrep_t::xrep_t (class ctx_t *parent_, uint32_t tid_) :
    socket_base_t (parent_, tid_),
    prefetched (false),     //  No inbound message part is held back yet.
    more_in (false),        //  Not in the middle of reading a message.
    current_out (NULL),     //  No outbound pipe selected yet.
    more_out (false),       //  Not in the middle of writing a message.
    next_peer_id (generate_random ())   //  Start peer-ID allocation at a random point.
{
    options.type = ZMQ_XREP;

    //  TODO: Uncomment the following line when XREP will become true XREP
    //  rather than generic router socket.
    //  If peer disconnect there's noone to send reply to anyway. We can drop
    //  all the outstanding requests from that peer.
    //  options.delay_on_disconnect = false;

    //  Pre-initialise the buffer used to stash the first part of each
    //  inbound message while its peer-ID prefix is being delivered.
    prefetched_msg.init ();
}
|
|
|
|
|
|
|
|
zmq::xrep_t::~xrep_t ()
{
    //  All pipes must have been detached (via xterminated) before the
    //  socket itself is destroyed.
    zmq_assert (outpipes.empty ());
    prefetched_msg.close ();
}
|
|
|
|
|
2011-07-15 11:24:33 +02:00
|
|
|
//  Register a newly connected pipe: assign it a unique 32-bit peer ID,
//  record it in the outbound routing map and add it to the inbound
//  fair queue.
void zmq::xrep_t::xattach_pipe (pipe_t *pipe_)
{
    zmq_assert (pipe_);

    //  Pick a peer ID for the new pipe, skipping over any IDs already
    //  taken by existing outbound pipes. The scan wraps around the map
    //  so a free ID is always found.
    outpipes_t::iterator pos = outpipes.lower_bound (next_peer_id);
    if (!outpipes.empty ()) {
        for (;;) {
            if (pos == outpipes.end ())
                pos = outpipes.begin ();
            if (pos->first != next_peer_id)
                break;
            ++next_peer_id;
            ++pos;
        }
    }

    //  Register the pipe in the map of outbound pipes under the chosen ID.
    outpipe_t outpipe = {pipe_, true};
    bool inserted = outpipes.insert (
        outpipes_t::value_type (next_peer_id, outpipe)).second;
    zmq_assert (inserted);

    //  Remember the ID on the pipe itself and make it readable via the
    //  inbound fair queue.
    pipe_->set_pipe_id (next_peer_id);
    fq.attach (pipe_);

    //  Advance next peer ID so that if new connection is dropped shortly
    //  after its creation we don't accidentally get two subsequent peers
    //  with the same ID.
    ++next_peer_id;
}
|
|
|
|
|
2011-05-23 20:30:01 +02:00
|
|
|
//  Handle a pipe disconnect: drop it from both the inbound fair queue
//  and the outbound routing map.
void zmq::xrep_t::xterminated (pipe_t *pipe_)
{
    fq.terminated (pipe_);

    //  Find the matching outbound entry and remove it. If the pipe is the
    //  one currently selected for writing, deselect it as well.
    for (outpipes_t::iterator it = outpipes.begin ();
          it != outpipes.end (); ++it) {
        if (it->second.pipe != pipe_)
            continue;
        outpipes.erase (it);
        if (pipe_ == current_out)
            current_out = NULL;
        return;
    }

    //  A terminating pipe must always be present in the outbound map.
    zmq_assert (false);
}
|
|
|
|
|
2011-05-23 20:30:01 +02:00
|
|
|
//  A pipe has new inbound data; delegate the event to the fair queue.
void zmq::xrep_t::xread_activated (pipe_t *pipe_)
{
    fq.activated (pipe_);
}
|
|
|
|
|
2011-05-23 20:30:01 +02:00
|
|
|
//  A previously congested pipe became writable again; re-mark its
//  outbound entry as active so xsend may route to it once more.
void zmq::xrep_t::xwrite_activated (pipe_t *pipe_)
{
    for (outpipes_t::iterator it = outpipes.begin ();
          it != outpipes.end (); ++it) {
        if (it->second.pipe != pipe_)
            continue;
        //  The entry must have been deactivated earlier by a failed
        //  writability check.
        zmq_assert (!it->second.active);
        it->second.active = true;
        return;
    }

    //  The pipe must always be present in the outbound map.
    zmq_assert (false);
}
|
|
|
|
|
2011-04-21 22:27:48 +02:00
|
|
|
//  Send one part of an outbound message. The first part of every message
//  is a 4-byte peer-ID prefix selecting the destination pipe; subsequent
//  parts are routed to that pipe. Always returns 0 — unroutable messages
//  are silently dropped. flags_ is unused here.
int zmq::xrep_t::xsend (msg_t *msg_, int flags_)
{
    //  If this is the first part of the message it's the ID of the
    //  peer to send the message to.
    if (!more_out) {
        zmq_assert (!current_out);

        //  If we have malformed message (prefix with no subsequent message)
        //  then just silently ignore it.
        //  TODO: The connections should be killed instead.
        if (msg_->flags () & msg_t::more) {

            more_out = true;

            //  Find the pipe associated with the peer ID stored in the prefix.
            //  If there's no such pipe just silently ignore the message.
            if (msg_->size () == 4) {
                uint32_t peer_id = get_uint32 ((unsigned char*) msg_->data ());
                outpipes_t::iterator it = outpipes.find (peer_id);

                if (it != outpipes.end ()) {
                    current_out = it->second.pipe;
                    //  Probe writability with an empty message; if the pipe
                    //  is congested, deactivate it and drop the whole
                    //  message by resetting the routing state.
                    msg_t empty;
                    int rc = empty.init ();
                    errno_assert (rc == 0);
                    if (!current_out->check_write (&empty)) {
                        it->second.active = false;
                        more_out = false;
                        current_out = NULL;
                    }
                    rc = empty.close ();
                    errno_assert (rc == 0);
                }
            }
        }

        //  The prefix part itself is never forwarded; consume it and hand
        //  an empty message back to the caller.
        int rc = msg_->close ();
        errno_assert (rc == 0);
        rc = msg_->init ();
        errno_assert (rc == 0);
        return 0;
    }

    //  Check whether this is the last part of the message.
    more_out = msg_->flags () & msg_t::more ? true : false;

    //  Push the message into the pipe. If there's no out pipe, just drop it.
    if (current_out) {
        bool ok = current_out->write (msg_);
        if (unlikely (!ok))
            current_out = NULL;
        else if (!more_out) {
            //  Last part written: flush the pipe and deselect it so the
            //  next message starts with a fresh peer-ID prefix.
            current_out->flush ();
            current_out = NULL;
        }
    }
    else {
        int rc = msg_->close ();
        errno_assert (rc == 0);
    }

    //  Detach the message from the data buffer.
    int rc = msg_->init ();
    errno_assert (rc == 0);

    return 0;
}
|
|
|
|
|
2011-04-21 22:27:48 +02:00
|
|
|
//  Receive one part of an inbound message. At each message boundary the
//  real first part is stashed in prefetched_msg and a synthetic 4-byte
//  peer-ID part is returned first. Returns 0 on success, -1 (with errno
//  set by the fair queue) when nothing is available.
int zmq::xrep_t::xrecv (msg_t *msg_, int flags_)
{
    //  If there is a prefetched message, return it.
    if (prefetched) {
        int rc = msg_->move (prefetched_msg);
        errno_assert (rc == 0);
        more_in = msg_->flags () & msg_t::more ? true : false;
        prefetched = false;
        return 0;
    }

    //  Get next message part.
    pipe_t *pipe;
    int rc = fq.recvpipe (msg_, flags_, &pipe);
    if (rc != 0)
        return -1;

    //  If we are in the middle of reading a message, just return the next part.
    if (more_in) {
        more_in = msg_->flags () & msg_t::more ? true : false;
        return 0;
    }

    //  We are at the beginning of a new message. Move the message part we
    //  have to the prefetched and return the ID of the peer instead.
    rc = prefetched_msg.move (*msg_);
    errno_assert (rc == 0);
    prefetched = true;
    rc = msg_->close ();
    errno_assert (rc == 0);
    //  Build the synthetic identity part: 4 bytes carrying the source
    //  pipe's peer ID, flagged 'more' so the real payload follows.
    rc = msg_->init_size (4);
    errno_assert (rc == 0);
    put_uint32 ((unsigned char*) msg_->data (), pipe->get_pipe_id ());
    msg_->set_flags (msg_t::more);
    return 0;
}
|
|
|
|
|
2011-05-08 09:02:47 +02:00
|
|
|
int zmq::xrep_t::rollback (void)
|
|
|
|
{
|
|
|
|
if (current_out) {
|
|
|
|
current_out->rollback ();
|
|
|
|
current_out = NULL;
|
|
|
|
more_out = false;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-12-13 14:45:23 +01:00
|
|
|
bool zmq::xrep_t::xhas_in ()
|
|
|
|
{
|
2011-06-22 16:51:40 +02:00
|
|
|
if (prefetched)
|
2010-04-27 17:36:00 +02:00
|
|
|
return true;
|
2011-06-22 16:51:40 +02:00
|
|
|
return fq.has_in ();
|
2009-12-13 14:45:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool zmq::xrep_t::xhas_out ()
{
    //  In theory, XREP socket is always ready for writing. Whether an
    //  actual attempt to write succeeds depends on which pipe the message
    //  is going to be routed to.
    return true;
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  XREP session: a plain pass-through session with no extra behaviour;
//  the constructor just forwards all arguments to the base session.
zmq::xrep_session_t::xrep_session_t (io_thread_t *io_thread_, bool connect_,
      socket_base_t *socket_, const options_t &options_,
      const char *protocol_, const char *address_) :
    session_base_t (io_thread_, connect_, socket_, options_, protocol_,
        address_)
{
}
|
2009-12-13 14:45:23 +01:00
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Nothing to clean up beyond what the base session destructor does.
zmq::xrep_session_t::~xrep_session_t ()
{
}
|
2011-05-23 20:30:01 +02:00
|
|
|
|