mirror of
https://github.com/zeromq/libzmq.git
synced 2024-12-12 10:33:52 +01:00
Problem: formatting inconsistent
Solution: applied clang-format
This commit is contained in:
parent
6d8baea714
commit
41f459e1dc
245
include/zmq.h
245
include/zmq.h
@ -43,10 +43,10 @@
|
||||
#define ZMQ_VERSION_MINOR 2
|
||||
#define ZMQ_VERSION_PATCH 4
|
||||
|
||||
#define ZMQ_MAKE_VERSION(major, minor, patch) \
|
||||
((major) * 10000 + (minor) * 100 + (patch))
|
||||
#define ZMQ_VERSION \
|
||||
ZMQ_MAKE_VERSION(ZMQ_VERSION_MAJOR, ZMQ_VERSION_MINOR, ZMQ_VERSION_PATCH)
|
||||
#define ZMQ_MAKE_VERSION(major, minor, patch) \
|
||||
((major) *10000 + (minor) *100 + (patch))
|
||||
#define ZMQ_VERSION \
|
||||
ZMQ_MAKE_VERSION (ZMQ_VERSION_MAJOR, ZMQ_VERSION_MINOR, ZMQ_VERSION_PATCH)
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@ -66,7 +66,7 @@ extern "C" {
|
||||
|
||||
#ifdef __MINGW32__
|
||||
// Require Windows XP or higher with MinGW for getaddrinfo().
|
||||
#if(_WIN32_WINNT >= 0x0501)
|
||||
#if (_WIN32_WINNT >= 0x0501)
|
||||
#else
|
||||
#error You need at least Windows XP target
|
||||
#endif
|
||||
@ -76,49 +76,49 @@ extern "C" {
|
||||
|
||||
/* Handle DSO symbol visibility */
|
||||
#if defined _WIN32
|
||||
# if defined ZMQ_STATIC
|
||||
# define ZMQ_EXPORT
|
||||
# elif defined DLL_EXPORT
|
||||
# define ZMQ_EXPORT __declspec(dllexport)
|
||||
# else
|
||||
# define ZMQ_EXPORT __declspec(dllimport)
|
||||
# endif
|
||||
#if defined ZMQ_STATIC
|
||||
#define ZMQ_EXPORT
|
||||
#elif defined DLL_EXPORT
|
||||
#define ZMQ_EXPORT __declspec(dllexport)
|
||||
#else
|
||||
# if defined __SUNPRO_C || defined __SUNPRO_CC
|
||||
# define ZMQ_EXPORT __global
|
||||
# elif (defined __GNUC__ && __GNUC__ >= 4) || defined __INTEL_COMPILER
|
||||
# define ZMQ_EXPORT __attribute__ ((visibility("default")))
|
||||
# else
|
||||
# define ZMQ_EXPORT
|
||||
# endif
|
||||
#define ZMQ_EXPORT __declspec(dllimport)
|
||||
#endif
|
||||
#else
|
||||
#if defined __SUNPRO_C || defined __SUNPRO_CC
|
||||
#define ZMQ_EXPORT __global
|
||||
#elif (defined __GNUC__ && __GNUC__ >= 4) || defined __INTEL_COMPILER
|
||||
#define ZMQ_EXPORT __attribute__ ((visibility ("default")))
|
||||
#else
|
||||
#define ZMQ_EXPORT
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* Define integer types needed for event interface */
|
||||
#define ZMQ_DEFINED_STDINT 1
|
||||
#if defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_OPENVMS
|
||||
# include <inttypes.h>
|
||||
#include <inttypes.h>
|
||||
#elif defined _MSC_VER && _MSC_VER < 1600
|
||||
# ifndef int32_t
|
||||
typedef __int32 int32_t;
|
||||
# endif
|
||||
# ifndef uint32_t
|
||||
typedef unsigned __int32 uint32_t;
|
||||
# endif
|
||||
# ifndef uint16_t
|
||||
typedef unsigned __int16 uint16_t;
|
||||
# endif
|
||||
# ifndef uint8_t
|
||||
typedef unsigned __int8 uint8_t;
|
||||
# endif
|
||||
#ifndef int32_t
|
||||
typedef __int32 int32_t;
|
||||
#endif
|
||||
#ifndef uint32_t
|
||||
typedef unsigned __int32 uint32_t;
|
||||
#endif
|
||||
#ifndef uint16_t
|
||||
typedef unsigned __int16 uint16_t;
|
||||
#endif
|
||||
#ifndef uint8_t
|
||||
typedef unsigned __int8 uint8_t;
|
||||
#endif
|
||||
#else
|
||||
# include <stdint.h>
|
||||
#include <stdint.h>
|
||||
#endif
|
||||
|
||||
// 32-bit AIX's pollfd struct members are called reqevents and rtnevents so it
|
||||
// defines compatibility macros for them. Need to include that header first to
|
||||
// stop build failures since zmq_pollset_t defines them as events and revents.
|
||||
#ifdef ZMQ_HAVE_AIX
|
||||
#include <poll.h>
|
||||
#include <poll.h>
|
||||
#endif
|
||||
|
||||
|
||||
@ -209,7 +209,7 @@ ZMQ_EXPORT void zmq_version (int *major, int *minor, int *patch);
|
||||
/******************************************************************************/
|
||||
|
||||
/* Context options */
|
||||
#define ZMQ_IO_THREADS 1
|
||||
#define ZMQ_IO_THREADS 1
|
||||
#define ZMQ_MAX_SOCKETS 2
|
||||
#define ZMQ_SOCKET_LIMIT 3
|
||||
#define ZMQ_THREAD_PRIORITY 3
|
||||
@ -217,7 +217,7 @@ ZMQ_EXPORT void zmq_version (int *major, int *minor, int *patch);
|
||||
#define ZMQ_MAX_MSGSZ 5
|
||||
|
||||
/* Default for new contexts */
|
||||
#define ZMQ_IO_THREADS_DFLT 1
|
||||
#define ZMQ_IO_THREADS_DFLT 1
|
||||
#define ZMQ_MAX_SOCKETS_DFLT 1023
|
||||
#define ZMQ_THREAD_PRIORITY_DFLT -1
|
||||
#define ZMQ_THREAD_SCHED_POLICY_DFLT -1
|
||||
@ -242,26 +242,27 @@ ZMQ_EXPORT int zmq_ctx_destroy (void *context);
|
||||
* alignment and raise sigbus on violations. Make sure applications allocate
|
||||
* zmq_msg_t on addresses aligned on a pointer-size boundary to avoid this issue.
|
||||
*/
|
||||
typedef struct zmq_msg_t {
|
||||
#if defined (__GNUC__) || defined ( __INTEL_COMPILER) || \
|
||||
(defined (__SUNPRO_C) && __SUNPRO_C >= 0x590) || \
|
||||
(defined (__SUNPRO_CC) && __SUNPRO_CC >= 0x590)
|
||||
unsigned char _ [64] __attribute__ ((aligned (sizeof (void *))));
|
||||
#elif defined (_MSC_VER) && (defined (_M_X64) || defined (_M_ARM64))
|
||||
__declspec (align (8)) unsigned char _ [64];
|
||||
#elif defined (_MSC_VER) && (defined (_M_IX86) || defined (_M_ARM_ARMV7VE))
|
||||
__declspec (align (4)) unsigned char _ [64];
|
||||
typedef struct zmq_msg_t
|
||||
{
|
||||
#if defined(__GNUC__) || defined(__INTEL_COMPILER) \
|
||||
|| (defined(__SUNPRO_C) && __SUNPRO_C >= 0x590) \
|
||||
|| (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x590)
|
||||
unsigned char _[64] __attribute__ ((aligned (sizeof (void *))));
|
||||
#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
|
||||
__declspec(align (8)) unsigned char _[64];
|
||||
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM_ARMV7VE))
|
||||
__declspec(align (4)) unsigned char _[64];
|
||||
#else
|
||||
unsigned char _ [64];
|
||||
unsigned char _[64];
|
||||
#endif
|
||||
} zmq_msg_t;
|
||||
|
||||
typedef void (zmq_free_fn) (void *data, void *hint);
|
||||
typedef void(zmq_free_fn) (void *data, void *hint);
|
||||
|
||||
ZMQ_EXPORT int zmq_msg_init (zmq_msg_t *msg);
|
||||
ZMQ_EXPORT int zmq_msg_init_size (zmq_msg_t *msg, size_t size);
|
||||
ZMQ_EXPORT int zmq_msg_init_data (zmq_msg_t *msg, void *data,
|
||||
size_t size, zmq_free_fn *ffn, void *hint);
|
||||
ZMQ_EXPORT int zmq_msg_init_data (
|
||||
zmq_msg_t *msg, void *data, size_t size, zmq_free_fn *ffn, void *hint);
|
||||
ZMQ_EXPORT int zmq_msg_send (zmq_msg_t *msg, void *s, int flags);
|
||||
ZMQ_EXPORT int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags);
|
||||
ZMQ_EXPORT int zmq_msg_close (zmq_msg_t *msg);
|
||||
@ -272,7 +273,8 @@ ZMQ_EXPORT size_t zmq_msg_size (const zmq_msg_t *msg);
|
||||
ZMQ_EXPORT int zmq_msg_more (const zmq_msg_t *msg);
|
||||
ZMQ_EXPORT int zmq_msg_get (const zmq_msg_t *msg, int property);
|
||||
ZMQ_EXPORT int zmq_msg_set (zmq_msg_t *msg, int property, int optval);
|
||||
ZMQ_EXPORT const char *zmq_msg_gets (const zmq_msg_t *msg, const char *property);
|
||||
ZMQ_EXPORT const char *zmq_msg_gets (const zmq_msg_t *msg,
|
||||
const char *property);
|
||||
|
||||
/******************************************************************************/
|
||||
/* 0MQ socket definition. */
|
||||
@ -386,20 +388,20 @@ ZMQ_EXPORT const char *zmq_msg_gets (const zmq_msg_t *msg, const char *property)
|
||||
#define ZMQ_GSSAPI 3
|
||||
|
||||
/* RADIO-DISH protocol */
|
||||
#define ZMQ_GROUP_MAX_LENGTH 15
|
||||
#define ZMQ_GROUP_MAX_LENGTH 15
|
||||
|
||||
/* Deprecated options and aliases */
|
||||
#define ZMQ_IDENTITY ZMQ_ROUTING_ID
|
||||
#define ZMQ_CONNECT_RID ZMQ_CONNECT_ROUTING_ID
|
||||
#define ZMQ_TCP_ACCEPT_FILTER 38
|
||||
#define ZMQ_IPC_FILTER_PID 58
|
||||
#define ZMQ_IPC_FILTER_UID 59
|
||||
#define ZMQ_IPC_FILTER_GID 60
|
||||
#define ZMQ_IPV4ONLY 31
|
||||
#define ZMQ_IDENTITY ZMQ_ROUTING_ID
|
||||
#define ZMQ_CONNECT_RID ZMQ_CONNECT_ROUTING_ID
|
||||
#define ZMQ_TCP_ACCEPT_FILTER 38
|
||||
#define ZMQ_IPC_FILTER_PID 58
|
||||
#define ZMQ_IPC_FILTER_UID 59
|
||||
#define ZMQ_IPC_FILTER_GID 60
|
||||
#define ZMQ_IPV4ONLY 31
|
||||
#define ZMQ_DELAY_ATTACH_ON_CONNECT ZMQ_IMMEDIATE
|
||||
#define ZMQ_NOBLOCK ZMQ_DONTWAIT
|
||||
#define ZMQ_FAIL_UNROUTABLE ZMQ_ROUTER_MANDATORY
|
||||
#define ZMQ_ROUTER_BEHAVIOR ZMQ_ROUTER_MANDATORY
|
||||
#define ZMQ_NOBLOCK ZMQ_DONTWAIT
|
||||
#define ZMQ_FAIL_UNROUTABLE ZMQ_ROUTER_MANDATORY
|
||||
#define ZMQ_ROUTER_BEHAVIOR ZMQ_ROUTER_MANDATORY
|
||||
|
||||
/* Deprecated Message options */
|
||||
#define ZMQ_SRCFD 2
|
||||
@ -410,25 +412,25 @@ ZMQ_EXPORT const char *zmq_msg_gets (const zmq_msg_t *msg, const char *property)
|
||||
|
||||
/* Socket transport events (TCP, IPC and TIPC only) */
|
||||
|
||||
#define ZMQ_EVENT_CONNECTED 0x0001
|
||||
#define ZMQ_EVENT_CONNECT_DELAYED 0x0002
|
||||
#define ZMQ_EVENT_CONNECT_RETRIED 0x0004
|
||||
#define ZMQ_EVENT_LISTENING 0x0008
|
||||
#define ZMQ_EVENT_BIND_FAILED 0x0010
|
||||
#define ZMQ_EVENT_ACCEPTED 0x0020
|
||||
#define ZMQ_EVENT_ACCEPT_FAILED 0x0040
|
||||
#define ZMQ_EVENT_CLOSED 0x0080
|
||||
#define ZMQ_EVENT_CLOSE_FAILED 0x0100
|
||||
#define ZMQ_EVENT_DISCONNECTED 0x0200
|
||||
#define ZMQ_EVENT_MONITOR_STOPPED 0x0400
|
||||
#define ZMQ_EVENT_ALL 0xFFFF
|
||||
#define ZMQ_EVENT_CONNECTED 0x0001
|
||||
#define ZMQ_EVENT_CONNECT_DELAYED 0x0002
|
||||
#define ZMQ_EVENT_CONNECT_RETRIED 0x0004
|
||||
#define ZMQ_EVENT_LISTENING 0x0008
|
||||
#define ZMQ_EVENT_BIND_FAILED 0x0010
|
||||
#define ZMQ_EVENT_ACCEPTED 0x0020
|
||||
#define ZMQ_EVENT_ACCEPT_FAILED 0x0040
|
||||
#define ZMQ_EVENT_CLOSED 0x0080
|
||||
#define ZMQ_EVENT_CLOSE_FAILED 0x0100
|
||||
#define ZMQ_EVENT_DISCONNECTED 0x0200
|
||||
#define ZMQ_EVENT_MONITOR_STOPPED 0x0400
|
||||
#define ZMQ_EVENT_ALL 0xFFFF
|
||||
|
||||
ZMQ_EXPORT void *zmq_socket (void *, int type);
|
||||
ZMQ_EXPORT int zmq_close (void *s);
|
||||
ZMQ_EXPORT int zmq_setsockopt (void *s, int option, const void *optval,
|
||||
size_t optvallen);
|
||||
ZMQ_EXPORT int zmq_getsockopt (void *s, int option, void *optval,
|
||||
size_t *optvallen);
|
||||
ZMQ_EXPORT int
|
||||
zmq_setsockopt (void *s, int option, const void *optval, size_t optvallen);
|
||||
ZMQ_EXPORT int
|
||||
zmq_getsockopt (void *s, int option, void *optval, size_t *optvallen);
|
||||
ZMQ_EXPORT int zmq_bind (void *s, const char *addr);
|
||||
ZMQ_EXPORT int zmq_connect (void *s, const char *addr);
|
||||
ZMQ_EXPORT int zmq_unbind (void *s, const char *addr);
|
||||
@ -462,14 +464,17 @@ typedef struct zmq_pollitem_t
|
||||
|
||||
#define ZMQ_POLLITEMS_DFLT 16
|
||||
|
||||
ZMQ_EXPORT int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout);
|
||||
ZMQ_EXPORT int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout);
|
||||
|
||||
/******************************************************************************/
|
||||
/* Message proxying */
|
||||
/******************************************************************************/
|
||||
|
||||
ZMQ_EXPORT int zmq_proxy (void *frontend, void *backend, void *capture);
|
||||
ZMQ_EXPORT int zmq_proxy_steerable (void *frontend, void *backend, void *capture, void *control);
|
||||
ZMQ_EXPORT int zmq_proxy_steerable (void *frontend,
|
||||
void *backend,
|
||||
void *capture,
|
||||
void *control);
|
||||
|
||||
/******************************************************************************/
|
||||
/* Probe library capabilities */
|
||||
@ -488,8 +493,10 @@ ZMQ_EXPORT int zmq_device (int type, void *frontend, void *backend);
|
||||
ZMQ_EXPORT int zmq_sendmsg (void *s, zmq_msg_t *msg, int flags);
|
||||
ZMQ_EXPORT int zmq_recvmsg (void *s, zmq_msg_t *msg, int flags);
|
||||
struct iovec;
|
||||
ZMQ_EXPORT int zmq_sendiov (void *s, struct iovec *iov, size_t count, int flags);
|
||||
ZMQ_EXPORT int zmq_recviov (void *s, struct iovec *iov, size_t *count, int flags);
|
||||
ZMQ_EXPORT int
|
||||
zmq_sendiov (void *s, struct iovec *iov, size_t count, int flags);
|
||||
ZMQ_EXPORT int
|
||||
zmq_recviov (void *s, struct iovec *iov, size_t *count, int flags);
|
||||
|
||||
/******************************************************************************/
|
||||
/* Encryption functions */
|
||||
@ -507,7 +514,8 @@ ZMQ_EXPORT int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);
|
||||
|
||||
/* Derive the z85-encoded public key from the z85-encoded secret key. */
|
||||
/* Returns 0 on success. */
|
||||
ZMQ_EXPORT int zmq_curve_public (char *z85_public_key, const char *z85_secret_key);
|
||||
ZMQ_EXPORT int zmq_curve_public (char *z85_public_key,
|
||||
const char *z85_secret_key);
|
||||
|
||||
/******************************************************************************/
|
||||
/* Atomic utility methods */
|
||||
@ -540,13 +548,13 @@ ZMQ_EXPORT unsigned long zmq_stopwatch_stop (void *watch_);
|
||||
/* Sleeps for specified number of seconds. */
|
||||
ZMQ_EXPORT void zmq_sleep (int seconds_);
|
||||
|
||||
typedef void (zmq_thread_fn) (void*);
|
||||
typedef void(zmq_thread_fn) (void *);
|
||||
|
||||
/* Start a thread. Returns a handle to the thread. */
|
||||
ZMQ_EXPORT void *zmq_threadstart (zmq_thread_fn* func, void* arg);
|
||||
ZMQ_EXPORT void *zmq_threadstart (zmq_thread_fn *func, void *arg);
|
||||
|
||||
/* Wait for thread to complete then free up resources. */
|
||||
ZMQ_EXPORT void zmq_threadclose (void* thread);
|
||||
ZMQ_EXPORT void zmq_threadclose (void *thread);
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
@ -573,16 +581,16 @@ ZMQ_EXPORT void zmq_threadclose (void* thread);
|
||||
|
||||
/* DRAFT 0MQ socket events and monitoring */
|
||||
/* Unspecified system errors during handshake. Event value is an errno. */
|
||||
#define ZMQ_EVENT_HANDSHAKE_FAILED_NO_DETAIL 0x0800
|
||||
#define ZMQ_EVENT_HANDSHAKE_FAILED_NO_DETAIL 0x0800
|
||||
/* Handshake complete successfully with successful authentication (if *
|
||||
* enabled). Event value is unused. */
|
||||
#define ZMQ_EVENT_HANDSHAKE_SUCCEEDED 0x1000
|
||||
#define ZMQ_EVENT_HANDSHAKE_SUCCEEDED 0x1000
|
||||
/* Protocol errors between ZMTP peers or between server and ZAP handler. *
|
||||
* Event value is one of ZMQ_PROTOCOL_ERROR_* */
|
||||
#define ZMQ_EVENT_HANDSHAKE_FAILED_PROTOCOL 0x2000
|
||||
#define ZMQ_EVENT_HANDSHAKE_FAILED_PROTOCOL 0x2000
|
||||
/* Failed authentication requests. Event value is the numeric ZAP status *
|
||||
* code, i.e. 300, 400 or 500. */
|
||||
#define ZMQ_EVENT_HANDSHAKE_FAILED_AUTH 0x4000
|
||||
#define ZMQ_EVENT_HANDSHAKE_FAILED_AUTH 0x4000
|
||||
|
||||
#define ZMQ_PROTOCOL_ERROR_ZMTP_UNSPECIFIED 0x10000000
|
||||
#define ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND 0x10000001
|
||||
@ -601,7 +609,7 @@ ZMQ_EXPORT void zmq_threadclose (void* thread);
|
||||
#define ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC 0x11000001
|
||||
#define ZMQ_PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH 0x11000002
|
||||
|
||||
#define ZMQ_PROTOCOL_ERROR_ZAP_UNSPECIFIED 0x20000000
|
||||
#define ZMQ_PROTOCOL_ERROR_ZAP_UNSPECIFIED 0x20000000
|
||||
#define ZMQ_PROTOCOL_ERROR_ZAP_MALFORMED_REPLY 0x20000001
|
||||
#define ZMQ_PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID 0x20000002
|
||||
#define ZMQ_PROTOCOL_ERROR_ZAP_BAD_VERSION 0x20000003
|
||||
@ -619,16 +627,16 @@ ZMQ_EXPORT int zmq_join (void *s, const char *group);
|
||||
ZMQ_EXPORT int zmq_leave (void *s, const char *group);
|
||||
|
||||
/* DRAFT Msg methods. */
|
||||
ZMQ_EXPORT int zmq_msg_set_routing_id(zmq_msg_t *msg, uint32_t routing_id);
|
||||
ZMQ_EXPORT uint32_t zmq_msg_routing_id(zmq_msg_t *msg);
|
||||
ZMQ_EXPORT int zmq_msg_set_group(zmq_msg_t *msg, const char *group);
|
||||
ZMQ_EXPORT const char *zmq_msg_group(zmq_msg_t *msg);
|
||||
ZMQ_EXPORT int zmq_msg_set_routing_id (zmq_msg_t *msg, uint32_t routing_id);
|
||||
ZMQ_EXPORT uint32_t zmq_msg_routing_id (zmq_msg_t *msg);
|
||||
ZMQ_EXPORT int zmq_msg_set_group (zmq_msg_t *msg, const char *group);
|
||||
ZMQ_EXPORT const char *zmq_msg_group (zmq_msg_t *msg);
|
||||
|
||||
/* DRAFT Msg property names. */
|
||||
#define ZMQ_MSG_PROPERTY_ROUTING_ID "Routing-Id"
|
||||
#define ZMQ_MSG_PROPERTY_SOCKET_TYPE "Socket-Type"
|
||||
#define ZMQ_MSG_PROPERTY_USER_ID "User-Id"
|
||||
#define ZMQ_MSG_PROPERTY_PEER_ADDRESS "Peer-Address"
|
||||
#define ZMQ_MSG_PROPERTY_ROUTING_ID "Routing-Id"
|
||||
#define ZMQ_MSG_PROPERTY_SOCKET_TYPE "Socket-Type"
|
||||
#define ZMQ_MSG_PROPERTY_USER_ID "User-Id"
|
||||
#define ZMQ_MSG_PROPERTY_PEER_ADDRESS "Peer-Address"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Poller polling on sockets,fd and thread-safe sockets */
|
||||
@ -649,19 +657,26 @@ typedef struct zmq_poller_event_t
|
||||
} zmq_poller_event_t;
|
||||
|
||||
ZMQ_EXPORT void *zmq_poller_new (void);
|
||||
ZMQ_EXPORT int zmq_poller_destroy (void **poller_p);
|
||||
ZMQ_EXPORT int zmq_poller_add (void *poller, void *socket, void *user_data, short events);
|
||||
ZMQ_EXPORT int zmq_poller_modify (void *poller, void *socket, short events);
|
||||
ZMQ_EXPORT int zmq_poller_remove (void *poller, void *socket);
|
||||
ZMQ_EXPORT int zmq_poller_wait (void *poller, zmq_poller_event_t *event, long timeout);
|
||||
ZMQ_EXPORT int zmq_poller_wait_all (void *poller, zmq_poller_event_t *events, int n_events, long timeout);
|
||||
ZMQ_EXPORT int zmq_poller_destroy (void **poller_p);
|
||||
ZMQ_EXPORT int
|
||||
zmq_poller_add (void *poller, void *socket, void *user_data, short events);
|
||||
ZMQ_EXPORT int zmq_poller_modify (void *poller, void *socket, short events);
|
||||
ZMQ_EXPORT int zmq_poller_remove (void *poller, void *socket);
|
||||
ZMQ_EXPORT int
|
||||
zmq_poller_wait (void *poller, zmq_poller_event_t *event, long timeout);
|
||||
ZMQ_EXPORT int zmq_poller_wait_all (void *poller,
|
||||
zmq_poller_event_t *events,
|
||||
int n_events,
|
||||
long timeout);
|
||||
|
||||
#if defined _WIN32
|
||||
ZMQ_EXPORT int zmq_poller_add_fd (void *poller, SOCKET fd, void *user_data, short events);
|
||||
ZMQ_EXPORT int
|
||||
zmq_poller_add_fd (void *poller, SOCKET fd, void *user_data, short events);
|
||||
ZMQ_EXPORT int zmq_poller_modify_fd (void *poller, SOCKET fd, short events);
|
||||
ZMQ_EXPORT int zmq_poller_remove_fd (void *poller, SOCKET fd);
|
||||
#else
|
||||
ZMQ_EXPORT int zmq_poller_add_fd (void *poller, int fd, void *user_data, short events);
|
||||
ZMQ_EXPORT int
|
||||
zmq_poller_add_fd (void *poller, int fd, void *user_data, short events);
|
||||
ZMQ_EXPORT int zmq_poller_modify_fd (void *poller, int fd, short events);
|
||||
ZMQ_EXPORT int zmq_poller_remove_fd (void *poller, int fd);
|
||||
#endif
|
||||
@ -676,16 +691,18 @@ ZMQ_EXPORT int zmq_socket_get_peer_state (void *socket,
|
||||
|
||||
#define ZMQ_HAVE_TIMERS
|
||||
|
||||
typedef void (zmq_timer_fn)(int timer_id, void *arg);
|
||||
typedef void(zmq_timer_fn) (int timer_id, void *arg);
|
||||
|
||||
ZMQ_EXPORT void *zmq_timers_new (void);
|
||||
ZMQ_EXPORT int zmq_timers_destroy (void **timers_p);
|
||||
ZMQ_EXPORT int zmq_timers_add (void *timers, size_t interval, zmq_timer_fn handler, void *arg);
|
||||
ZMQ_EXPORT int zmq_timers_cancel (void *timers, int timer_id);
|
||||
ZMQ_EXPORT int zmq_timers_set_interval (void *timers, int timer_id, size_t interval);
|
||||
ZMQ_EXPORT int zmq_timers_reset (void *timers, int timer_id);
|
||||
ZMQ_EXPORT long zmq_timers_timeout (void *timers);
|
||||
ZMQ_EXPORT int zmq_timers_execute (void *timers);
|
||||
ZMQ_EXPORT int zmq_timers_destroy (void **timers_p);
|
||||
ZMQ_EXPORT int
|
||||
zmq_timers_add (void *timers, size_t interval, zmq_timer_fn handler, void *arg);
|
||||
ZMQ_EXPORT int zmq_timers_cancel (void *timers, int timer_id);
|
||||
ZMQ_EXPORT int
|
||||
zmq_timers_set_interval (void *timers, int timer_id, size_t interval);
|
||||
ZMQ_EXPORT int zmq_timers_reset (void *timers, int timer_id);
|
||||
ZMQ_EXPORT long zmq_timers_timeout (void *timers);
|
||||
ZMQ_EXPORT int zmq_timers_execute (void *timers);
|
||||
|
||||
/******************************************************************************/
|
||||
/* GSSAPI definitions */
|
||||
|
@ -34,14 +34,16 @@
|
||||
compilers even have an equivalent concept.
|
||||
So in the worst case, this include file is treated as silently empty. */
|
||||
|
||||
#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) || defined(_MSC_VER)
|
||||
#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) \
|
||||
|| defined(_MSC_VER)
|
||||
#if defined(__GNUC__) || defined(__GNUG__)
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic warning "-Wcpp"
|
||||
#pragma GCC diagnostic ignored "-Werror"
|
||||
#pragma GCC diagnostic ignored "-Wall"
|
||||
#endif
|
||||
#pragma message("Warning: zmq_utils.h is deprecated. All its functionality is provided by zmq.h.")
|
||||
#pragma message( \
|
||||
"Warning: zmq_utils.h is deprecated. All its functionality is provided by zmq.h.")
|
||||
#if defined(__GNUC__) || defined(__GNUG__)
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
|
@ -106,7 +106,7 @@ static void *worker (void *ctx_)
|
||||
#endif
|
||||
}
|
||||
|
||||
int main (int argc, char *argv [])
|
||||
int main (int argc, char *argv[])
|
||||
{
|
||||
#if defined ZMQ_HAVE_WINDOWS
|
||||
HANDLE local_thread;
|
||||
@ -127,8 +127,8 @@ int main (int argc, char *argv [])
|
||||
return 1;
|
||||
}
|
||||
|
||||
message_size = atoi (argv [1]);
|
||||
roundtrip_count = atoi (argv [2]);
|
||||
message_size = atoi (argv[1]);
|
||||
roundtrip_count = atoi (argv[2]);
|
||||
|
||||
ctx = zmq_init (1);
|
||||
if (!ctx) {
|
||||
@ -149,8 +149,7 @@ int main (int argc, char *argv [])
|
||||
}
|
||||
|
||||
#if defined ZMQ_HAVE_WINDOWS
|
||||
local_thread = (HANDLE) _beginthreadex (NULL, 0,
|
||||
worker, ctx, 0 , NULL);
|
||||
local_thread = (HANDLE) _beginthreadex (NULL, 0, worker, ctx, 0, NULL);
|
||||
if (local_thread == 0) {
|
||||
printf ("error in _beginthreadex\n");
|
||||
return -1;
|
||||
@ -237,4 +236,3 @@ int main (int argc, char *argv [])
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -71,7 +71,6 @@ static void *worker (void *ctx_)
|
||||
}
|
||||
|
||||
for (i = 0; i != message_count; i++) {
|
||||
|
||||
rc = zmq_msg_init_size (&msg, message_size);
|
||||
if (rc != 0) {
|
||||
printf ("error in zmq_msg_init_size: %s\n", zmq_strerror (errno));
|
||||
@ -106,7 +105,7 @@ static void *worker (void *ctx_)
|
||||
#endif
|
||||
}
|
||||
|
||||
int main (int argc, char *argv [])
|
||||
int main (int argc, char *argv[])
|
||||
{
|
||||
#if defined ZMQ_HAVE_WINDOWS
|
||||
HANDLE local_thread;
|
||||
@ -128,8 +127,8 @@ int main (int argc, char *argv [])
|
||||
return 1;
|
||||
}
|
||||
|
||||
message_size = atoi (argv [1]);
|
||||
message_count = atoi (argv [2]);
|
||||
message_size = atoi (argv[1]);
|
||||
message_count = atoi (argv[2]);
|
||||
|
||||
ctx = zmq_init (1);
|
||||
if (!ctx) {
|
||||
@ -150,8 +149,7 @@ int main (int argc, char *argv [])
|
||||
}
|
||||
|
||||
#if defined ZMQ_HAVE_WINDOWS
|
||||
local_thread = (HANDLE) _beginthreadex (NULL, 0,
|
||||
worker, ctx, 0 , NULL);
|
||||
local_thread = (HANDLE) _beginthreadex (NULL, 0, worker, ctx, 0, NULL);
|
||||
if (local_thread == 0) {
|
||||
printf ("error in _beginthreadex\n");
|
||||
return -1;
|
||||
@ -238,8 +236,8 @@ int main (int argc, char *argv [])
|
||||
return -1;
|
||||
}
|
||||
|
||||
throughput = (unsigned long)
|
||||
((double) message_count / (double) elapsed * 1000000);
|
||||
throughput =
|
||||
(unsigned long) ((double) message_count / (double) elapsed * 1000000);
|
||||
megabits = (double) (throughput * message_size * 8) / 1000000;
|
||||
|
||||
printf ("mean throughput: %d [msg/s]\n", (int) throughput);
|
||||
@ -247,4 +245,3 @@ int main (int argc, char *argv [])
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -31,7 +31,7 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
int main (int argc, char *argv [])
|
||||
int main (int argc, char *argv[])
|
||||
{
|
||||
const char *bind_to;
|
||||
int roundtrip_count;
|
||||
@ -44,12 +44,12 @@ int main (int argc, char *argv [])
|
||||
|
||||
if (argc != 4) {
|
||||
printf ("usage: local_lat <bind-to> <message-size> "
|
||||
"<roundtrip-count>\n");
|
||||
"<roundtrip-count>\n");
|
||||
return 1;
|
||||
}
|
||||
bind_to = argv [1];
|
||||
message_size = atoi (argv [2]);
|
||||
roundtrip_count = atoi (argv [3]);
|
||||
bind_to = argv[1];
|
||||
message_size = atoi (argv[2]);
|
||||
roundtrip_count = atoi (argv[3]);
|
||||
|
||||
ctx = zmq_init (1);
|
||||
if (!ctx) {
|
||||
|
@ -34,7 +34,7 @@
|
||||
// keys are arbitrary but must match remote_lat.cpp
|
||||
const char server_prvkey[] = "{X}#>t#jRGaQ}gMhv=30r(Mw+87YGs+5%kh=i@f8";
|
||||
|
||||
int main (int argc, char *argv [])
|
||||
int main (int argc, char *argv[])
|
||||
{
|
||||
const char *bind_to;
|
||||
int message_count;
|
||||
@ -51,13 +51,14 @@ int main (int argc, char *argv [])
|
||||
int curve = 0;
|
||||
|
||||
if (argc != 4 && argc != 5) {
|
||||
printf ("usage: local_thr <bind-to> <message-size> <message-count> [<enable_curve>]\n");
|
||||
printf ("usage: local_thr <bind-to> <message-size> <message-count> "
|
||||
"[<enable_curve>]\n");
|
||||
return 1;
|
||||
}
|
||||
bind_to = argv [1];
|
||||
message_size = atoi (argv [2]);
|
||||
message_count = atoi (argv [3]);
|
||||
if (argc >= 5 && atoi (argv [4])) {
|
||||
bind_to = argv[1];
|
||||
message_size = atoi (argv[2]);
|
||||
message_count = atoi (argv[3]);
|
||||
if (argc >= 5 && atoi (argv[4])) {
|
||||
curve = 1;
|
||||
}
|
||||
|
||||
@ -76,13 +77,14 @@ int main (int argc, char *argv [])
|
||||
// Add your socket options here.
|
||||
// For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM.
|
||||
if (curve) {
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SECRETKEY, server_prvkey, sizeof(server_prvkey));
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SECRETKEY, server_prvkey,
|
||||
sizeof (server_prvkey));
|
||||
if (rc != 0) {
|
||||
printf ("error in zmq_setsockoopt: %s\n", zmq_strerror (errno));
|
||||
return -1;
|
||||
}
|
||||
int server = 1;
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SERVER, &server, sizeof(int));
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SERVER, &server, sizeof (int));
|
||||
if (rc != 0) {
|
||||
printf ("error in zmq_setsockoopt: %s\n", zmq_strerror (errno));
|
||||
return -1;
|
||||
@ -135,8 +137,7 @@ int main (int argc, char *argv [])
|
||||
return -1;
|
||||
}
|
||||
|
||||
throughput =
|
||||
((double) message_count / (double) elapsed * 1000000);
|
||||
throughput = ((double) message_count / (double) elapsed * 1000000);
|
||||
megabits = ((double) throughput * message_size * 8) / 1000000;
|
||||
|
||||
printf ("message size: %d [B]\n", (int) message_size);
|
||||
|
@ -32,7 +32,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
int main (int argc, char *argv [])
|
||||
int main (int argc, char *argv[])
|
||||
{
|
||||
const char *connect_to;
|
||||
int roundtrip_count;
|
||||
@ -48,12 +48,12 @@ int main (int argc, char *argv [])
|
||||
|
||||
if (argc != 4) {
|
||||
printf ("usage: remote_lat <connect-to> <message-size> "
|
||||
"<roundtrip-count>\n");
|
||||
"<roundtrip-count>\n");
|
||||
return 1;
|
||||
}
|
||||
connect_to = argv [1];
|
||||
message_size = atoi (argv [2]);
|
||||
roundtrip_count = atoi (argv [3]);
|
||||
connect_to = argv[1];
|
||||
message_size = atoi (argv[2]);
|
||||
roundtrip_count = atoi (argv[3]);
|
||||
|
||||
ctx = zmq_init (1);
|
||||
if (!ctx) {
|
||||
|
@ -37,7 +37,7 @@ const char server_pubkey[] = "DX4nh=yUn{-9ugra0X3Src4SU-4xTgqxcYY.+<SH";
|
||||
const char client_pubkey[] = "<n^oA}I:66W+*ds3tAmi1+KJzv-}k&fC2aA5Bj0K";
|
||||
const char client_prvkey[] = "9R9bV}[6z6DC-%$!jTVTKvWc=LEL{4i4gzUe$@Zx";
|
||||
|
||||
int main (int argc, char *argv [])
|
||||
int main (int argc, char *argv[])
|
||||
{
|
||||
const char *connect_to;
|
||||
int message_count;
|
||||
@ -51,13 +51,13 @@ int main (int argc, char *argv [])
|
||||
|
||||
if (argc != 4 && argc != 5) {
|
||||
printf ("usage: remote_thr <connect-to> <message-size> "
|
||||
"<message-count> [<enable_curve>]\n");
|
||||
"<message-count> [<enable_curve>]\n");
|
||||
return 1;
|
||||
}
|
||||
connect_to = argv [1];
|
||||
message_size = atoi (argv [2]);
|
||||
message_count = atoi (argv [3]);
|
||||
if (argc >= 5 && atoi (argv [4])) {
|
||||
connect_to = argv[1];
|
||||
message_size = atoi (argv[2]);
|
||||
message_count = atoi (argv[3]);
|
||||
if (argc >= 5 && atoi (argv[4])) {
|
||||
curve = 1;
|
||||
}
|
||||
|
||||
@ -76,19 +76,22 @@ int main (int argc, char *argv [])
|
||||
// Add your socket options here.
|
||||
// For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM.
|
||||
if (curve) {
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SECRETKEY, client_prvkey, sizeof (client_prvkey));
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SECRETKEY, client_prvkey,
|
||||
sizeof (client_prvkey));
|
||||
if (rc != 0) {
|
||||
printf ("error in zmq_setsockoopt: %s\n", zmq_strerror (errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_PUBLICKEY, client_pubkey, sizeof (client_pubkey));
|
||||
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_PUBLICKEY, client_pubkey,
|
||||
sizeof (client_pubkey));
|
||||
if (rc != 0) {
|
||||
printf ("error in zmq_setsockoopt: %s\n", zmq_strerror (errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SERVERKEY, server_pubkey, sizeof (server_pubkey));
|
||||
|
||||
rc = zmq_setsockopt (s, ZMQ_CURVE_SERVERKEY, server_pubkey,
|
||||
sizeof (server_pubkey));
|
||||
if (rc != 0) {
|
||||
printf ("error in zmq_setsockoopt: %s\n", zmq_strerror (errno));
|
||||
return -1;
|
||||
|
@ -44,11 +44,12 @@
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
|
||||
zmq::address_t::address_t (
|
||||
const std::string &protocol_, const std::string &address_, ctx_t *parent_)
|
||||
: protocol (protocol_),
|
||||
address (address_),
|
||||
parent (parent_)
|
||||
zmq::address_t::address_t (const std::string &protocol_,
|
||||
const std::string &address_,
|
||||
ctx_t *parent_) :
|
||||
protocol (protocol_),
|
||||
address (address_),
|
||||
parent (parent_)
|
||||
{
|
||||
memset (&resolved, 0, sizeof resolved);
|
||||
}
|
||||
@ -57,35 +58,32 @@ zmq::address_t::~address_t ()
|
||||
{
|
||||
if (protocol == "tcp") {
|
||||
if (resolved.tcp_addr) {
|
||||
LIBZMQ_DELETE(resolved.tcp_addr);
|
||||
LIBZMQ_DELETE (resolved.tcp_addr);
|
||||
}
|
||||
}
|
||||
if (protocol == "udp") {
|
||||
if (resolved.udp_addr) {
|
||||
LIBZMQ_DELETE(resolved.udp_addr);
|
||||
LIBZMQ_DELETE (resolved.udp_addr);
|
||||
}
|
||||
}
|
||||
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
|
||||
else
|
||||
if (protocol == "ipc") {
|
||||
else if (protocol == "ipc") {
|
||||
if (resolved.ipc_addr) {
|
||||
LIBZMQ_DELETE(resolved.ipc_addr);
|
||||
LIBZMQ_DELETE (resolved.ipc_addr);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_TIPC
|
||||
else
|
||||
if (protocol == "tipc") {
|
||||
else if (protocol == "tipc") {
|
||||
if (resolved.tipc_addr) {
|
||||
LIBZMQ_DELETE(resolved.tipc_addr);
|
||||
LIBZMQ_DELETE (resolved.tipc_addr);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_VMCI
|
||||
else
|
||||
if (protocol == "vmci") {
|
||||
else if (protocol == "vmci") {
|
||||
if (resolved.vmci_addr) {
|
||||
LIBZMQ_DELETE(resolved.vmci_addr);
|
||||
LIBZMQ_DELETE (resolved.vmci_addr);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -102,22 +100,19 @@ int zmq::address_t::to_string (std::string &addr_) const
|
||||
return resolved.udp_addr->to_string (addr_);
|
||||
}
|
||||
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
|
||||
else
|
||||
if (protocol == "ipc") {
|
||||
else if (protocol == "ipc") {
|
||||
if (resolved.ipc_addr)
|
||||
return resolved.ipc_addr->to_string (addr_);
|
||||
}
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_TIPC
|
||||
else
|
||||
if (protocol == "tipc") {
|
||||
else if (protocol == "tipc") {
|
||||
if (resolved.tipc_addr)
|
||||
return resolved.tipc_addr->to_string (addr_);
|
||||
}
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_VMCI
|
||||
else
|
||||
if (protocol == "vmci") {
|
||||
else if (protocol == "vmci") {
|
||||
if (resolved.vmci_addr)
|
||||
return resolved.vmci_addr->to_string (addr_);
|
||||
}
|
||||
|
@ -34,44 +34,48 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class ctx_t;
|
||||
class tcp_address_t;
|
||||
class udp_address_t;
|
||||
class ctx_t;
|
||||
class tcp_address_t;
|
||||
class udp_address_t;
|
||||
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
|
||||
class ipc_address_t;
|
||||
class ipc_address_t;
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_LINUX
|
||||
class tipc_address_t;
|
||||
class tipc_address_t;
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_VMCI
|
||||
class vmci_address_t;
|
||||
class vmci_address_t;
|
||||
#endif
|
||||
struct address_t {
|
||||
address_t (const std::string &protocol_, const std::string &address_, ctx_t *parent_);
|
||||
struct address_t
|
||||
{
|
||||
address_t (const std::string &protocol_,
|
||||
const std::string &address_,
|
||||
ctx_t *parent_);
|
||||
|
||||
~address_t ();
|
||||
~address_t ();
|
||||
|
||||
const std::string protocol;
|
||||
const std::string address;
|
||||
ctx_t *parent;
|
||||
const std::string protocol;
|
||||
const std::string address;
|
||||
ctx_t *parent;
|
||||
|
||||
// Protocol specific resolved address
|
||||
union {
|
||||
tcp_address_t *tcp_addr;
|
||||
udp_address_t *udp_addr;
|
||||
// Protocol specific resolved address
|
||||
union
|
||||
{
|
||||
tcp_address_t *tcp_addr;
|
||||
udp_address_t *udp_addr;
|
||||
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
|
||||
ipc_address_t *ipc_addr;
|
||||
ipc_address_t *ipc_addr;
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_LINUX
|
||||
tipc_address_t *tipc_addr;
|
||||
tipc_address_t *tipc_addr;
|
||||
#endif
|
||||
#if defined ZMQ_HAVE_VMCI
|
||||
vmci_address_t *vmci_addr;
|
||||
vmci_address_t *vmci_addr;
|
||||
#endif
|
||||
} resolved;
|
||||
} resolved;
|
||||
|
||||
int to_string (std::string &addr_) const;
|
||||
};
|
||||
int to_string (std::string &addr_) const;
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
200
src/array.hpp
200
src/array.hpp
@ -35,133 +35,101 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// Implementation of fast arrays with O(1) access, insertion and
|
||||
// removal. The array stores pointers rather than objects.
|
||||
// O(1) is achieved by making items inheriting from
|
||||
// array_item_t<ID> class which internally stores the position
|
||||
// in the array.
|
||||
// The ID template argument is used to differentiate among arrays
|
||||
// and thus let an object be stored in different arrays.
|
||||
// Implementation of fast arrays with O(1) access, insertion and
|
||||
// removal. The array stores pointers rather than objects.
|
||||
// O(1) is achieved by making items inheriting from
|
||||
// array_item_t<ID> class which internally stores the position
|
||||
// in the array.
|
||||
// The ID template argument is used to differentiate among arrays
|
||||
// and thus let an object be stored in different arrays.
|
||||
|
||||
// Base class for objects stored in the array. If you want to store
|
||||
// same object in multiple arrays, each of those arrays has to have
|
||||
// different ID. The item itself has to be derived from instantiations of
|
||||
// array_item_t template for all relevant IDs.
|
||||
// Base class for objects stored in the array. If you want to store
|
||||
// same object in multiple arrays, each of those arrays has to have
|
||||
// different ID. The item itself has to be derived from instantiations of
|
||||
// array_item_t template for all relevant IDs.
|
||||
|
||||
template <int ID = 0> class array_item_t
|
||||
template <int ID = 0> class array_item_t
|
||||
{
|
||||
public:
|
||||
inline array_item_t () : array_index (-1) {}
|
||||
|
||||
// The destructor doesn't have to be virtual. It is made virtual
|
||||
// just to keep ICC and code checking tools from complaining.
|
||||
inline virtual ~array_item_t () {}
|
||||
|
||||
inline void set_array_index (int index_) { array_index = index_; }
|
||||
|
||||
inline int get_array_index () { return array_index; }
|
||||
|
||||
private:
|
||||
int array_index;
|
||||
|
||||
array_item_t (const array_item_t &);
|
||||
const array_item_t &operator= (const array_item_t &);
|
||||
};
|
||||
|
||||
|
||||
template <typename T, int ID = 0> class array_t
|
||||
{
|
||||
private:
|
||||
typedef array_item_t<ID> item_t;
|
||||
|
||||
public:
|
||||
typedef typename std::vector<T *>::size_type size_type;
|
||||
|
||||
inline array_t () {}
|
||||
|
||||
inline ~array_t () {}
|
||||
|
||||
inline size_type size () { return items.size (); }
|
||||
|
||||
inline bool empty () { return items.empty (); }
|
||||
|
||||
inline T *&operator[] (size_type index_) { return items[index_]; }
|
||||
|
||||
inline void push_back (T *item_)
|
||||
{
|
||||
public:
|
||||
if (item_)
|
||||
((item_t *) item_)->set_array_index ((int) items.size ());
|
||||
items.push_back (item_);
|
||||
}
|
||||
|
||||
inline array_item_t () :
|
||||
array_index (-1)
|
||||
{
|
||||
}
|
||||
|
||||
// The destructor doesn't have to be virtual. It is made virtual
|
||||
// just to keep ICC and code checking tools from complaining.
|
||||
inline virtual ~array_item_t ()
|
||||
{
|
||||
}
|
||||
|
||||
inline void set_array_index (int index_)
|
||||
{
|
||||
array_index = index_;
|
||||
}
|
||||
|
||||
inline int get_array_index ()
|
||||
{
|
||||
return array_index;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
int array_index;
|
||||
|
||||
array_item_t (const array_item_t&);
|
||||
const array_item_t &operator = (const array_item_t&);
|
||||
};
|
||||
|
||||
|
||||
template <typename T, int ID = 0> class array_t
|
||||
inline void erase (T *item_)
|
||||
{
|
||||
private:
|
||||
erase (((item_t *) item_)->get_array_index ());
|
||||
}
|
||||
|
||||
typedef array_item_t <ID> item_t;
|
||||
inline void erase (size_type index_)
|
||||
{
|
||||
if (items.back ())
|
||||
((item_t *) items.back ())->set_array_index ((int) index_);
|
||||
items[index_] = items.back ();
|
||||
items.pop_back ();
|
||||
}
|
||||
|
||||
public:
|
||||
inline void swap (size_type index1_, size_type index2_)
|
||||
{
|
||||
if (items[index1_])
|
||||
((item_t *) items[index1_])->set_array_index ((int) index2_);
|
||||
if (items[index2_])
|
||||
((item_t *) items[index2_])->set_array_index ((int) index1_);
|
||||
std::swap (items[index1_], items[index2_]);
|
||||
}
|
||||
|
||||
typedef typename std::vector <T*>::size_type size_type;
|
||||
inline void clear () { items.clear (); }
|
||||
|
||||
inline array_t ()
|
||||
{
|
||||
}
|
||||
inline size_type index (T *item_)
|
||||
{
|
||||
return (size_type) ((item_t *) item_)->get_array_index ();
|
||||
}
|
||||
|
||||
inline ~array_t ()
|
||||
{
|
||||
}
|
||||
|
||||
inline size_type size ()
|
||||
{
|
||||
return items.size ();
|
||||
}
|
||||
|
||||
inline bool empty ()
|
||||
{
|
||||
return items.empty ();
|
||||
}
|
||||
|
||||
inline T *&operator [] (size_type index_)
|
||||
{
|
||||
return items [index_];
|
||||
}
|
||||
|
||||
inline void push_back (T *item_)
|
||||
{
|
||||
if (item_)
|
||||
((item_t*) item_)->set_array_index ((int) items.size ());
|
||||
items.push_back (item_);
|
||||
}
|
||||
|
||||
inline void erase (T *item_) {
|
||||
erase (((item_t*) item_)->get_array_index ());
|
||||
}
|
||||
|
||||
inline void erase (size_type index_) {
|
||||
if (items.back ())
|
||||
((item_t*) items.back ())->set_array_index ((int) index_);
|
||||
items [index_] = items.back ();
|
||||
items.pop_back ();
|
||||
}
|
||||
|
||||
inline void swap (size_type index1_, size_type index2_)
|
||||
{
|
||||
if (items [index1_])
|
||||
((item_t*) items [index1_])->set_array_index ((int) index2_);
|
||||
if (items [index2_])
|
||||
((item_t*) items [index2_])->set_array_index ((int) index1_);
|
||||
std::swap (items [index1_], items [index2_]);
|
||||
}
|
||||
|
||||
inline void clear ()
|
||||
{
|
||||
items.clear ();
|
||||
}
|
||||
|
||||
inline size_type index (T *item_)
|
||||
{
|
||||
return (size_type) ((item_t*) item_)->get_array_index ();
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
typedef std::vector <T*> items_t;
|
||||
items_t items;
|
||||
|
||||
array_t (const array_t&);
|
||||
const array_t &operator = (const array_t&);
|
||||
};
|
||||
private:
|
||||
typedef std::vector<T *> items_t;
|
||||
items_t items;
|
||||
|
||||
array_t (const array_t &);
|
||||
const array_t &operator= (const array_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -44,7 +44,8 @@
|
||||
#define ZMQ_ATOMIC_COUNTER_ARM
|
||||
#elif defined ZMQ_HAVE_WINDOWS
|
||||
#define ZMQ_ATOMIC_COUNTER_WINDOWS
|
||||
#elif (defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_NETBSD || defined ZMQ_HAVE_GNU)
|
||||
#elif (defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_NETBSD \
|
||||
|| defined ZMQ_HAVE_GNU)
|
||||
#define ZMQ_ATOMIC_COUNTER_ATOMIC_H
|
||||
#elif defined __tile__
|
||||
#define ZMQ_ATOMIC_COUNTER_TILE
|
||||
@ -66,174 +67,158 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// This class represents an integer that can be incremented/decremented
|
||||
// in atomic fashion.
|
||||
//
|
||||
// In zmq::shared_message_memory_allocator a buffer with an atomic_counter_t
|
||||
// at the start is allocated. If the class does not align to pointer size,
|
||||
// access to pointers in structures in the buffer will cause SIGBUS on
|
||||
// architectures that do not allow mis-aligned pointers (eg: SPARC).
|
||||
// Force the compiler to align to pointer size, which will cause the object
|
||||
// to grow from 4 bytes to 8 bytes on 64 bit architectures (when not using
|
||||
// mutexes).
|
||||
|
||||
// This class represents an integer that can be incremented/decremented
|
||||
// in atomic fashion.
|
||||
//
|
||||
// In zmq::shared_message_memory_allocator a buffer with an atomic_counter_t
|
||||
// at the start is allocated. If the class does not align to pointer size,
|
||||
// access to pointers in structures in the buffer will cause SIGBUS on
|
||||
// architectures that do not allow mis-aligned pointers (eg: SPARC).
|
||||
// Force the compiler to align to pointer size, which will cause the object
|
||||
// to grow from 4 bytes to 8 bytes on 64 bit architectures (when not using
|
||||
// mutexes).
|
||||
|
||||
#if defined (_MSC_VER) && (defined (_M_X64) || defined (_M_ARM64))
|
||||
class __declspec (align (8)) atomic_counter_t
|
||||
#elif defined (_MSC_VER) && (defined (_M_IX86) || defined (_M_ARM_ARMV7VE))
|
||||
class __declspec (align (4)) atomic_counter_t
|
||||
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
|
||||
class __declspec(align (8)) atomic_counter_t
|
||||
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM_ARMV7VE))
|
||||
class __declspec(align (4)) atomic_counter_t
|
||||
#else
|
||||
class atomic_counter_t
|
||||
class atomic_counter_t
|
||||
#endif
|
||||
{
|
||||
public:
|
||||
typedef uint32_t integer_t;
|
||||
|
||||
inline atomic_counter_t (integer_t value_ = 0) : value (value_) {}
|
||||
|
||||
inline ~atomic_counter_t () {}
|
||||
|
||||
// Set counter value (not thread-safe).
|
||||
inline void set (integer_t value_) { value = value_; }
|
||||
|
||||
// Atomic addition. Returns the old value.
|
||||
inline integer_t add (integer_t increment_)
|
||||
{
|
||||
public:
|
||||
|
||||
typedef uint32_t integer_t;
|
||||
|
||||
inline atomic_counter_t (integer_t value_ = 0) :
|
||||
value (value_)
|
||||
{
|
||||
}
|
||||
|
||||
inline ~atomic_counter_t ()
|
||||
{
|
||||
}
|
||||
|
||||
// Set counter value (not thread-safe).
|
||||
inline void set (integer_t value_)
|
||||
{
|
||||
value = value_;
|
||||
}
|
||||
|
||||
// Atomic addition. Returns the old value.
|
||||
inline integer_t add (integer_t increment_)
|
||||
{
|
||||
integer_t old_value;
|
||||
integer_t old_value;
|
||||
|
||||
#if defined ZMQ_ATOMIC_COUNTER_WINDOWS
|
||||
old_value = InterlockedExchangeAdd ((LONG*) &value, increment_);
|
||||
old_value = InterlockedExchangeAdd ((LONG *) &value, increment_);
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_INTRINSIC
|
||||
old_value = __atomic_fetch_add(&value, increment_, __ATOMIC_ACQ_REL);
|
||||
old_value = __atomic_fetch_add (&value, increment_, __ATOMIC_ACQ_REL);
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_CXX11
|
||||
old_value = value.fetch_add(increment_, std::memory_order_acq_rel);
|
||||
old_value = value.fetch_add (increment_, std::memory_order_acq_rel);
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H
|
||||
integer_t new_value = atomic_add_32_nv (&value, increment_);
|
||||
old_value = new_value - increment_;
|
||||
integer_t new_value = atomic_add_32_nv (&value, increment_);
|
||||
old_value = new_value - increment_;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_TILE
|
||||
old_value = arch_atomic_add (&value, increment_);
|
||||
old_value = arch_atomic_add (&value, increment_);
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_X86
|
||||
__asm__ volatile (
|
||||
"lock; xadd %0, %1 \n\t"
|
||||
: "=r" (old_value), "=m" (value)
|
||||
: "0" (increment_), "m" (value)
|
||||
: "cc", "memory");
|
||||
__asm__ volatile("lock; xadd %0, %1 \n\t"
|
||||
: "=r"(old_value), "=m"(value)
|
||||
: "0"(increment_), "m"(value)
|
||||
: "cc", "memory");
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_ARM
|
||||
integer_t flag, tmp;
|
||||
__asm__ volatile (
|
||||
" dmb sy\n\t"
|
||||
"1: ldrex %0, [%5]\n\t"
|
||||
" add %2, %0, %4\n\t"
|
||||
" strex %1, %2, [%5]\n\t"
|
||||
" teq %1, #0\n\t"
|
||||
" bne 1b\n\t"
|
||||
" dmb sy\n\t"
|
||||
: "=&r"(old_value), "=&r"(flag), "=&r"(tmp), "+Qo"(value)
|
||||
: "Ir"(increment_), "r"(&value)
|
||||
: "cc");
|
||||
integer_t flag, tmp;
|
||||
__asm__ volatile(" dmb sy\n\t"
|
||||
"1: ldrex %0, [%5]\n\t"
|
||||
" add %2, %0, %4\n\t"
|
||||
" strex %1, %2, [%5]\n\t"
|
||||
" teq %1, #0\n\t"
|
||||
" bne 1b\n\t"
|
||||
" dmb sy\n\t"
|
||||
: "=&r"(old_value), "=&r"(flag), "=&r"(tmp),
|
||||
"+Qo"(value)
|
||||
: "Ir"(increment_), "r"(&value)
|
||||
: "cc");
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_MUTEX
|
||||
sync.lock ();
|
||||
old_value = value;
|
||||
value += increment_;
|
||||
sync.unlock ();
|
||||
sync.lock ();
|
||||
old_value = value;
|
||||
value += increment_;
|
||||
sync.unlock ();
|
||||
#else
|
||||
#error atomic_counter is not implemented for this platform
|
||||
#endif
|
||||
return old_value;
|
||||
}
|
||||
return old_value;
|
||||
}
|
||||
|
||||
// Atomic subtraction. Returns false if the counter drops to zero.
|
||||
inline bool sub (integer_t decrement)
|
||||
{
|
||||
// Atomic subtraction. Returns false if the counter drops to zero.
|
||||
inline bool sub (integer_t decrement)
|
||||
{
|
||||
#if defined ZMQ_ATOMIC_COUNTER_WINDOWS
|
||||
LONG delta = - ((LONG) decrement);
|
||||
integer_t old = InterlockedExchangeAdd ((LONG*) &value, delta);
|
||||
return old - decrement != 0;
|
||||
LONG delta = -((LONG) decrement);
|
||||
integer_t old = InterlockedExchangeAdd ((LONG *) &value, delta);
|
||||
return old - decrement != 0;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_INTRINSIC
|
||||
integer_t nv = __atomic_sub_fetch(&value, decrement, __ATOMIC_ACQ_REL);
|
||||
return nv != 0;
|
||||
integer_t nv = __atomic_sub_fetch (&value, decrement, __ATOMIC_ACQ_REL);
|
||||
return nv != 0;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_CXX11
|
||||
integer_t old = value.fetch_sub(decrement, std::memory_order_acq_rel);
|
||||
return old - decrement != 0;
|
||||
integer_t old = value.fetch_sub (decrement, std::memory_order_acq_rel);
|
||||
return old - decrement != 0;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H
|
||||
int32_t delta = - ((int32_t) decrement);
|
||||
integer_t nv = atomic_add_32_nv (&value, delta);
|
||||
return nv != 0;
|
||||
int32_t delta = -((int32_t) decrement);
|
||||
integer_t nv = atomic_add_32_nv (&value, delta);
|
||||
return nv != 0;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_TILE
|
||||
int32_t delta = - ((int32_t) decrement);
|
||||
integer_t nv = arch_atomic_add (&value, delta);
|
||||
return nv != 0;
|
||||
int32_t delta = -((int32_t) decrement);
|
||||
integer_t nv = arch_atomic_add (&value, delta);
|
||||
return nv != 0;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_X86
|
||||
integer_t oldval = -decrement;
|
||||
volatile integer_t *val = &value;
|
||||
__asm__ volatile ("lock; xaddl %0,%1"
|
||||
: "=r" (oldval), "=m" (*val)
|
||||
: "0" (oldval), "m" (*val)
|
||||
: "cc", "memory");
|
||||
return oldval != decrement;
|
||||
integer_t oldval = -decrement;
|
||||
volatile integer_t *val = &value;
|
||||
__asm__ volatile("lock; xaddl %0,%1"
|
||||
: "=r"(oldval), "=m"(*val)
|
||||
: "0"(oldval), "m"(*val)
|
||||
: "cc", "memory");
|
||||
return oldval != decrement;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_ARM
|
||||
integer_t old_value, flag, tmp;
|
||||
__asm__ volatile (
|
||||
" dmb sy\n\t"
|
||||
"1: ldrex %0, [%5]\n\t"
|
||||
" sub %2, %0, %4\n\t"
|
||||
" strex %1, %2, [%5]\n\t"
|
||||
" teq %1, #0\n\t"
|
||||
" bne 1b\n\t"
|
||||
" dmb sy\n\t"
|
||||
: "=&r"(old_value), "=&r"(flag), "=&r"(tmp), "+Qo"(value)
|
||||
: "Ir"(decrement), "r"(&value)
|
||||
: "cc");
|
||||
return old_value - decrement != 0;
|
||||
integer_t old_value, flag, tmp;
|
||||
__asm__ volatile(" dmb sy\n\t"
|
||||
"1: ldrex %0, [%5]\n\t"
|
||||
" sub %2, %0, %4\n\t"
|
||||
" strex %1, %2, [%5]\n\t"
|
||||
" teq %1, #0\n\t"
|
||||
" bne 1b\n\t"
|
||||
" dmb sy\n\t"
|
||||
: "=&r"(old_value), "=&r"(flag), "=&r"(tmp),
|
||||
"+Qo"(value)
|
||||
: "Ir"(decrement), "r"(&value)
|
||||
: "cc");
|
||||
return old_value - decrement != 0;
|
||||
#elif defined ZMQ_ATOMIC_COUNTER_MUTEX
|
||||
sync.lock ();
|
||||
value -= decrement;
|
||||
bool result = value ? true : false;
|
||||
sync.unlock ();
|
||||
return result;
|
||||
sync.lock ();
|
||||
value -= decrement;
|
||||
bool result = value ? true : false;
|
||||
sync.unlock ();
|
||||
return result;
|
||||
#else
|
||||
#error atomic_counter is not implemented for this platform
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
inline integer_t get () const
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
private:
|
||||
inline integer_t get () const { return value; }
|
||||
|
||||
private:
|
||||
#if defined ZMQ_ATOMIC_COUNTER_CXX11
|
||||
std::atomic<integer_t> value;
|
||||
std::atomic<integer_t> value;
|
||||
#else
|
||||
volatile integer_t value;
|
||||
volatile integer_t value;
|
||||
#endif
|
||||
|
||||
#if defined ZMQ_ATOMIC_COUNTER_MUTEX
|
||||
mutex_t sync;
|
||||
mutex_t sync;
|
||||
#endif
|
||||
|
||||
#if ! defined ZMQ_ATOMIC_COUNTER_CXX11
|
||||
atomic_counter_t (const atomic_counter_t&);
|
||||
const atomic_counter_t& operator = (const atomic_counter_t&);
|
||||
#if !defined ZMQ_ATOMIC_COUNTER_CXX11
|
||||
atomic_counter_t (const atomic_counter_t &);
|
||||
const atomic_counter_t &operator= (const atomic_counter_t &);
|
||||
#endif
|
||||
#if defined (__GNUC__) || defined ( __INTEL_COMPILER) || \
|
||||
(defined (__SUNPRO_C) && __SUNPRO_C >= 0x590) || \
|
||||
(defined (__SUNPRO_CC) && __SUNPRO_CC >= 0x590)
|
||||
} __attribute__ ((aligned (sizeof (void *))));
|
||||
#if defined(__GNUC__) || defined(__INTEL_COMPILER) \
|
||||
|| (defined(__SUNPRO_C) && __SUNPRO_C >= 0x590) \
|
||||
|| (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x590)
|
||||
} __attribute__ ((aligned (sizeof (void *))));
|
||||
#else
|
||||
};
|
||||
};
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
// Remove macros local to this file.
|
||||
|
@ -44,7 +44,8 @@
|
||||
#define ZMQ_ATOMIC_PTR_TILE
|
||||
#elif defined ZMQ_HAVE_WINDOWS
|
||||
#define ZMQ_ATOMIC_PTR_WINDOWS
|
||||
#elif (defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_NETBSD || defined ZMQ_HAVE_GNU)
|
||||
#elif (defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_NETBSD \
|
||||
|| defined ZMQ_HAVE_GNU)
|
||||
#define ZMQ_ATOMIC_PTR_ATOMIC_H
|
||||
#else
|
||||
#define ZMQ_ATOMIC_PTR_MUTEX
|
||||
@ -64,154 +65,138 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// This class encapsulates several atomic operations on pointers.
|
||||
|
||||
// This class encapsulates several atomic operations on pointers.
|
||||
template <typename T> class atomic_ptr_t
|
||||
{
|
||||
public:
|
||||
// Initialise atomic pointer
|
||||
inline atomic_ptr_t () { ptr = NULL; }
|
||||
|
||||
template <typename T> class atomic_ptr_t
|
||||
// Destroy atomic pointer
|
||||
inline ~atomic_ptr_t () {}
|
||||
|
||||
// Set value of atomic pointer in a non-threadsafe way
|
||||
// Use this function only when you are sure that at most one
|
||||
// thread is accessing the pointer at the moment.
|
||||
inline void set (T *ptr_) { this->ptr = ptr_; }
|
||||
|
||||
// Perform atomic 'exchange pointers' operation. Pointer is set
|
||||
// to the 'val' value. Old value is returned.
|
||||
inline T *xchg (T *val_)
|
||||
{
|
||||
public:
|
||||
|
||||
// Initialise atomic pointer
|
||||
inline atomic_ptr_t ()
|
||||
{
|
||||
ptr = NULL;
|
||||
}
|
||||
|
||||
// Destroy atomic pointer
|
||||
inline ~atomic_ptr_t ()
|
||||
{
|
||||
}
|
||||
|
||||
// Set value of atomic pointer in a non-threadsafe way
|
||||
// Use this function only when you are sure that at most one
|
||||
// thread is accessing the pointer at the moment.
|
||||
inline void set (T *ptr_)
|
||||
{
|
||||
this->ptr = ptr_;
|
||||
}
|
||||
|
||||
// Perform atomic 'exchange pointers' operation. Pointer is set
|
||||
// to the 'val' value. Old value is returned.
|
||||
inline T *xchg (T *val_)
|
||||
{
|
||||
#if defined ZMQ_ATOMIC_PTR_WINDOWS
|
        return (T *) InterlockedExchangePointer ((PVOID *) &ptr, val_);
#elif defined ZMQ_ATOMIC_PTR_INTRINSIC
        return (T *) __atomic_exchange_n (&ptr, val_, __ATOMIC_ACQ_REL);
#elif defined ZMQ_ATOMIC_PTR_CXX11
        return ptr.exchange (val_, std::memory_order_acq_rel);
#elif defined ZMQ_ATOMIC_PTR_ATOMIC_H
        return (T *) atomic_swap_ptr (&ptr, val_);
#elif defined ZMQ_ATOMIC_PTR_TILE
        return (T *) arch_atomic_exchange (&ptr, val_);
#elif defined ZMQ_ATOMIC_PTR_X86
        T *old;
        __asm__ volatile("lock; xchg %0, %2"
                         : "=r"(old), "=m"(ptr)
                         : "m"(ptr), "0"(val_));
        return old;
#elif defined ZMQ_ATOMIC_PTR_ARM
        T *old;
        unsigned int flag;
        __asm__ volatile(" dmb sy\n\t"
                         "1: ldrex %1, [%3]\n\t"
                         " strex %0, %4, [%3]\n\t"
                         " teq %0, #0\n\t"
                         " bne 1b\n\t"
                         " dmb sy\n\t"
                         : "=&r"(flag), "=&r"(old), "+Qo"(ptr)
                         : "r"(&ptr), "r"(val_)
                         : "cc");
        return old;
#elif defined ZMQ_ATOMIC_PTR_MUTEX
        sync.lock ();
        T *old = (T *) ptr;
        ptr = val_;
        sync.unlock ();
        return old;
#else
#error atomic_ptr is not implemented for this platform
#endif
    }

    // Perform atomic 'compare and swap' operation on the pointer.
    // The pointer is compared to 'cmp' argument and if they are
    // equal, its value is set to 'val'. Old value of the pointer
    // is returned.
    inline T *cas (T *cmp_, T *val_)
    {
#if defined ZMQ_ATOMIC_PTR_WINDOWS
        return (T *) InterlockedCompareExchangePointer ((volatile PVOID *) &ptr,
                                                        val_, cmp_);
#elif defined ZMQ_ATOMIC_PTR_INTRINSIC
        T *old = cmp_;
        __atomic_compare_exchange_n (&ptr, (volatile T **) &old, val_, false,
                                     __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
        return old;
#elif defined ZMQ_ATOMIC_PTR_CXX11
        ptr.compare_exchange_strong (cmp_, val_, std::memory_order_acq_rel);
        return cmp_;
#elif defined ZMQ_ATOMIC_PTR_ATOMIC_H
        return (T *) atomic_cas_ptr (&ptr, cmp_, val_);
#elif defined ZMQ_ATOMIC_PTR_TILE
        return (T *) arch_atomic_val_compare_and_exchange (&ptr, cmp_, val_);
#elif defined ZMQ_ATOMIC_PTR_X86
        T *old;
        __asm__ volatile("lock; cmpxchg %2, %3"
                         : "=a"(old), "=m"(ptr)
                         : "r"(val_), "m"(ptr), "0"(cmp_)
                         : "cc");
        return old;
#elif defined ZMQ_ATOMIC_PTR_ARM
        T *old;
        unsigned int flag;
        __asm__ volatile(" dmb sy\n\t"
                         "1: ldrex %1, [%3]\n\t"
                         " mov %0, #0\n\t"
                         " teq %1, %4\n\t"
                         " it eq\n\t"
                         " strexeq %0, %5, [%3]\n\t"
                         " teq %0, #0\n\t"
                         " bne 1b\n\t"
                         " dmb sy\n\t"
                         : "=&r"(flag), "=&r"(old), "+Qo"(ptr)
                         : "r"(&ptr), "r"(cmp_), "r"(val_)
                         : "cc");
        return old;
#elif defined ZMQ_ATOMIC_PTR_MUTEX
        sync.lock ();
        T *old = (T *) ptr;
        if (ptr == cmp_)
            ptr = val_;
        sync.unlock ();
        return old;
#else
#error atomic_ptr is not implemented for this platform
#endif
    }

  private:
#if defined ZMQ_ATOMIC_PTR_CXX11
    std::atomic<T *> ptr;
#else
    volatile T *ptr;
#endif

#if defined ZMQ_ATOMIC_PTR_MUTEX
    mutex_t sync;
#endif

#if !defined ZMQ_ATOMIC_PTR_CXX11
    atomic_ptr_t (const atomic_ptr_t &);
    const atomic_ptr_t &operator= (const atomic_ptr_t &);
#endif
};
}

// Remove macros local to this file.
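For readers skimming the reformatted atomic_ptr_t above: its xchg () and cas () members wrap the usual compare-and-swap idiom. A minimal, self-contained sketch of the kind of retry loop callers of such a cas () typically write, using plain std::atomic and a hypothetical node_t rather than the libzmq class itself:

#include <atomic>

struct node_t
{
    int value;
    node_t *next;
};

// Lock-free stack push built on compare-and-swap; on failure,
// compare_exchange_weak reloads n_->next with the current head,
// so the loop simply retries until the swap succeeds.
void push (std::atomic<node_t *> &head_, node_t *n_)
{
    n_->next = head_.load (std::memory_order_relaxed);
    while (!head_.compare_exchange_weak (n_->next, n_,
                                         std::memory_order_acq_rel,
                                         std::memory_order_relaxed))
        ;
}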
224 src/blob.hpp
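As an aid to reading the reformatted blob_t below, a short usage sketch of its ownership rules (owning copy, non-owning reference, explicit deep copy); everything except blob_t and reference_tag_t themselves is illustrative only:

#include "blob.hpp"

void blob_usage_sketch ()
{
    unsigned char raw[4] = {0xde, 0xad, 0xbe, 0xef};

    // Owning blob: allocates its own buffer, copies 'raw', frees it on destruction.
    zmq::blob_t owner (raw, sizeof raw);

    // Non-owning view: references 'raw' directly and must not outlive it.
    zmq::blob_t view (raw, sizeof raw, zmq::reference_tag_t ());

    // Copies must be requested explicitly on modern compilers.
    zmq::blob_t copy;
    copy.set_deep_copy (owner);
}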
@ -36,7 +36,7 @@

#if __cplusplus >= 201103L || defined(_MSC_VER) && _MSC_VER > 1700
#define ZMQ_HAS_MOVE_SEMANTICS
#define ZMQ_MAP_INSERT_OR_EMPLACE(k, v) emplace (k, v)
#define ZMQ_PUSH_OR_EMPLACE_BACK emplace_back
#define ZMQ_MOVE(x) std::move (x)
#else

@ -47,140 +47,140 @@

namespace zmq
{
struct reference_tag_t
{
};

// Object to hold dynamically allocated opaque binary data.
// On modern compilers, it will be movable but not copyable. Copies
// must be explicitly created by set_deep_copy.
// On older compilers, it is copyable for syntactical reasons.
struct blob_t
{
    // Creates an empty blob_t.
    blob_t () : data_ (0), size_ (0), owned_ (true) {}

    // Creates a blob_t of a given size, with uninitialized content.
    blob_t (const size_t size) :
        data_ ((unsigned char *) malloc (size)),
        size_ (size),
        owned_ (true)
    {
    }

    // Creates a blob_t of a given size, an initializes content by copying
    // from another buffer.
    blob_t (const unsigned char *const data, const size_t size) :
        data_ ((unsigned char *) malloc (size)),
        size_ (size),
        owned_ (true)
    {
        memcpy (data_, data, size_);
    }

    // Creates a blob_t for temporary use that only references a
    // pre-allocated block of data.
    // Use with caution and ensure that the blob_t will not outlive
    // the referenced data.
    blob_t (unsigned char *const data, const size_t size, reference_tag_t) :
        data_ (data),
        size_ (size),
        owned_ (false)
    {
    }

    // Returns the size of the blob_t.
    size_t size () const { return size_; }

    // Returns a pointer to the data of the blob_t.
    const unsigned char *data () const { return data_; }

    // Returns a pointer to the data of the blob_t.
    unsigned char *data () { return data_; }

    // Defines an order relationship on blob_t.
    bool operator< (blob_t const &other) const
    {
        int cmpres = memcmp (data_, other.data_, std::min (size_, other.size_));
        return cmpres < 0 || (cmpres == 0 && size_ < other.size_);
    }

    // Sets a blob_t to a deep copy of another blob_t.
    void set_deep_copy (blob_t const &other)
    {
        clear ();
        data_ = (unsigned char *) malloc (other.size_);
        size_ = other.size_;
        owned_ = true;
        memcpy (data_, other.data_, size_);
    }

    // Sets a blob_t to a copy of a given buffer.
    void set (const unsigned char *const data, const size_t size)
    {
        clear ();
        data_ = (unsigned char *) malloc (size);
        size_ = size;
        owned_ = true;
        memcpy (data_, data, size_);
    }

    // Empties a blob_t.
    void clear ()
    {
        if (owned_) {
            free (data_);
        }
        data_ = 0;
        size_ = 0;
    }

    ~blob_t ()
    {
        if (owned_) {
            free (data_);
        }
    }

#ifdef ZMQ_HAS_MOVE_SEMANTICS
    blob_t (const blob_t &) = delete;
    blob_t &operator= (const blob_t &) = delete;

    blob_t (blob_t &&other) :
        data_ (other.data_),
        size_ (other.size_),
        owned_ (other.owned_)
    {
        other.owned_ = false;
    }
    blob_t &operator= (blob_t &&other)
    {
        if (this != &other) {
            clear ();
            data_ = other.data_;
            size_ = other.size_;
            owned_ = other.owned_;
            other.owned_ = false;
        }
        return *this;
    }
#else
    blob_t (const blob_t &other) : owned_ (false) { set_deep_copy (other); }
    blob_t &operator= (const blob_t &other)
    {
        if (this != &other) {
            clear ();
            set_deep_copy (other);
        }
        return *this;
    }
#endif

  private:
    unsigned char *data_;
    size_t size_;
    bool owned_;
};
}

#endif
@ -69,7 +69,6 @@ int zmq::client_t::xrecv (msg_t *msg_)

        // Drop any messages with more flag
        while (rc == 0 && msg_->flags () & msg_t::more) {
            // drop all frames of the current multi-frame message
            rc = fq.recvpipe (msg_, NULL);
@ -37,45 +37,39 @@

namespace zmq
{
class ctx_t;
class msg_t;
class pipe_t;
class io_thread_t;
class socket_base_t;

class client_t : public socket_base_t
{
  public:
    client_t (zmq::ctx_t *parent_, uint32_t tid_, int sid);
    ~client_t ();

  protected:
    // Overrides of functions from socket_base_t.
    void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_);
    int xsend (zmq::msg_t *msg_);
    int xrecv (zmq::msg_t *msg_);
    bool xhas_in ();
    bool xhas_out ();
    const blob_t &get_credential () const;
    void xread_activated (zmq::pipe_t *pipe_);
    void xwrite_activated (zmq::pipe_t *pipe_);
    void xpipe_terminated (zmq::pipe_t *pipe_);

  private:
    // Messages are fair-queued from inbound pipes. And load-balanced to
    // the outbound pipes.
    fq_t fq;
    lb_t lb;

    client_t (const client_t &);
    const client_t &operator= (const client_t &);
};
}

#endif
@ -72,58 +72,61 @@ int alt_clock_gettime (int clock_id, timespec *ts)
#endif

#ifdef ZMQ_HAVE_WINDOWS
typedef ULONGLONG (*f_compatible_get_tick_count64) ();

static zmq::mutex_t compatible_get_tick_count64_mutex;

ULONGLONG compatible_get_tick_count64 ()
{
#ifdef ZMQ_HAVE_WINDOWS_UWP
    const ULONGLONG result = ::GetTickCount64 ();
    return result;
#else
    zmq::scoped_lock_t locker (compatible_get_tick_count64_mutex);

    static DWORD s_wrap = 0;
    static DWORD s_last_tick = 0;
    const DWORD current_tick = ::GetTickCount ();

    if (current_tick < s_last_tick)
        ++s_wrap;

    s_last_tick = current_tick;
    const ULONGLONG result = (static_cast<ULONGLONG> (s_wrap) << 32)
                             + static_cast<ULONGLONG> (current_tick);

    return result;
#endif
}

f_compatible_get_tick_count64 init_compatible_get_tick_count64 ()
{
    f_compatible_get_tick_count64 func = NULL;
#if !defined ZMQ_HAVE_WINDOWS_UWP

    HMODULE module = ::LoadLibraryA ("Kernel32.dll");
    if (module != NULL)
        func = reinterpret_cast<f_compatible_get_tick_count64> (
          ::GetProcAddress (module, "GetTickCount64"));
#endif
    if (func == NULL)
        func = compatible_get_tick_count64;

#if !defined ZMQ_HAVE_WINDOWS_UWP
    ::FreeLibrary (module);
#endif

    return func;
}

static f_compatible_get_tick_count64 my_get_tick_count64 =
  init_compatible_get_tick_count64 ();
#endif

zmq::clock_t::clock_t () :
    last_tsc (rdtsc ()),
#ifdef ZMQ_HAVE_WINDOWS
    last_time (static_cast<uint64_t> ((*my_get_tick_count64) ()))
#else
    last_time (now_us () / 1000)
#endif

@ -156,7 +159,8 @@ uint64_t zmq::clock_t::now_us ()
    // Use POSIX clock_gettime function to get precise monotonic time.
    struct timespec tv;

#if defined ZMQ_HAVE_OSX \
  && __MAC_OS_X_VERSION_MIN_REQUIRED < 101200 // less than macOS 10.12
    int rc = alt_clock_gettime (SYSTEM_CLOCK, &tv);
#else
    int rc = clock_gettime (CLOCK_MONOTONIC, &tv);

@ -164,7 +168,7 @@ uint64_t zmq::clock_t::now_us ()
    // Fix case where system has clock_gettime but CLOCK_MONOTONIC is not supported.
    // This should be a configuration check, but I looked into it and writing an
    // AC_FUNC_CLOCK_MONOTONIC seems beyond my powers.
    if (rc != 0) {
        // Use POSIX gettimeofday function to get precise time.
        struct timeval tv;
        int rc = gettimeofday (&tv, NULL);

@ -193,14 +197,13 @@ uint64_t zmq::clock_t::now_ms ()
    uint64_t tsc = rdtsc ();

    // If TSC is not supported, get precise time and chop off the microseconds.
    if (!tsc) {
#ifdef ZMQ_HAVE_WINDOWS
        // Under Windows, now_us is not so reliable since QueryPerformanceCounter
        // does not guarantee that it will use a hardware that offers a monotonic timer.
        // So, lets use GetTickCount when GetTickCount64 is not available with an workaround
        // to its 32 bit limitation.
        return static_cast<uint64_t> ((*my_get_tick_count64) ());
#else
        return now_us () / 1000;
#endif

@ -214,7 +217,7 @@ uint64_t zmq::clock_t::now_ms ()
    last_tsc = tsc;
#ifdef ZMQ_HAVE_WINDOWS
    last_time = static_cast<uint64_t> ((*my_get_tick_count64) ());
#else
    last_time = now_us () / 1000;
#endif

@ -227,27 +230,29 @@ uint64_t zmq::clock_t::rdtsc ()
    return __rdtsc ();
#elif (defined __GNUC__ && (defined __i386__ || defined __x86_64__))
    uint32_t low, high;
    __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
    return (uint64_t) high << 32 | low;
#elif (defined __SUNPRO_CC && (__SUNPRO_CC >= 0x5100) \
       && (defined __i386 || defined __amd64 || defined __x86_64))
    union
    {
        uint64_t u64val;
        uint32_t u32val[2];
    } tsc;
    asm("rdtsc" : "=a"(tsc.u32val[0]), "=d"(tsc.u32val[1]));
    return tsc.u64val;
#elif defined(__s390__)
    uint64_t tsc;
    asm("\tstck\t%0\n" : "=Q"(tsc) : : "cc");
    return (tsc);
#else
    struct timespec ts;
#if defined ZMQ_HAVE_OSX \
  && __MAC_OS_X_VERSION_MIN_REQUIRED < 101200 // less than macOS 10.12
    alt_clock_gettime (SYSTEM_CLOCK, &ts);
#else
    clock_gettime (CLOCK_MONOTONIC, &ts);
#endif
    return (uint64_t) (ts.tv_sec) * 1000000000 + ts.tv_nsec;
#endif
}
@ -48,36 +48,32 @@ int alt_clock_gettime (int clock_id, timespec *ts);

namespace zmq
{
class clock_t
{
  public:
    clock_t ();
    ~clock_t ();

    // CPU's timestamp counter. Returns 0 if it's not available.
    static uint64_t rdtsc ();

    // High precision timestamp.
    static uint64_t now_us ();

    // Low precision timestamp. In tight loops generating it can be
    // 10 to 100 times faster than the high precision timestamp.
    uint64_t now_ms ();

  private:
    // TSC timestamp of when last time measurement was made.
    uint64_t last_tsc;

    // Physical time corresponding to the TSC above (in milliseconds).
    uint64_t last_time;

    clock_t (const clock_t &);
    const clock_t &operator= (const clock_t &);
};
}

#endif
250 src/command.hpp
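command_t, reformatted below, is a destination pointer plus a type tag and a union of per-command argument structs. As a trimmed-down, self-contained illustration of how such a tagged command is typically built and dispatched (hypothetical mini_command_t, not the libzmq type):

#include <cstdint>
#include <cstdio>

// Hypothetical two-command analogue of command_t: a type tag plus a union of args.
struct mini_command_t
{
    enum type_t
    {
        activate_write,
        term
    } type;

    union
    {
        struct
        {
            uint64_t msgs_read;
        } activate_write;
        struct
        {
            int linger;
        } term;
    } args;
};

// Dispatch on the tag, then read only the union member that matches it.
void process (const mini_command_t &cmd_)
{
    switch (cmd_.type) {
        case mini_command_t::activate_write:
            std::printf ("peer has read %llu messages\n",
                         (unsigned long long) cmd_.args.activate_write.msgs_read);
            break;
        case mini_command_t::term:
            std::printf ("terminate, linger = %d ms\n", cmd_.args.term.linger);
            break;
    }
}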
@ -35,153 +35,169 @@

namespace zmq
{
class object_t;
class own_t;
struct i_engine;
class pipe_t;
class socket_base_t;

// This structure defines the commands that can be sent between threads.

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4324) // C4324: alignment padding warnings
__declspec(align (64))
#endif
struct command_t
{
    // Object to process the command.
    zmq::object_t *destination;

    enum type_t
    {
        stop,
        plug,
        own,
        attach,
        bind,
        activate_read,
        activate_write,
        hiccup,
        pipe_term,
        pipe_term_ack,
        pipe_hwm,
        term_req,
        term,
        term_ack,
        term_endpoint,
        reap,
        reaped,
        inproc_connected,
        done
    } type;

    union args_t
    {
        // Sent to I/O thread to let it know that it should
        // terminate itself.
        struct
        {
        } stop;

        // Sent to I/O object to make it register with its I/O thread.
        struct
        {
        } plug;

        // Sent to socket to let it know about the newly created object.
        struct
        {
            zmq::own_t *object;
        } own;

        // Attach the engine to the session. If engine is NULL, it informs
        // session that the connection have failed.
        struct
        {
            struct i_engine *engine;
        } attach;

        // Sent from session to socket to establish pipe(s) between them.
        // Caller have used inc_seqnum beforehand sending the command.
        struct
        {
            zmq::pipe_t *pipe;
        } bind;

        // Sent by pipe writer to inform dormant pipe reader that there
        // are messages in the pipe.
        struct
        {
        } activate_read;

        // Sent by pipe reader to inform pipe writer about how many
        // messages it has read so far.
        struct
        {
            uint64_t msgs_read;
        } activate_write;

        // Sent by pipe reader to writer after creating a new inpipe.
        // The parameter is actually of type pipe_t::upipe_t, however,
        // its definition is private so we'll have to do with void*.
        struct
        {
            void *pipe;
        } hiccup;

        // Sent by pipe reader to pipe writer to ask it to terminate
        // its end of the pipe.
        struct
        {
        } pipe_term;

        // Pipe writer acknowledges pipe_term command.
        struct
        {
        } pipe_term_ack;

        // Sent by one of pipe to another part for modify hwm
        struct
        {
            int inhwm;
            int outhwm;
        } pipe_hwm;

        // Sent by I/O object ot the socket to request the shutdown of
        // the I/O object.
        struct
        {
            zmq::own_t *object;
        } term_req;

        // Sent by socket to I/O object to start its shutdown.
        struct
        {
            int linger;
        } term;

        // Sent by I/O object to the socket to acknowledge it has
        // shut down.
        struct
        {
        } term_ack;

        // Sent by session_base (I/O thread) to socket (application thread)
        // to ask to disconnect the endpoint.
        struct
        {
            std::string *endpoint;
        } term_endpoint;

        // Transfers the ownership of the closed socket
        // to the reaper thread.
        struct
        {
            zmq::socket_base_t *socket;
        } reap;

        // Closed socket notifies the reaper that it's already deallocated.
        struct
        {
        } reaped;

        // Sent by reaper thread to the term thread when all the sockets
        // are successfully deallocated.
        struct
        {
        } done;

    } args;
#ifdef _MSC_VER
};
#pragma warning(pop)
#else
} __attribute__ ((aligned (64)));
#endif
}
@ -58,38 +58,26 @@

namespace zmq
{
class condition_variable_t
{
  public:
    inline condition_variable_t () { zmq_assert (false); }

    inline ~condition_variable_t () {}

    inline int wait (mutex_t *mutex_, int timeout_)
    {
        zmq_assert (false);
        return -1;
    }

    inline void broadcast () { zmq_assert (false); }

  private:
    // Disable copy construction and assignment.
    condition_variable_t (const condition_variable_t &);
    void operator= (const condition_variable_t &);
};
}

#else

@ -103,94 +91,79 @@ namespace zmq
{

#if !defined(ZMQ_HAVE_WINDOWS_TARGET_XP) && _WIN32_WINNT >= 0x0600
class condition_variable_t
{
  public:
    inline condition_variable_t () { InitializeConditionVariable (&cv); }

    inline ~condition_variable_t () {}

    inline int wait (mutex_t *mutex_, int timeout_)
    {
        int rc = SleepConditionVariableCS (&cv, mutex_->get_cs (), timeout_);

        if (rc != 0)
            return 0;

        rc = GetLastError ();

        if (rc != ERROR_TIMEOUT)
            win_assert (rc);

        errno = EAGAIN;
        return -1;
    }

    inline void broadcast () { WakeAllConditionVariable (&cv); }

  private:
    CONDITION_VARIABLE cv;

    // Disable copy construction and assignment.
    condition_variable_t (const condition_variable_t &);
    void operator= (const condition_variable_t &);
};
#else
class condition_variable_t
{
  public:
    inline condition_variable_t () {}

    inline ~condition_variable_t () {}

    inline int wait (mutex_t *mutex_, int timeout_)
    {
        std::unique_lock<std::mutex> lck (mtx); // lock mtx
        mutex_->unlock ();                      // unlock mutex_
        int res = 0;
        if (timeout_ == -1) {
            cv.wait (
              lck); // unlock mtx and wait cv.notify_all(), lock mtx after cv.notify_all()
        } else if (cv.wait_for (lck, std::chrono::milliseconds (timeout_))
                   == std::cv_status::timeout) {
            // time expired
            errno = EAGAIN;
            res = -1;
        }
        lck.unlock ();   // unlock mtx
        mutex_->lock (); // lock mutex_
        return res;
    }

    inline void broadcast ()
    {
        std::unique_lock<std::mutex> lck (mtx); // lock mtx
        cv.notify_all ();
    }

  private:
    std::condition_variable cv;
    std::mutex mtx;

    // Disable copy construction and assignment.
    condition_variable_t (const condition_variable_t &);
    void operator= (const condition_variable_t &);
};

#endif
}

@ -203,74 +176,72 @@ namespace zmq

namespace zmq
{
class condition_variable_t
{
  public:
    inline condition_variable_t ()
    {
        int rc = pthread_cond_init (&cond, NULL);
        posix_assert (rc);
    }

    inline ~condition_variable_t ()
    {
        int rc = pthread_cond_destroy (&cond);
        posix_assert (rc);
    }

    inline int wait (mutex_t *mutex_, int timeout_)
    {
        int rc;

        if (timeout_ != -1) {
            struct timespec timeout;

#if defined ZMQ_HAVE_OSX \
  && __MAC_OS_X_VERSION_MIN_REQUIRED < 101200 // less than macOS 10.12
            alt_clock_gettime (SYSTEM_CLOCK, &timeout);
#else
            clock_gettime (CLOCK_MONOTONIC, &timeout);
#endif

            timeout.tv_sec += timeout_ / 1000;
            timeout.tv_nsec += (timeout_ % 1000) * 1000000;

            if (timeout.tv_nsec > 1000000000) {
                timeout.tv_sec++;
                timeout.tv_nsec -= 1000000000;
            }

            rc = pthread_cond_timedwait (&cond, mutex_->get_mutex (), &timeout);
        } else
            rc = pthread_cond_wait (&cond, mutex_->get_mutex ());

        if (rc == 0)
            return 0;

        if (rc == ETIMEDOUT) {
            errno = EAGAIN;
            return -1;
        }

        posix_assert (rc);
        return -1;
    }

    inline void broadcast ()
    {
        int rc = pthread_cond_broadcast (&cond);
        posix_assert (rc);
    }

  private:
    pthread_cond_t cond;

    // Disable copy construction and assignment.
    condition_variable_t (const condition_variable_t &);
    const condition_variable_t &operator= (const condition_variable_t &);
};
}

#endif
100 src/config.hpp
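The first constant below, message_pipe_granularity, claims roughly a 99.6% reduction in allocation impact; that figure simply follows from doing one allocation per 256 messages instead of one per message. A tiny sketch of the arithmetic (illustrative only):

#include <cstdio>

int main ()
{
    const double granularity = 256.0;
    // One allocation now covers 256 messages, so the fraction avoided is:
    const double saved = 1.0 - 1.0 / granularity;
    std::printf ("allocations avoided: %.1f%%\n", saved * 100.0); // ~99.6%
    return 0;
}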
@ -32,67 +32,65 @@

namespace zmq
{
// Compile-time settings.

enum
{
    // Number of new messages in message pipe needed to trigger new memory
    // allocation. Setting this parameter to 256 decreases the impact of
    // memory allocation by approximately 99.6%
    message_pipe_granularity = 256,

    // Commands in pipe per allocation event.
    command_pipe_granularity = 16,

    // Determines how often does socket poll for new commands when it
    // still has unprocessed messages to handle. Thus, if it is set to 100,
    // socket will process 100 inbound messages before doing the poll.
    // If there are no unprocessed messages available, poll is done
    // immediately. Decreasing the value trades overall latency for more
    // real-time behaviour (less latency peaks).
    inbound_poll_rate = 100,

    // Maximal batching size for engines with receiving functionality.
    // So, if there are 10 messages that fit into the batch size, all of
    // them may be read by a single 'recv' system call, thus avoiding
    // unnecessary network stack traversals.
    in_batch_size = 8192,

    // Maximal batching size for engines with sending functionality.
    // So, if there are 10 messages that fit into the batch size, all of
    // them may be written by a single 'send' system call, thus avoiding
    // unnecessary network stack traversals.
    out_batch_size = 8192,

    // Maximal delta between high and low watermark.
    max_wm_delta = 1024,

    // Maximum number of events the I/O thread can process in one go.
    max_io_events = 256,

    // Maximal delay to process command in API thread (in CPU ticks).
    // 3,000,000 ticks equals to 1 - 2 milliseconds on current CPUs.
    // Note that delay is only applied when there is continuous stream of
    // messages to process. If not so, commands are processed immediately.
    max_command_delay = 3000000,

    // Low-precision clock precision in CPU ticks. 1ms. Value of 1000000
    // should be OK for CPU frequencies above 1GHz. If should work
    // reasonably well for CPU frequencies above 500MHz. For lower CPU
    // frequencies you may consider lowering this value to get best
    // possible latencies.
    clock_precision = 1000000,

    // On some OSes the signaler has to be emulated using a TCP
    // connection. In such cases following port is used.
    // If 0, it lets the OS choose a free port without requiring use of a
    // global mutex. The original implementation of a Windows signaler
    // socket used port 5905 instead of letting the OS choose a free port.
    // https://github.com/zeromq/libzmq/issues/1542
    signaler_port = 0
};
}

#endif
277 src/ctx.cpp
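One small piece of the reformatted ctx.cpp below is the background-thread naming in ctx_t::start_thread (), which builds names of the form "<prefix>/ZMQbg/<n>" with an ostringstream. A standalone sketch of that formatting, using a hypothetical make_thread_name helper rather than the member function itself:

#include <sstream>
#include <string>

// Mirrors the naming logic in zmq::ctx_t::start_thread (): an optional
// user-supplied prefix, then "ZMQbg/" and a running thread counter.
std::string make_thread_name (const std::string &prefix_, unsigned int nthreads_started_)
{
    std::ostringstream s;
    if (!prefix_.empty ())
        s << prefix_ << "/";
    s << "ZMQbg/" << nthreads_started_;
    return s.str (); // e.g. "myapp/ZMQbg/0"
}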
@ -53,11 +53,12 @@
#endif

#define ZMQ_CTX_TAG_VALUE_GOOD 0xabadcafe
#define ZMQ_CTX_TAG_VALUE_BAD 0xdeadbeef

int clipped_maxsocket (int max_requested)
{
    if (max_requested >= zmq::poller_t::max_fds ()
        && zmq::poller_t::max_fds () != -1)
        // -1 because we need room for the reaper mailbox.
        max_requested = zmq::poller_t::max_fds () - 1;

@ -80,7 +81,7 @@ zmq::ctx_t::ctx_t () :
    thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT)
{
#ifdef HAVE_FORK
    pid = getpid ();
#endif
#ifdef ZMQ_HAVE_VMCI
    vmci_fd = -1;

@ -104,16 +105,16 @@ zmq::ctx_t::~ctx_t ()
    // Ask I/O threads to terminate. If stop signal wasn't sent to I/O
    // thread subsequent invocation of destructor would hang-up.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        io_threads[i]->stop ();
    }

    // Wait till I/O threads actually terminate.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        LIBZMQ_DELETE (io_threads[i]);
    }

    // Deallocate the reaper thread object.
    LIBZMQ_DELETE (reaper);

    // Deallocate the array of mailboxes. No special work is
    // needed as mailboxes themselves were deallocated with their

@ -134,14 +135,15 @@ bool zmq::ctx_t::valid () const

int zmq::ctx_t::terminate ()
{
    slot_sync.lock ();

    bool saveTerminating = terminating;
    terminating = false;

    // Connect up any pending inproc connections, otherwise we will hang
    pending_connections_t copy = pending_connections;
    for (pending_connections_t::iterator p = copy.begin (); p != copy.end ();
         ++p) {
        zmq::socket_base_t *s = create_socket (ZMQ_PAIR);
        // create_socket might fail eg: out of memory/sockets limit reached
        zmq_assert (s);

@ -156,7 +158,7 @@ int zmq::ctx_t::terminate ()
        // we are a forked child process. Close all file descriptors
        // inherited from the parent.
        for (sockets_t::size_type i = 0; i != sockets.size (); i++)
            sockets[i]->get_mailbox ()->forked ();

        term_mailbox.forked ();
    }

@ -173,11 +175,11 @@ int zmq::ctx_t::terminate ()
        // can be interrupted. If there are no sockets we can ask reaper
        // thread to stop.
        for (sockets_t::size_type i = 0; i != sockets.size (); i++)
            sockets[i]->stop ();
        if (sockets.empty ())
            reaper->stop ();
    }
    slot_sync.unlock ();

    // Wait till reaper thread closes all the sockets.
    command_t cmd;

@ -209,7 +211,7 @@ int zmq::ctx_t::terminate ()

int zmq::ctx_t::shutdown ()
{
    scoped_lock_t locker (slot_sync);

    if (!starting && !terminating) {
        terminating = true;

@ -218,7 +220,7 @@ int zmq::ctx_t::shutdown ()
        // can be interrupted. If there are no sockets we can ask reaper
        // thread to stop.
        for (sockets_t::size_type i = 0; i != sockets.size (); i++)
            sockets[i]->stop ();
        if (sockets.empty ())
            reaper->stop ();
    }

@ -229,65 +231,46 @@ int zmq::ctx_t::shutdown ()
int zmq::ctx_t::set (int option_, int optval_)
{
    int rc = 0;
    if (option_ == ZMQ_MAX_SOCKETS && optval_ >= 1
        && optval_ == clipped_maxsocket (optval_)) {
        scoped_lock_t locker (opt_sync);
        max_sockets = optval_;
    } else if (option_ == ZMQ_IO_THREADS && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        io_thread_count = optval_;
    } else if (option_ == ZMQ_IPV6 && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        ipv6 = (optval_ != 0);
    } else if (option_ == ZMQ_THREAD_PRIORITY && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        thread_priority = optval_;
    } else if (option_ == ZMQ_THREAD_SCHED_POLICY && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        thread_sched_policy = optval_;
    } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_ADD && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        thread_affinity_cpus.insert (optval_);
    } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_REMOVE && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        std::set<int>::iterator it = thread_affinity_cpus.find (optval_);
        if (it != thread_affinity_cpus.end ()) {
            thread_affinity_cpus.erase (it);
        } else {
            errno = EINVAL;
            rc = -1;
        }
    } else if (option_ == ZMQ_THREAD_NAME_PREFIX && optval_ >= 0) {
        std::ostringstream s;
        s << optval_;
        scoped_lock_t locker (opt_sync);
        thread_name_prefix = s.str ();
    } else if (option_ == ZMQ_BLOCKY && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        blocky = (optval_ != 0);
    } else if (option_ == ZMQ_MAX_MSGSZ && optval_ >= 0) {
        scoped_lock_t locker (opt_sync);
        max_msgsz = optval_ < INT_MAX ? optval_ : INT_MAX;
    } else {
        errno = EINVAL;
        rc = -1;
    }

@ -299,23 +282,17 @@ int zmq::ctx_t::get (int option_)
    int rc = 0;
    if (option_ == ZMQ_MAX_SOCKETS)
        rc = max_sockets;
    else if (option_ == ZMQ_SOCKET_LIMIT)
        rc = clipped_maxsocket (65535);
    else if (option_ == ZMQ_IO_THREADS)
        rc = io_thread_count;
    else if (option_ == ZMQ_IPV6)
        rc = ipv6;
    else if (option_ == ZMQ_BLOCKY)
        rc = blocky;
    else if (option_ == ZMQ_MAX_MSGSZ)
        rc = max_msgsz;
    else if (option_ == ZMQ_MSG_T_SIZE)
        rc = sizeof (zmq_msg_t);
    else {
        errno = EINVAL;

@ -369,7 +346,7 @@ bool zmq::ctx_t::start ()
            goto fail_cleanup_reaper;
        }
        io_threads.push_back (io_thread);
        slots[i] = io_thread->get_mailbox ();
        io_thread->start ();
    }

@ -429,19 +406,19 @@ zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
        return NULL;
    }
    sockets.push_back (s);
    slots[slot] = s->get_mailbox ();

    return s;
}

void zmq::ctx_t::destroy_socket (class socket_base_t *socket_)
{
    scoped_lock_t locker (slot_sync);

    // Free the associated thread slot.
    uint32_t tid = socket_->get_tid ();
    empty_slots.push_back (tid);
    slots[tid] = NULL;

    // Remove the socket from the list of sockets.
    sockets.erase (socket_);

@ -457,25 +434,28 @@ zmq::object_t *zmq::ctx_t::get_reaper ()
    return reaper;
}

void zmq::ctx_t::start_thread (thread_t &thread_,
                               thread_fn *tfn_,
                               void *arg_) const
{
    static unsigned int nthreads_started = 0;

    thread_.setSchedulingParameters (thread_priority, thread_sched_policy,
                                     thread_affinity_cpus);
    thread_.start (tfn_, arg_);
#ifndef ZMQ_HAVE_ANDROID
    std::ostringstream s;
    if (!thread_name_prefix.empty ())
        s << thread_name_prefix << "/";
    s << "ZMQbg/" << nthreads_started;
    thread_.setThreadName (s.str ().c_str ());
#endif
    nthreads_started++;
}

void zmq::ctx_t::send_command (uint32_t tid_, const command_t &command_)
{
    slots[tid_]->send (command_);
}

zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)

@ -488,10 +468,10 @@ zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
    io_thread_t *selected_io_thread = NULL;
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        if (!affinity_ || (affinity_ & (uint64_t (1) << i))) {
            int load = io_threads[i]->get_load ();
            if (selected_io_thread == NULL || load < min_load) {
                min_load = load;
                selected_io_thread = io_threads[i];
            }
        }
    }

@ -499,12 +479,12 @@ zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
}

int zmq::ctx_t::register_endpoint (const char *addr_,
                                   const endpoint_t &endpoint_)
{
    scoped_lock_t locker (endpoints_sync);

    const bool inserted =
      endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (addr_, endpoint_).second;
    if (!inserted) {
        errno = EADDRINUSE;
        return -1;

@ -512,10 +492,10 @@ int zmq::ctx_t::register_endpoint (const char *addr_,
    return 0;
}

int zmq::ctx_t::unregister_endpoint (const std::string &addr_,
                                     socket_base_t *socket_)
{
    scoped_lock_t locker (endpoints_sync);

    const endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end () || it->second.socket != socket_) {

@ -531,7 +511,7 @@ int zmq::ctx_t::unregister_endpoint (
void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
{
    scoped_lock_t locker (endpoints_sync);

    endpoints_t::iterator it = endpoints.begin ();
    while (it != endpoints.end ()) {

@ -547,58 +527,69 @@ void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
zmq::endpoint_t zmq::ctx_t::find_endpoint (const char *addr_)
{
    scoped_lock_t locker (endpoints_sync);

    endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end ()) {
        errno = ECONNREFUSED;
        endpoint_t empty = {NULL, options_t ()};
        return empty;
    }
    endpoint_t endpoint = it->second;

    // Increment the command sequence number of the peer so that it won't
    // get deallocated until "bind" command is issued by the caller.
    // The subsequent 'bind' has to be called with inc_seqnum parameter
    // set to false, so that the seqnum isn't incremented twice.
    endpoint.socket->inc_seqnum ();

    return endpoint;
}

void zmq::ctx_t::pend_connection (const std::string &addr_,
                                  const endpoint_t &endpoint_,
                                  pipe_t **pipes_)
{
    scoped_lock_t locker (endpoints_sync);

    const pending_connection_t pending_connection = {endpoint_, pipes_[0],
                                                     pipes_[1]};

    endpoints_t::iterator it = endpoints.find (addr_);
    if (it == endpoints.end ()) {
        // Still no bind.
        endpoint_.socket->inc_seqnum ();
        pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_,
                                                       pending_connection);
    } else {
        // Bind has happened in the mean time, connect directly
        connect_inproc_sockets (it->second.socket, it->second.options,
                                pending_connection, connect_side);
    }
}

void zmq::ctx_t::connect_pending (const char *addr_,
                                  zmq::socket_base_t *bind_socket_)
{
    scoped_lock_t locker (endpoints_sync);

    std::pair<pending_connections_t::iterator, pending_connections_t::iterator>
      pending = pending_connections.equal_range (addr_);
    for (pending_connections_t::iterator p = pending.first; p != pending.second;
         ++p)
        connect_inproc_sockets (bind_socket_, endpoints[addr_].options,
                                p->second, bind_side);

    pending_connections.erase (pending.first, pending.second);
}

void zmq::ctx_t::connect_inproc_sockets (
  zmq::socket_base_t *bind_socket_,
  options_t &bind_options,
  const pending_connection_t &pending_connection_,
side side_)
|
||||
{
|
||||
bind_socket_->inc_seqnum();
|
||||
bind_socket_->inc_seqnum ();
|
||||
pending_connection_.bind_pipe->set_tid (bind_socket_->get_tid ());
|
||||
|
||||
if (!bind_options.recv_routing_id) {
|
||||
@ -609,23 +600,29 @@ void zmq::ctx_t::connect_inproc_sockets (zmq::socket_base_t *bind_socket_,
|
||||
errno_assert (rc == 0);
|
||||
}
|
||||
|
||||
bool conflate = pending_connection_.endpoint.options.conflate &&
|
||||
(pending_connection_.endpoint.options.type == ZMQ_DEALER ||
|
||||
pending_connection_.endpoint.options.type == ZMQ_PULL ||
|
||||
pending_connection_.endpoint.options.type == ZMQ_PUSH ||
|
||||
pending_connection_.endpoint.options.type == ZMQ_PUB ||
|
||||
pending_connection_.endpoint.options.type == ZMQ_SUB);
|
||||
bool conflate =
|
||||
pending_connection_.endpoint.options.conflate
|
||||
&& (pending_connection_.endpoint.options.type == ZMQ_DEALER
|
||||
|| pending_connection_.endpoint.options.type == ZMQ_PULL
|
||||
|| pending_connection_.endpoint.options.type == ZMQ_PUSH
|
||||
|| pending_connection_.endpoint.options.type == ZMQ_PUB
|
||||
|| pending_connection_.endpoint.options.type == ZMQ_SUB);
|
||||
|
||||
if (!conflate) {
|
||||
pending_connection_.connect_pipe->set_hwms_boost(bind_options.sndhwm, bind_options.rcvhwm);
|
||||
pending_connection_.bind_pipe->set_hwms_boost(pending_connection_.endpoint.options.sndhwm, pending_connection_.endpoint.options.rcvhwm);
|
||||
pending_connection_.connect_pipe->set_hwms_boost (bind_options.sndhwm,
|
||||
bind_options.rcvhwm);
|
||||
pending_connection_.bind_pipe->set_hwms_boost (
|
||||
pending_connection_.endpoint.options.sndhwm,
|
||||
pending_connection_.endpoint.options.rcvhwm);
|
||||
|
||||
pending_connection_.connect_pipe->set_hwms(pending_connection_.endpoint.options.rcvhwm, pending_connection_.endpoint.options.sndhwm);
|
||||
pending_connection_.bind_pipe->set_hwms(bind_options.rcvhwm, bind_options.sndhwm);
|
||||
}
|
||||
else {
|
||||
pending_connection_.connect_pipe->set_hwms(-1, -1);
|
||||
pending_connection_.bind_pipe->set_hwms(-1, -1);
|
||||
pending_connection_.connect_pipe->set_hwms (
|
||||
pending_connection_.endpoint.options.rcvhwm,
|
||||
pending_connection_.endpoint.options.sndhwm);
|
||||
pending_connection_.bind_pipe->set_hwms (bind_options.rcvhwm,
|
||||
bind_options.sndhwm);
|
||||
} else {
|
||||
pending_connection_.connect_pipe->set_hwms (-1, -1);
|
||||
pending_connection_.bind_pipe->set_hwms (-1, -1);
|
||||
}
|
||||
|
||||
if (side_ == bind_side) {
|
||||
@ -633,22 +630,24 @@ void zmq::ctx_t::connect_inproc_sockets (zmq::socket_base_t *bind_socket_,
|
||||
cmd.type = command_t::bind;
|
||||
cmd.args.bind.pipe = pending_connection_.bind_pipe;
|
||||
bind_socket_->process_command (cmd);
|
||||
bind_socket_->send_inproc_connected (pending_connection_.endpoint.socket);
|
||||
}
|
||||
else
|
||||
pending_connection_.connect_pipe->send_bind (bind_socket_, pending_connection_.bind_pipe, false);
|
||||
bind_socket_->send_inproc_connected (
|
||||
pending_connection_.endpoint.socket);
|
||||
} else
|
||||
pending_connection_.connect_pipe->send_bind (
|
||||
bind_socket_, pending_connection_.bind_pipe, false);
|
||||
|
||||
// When a ctx is terminated all pending inproc connection will be
|
||||
// connected, but the socket will already be closed and the pipe will be
|
||||
// in waiting_for_delimiter state, which means no more writes can be done
|
||||
// and the routing id write fails and causes an assert. Check if the socket
|
||||
// is open before sending.
|
||||
if (pending_connection_.endpoint.options.recv_routing_id &&
|
||||
pending_connection_.endpoint.socket->check_tag ()) {
|
||||
if (pending_connection_.endpoint.options.recv_routing_id
|
||||
&& pending_connection_.endpoint.socket->check_tag ()) {
|
||||
msg_t routing_id;
|
||||
const int rc = routing_id.init_size (bind_options.routing_id_size);
|
||||
errno_assert (rc == 0);
|
||||
memcpy (routing_id.data (), bind_options.routing_id, bind_options.routing_id_size);
|
||||
memcpy (routing_id.data (), bind_options.routing_id,
|
||||
bind_options.routing_id_size);
|
||||
routing_id.set_flags (msg_t::routing_id);
|
||||
const bool written = pending_connection_.bind_pipe->write (&routing_id);
|
||||
zmq_assert (written);
|
||||
@ -660,9 +659,9 @@ void zmq::ctx_t::connect_inproc_sockets (zmq::socket_base_t *bind_socket_,
|
||||
|
||||
int zmq::ctx_t::get_vmci_socket_family ()
|
||||
{
|
||||
zmq::scoped_lock_t locker(vmci_sync);
|
||||
zmq::scoped_lock_t locker (vmci_sync);
|
||||
|
||||
if (vmci_fd == -1) {
|
||||
if (vmci_fd == -1) {
|
||||
vmci_family = VMCISock_GetAFValueFd (&vmci_fd);
|
||||
|
||||
if (vmci_fd != -1) {
|
||||
|
298
src/ctx.hpp
298
src/ctx.hpp
@ -46,200 +46,208 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class object_t;
|
||||
class io_thread_t;
|
||||
class socket_base_t;
|
||||
class reaper_t;
|
||||
class pipe_t;
|
||||
|
||||
class object_t;
|
||||
class io_thread_t;
|
||||
class socket_base_t;
|
||||
class reaper_t;
|
||||
class pipe_t;
|
||||
// Information associated with inproc endpoint. Note that endpoint options
|
||||
// are registered as well so that the peer can access them without a need
|
||||
// for synchronisation, handshaking or similar.
|
||||
struct endpoint_t
|
||||
{
|
||||
socket_base_t *socket;
|
||||
options_t options;
|
||||
};
|
||||
|
||||
// Information associated with inproc endpoint. Note that endpoint options
|
||||
// are registered as well so that the peer can access them without a need
|
||||
// for synchronisation, handshaking or similar.
|
||||
struct endpoint_t
|
||||
{
|
||||
socket_base_t *socket;
|
||||
options_t options;
|
||||
};
|
||||
// Context object encapsulates all the global state associated with
|
||||
// the library.
|
||||
|
||||
// Context object encapsulates all the global state associated with
|
||||
// the library.
|
||||
class ctx_t
|
||||
{
|
||||
public:
|
||||
// Create the context object.
|
||||
ctx_t ();
|
||||
|
||||
class ctx_t
|
||||
{
|
||||
public:
|
||||
// Returns false if object is not a context.
|
||||
bool check_tag ();
|
||||
|
||||
// Create the context object.
|
||||
ctx_t ();
|
||||
// This function is called when user invokes zmq_ctx_term. If there are
|
||||
// no more sockets open it'll cause all the infrastructure to be shut
|
||||
// down. If there are open sockets still, the deallocation happens
|
||||
// after the last one is closed.
|
||||
int terminate ();
|
||||
|
||||
// Returns false if object is not a context.
|
||||
bool check_tag ();
|
||||
// This function starts the terminate process by unblocking any blocking
|
||||
// operations currently in progress and stopping any more socket activity
|
||||
// (except zmq_close).
|
||||
// This function is non-blocking.
|
||||
// terminate must still be called afterwards.
|
||||
// This function is optional, terminate will unblock any current
|
||||
// operations as well.
|
||||
int shutdown ();
|
||||
|
||||
// This function is called when user invokes zmq_ctx_term. If there are
|
||||
// no more sockets open it'll cause all the infrastructure to be shut
|
||||
// down. If there are open sockets still, the deallocation happens
|
||||
// after the last one is closed.
|
||||
int terminate ();
|
||||
// Set and get context properties.
|
||||
int set (int option_, int optval_);
|
||||
int get (int option_);
|
||||
|
||||
// This function starts the terminate process by unblocking any blocking
|
||||
// operations currently in progress and stopping any more socket activity
|
||||
// (except zmq_close).
|
||||
// This function is non-blocking.
|
||||
// terminate must still be called afterwards.
|
||||
// This function is optional, terminate will unblock any current
|
||||
// operations as well.
|
||||
int shutdown();
|
||||
// Create and destroy a socket.
|
||||
zmq::socket_base_t *create_socket (int type_);
|
||||
void destroy_socket (zmq::socket_base_t *socket_);
|
||||
|
||||
// Set and get context properties.
|
||||
int set (int option_, int optval_);
|
||||
int get (int option_);
|
||||
// Start a new thread with proper scheduling parameters.
|
||||
void start_thread (thread_t &thread_, thread_fn *tfn_, void *arg_) const;
|
||||
|
||||
// Create and destroy a socket.
|
||||
zmq::socket_base_t *create_socket (int type_);
|
||||
void destroy_socket (zmq::socket_base_t *socket_);
|
||||
// Send command to the destination thread.
|
||||
void send_command (uint32_t tid_, const command_t &command_);
|
||||
|
||||
// Start a new thread with proper scheduling parameters.
|
||||
void start_thread (thread_t &thread_, thread_fn *tfn_, void *arg_) const;
|
||||
// Returns the I/O thread that is the least busy at the moment.
|
||||
// Affinity specifies which I/O threads are eligible (0 = all).
|
||||
// Returns NULL if no I/O thread is available.
|
||||
zmq::io_thread_t *choose_io_thread (uint64_t affinity_);
|
||||
|
||||
// Send command to the destination thread.
|
||||
void send_command (uint32_t tid_, const command_t &command_);
|
||||
// Returns reaper thread object.
|
||||
zmq::object_t *get_reaper ();
|
||||
|
||||
// Returns the I/O thread that is the least busy at the moment.
|
||||
// Affinity specifies which I/O threads are eligible (0 = all).
|
||||
// Returns NULL if no I/O thread is available.
|
||||
zmq::io_thread_t *choose_io_thread (uint64_t affinity_);
|
||||
|
||||
// Returns reaper thread object.
|
||||
zmq::object_t *get_reaper ();
|
||||
|
||||
// Management of inproc endpoints.
|
||||
int register_endpoint (const char *addr_, const endpoint_t &endpoint_);
|
||||
int unregister_endpoint (const std::string &addr_, socket_base_t *socket_);
|
||||
void unregister_endpoints (zmq::socket_base_t *socket_);
|
||||
endpoint_t find_endpoint (const char *addr_);
|
||||
void pend_connection (const std::string &addr_,
|
||||
const endpoint_t &endpoint_, pipe_t **pipes_);
|
||||
void connect_pending (const char *addr_, zmq::socket_base_t *bind_socket_);
|
||||
// Management of inproc endpoints.
|
||||
int register_endpoint (const char *addr_, const endpoint_t &endpoint_);
|
||||
int unregister_endpoint (const std::string &addr_, socket_base_t *socket_);
|
||||
void unregister_endpoints (zmq::socket_base_t *socket_);
|
||||
endpoint_t find_endpoint (const char *addr_);
|
||||
void pend_connection (const std::string &addr_,
|
||||
const endpoint_t &endpoint_,
|
||||
pipe_t **pipes_);
|
||||
void connect_pending (const char *addr_, zmq::socket_base_t *bind_socket_);
|
||||
|
||||
#ifdef ZMQ_HAVE_VMCI
|
||||
// Return family for the VMCI socket or -1 if it's not available.
|
||||
int get_vmci_socket_family ();
|
||||
// Return family for the VMCI socket or -1 if it's not available.
|
||||
int get_vmci_socket_family ();
|
||||
#endif
|
||||
|
||||
enum {
|
||||
term_tid = 0,
|
||||
reaper_tid = 1
|
||||
};
|
||||
enum
|
||||
{
|
||||
term_tid = 0,
|
||||
reaper_tid = 1
|
||||
};
|
||||
|
||||
~ctx_t ();
|
||||
~ctx_t ();
|
||||
|
||||
bool valid() const;
|
||||
bool valid () const;
|
||||
|
||||
private:
|
||||
bool start();
|
||||
private:
|
||||
bool start ();
|
||||
|
||||
struct pending_connection_t
|
||||
{
|
||||
endpoint_t endpoint;
|
||||
pipe_t* connect_pipe;
|
||||
pipe_t* bind_pipe;
|
||||
};
|
||||
struct pending_connection_t
|
||||
{
|
||||
endpoint_t endpoint;
|
||||
pipe_t *connect_pipe;
|
||||
pipe_t *bind_pipe;
|
||||
};
|
||||
|
||||
// Used to check whether the object is a context.
|
||||
uint32_t tag;
|
||||
// Used to check whether the object is a context.
|
||||
uint32_t tag;
|
||||
|
||||
// Sockets belonging to this context. We need the list so that
|
||||
// we can notify the sockets when zmq_ctx_term() is called.
|
||||
// The sockets will return ETERM then.
|
||||
typedef array_t <socket_base_t> sockets_t;
|
||||
sockets_t sockets;
|
||||
// Sockets belonging to this context. We need the list so that
|
||||
// we can notify the sockets when zmq_ctx_term() is called.
|
||||
// The sockets will return ETERM then.
|
||||
typedef array_t<socket_base_t> sockets_t;
|
||||
sockets_t sockets;
|
||||
|
||||
// List of unused thread slots.
|
||||
typedef std::vector <uint32_t> empty_slots_t;
|
||||
empty_slots_t empty_slots;
|
||||
// List of unused thread slots.
|
||||
typedef std::vector<uint32_t> empty_slots_t;
|
||||
empty_slots_t empty_slots;
|
||||
|
||||
// If true, zmq_init has been called but no socket has been created
|
||||
// yet. Launching of I/O threads is delayed.
|
||||
bool starting;
|
||||
// If true, zmq_init has been called but no socket has been created
|
||||
// yet. Launching of I/O threads is delayed.
|
||||
bool starting;
|
||||
|
||||
// If true, zmq_ctx_term was already called.
|
||||
bool terminating;
|
||||
// If true, zmq_ctx_term was already called.
|
||||
bool terminating;
|
||||
|
||||
// Synchronisation of accesses to global slot-related data:
|
||||
// sockets, empty_slots, terminating. It also synchronises
|
||||
// access to zombie sockets as such (as opposed to slots) and provides
|
||||
// a memory barrier to ensure that all CPU cores see the same data.
|
||||
mutex_t slot_sync;
|
||||
// Synchronisation of accesses to global slot-related data:
|
||||
// sockets, empty_slots, terminating. It also synchronises
|
||||
// access to zombie sockets as such (as opposed to slots) and provides
|
||||
// a memory barrier to ensure that all CPU cores see the same data.
|
||||
mutex_t slot_sync;
|
||||
|
||||
// The reaper thread.
|
||||
zmq::reaper_t *reaper;
|
||||
// The reaper thread.
|
||||
zmq::reaper_t *reaper;
|
||||
|
||||
// I/O threads.
|
||||
typedef std::vector <zmq::io_thread_t*> io_threads_t;
|
||||
io_threads_t io_threads;
|
||||
// I/O threads.
|
||||
typedef std::vector<zmq::io_thread_t *> io_threads_t;
|
||||
io_threads_t io_threads;
|
||||
|
||||
// Array of pointers to mailboxes for both application and I/O threads.
|
||||
uint32_t slot_count;
|
||||
i_mailbox **slots;
|
||||
// Array of pointers to mailboxes for both application and I/O threads.
|
||||
uint32_t slot_count;
|
||||
i_mailbox **slots;
|
||||
|
||||
// Mailbox for zmq_ctx_term thread.
|
||||
mailbox_t term_mailbox;
|
||||
// Mailbox for zmq_ctx_term thread.
|
||||
mailbox_t term_mailbox;
|
||||
|
||||
// List of inproc endpoints within this context.
|
||||
typedef std::map <std::string, endpoint_t> endpoints_t;
|
||||
endpoints_t endpoints;
|
||||
// List of inproc endpoints within this context.
|
||||
typedef std::map<std::string, endpoint_t> endpoints_t;
|
||||
endpoints_t endpoints;
|
||||
|
||||
// List of inproc connection endpoints pending a bind
|
||||
typedef std::multimap <std::string, pending_connection_t> pending_connections_t;
|
||||
pending_connections_t pending_connections;
|
||||
// List of inproc connection endpoints pending a bind
|
||||
typedef std::multimap<std::string, pending_connection_t>
|
||||
pending_connections_t;
|
||||
pending_connections_t pending_connections;
|
||||
|
||||
// Synchronisation of access to the list of inproc endpoints.
|
||||
mutex_t endpoints_sync;
|
||||
// Synchronisation of access to the list of inproc endpoints.
|
||||
mutex_t endpoints_sync;
|
||||
|
||||
// Maximum socket ID.
|
||||
static atomic_counter_t max_socket_id;
|
||||
// Maximum socket ID.
|
||||
static atomic_counter_t max_socket_id;
|
||||
|
||||
// Maximum number of sockets that can be opened at the same time.
|
||||
int max_sockets;
|
||||
// Maximum number of sockets that can be opened at the same time.
|
||||
int max_sockets;
|
||||
|
||||
// Maximum allowed message size
|
||||
int max_msgsz;
|
||||
// Maximum allowed message size
|
||||
int max_msgsz;
|
||||
|
||||
// Number of I/O threads to launch.
|
||||
int io_thread_count;
|
||||
// Number of I/O threads to launch.
|
||||
int io_thread_count;
|
||||
|
||||
// Does context wait (possibly forever) on termination?
|
||||
bool blocky;
|
||||
// Does context wait (possibly forever) on termination?
|
||||
bool blocky;
|
||||
|
||||
// Is IPv6 enabled on this context?
|
||||
bool ipv6;
|
||||
// Is IPv6 enabled on this context?
|
||||
bool ipv6;
|
||||
|
||||
// Thread parameters.
|
||||
int thread_priority;
|
||||
int thread_sched_policy;
|
||||
std::set<int> thread_affinity_cpus;
|
||||
std::string thread_name_prefix;
|
||||
// Thread parameters.
|
||||
int thread_priority;
|
||||
int thread_sched_policy;
|
||||
std::set<int> thread_affinity_cpus;
|
||||
std::string thread_name_prefix;
|
||||
|
||||
// Synchronisation of access to context options.
|
||||
mutex_t opt_sync;
|
||||
// Synchronisation of access to context options.
|
||||
mutex_t opt_sync;
|
||||
|
||||
ctx_t (const ctx_t&);
|
||||
const ctx_t &operator = (const ctx_t&);
|
||||
ctx_t (const ctx_t &);
|
||||
const ctx_t &operator= (const ctx_t &);
|
||||
|
||||
#ifdef HAVE_FORK
|
||||
// the process that created this context. Used to detect forking.
|
||||
pid_t pid;
|
||||
// the process that created this context. Used to detect forking.
|
||||
pid_t pid;
|
||||
#endif
|
||||
enum side { connect_side, bind_side };
|
||||
void connect_inproc_sockets(zmq::socket_base_t *bind_socket_, options_t& bind_options, const pending_connection_t &pending_connection_, side side_);
|
||||
enum side
|
||||
{
|
||||
connect_side,
|
||||
bind_side
|
||||
};
|
||||
void
|
||||
connect_inproc_sockets (zmq::socket_base_t *bind_socket_,
|
||||
options_t &bind_options,
|
||||
const pending_connection_t &pending_connection_,
|
||||
side side_);
|
||||
|
||||
#ifdef ZMQ_HAVE_VMCI
|
||||
int vmci_fd;
|
||||
int vmci_family;
|
||||
mutex_t vmci_sync;
|
||||
int vmci_fd;
|
||||
int vmci_family;
|
||||
mutex_t vmci_sync;
|
||||
#endif
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -80,7 +80,7 @@ int zmq::curve_client_t::next_handshake_command (msg_t *msg_)
|
||||
int zmq::curve_client_t::process_handshake_command (msg_t *msg_)
|
||||
{
|
||||
const unsigned char *msg_data =
|
||||
static_cast <unsigned char *> (msg_->data ());
|
||||
static_cast<unsigned char *> (msg_->data ());
|
||||
const size_t msg_size = msg_->size ();
|
||||
|
||||
int rc = 0;
|
||||
@ -94,8 +94,7 @@ int zmq::curve_client_t::process_handshake_command (msg_t *msg_)
|
||||
rc = process_error (msg_data, msg_size);
|
||||
else {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
rc = -1;
|
||||
}
|
||||
@ -126,8 +125,7 @@ zmq::mechanism_t::status_t zmq::curve_client_t::status () const
|
||||
{
|
||||
if (state == connected)
|
||||
return mechanism_t::ready;
|
||||
else
|
||||
if (state == error_received)
|
||||
else if (state == error_received)
|
||||
return mechanism_t::error;
|
||||
else
|
||||
return mechanism_t::handshaking;
|
||||
@ -141,11 +139,10 @@ int zmq::curve_client_t::produce_hello (msg_t *msg_)
|
||||
rc = tools.produce_hello (msg_->data (), cn_nonce);
|
||||
if (rc == -1) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
|
||||
// TODO this is somewhat inconsistent: we call init_size, but we may
|
||||
// not close msg_; i.e. we assume that msg_ is initialized but empty
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
|
||||
// TODO this is somewhat inconsistent: we call init_size, but we may
|
||||
// not close msg_; i.e. we assume that msg_ is initialized but empty
|
||||
// (if it were non-empty, calling init_size might cause a leak!)
|
||||
|
||||
// msg_->close ();
|
||||
@ -164,8 +161,7 @@ int zmq::curve_client_t::process_welcome (const uint8_t *msg_data,
|
||||
|
||||
if (rc == -1) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
@ -196,8 +192,7 @@ int zmq::curve_client_t::produce_initiate (msg_t *msg_)
|
||||
|
||||
if (-1 == rc) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
|
||||
// TODO see comment in produce_hello
|
||||
return -1;
|
||||
@ -208,8 +203,8 @@ int zmq::curve_client_t::produce_initiate (msg_t *msg_)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int zmq::curve_client_t::process_ready (
|
||||
const uint8_t *msg_data, size_t msg_size)
|
||||
int zmq::curve_client_t::process_ready (const uint8_t *msg_data,
|
||||
size_t msg_size)
|
||||
{
|
||||
if (msg_size < 30) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
@ -221,7 +216,7 @@ int zmq::curve_client_t::process_ready (
|
||||
|
||||
const size_t clen = (msg_size - 14) + crypto_box_BOXZEROBYTES;
|
||||
|
||||
uint8_t ready_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t ready_nonce[crypto_box_NONCEBYTES];
|
||||
uint8_t *ready_plaintext = (uint8_t *) malloc (crypto_box_ZEROBYTES + clen);
|
||||
alloc_assert (ready_plaintext);
|
||||
uint8_t *ready_box =
|
||||
@ -229,21 +224,20 @@ int zmq::curve_client_t::process_ready (
|
||||
alloc_assert (ready_box);
|
||||
|
||||
memset (ready_box, 0, crypto_box_BOXZEROBYTES);
|
||||
memcpy (ready_box + crypto_box_BOXZEROBYTES,
|
||||
msg_data + 14, clen - crypto_box_BOXZEROBYTES);
|
||||
memcpy (ready_box + crypto_box_BOXZEROBYTES, msg_data + 14,
|
||||
clen - crypto_box_BOXZEROBYTES);
|
||||
|
||||
memcpy (ready_nonce, "CurveZMQREADY---", 16);
|
||||
memcpy (ready_nonce + 16, msg_data + 6, 8);
|
||||
cn_peer_nonce = get_uint64(msg_data + 6);
|
||||
cn_peer_nonce = get_uint64 (msg_data + 6);
|
||||
|
||||
int rc = crypto_box_open_afternm (ready_plaintext, ready_box,
|
||||
clen, ready_nonce, cn_precom);
|
||||
int rc = crypto_box_open_afternm (ready_plaintext, ready_box, clen,
|
||||
ready_nonce, cn_precom);
|
||||
free (ready_box);
|
||||
|
||||
if (rc != 0) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
@ -254,8 +248,7 @@ int zmq::curve_client_t::process_ready (
|
||||
|
||||
if (rc == 0)
|
||||
state = connected;
|
||||
else
|
||||
{
|
||||
else {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA);
|
||||
errno = EPROTO;
|
||||
@ -264,8 +257,8 @@ int zmq::curve_client_t::process_ready (
|
||||
return rc;
|
||||
}
|
||||
|
||||
int zmq::curve_client_t::process_error (
|
||||
const uint8_t *msg_data, size_t msg_size)
|
||||
int zmq::curve_client_t::process_error (const uint8_t *msg_data,
|
||||
size_t msg_size)
|
||||
{
|
||||
if (state != expect_welcome && state != expect_ready) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
@ -280,7 +273,7 @@ int zmq::curve_client_t::process_error (
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
const size_t error_reason_len = static_cast <size_t> (msg_data [6]);
|
||||
const size_t error_reason_len = static_cast<size_t> (msg_data[6]);
|
||||
if (error_reason_len > msg_size - 7) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
|
@ -38,48 +38,45 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class msg_t;
|
||||
class session_base_t;
|
||||
|
||||
class msg_t;
|
||||
class session_base_t;
|
||||
class curve_client_t : public curve_mechanism_base_t
|
||||
{
|
||||
public:
|
||||
curve_client_t (session_base_t *session_, const options_t &options_);
|
||||
virtual ~curve_client_t ();
|
||||
|
||||
class curve_client_t : public curve_mechanism_base_t
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
virtual status_t status () const;
|
||||
|
||||
private:
|
||||
enum state_t
|
||||
{
|
||||
public:
|
||||
|
||||
curve_client_t (session_base_t *session_, const options_t &options_);
|
||||
virtual ~curve_client_t ();
|
||||
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
virtual status_t status () const;
|
||||
|
||||
private:
|
||||
|
||||
enum state_t {
|
||||
send_hello,
|
||||
expect_welcome,
|
||||
send_initiate,
|
||||
expect_ready,
|
||||
error_received,
|
||||
connected
|
||||
};
|
||||
|
||||
// Current FSM state
|
||||
state_t state;
|
||||
|
||||
// CURVE protocol tools
|
||||
curve_client_tools_t tools;
|
||||
|
||||
int produce_hello (msg_t *msg_);
|
||||
int process_welcome (const uint8_t *cmd_data, size_t data_size);
|
||||
int produce_initiate (msg_t *msg_);
|
||||
int process_ready (const uint8_t *cmd_data, size_t data_size);
|
||||
int process_error (const uint8_t *cmd_data, size_t data_size);
|
||||
send_hello,
|
||||
expect_welcome,
|
||||
send_initiate,
|
||||
expect_ready,
|
||||
error_received,
|
||||
connected
|
||||
};
|
||||
|
||||
// Current FSM state
|
||||
state_t state;
|
||||
|
||||
// CURVE protocol tools
|
||||
curve_client_tools_t tools;
|
||||
|
||||
int produce_hello (msg_t *msg_);
|
||||
int process_welcome (const uint8_t *cmd_data, size_t data_size);
|
||||
int produce_initiate (msg_t *msg_);
|
||||
int process_ready (const uint8_t *cmd_data, size_t data_size);
|
||||
int process_error (const uint8_t *cmd_data, size_t data_size);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -153,8 +153,7 @@ struct curve_client_tools_t
|
||||
// Create vouch = Box [C',S](C->S')
|
||||
memset (vouch_plaintext, 0, crypto_box_ZEROBYTES);
|
||||
memcpy (vouch_plaintext + crypto_box_ZEROBYTES, cn_public, 32);
|
||||
memcpy (vouch_plaintext + crypto_box_ZEROBYTES + 32, server_key,
|
||||
32);
|
||||
memcpy (vouch_plaintext + crypto_box_ZEROBYTES + 32, server_key, 32);
|
||||
|
||||
memcpy (vouch_nonce, "VOUCH---", 8);
|
||||
randombytes (vouch_nonce + 8, 16);
|
||||
@ -165,8 +164,8 @@ struct curve_client_tools_t
|
||||
return -1;
|
||||
|
||||
uint8_t initiate_nonce[crypto_box_NONCEBYTES];
|
||||
uint8_t *initiate_box = (uint8_t *) malloc (
|
||||
crypto_box_BOXZEROBYTES + 144 + metadata_length);
|
||||
uint8_t *initiate_box =
|
||||
(uint8_t *) malloc (crypto_box_BOXZEROBYTES + 144 + metadata_length);
|
||||
alloc_assert (initiate_box);
|
||||
uint8_t *initiate_plaintext =
|
||||
(uint8_t *) malloc (crypto_box_ZEROBYTES + 128 + metadata_length);
|
||||
@ -174,8 +173,7 @@ struct curve_client_tools_t
|
||||
|
||||
// Create Box [C + vouch + metadata](C'->S')
|
||||
memset (initiate_plaintext, 0, crypto_box_ZEROBYTES);
|
||||
memcpy (initiate_plaintext + crypto_box_ZEROBYTES, public_key,
|
||||
32);
|
||||
memcpy (initiate_plaintext + crypto_box_ZEROBYTES, public_key, 32);
|
||||
memcpy (initiate_plaintext + crypto_box_ZEROBYTES + 32, vouch_nonce + 8,
|
||||
16);
|
||||
memcpy (initiate_plaintext + crypto_box_ZEROBYTES + 48,
|
||||
|
@ -53,7 +53,7 @@ int zmq::curve_mechanism_base_t::encode (msg_t *msg_)
|
||||
{
|
||||
const size_t mlen = crypto_box_ZEROBYTES + 1 + msg_->size ();
|
||||
|
||||
uint8_t message_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t message_nonce[crypto_box_NONCEBYTES];
|
||||
memcpy (message_nonce, encode_nonce_prefix, 16);
|
||||
put_uint64 (message_nonce + 16, cn_nonce);
|
||||
|
||||
@ -63,19 +63,19 @@ int zmq::curve_mechanism_base_t::encode (msg_t *msg_)
|
||||
if (msg_->flags () & msg_t::command)
|
||||
flags |= 0x02;
|
||||
|
||||
uint8_t *message_plaintext = static_cast <uint8_t *> (malloc (mlen));
|
||||
uint8_t *message_plaintext = static_cast<uint8_t *> (malloc (mlen));
|
||||
alloc_assert (message_plaintext);
|
||||
|
||||
memset (message_plaintext, 0, crypto_box_ZEROBYTES);
|
||||
message_plaintext [crypto_box_ZEROBYTES] = flags;
|
||||
memcpy (message_plaintext + crypto_box_ZEROBYTES + 1,
|
||||
msg_->data (), msg_->size ());
|
||||
message_plaintext[crypto_box_ZEROBYTES] = flags;
|
||||
memcpy (message_plaintext + crypto_box_ZEROBYTES + 1, msg_->data (),
|
||||
msg_->size ());
|
||||
|
||||
uint8_t *message_box = static_cast <uint8_t *> (malloc (mlen));
|
||||
uint8_t *message_box = static_cast<uint8_t *> (malloc (mlen));
|
||||
alloc_assert (message_box);
|
||||
|
||||
int rc = crypto_box_afternm (message_box, message_plaintext,
|
||||
mlen, message_nonce, cn_precom);
|
||||
int rc = crypto_box_afternm (message_box, message_plaintext, mlen,
|
||||
message_nonce, cn_precom);
|
||||
zmq_assert (rc == 0);
|
||||
|
||||
rc = msg_->close ();
|
||||
@ -84,7 +84,7 @@ int zmq::curve_mechanism_base_t::encode (msg_t *msg_)
|
||||
rc = msg_->init_size (16 + mlen - crypto_box_BOXZEROBYTES);
|
||||
zmq_assert (rc == 0);
|
||||
|
||||
uint8_t *message = static_cast <uint8_t *> (msg_->data ());
|
||||
uint8_t *message = static_cast<uint8_t *> (msg_->data ());
|
||||
|
||||
memcpy (message, "\x07MESSAGE", 8);
|
||||
memcpy (message + 8, message_nonce + 16, 8);
|
||||
@ -103,11 +103,11 @@ int zmq::curve_mechanism_base_t::decode (msg_t *msg_)
|
||||
{
|
||||
int rc = check_basic_command_structure (msg_);
|
||||
if (rc == -1)
|
||||
return -1;
|
||||
return -1;
|
||||
|
||||
const size_t size = msg_->size ();
|
||||
const uint8_t *message = static_cast <uint8_t *> (msg_->data ());
|
||||
|
||||
const uint8_t *message = static_cast<uint8_t *> (msg_->data ());
|
||||
|
||||
if (size < 8 || memcmp (message, "\x07MESSAGE", 8)) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
@ -123,10 +123,10 @@ int zmq::curve_mechanism_base_t::decode (msg_t *msg_)
|
||||
return -1;
|
||||
}
|
||||
|
||||
uint8_t message_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t message_nonce[crypto_box_NONCEBYTES];
|
||||
memcpy (message_nonce, decode_nonce_prefix, 16);
|
||||
memcpy (message_nonce + 16, message + 8, 8);
|
||||
uint64_t nonce = get_uint64(message + 8);
|
||||
uint64_t nonce = get_uint64 (message + 8);
|
||||
if (nonce <= cn_peer_nonce) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE);
|
||||
@ -137,15 +137,15 @@ int zmq::curve_mechanism_base_t::decode (msg_t *msg_)
|
||||
|
||||
const size_t clen = crypto_box_BOXZEROBYTES + msg_->size () - 16;
|
||||
|
||||
uint8_t *message_plaintext = static_cast <uint8_t *> (malloc (clen));
|
||||
uint8_t *message_plaintext = static_cast<uint8_t *> (malloc (clen));
|
||||
alloc_assert (message_plaintext);
|
||||
|
||||
uint8_t *message_box = static_cast <uint8_t *> (malloc (clen));
|
||||
uint8_t *message_box = static_cast<uint8_t *> (malloc (clen));
|
||||
alloc_assert (message_box);
|
||||
|
||||
memset (message_box, 0, crypto_box_BOXZEROBYTES);
|
||||
memcpy (message_box + crypto_box_BOXZEROBYTES,
|
||||
message + 16, msg_->size () - 16);
|
||||
memcpy (message_box + crypto_box_BOXZEROBYTES, message + 16,
|
||||
msg_->size () - 16);
|
||||
|
||||
rc = crypto_box_open_afternm (message_plaintext, message_box, clen,
|
||||
message_nonce, cn_precom);
|
||||
@ -156,17 +156,15 @@ int zmq::curve_mechanism_base_t::decode (msg_t *msg_)
|
||||
rc = msg_->init_size (clen - 1 - crypto_box_ZEROBYTES);
|
||||
zmq_assert (rc == 0);
|
||||
|
||||
const uint8_t flags = message_plaintext [crypto_box_ZEROBYTES];
|
||||
const uint8_t flags = message_plaintext[crypto_box_ZEROBYTES];
|
||||
if (flags & 0x01)
|
||||
msg_->set_flags (msg_t::more);
|
||||
if (flags & 0x02)
|
||||
msg_->set_flags (msg_t::command);
|
||||
|
||||
memcpy (msg_->data (),
|
||||
message_plaintext + crypto_box_ZEROBYTES + 1,
|
||||
memcpy (msg_->data (), message_plaintext + crypto_box_ZEROBYTES + 1,
|
||||
msg_->size ());
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// CURVE I : connection key used for MESSAGE is wrong
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
|
@ -70,7 +70,7 @@ class curve_mechanism_base_t : public virtual mechanism_base_t
|
||||
uint64_t cn_peer_nonce;
|
||||
|
||||
// Intermediary buffer used to speed up boxing and unboxing.
|
||||
uint8_t cn_precom [crypto_box_BEFORENMBYTES];
|
||||
uint8_t cn_precom[crypto_box_BEFORENMBYTES];
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -100,10 +100,10 @@ int zmq::curve_server_t::process_handshake_command (msg_t *msg_)
|
||||
rc = process_initiate (msg_);
|
||||
break;
|
||||
default:
|
||||
// TODO I think this is not a case reachable with a misbehaving
|
||||
// client. It is not an "invalid handshake command", but would be
|
||||
// trying to process a handshake command in an invalid state,
|
||||
// which is purely under control of this peer.
|
||||
// TODO I think this is not a case reachable with a misbehaving
|
||||
// client. It is not an "invalid handshake command", but would be
|
||||
// trying to process a handshake command in an invalid state,
|
||||
// which is purely under control of this peer.
|
||||
// Therefore, it should be changed to zmq_assert (false);
|
||||
|
||||
// CURVE I: invalid handshake command
|
||||
@ -138,10 +138,10 @@ int zmq::curve_server_t::process_hello (msg_t *msg_)
|
||||
{
|
||||
int rc = check_basic_command_structure (msg_);
|
||||
if (rc == -1)
|
||||
return -1;
|
||||
return -1;
|
||||
|
||||
const size_t size = msg_->size ();
|
||||
const uint8_t * const hello = static_cast <uint8_t *> (msg_->data ());
|
||||
const uint8_t *const hello = static_cast<uint8_t *> (msg_->data ());
|
||||
|
||||
if (size < 6 || memcmp (hello, "\x05HELLO", 6)) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
@ -152,18 +152,20 @@ int zmq::curve_server_t::process_hello (msg_t *msg_)
|
||||
|
||||
if (size != 200) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
|
||||
const uint8_t major = hello [6];
|
||||
const uint8_t minor = hello [7];
|
||||
const uint8_t major = hello[6];
|
||||
const uint8_t minor = hello[7];
|
||||
|
||||
if (major != 1 || minor != 0) {
|
||||
// CURVE I: client HELLO has unknown version number
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
@ -171,13 +173,13 @@ int zmq::curve_server_t::process_hello (msg_t *msg_)
|
||||
// Save client's short-term public key (C')
|
||||
memcpy (cn_client, hello + 80, 32);
|
||||
|
||||
uint8_t hello_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t hello_plaintext [crypto_box_ZEROBYTES + 64];
|
||||
uint8_t hello_box [crypto_box_BOXZEROBYTES + 80];
|
||||
uint8_t hello_nonce[crypto_box_NONCEBYTES];
|
||||
uint8_t hello_plaintext[crypto_box_ZEROBYTES + 64];
|
||||
uint8_t hello_box[crypto_box_BOXZEROBYTES + 80];
|
||||
|
||||
memcpy (hello_nonce, "CurveZMQHELLO---", 16);
|
||||
memcpy (hello_nonce + 16, hello + 112, 8);
|
||||
cn_peer_nonce = get_uint64(hello + 112);
|
||||
cn_peer_nonce = get_uint64 (hello + 112);
|
||||
|
||||
memset (hello_box, 0, crypto_box_BOXZEROBYTES);
|
||||
memcpy (hello_box + crypto_box_BOXZEROBYTES, hello + 120, 80);
|
||||
@ -199,9 +201,9 @@ int zmq::curve_server_t::process_hello (msg_t *msg_)
|
||||
|
||||
int zmq::curve_server_t::produce_welcome (msg_t *msg_)
|
||||
{
|
||||
uint8_t cookie_nonce [crypto_secretbox_NONCEBYTES];
|
||||
uint8_t cookie_plaintext [crypto_secretbox_ZEROBYTES + 64];
|
||||
uint8_t cookie_ciphertext [crypto_secretbox_BOXZEROBYTES + 80];
|
||||
uint8_t cookie_nonce[crypto_secretbox_NONCEBYTES];
|
||||
uint8_t cookie_plaintext[crypto_secretbox_ZEROBYTES + 64];
|
||||
uint8_t cookie_ciphertext[crypto_secretbox_BOXZEROBYTES + 80];
|
||||
|
||||
// Create full nonce for encryption
|
||||
// 8-byte prefix plus 16-byte random nonce
|
||||
@ -210,23 +212,21 @@ int zmq::curve_server_t::produce_welcome (msg_t *msg_)
|
||||
|
||||
// Generate cookie = Box [C' + s'](t)
|
||||
memset (cookie_plaintext, 0, crypto_secretbox_ZEROBYTES);
|
||||
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES,
|
||||
cn_client, 32);
|
||||
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32,
|
||||
cn_secret, 32);
|
||||
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES, cn_client, 32);
|
||||
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32, cn_secret, 32);
|
||||
|
||||
// Generate fresh cookie key
|
||||
randombytes (cookie_key, crypto_secretbox_KEYBYTES);
|
||||
|
||||
// Encrypt using symmetric cookie key
|
||||
int rc = crypto_secretbox (cookie_ciphertext, cookie_plaintext,
|
||||
sizeof cookie_plaintext,
|
||||
cookie_nonce, cookie_key);
|
||||
int rc =
|
||||
crypto_secretbox (cookie_ciphertext, cookie_plaintext,
|
||||
sizeof cookie_plaintext, cookie_nonce, cookie_key);
|
||||
zmq_assert (rc == 0);
|
||||
|
||||
uint8_t welcome_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t welcome_plaintext [crypto_box_ZEROBYTES + 128];
|
||||
uint8_t welcome_ciphertext [crypto_box_BOXZEROBYTES + 144];
|
||||
uint8_t welcome_nonce[crypto_box_NONCEBYTES];
|
||||
uint8_t welcome_plaintext[crypto_box_ZEROBYTES + 128];
|
||||
uint8_t welcome_ciphertext[crypto_box_BOXZEROBYTES + 144];
|
||||
|
||||
// Create full nonce for encryption
|
||||
// 8-byte prefix plus 16-byte random nonce
|
||||
@ -236,8 +236,8 @@ int zmq::curve_server_t::produce_welcome (msg_t *msg_)
|
||||
// Create 144-byte Box [S' + cookie](S->C')
|
||||
memset (welcome_plaintext, 0, crypto_box_ZEROBYTES);
|
||||
memcpy (welcome_plaintext + crypto_box_ZEROBYTES, cn_public, 32);
|
||||
memcpy (welcome_plaintext + crypto_box_ZEROBYTES + 32,
|
||||
cookie_nonce + 8, 16);
|
||||
memcpy (welcome_plaintext + crypto_box_ZEROBYTES + 32, cookie_nonce + 8,
|
||||
16);
|
||||
memcpy (welcome_plaintext + crypto_box_ZEROBYTES + 48,
|
||||
cookie_ciphertext + crypto_secretbox_BOXZEROBYTES, 80);
|
||||
|
||||
@ -257,7 +257,7 @@ int zmq::curve_server_t::produce_welcome (msg_t *msg_)
|
||||
rc = msg_->init_size (168);
|
||||
errno_assert (rc == 0);
|
||||
|
||||
uint8_t * const welcome = static_cast <uint8_t *> (msg_->data ());
|
||||
uint8_t *const welcome = static_cast<uint8_t *> (msg_->data ());
|
||||
memcpy (welcome, "\x07WELCOME", 8);
|
||||
memcpy (welcome + 8, welcome_nonce + 8, 16);
|
||||
memcpy (welcome + 24, welcome_ciphertext + crypto_box_BOXZEROBYTES, 144);
|
||||
@ -269,10 +269,10 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
{
|
||||
int rc = check_basic_command_structure (msg_);
|
||||
if (rc == -1)
|
||||
return -1;
|
||||
return -1;
|
||||
|
||||
const size_t size = msg_->size ();
|
||||
const uint8_t *initiate = static_cast <uint8_t *> (msg_->data ());
|
||||
const uint8_t *initiate = static_cast<uint8_t *> (msg_->data ());
|
||||
|
||||
if (size < 9 || memcmp (initiate, "\x08INITIATE", 9)) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
@ -289,9 +289,9 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
return -1;
|
||||
}
|
||||
|
||||
uint8_t cookie_nonce [crypto_secretbox_NONCEBYTES];
|
||||
uint8_t cookie_plaintext [crypto_secretbox_ZEROBYTES + 64];
|
||||
uint8_t cookie_box [crypto_secretbox_BOXZEROBYTES + 80];
|
||||
uint8_t cookie_nonce[crypto_secretbox_NONCEBYTES];
|
||||
uint8_t cookie_plaintext[crypto_secretbox_ZEROBYTES + 64];
|
||||
uint8_t cookie_box[crypto_secretbox_BOXZEROBYTES + 80];
|
||||
|
||||
// Open Box [C' + s'](t)
|
||||
memset (cookie_box, 0, crypto_secretbox_BOXZEROBYTES);
|
||||
@ -312,7 +312,8 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
|
||||
// Check cookie plain text is as expected [C' + s']
|
||||
if (memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES, cn_client, 32)
|
||||
|| memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32, cn_secret, 32)) {
|
||||
|| memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32,
|
||||
cn_secret, 32)) {
|
||||
// TODO this case is very hard to test, as it would require a modified
|
||||
// client that knows the server's secret temporary cookie key
|
||||
|
||||
@ -325,21 +326,21 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
|
||||
const size_t clen = (size - 113) + crypto_box_BOXZEROBYTES;
|
||||
|
||||
uint8_t initiate_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t initiate_plaintext [crypto_box_ZEROBYTES + 128 + 256];
|
||||
uint8_t initiate_box [crypto_box_BOXZEROBYTES + 144 + 256];
|
||||
uint8_t initiate_nonce[crypto_box_NONCEBYTES];
|
||||
uint8_t initiate_plaintext[crypto_box_ZEROBYTES + 128 + 256];
|
||||
uint8_t initiate_box[crypto_box_BOXZEROBYTES + 144 + 256];
|
||||
|
||||
// Open Box [C + vouch + metadata](C'->S')
|
||||
memset (initiate_box, 0, crypto_box_BOXZEROBYTES);
|
||||
memcpy (initiate_box + crypto_box_BOXZEROBYTES,
|
||||
initiate + 113, clen - crypto_box_BOXZEROBYTES);
|
||||
memcpy (initiate_box + crypto_box_BOXZEROBYTES, initiate + 113,
|
||||
clen - crypto_box_BOXZEROBYTES);
|
||||
|
||||
memcpy (initiate_nonce, "CurveZMQINITIATE", 16);
|
||||
memcpy (initiate_nonce + 16, initiate + 105, 8);
|
||||
cn_peer_nonce = get_uint64(initiate + 105);
|
||||
cn_peer_nonce = get_uint64 (initiate + 105);
|
||||
|
||||
rc = crypto_box_open (initiate_plaintext, initiate_box,
|
||||
clen, initiate_nonce, cn_client, cn_secret);
|
||||
rc = crypto_box_open (initiate_plaintext, initiate_box, clen,
|
||||
initiate_nonce, cn_client, cn_secret);
|
||||
if (rc != 0) {
|
||||
// CURVE I: cannot open client INITIATE
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
@ -350,9 +351,9 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
|
||||
const uint8_t *client_key = initiate_plaintext + crypto_box_ZEROBYTES;
|
||||
|
||||
uint8_t vouch_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t vouch_plaintext [crypto_box_ZEROBYTES + 64];
|
||||
uint8_t vouch_box [crypto_box_BOXZEROBYTES + 80];
|
||||
uint8_t vouch_nonce[crypto_box_NONCEBYTES];
|
||||
uint8_t vouch_plaintext[crypto_box_ZEROBYTES + 64];
|
||||
uint8_t vouch_box[crypto_box_BOXZEROBYTES + 80];
|
||||
|
||||
// Open Box Box [C',S](C->S') and check contents
|
||||
memset (vouch_box, 0, crypto_box_BOXZEROBYTES);
|
||||
@ -360,11 +361,10 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
initiate_plaintext + crypto_box_ZEROBYTES + 48, 80);
|
||||
|
||||
memcpy (vouch_nonce, "VOUCH---", 8);
|
||||
memcpy (vouch_nonce + 8,
|
||||
initiate_plaintext + crypto_box_ZEROBYTES + 32, 16);
|
||||
memcpy (vouch_nonce + 8, initiate_plaintext + crypto_box_ZEROBYTES + 32,
|
||||
16);
|
||||
|
||||
rc = crypto_box_open (vouch_plaintext, vouch_box,
|
||||
sizeof vouch_box,
|
||||
rc = crypto_box_open (vouch_plaintext, vouch_box, sizeof vouch_box,
|
||||
vouch_nonce, client_key, cn_secret);
|
||||
if (rc != 0) {
|
||||
// CURVE I: cannot open client INITIATE vouch
|
||||
@ -399,9 +399,9 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
send_zap_request (client_key);
|
||||
state = waiting_for_zap_reply;
|
||||
|
||||
// TODO actually, it is quite unlikely that we can read the ZAP
|
||||
// TODO actually, it is quite unlikely that we can read the ZAP
|
||||
// reply already, but removing this has some strange side-effect
|
||||
// (probably because the pipe's in_active flag is true until a read
|
||||
// (probably because the pipe's in_active flag is true until a read
|
||||
// is attempted)
|
||||
rc = receive_and_process_zap_reply ();
|
||||
if (rc == -1)
|
||||
@ -427,7 +427,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
|
||||
int zmq::curve_server_t::produce_ready (msg_t *msg_)
|
||||
{
|
||||
const size_t metadata_length = basic_properties_len ();
|
||||
uint8_t ready_nonce [crypto_box_NONCEBYTES];
|
||||
uint8_t ready_nonce[crypto_box_NONCEBYTES];
|
||||
|
||||
uint8_t *ready_plaintext =
|
||||
(uint8_t *) malloc (crypto_box_ZEROBYTES + metadata_length);
|
||||
@ -456,7 +456,7 @@ int zmq::curve_server_t::produce_ready (msg_t *msg_)
|
||||
rc = msg_->init_size (14 + mlen - crypto_box_BOXZEROBYTES);
|
||||
errno_assert (rc == 0);
|
||||
|
||||
uint8_t *ready = static_cast <uint8_t *> (msg_->data ());
|
||||
uint8_t *ready = static_cast<uint8_t *> (msg_->data ());
|
||||
|
||||
memcpy (ready, "\x05READY", 6);
|
||||
// Short nonce, prefixed by "CurveZMQREADY---"
|
||||
@ -477,9 +477,9 @@ int zmq::curve_server_t::produce_error (msg_t *msg_) const
|
||||
zmq_assert (status_code.length () == 3);
|
||||
const int rc = msg_->init_size (6 + 1 + expected_status_code_length);
|
||||
zmq_assert (rc == 0);
|
||||
char *msg_data = static_cast <char *> (msg_->data ());
|
||||
char *msg_data = static_cast<char *> (msg_->data ());
|
||||
memcpy (msg_data, "\5ERROR", 6);
|
||||
msg_data [6] = expected_status_code_length;
|
||||
msg_data[6] = expected_status_code_length;
|
||||
memcpy (msg_data + 7, status_code.c_str (), expected_status_code_length);
|
||||
return 0;
|
||||
}
|
||||
|
@ -39,54 +39,51 @@
|
||||
namespace zmq
|
||||
{
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning (push)
|
||||
#pragma warning (disable: 4250)
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable : 4250)
|
||||
#endif
|
||||
class curve_server_t : public zap_client_common_handshake_t,
|
||||
public curve_mechanism_base_t
|
||||
{
|
||||
public:
|
||||
class curve_server_t : public zap_client_common_handshake_t,
|
||||
public curve_mechanism_base_t
|
||||
{
|
||||
public:
|
||||
curve_server_t (session_base_t *session_,
|
||||
const std::string &peer_address_,
|
||||
const options_t &options_);
|
||||
virtual ~curve_server_t ();
|
||||
|
||||
curve_server_t (session_base_t *session_,
|
||||
const std::string &peer_address_,
|
||||
const options_t &options_);
|
||||
virtual ~curve_server_t ();
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
private:
|
||||
// Our secret key (s)
|
||||
uint8_t secret_key[crypto_box_SECRETKEYBYTES];
|
||||
|
||||
private:
|
||||
// Our short-term public key (S')
|
||||
uint8_t cn_public[crypto_box_PUBLICKEYBYTES];
|
||||
|
||||
// Our secret key (s)
|
||||
uint8_t secret_key [crypto_box_SECRETKEYBYTES];
|
||||
// Our short-term secret key (s')
|
||||
uint8_t cn_secret[crypto_box_SECRETKEYBYTES];
|
||||
|
||||
// Our short-term public key (S')
|
||||
uint8_t cn_public [crypto_box_PUBLICKEYBYTES];
|
||||
// Client's short-term public key (C')
|
||||
uint8_t cn_client[crypto_box_PUBLICKEYBYTES];
|
||||
|
||||
// Our short-term secret key (s')
|
||||
uint8_t cn_secret [crypto_box_SECRETKEYBYTES];
|
||||
// Key used to produce cookie
|
||||
uint8_t cookie_key[crypto_secretbox_KEYBYTES];
|
||||
|
||||
// Client's short-term public key (C')
|
||||
uint8_t cn_client [crypto_box_PUBLICKEYBYTES];
|
||||
int process_hello (msg_t *msg_);
|
||||
int produce_welcome (msg_t *msg_);
|
||||
int process_initiate (msg_t *msg_);
|
||||
int produce_ready (msg_t *msg_);
|
||||
int produce_error (msg_t *msg_) const;
|
||||
|
||||
// Key used to produce cookie
|
||||
uint8_t cookie_key [crypto_secretbox_KEYBYTES];
|
||||
|
||||
int process_hello (msg_t *msg_);
|
||||
int produce_welcome (msg_t *msg_);
|
||||
int process_initiate (msg_t *msg_);
|
||||
int produce_ready (msg_t *msg_);
|
||||
int produce_error (msg_t *msg_) const;
|
||||
|
||||
void send_zap_request (const uint8_t *key);
|
||||
};
|
||||
void send_zap_request (const uint8_t *key);
|
||||
};
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning (pop)
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
157
src/dbuffer.hpp
157
src/dbuffer.hpp
@ -39,106 +39,103 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// dbuffer is a single-producer single-consumer double-buffer
|
||||
// implementation.
|
||||
//
|
||||
// The producer writes to a back buffer and then tries to swap
|
||||
// pointers between the back and front buffers. If it fails,
|
||||
// due to the consumer reading from the front buffer, it just
|
||||
// gives up, which is ok since writes are many and redundant.
|
||||
//
|
||||
// The reader simply reads from the front buffer.
|
||||
//
|
||||
// has_msg keeps track of whether there has been a not yet read
|
||||
// value written, it is used by ypipe_conflate to mimic ypipe
|
||||
// functionality regarding a reader being asleep
|
||||
|
||||
// dbuffer is a single-producer single-consumer double-buffer
|
||||
// implementation.
|
||||
//
|
||||
// The producer writes to a back buffer and then tries to swap
|
||||
// pointers between the back and front buffers. If it fails,
|
||||
// due to the consumer reading from the front buffer, it just
|
||||
// gives up, which is ok since writes are many and redundant.
|
||||
//
|
||||
// The reader simply reads from the front buffer.
//
// has_msg keeps track of whether there has been a not yet read
// value written, it is used by ypipe_conflate to mimic ypipe
// functionality regarding a reader being asleep

template <typename T> class dbuffer_t;

template <> class dbuffer_t<msg_t>
{
  public:
    inline dbuffer_t () :
        back (&storage[0]),
        front (&storage[1]),
        has_msg (false)
    {
        back->init ();
        front->init ();
    }

    inline ~dbuffer_t ()
    {
        back->close ();
        front->close ();
    }

    inline void write (const msg_t &value_)
    {
        msg_t &xvalue = const_cast<msg_t &> (value_);

        zmq_assert (xvalue.check ());
        back->move (xvalue); // cannot just overwrite, might leak

        zmq_assert (back->check ());

        if (sync.try_lock ()) {
            std::swap (back, front);
            has_msg = true;

            sync.unlock ();
        }
    }

    inline bool read (msg_t *value_)
    {
        if (!value_)
            return false;

        {
            scoped_lock_t lock (sync);
            if (!has_msg)
                return false;

            zmq_assert (front->check ());

            *value_ = *front;
            front->init (); // avoid double free

            has_msg = false;
            return true;
        }
    }

    inline bool check_read ()
    {
        scoped_lock_t lock (sync);

        return has_msg;
    }

    inline bool probe (bool (*fn) (const msg_t &))
    {
        scoped_lock_t lock (sync);
        return (*fn) (*front);
    }

  private:
    msg_t storage[2];
    msg_t *back, *front;

    mutex_t sync;
    bool has_msg;

    // Disable copying of dbuffer.
    dbuffer_t (const dbuffer_t &);
    const dbuffer_t &operator= (const dbuffer_t &);
};
}

#endif
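The class above is the storage behind conflating pipes: the writer always moves the newest message into the back buffer and swaps it to the front when it can take the lock, so the reader only ever sees the latest value. A minimal stand-alone sketch of the same last-value-wins idea, using std::mutex and an int payload rather than the libzmq msg_t and mutex_t types, and assuming a single writer and a single reader:

    #include <mutex>
    #include <utility>

    // Last-value-wins double buffer: write() never blocks on a slow reader,
    // read() only ever returns the most recently published value.
    class conflating_box
    {
      public:
        void write (int value)
        {
            back = value;
            if (sync.try_lock ()) { // swap only if the reader is not inside read()
                std::swap (back, front);
                has_value = true;
                sync.unlock ();
            }
        }

        bool read (int *out)
        {
            std::lock_guard<std::mutex> lock (sync);
            if (!has_value)
                return false;
            *out = front;
            has_value = false; // consumed; wait for the next swap
            return true;
        }

      private:
        int back = 0, front = 0;
        bool has_value = false;
        std::mutex sync;
    };

If the writer cannot take the lock, the fresh value simply stays in the back buffer and is published by a later write, which is exactly the conflation behaviour the real class provides to ypipe_conflate.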
@ -67,12 +67,14 @@ void zmq::dealer_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
    lb.attach (pipe_);
}

int zmq::dealer_t::xsetsockopt (int option_,
                                const void *optval_,
                                size_t optvallen_)
{
    bool is_int = (optvallen_ == sizeof (int));
    int value = 0;
    if (is_int)
        memcpy (&value, optval_, sizeof (int));

    switch (option_) {
        case ZMQ_PROBE_ROUTER:
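The hunk only re-wraps the xsetsockopt signature; the option it parses here is the same ZMQ_PROBE_ROUTER that applications set through the public API. A small usage sketch (standard libzmq calls, endpoint chosen for illustration):

    #include <zmq.h>
    #include <assert.h>

    int main (void)
    {
        void *ctx = zmq_ctx_new ();
        void *dealer = zmq_socket (ctx, ZMQ_DEALER);

        // Ask the DEALER to send an empty probe message to every ROUTER
        // peer it connects to; this value ends up in dealer_t::xsetsockopt.
        int probe = 1;
        int rc = zmq_setsockopt (dealer, ZMQ_PROBE_ROUTER, &probe, sizeof probe);
        assert (rc == 0);

        rc = zmq_connect (dealer, "tcp://127.0.0.1:5555");
        assert (rc == 0);

        zmq_close (dealer);
        zmq_ctx_term (ctx);
        return 0;
    }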
@ -37,53 +37,47 @@

namespace zmq
{
class ctx_t;
class msg_t;
class pipe_t;
class io_thread_t;
class socket_base_t;

class dealer_t : public socket_base_t
{
  public:
    dealer_t (zmq::ctx_t *parent_, uint32_t tid_, int sid);
    ~dealer_t ();

  protected:
    // Overrides of functions from socket_base_t.
    void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_);
    int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
    int xsend (zmq::msg_t *msg_);
    int xrecv (zmq::msg_t *msg_);
    bool xhas_in ();
    bool xhas_out ();
    const blob_t &get_credential () const;
    void xread_activated (zmq::pipe_t *pipe_);
    void xwrite_activated (zmq::pipe_t *pipe_);
    void xpipe_terminated (zmq::pipe_t *pipe_);

    // Send and recv - knowing which pipe was used.
    int sendpipe (zmq::msg_t *msg_, zmq::pipe_t **pipe_);
    int recvpipe (zmq::msg_t *msg_, zmq::pipe_t **pipe_);

  private:
    // Messages are fair-queued from inbound pipes. And load-balanced to
    // the outbound pipes.
    fq_t fq;
    lb_t lb;

    // if true, send an empty message to every connected router peer
    bool probe_router;

    dealer_t (const dealer_t &);
    const dealer_t &operator= (const dealer_t &);
};
}

#endif
257 src/decoder.hpp
@ -42,157 +42,152 @@

namespace zmq
{
// Helper base class for decoders that know the amount of data to read
// in advance at any moment. Knowing the amount in advance is a property
// of the protocol used. 0MQ framing protocol is based size-prefixed
// paradigm, which qualifies it to be parsed by this class.
// On the other hand, XML-based transports (like XMPP or SOAP) don't allow
// for knowing the size of data to read in advance and should use different
// decoding algorithms.
//
// This class implements the state machine that parses the incoming buffer.
// Derived class should implement individual state machine actions.
//
// Buffer management is done by an allocator policy.
template <typename T, typename A = c_single_allocator>
class decoder_base_t : public i_decoder
{
  public:
    explicit decoder_base_t (A *allocator_) :
        next (NULL),
        read_pos (NULL),
        to_read (0),
        allocator (allocator_)
    {
        buf = allocator->allocate ();
    }

    // The destructor doesn't have to be virtual. It is made virtual
    // just to keep ICC and code checking tools from complaining.
    virtual ~decoder_base_t () { allocator->deallocate (); }

    // Returns a buffer to be filled with binary data.
    void get_buffer (unsigned char **data_, std::size_t *size_)
    {
        buf = allocator->allocate ();

        // If we are expected to read large message, we'll opt for zero-
        // copy, i.e. we'll ask caller to fill the data directly to the
        // message. Note that subsequent read(s) are non-blocking, thus
        // each single read reads at most SO_RCVBUF bytes at once not
        // depending on how large is the chunk returned from here.
        // As a consequence, large messages being received won't block
        // other engines running in the same I/O thread for excessive
        // amounts of time.
        if (to_read >= allocator->size ()) {
            *data_ = read_pos;
            *size_ = to_read;
            return;
        }

        *data_ = buf;
        *size_ = allocator->size ();
    }

    // Processes the data in the buffer previously allocated using
    // get_buffer function. size_ argument specifies number of bytes
    // actually filled into the buffer. Function returns 1 when the
    // whole message was decoded or 0 when more data is required.
    // On error, -1 is returned and errno set accordingly.
    // Number of bytes processed is returned in bytes_used_.
    int decode (const unsigned char *data_,
                std::size_t size_,
                std::size_t &bytes_used_)
    {
        bytes_used_ = 0;

        // In case of zero-copy simply adjust the pointers, no copying
        // is required. Also, run the state machine in case all the data
        // were processed.
        if (data_ == read_pos) {
            zmq_assert (size_ <= to_read);
            read_pos += size_;
            to_read -= size_;
            bytes_used_ = size_;

            while (!to_read) {
                const int rc =
                  (static_cast<T *> (this)->*next) (data_ + bytes_used_);
                if (rc != 0)
                    return rc;
            }
            return 0;
        }

        while (bytes_used_ < size_) {
            // Copy the data from buffer to the message.
            const size_t to_copy = std::min (to_read, size_ - bytes_used_);
            // Only copy when destination address is different from the
            // current address in the buffer.
            if (read_pos != data_ + bytes_used_) {
                memcpy (read_pos, data_ + bytes_used_, to_copy);
            }

            read_pos += to_copy;
            to_read -= to_copy;
            bytes_used_ += to_copy;
            // Try to get more space in the message to fill in.
            // If none is available, return.
            while (to_read == 0) {
                // pass current address in the buffer
                const int rc =
                  (static_cast<T *> (this)->*next) (data_ + bytes_used_);
                if (rc != 0)
                    return rc;
            }
        }

        return 0;
    }

    virtual void resize_buffer (std::size_t new_size)
    {
        allocator->resize (new_size);
    }

  protected:
    // Prototype of state machine action. Action should return false if
    // it is unable to push the data to the system.
    typedef int (T::*step_t) (unsigned char const *);

    // This function should be called from derived class to read data
    // from the buffer and schedule next state machine action.
    void next_step (void *read_pos_, std::size_t to_read_, step_t next_)
    {
        read_pos = static_cast<unsigned char *> (read_pos_);
        to_read = to_read_;
        next = next_;
    }

  private:
    // Next step. If set to NULL, it means that associated data stream
    // is dead. Note that there can be still data in the process in such
    // case.
    step_t next;

    // Where to store the read data.
    unsigned char *read_pos;

    // How much data to read before taking next step.
    std::size_t to_read;

    // The duffer for data to decode.
    A *allocator;
    unsigned char *buf;

    decoder_base_t (const decoder_base_t &);
    const decoder_base_t &operator= (const decoder_base_t &);
};
}

#endif
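decoder_base_t drives its derived class through a table-free state machine: each action calls next_step () with a destination pointer, a byte count, and the member function to run once that many bytes have arrived. A compressed, self-contained sketch of the same CRTP pattern for a toy one-byte-length-prefixed framing (this is not the real i_decoder interface, just the mechanism):

    #include <cstddef>
    #include <cstring>
    #include <cstdio>

    // The base copies bytes into read_pos until to_read drops to zero,
    // then invokes the scheduled member function of Derived.
    template <typename Derived> class toy_decoder_base
    {
      protected:
        typedef int (Derived::*step_t) ();

        void next_step (void *dst, std::size_t n, step_t next)
        {
            read_pos = static_cast<unsigned char *> (dst);
            to_read = n;
            next_fn = next;
        }

      public:
        // Feed raw bytes; returns 1 once a whole frame has been decoded.
        int decode (const unsigned char *data, std::size_t size)
        {
            std::size_t used = 0;
            while (used < size) {
                std::size_t n = to_read < size - used ? to_read : size - used;
                memcpy (read_pos, data + used, n);
                read_pos += n;
                to_read -= n;
                used += n;
                if (to_read == 0) {
                    int rc = (static_cast<Derived *> (this)->*next_fn) ();
                    if (rc != 0)
                        return rc;
                }
            }
            return 0;
        }

      private:
        unsigned char *read_pos = nullptr;
        std::size_t to_read = 0;
        step_t next_fn = nullptr;
    };

    // One-byte length prefix followed by the payload.
    class toy_decoder : public toy_decoder_base<toy_decoder>
    {
      public:
        toy_decoder () { next_step (&len, 1, &toy_decoder::on_length); }

        int on_length ()
        {
            next_step (body, len, &toy_decoder::on_body);
            return 0;
        }

        int on_body ()
        {
            std::printf ("frame of %d bytes: %.*s\n", (int) len, (int) len,
                         (char *) body);
            next_step (&len, 1, &toy_decoder::on_length); // wait for next frame
            return 1;
        }

      private:
        unsigned char len = 0;
        unsigned char body[255];
    };

    int main ()
    {
        const unsigned char wire[] = {5, 'h', 'e', 'l', 'l', 'o'};
        toy_decoder d;
        d.decode (wire, sizeof wire);
    }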
@ -34,34 +34,39 @@

#include "msg.hpp"

zmq::shared_message_memory_allocator::shared_message_memory_allocator (
  std::size_t bufsize_) :
    buf (NULL),
    bufsize (0),
    max_size (bufsize_),
    msg_content (NULL),
    maxCounters (static_cast<size_t> (
      std::ceil (static_cast<double> (max_size)
                 / static_cast<double> (msg_t::max_vsm_size))))
{
}

zmq::shared_message_memory_allocator::shared_message_memory_allocator (
  std::size_t bufsize_, std::size_t maxMessages) :
    buf (NULL),
    bufsize (0),
    max_size (bufsize_),
    msg_content (NULL),
    maxCounters (maxMessages)
{
}

zmq::shared_message_memory_allocator::~shared_message_memory_allocator ()
{
    deallocate ();
}

unsigned char *zmq::shared_message_memory_allocator::allocate ()
{
    if (buf) {
        // release reference count to couple lifetime to messages
        zmq::atomic_counter_t *c =
          reinterpret_cast<zmq::atomic_counter_t *> (buf);

        // if refcnt drops to 0, there are no message using the buffer
        // because either all messages have been closed or only vsm-messages
@ -77,36 +82,38 @@ unsigned char* zmq::shared_message_memory_allocator::allocate ()
    if (!buf) {
        // allocate memory for reference counters together with reception buffer
        std::size_t const allocationsize =
          max_size + sizeof (zmq::atomic_counter_t)
          + maxCounters * sizeof (zmq::msg_t::content_t);

        buf = static_cast<unsigned char *> (std::malloc (allocationsize));
        alloc_assert (buf);

        new (buf) atomic_counter_t (1);
    } else {
        // release reference count to couple lifetime to messages
        zmq::atomic_counter_t *c =
          reinterpret_cast<zmq::atomic_counter_t *> (buf);
        c->set (1);
    }

    bufsize = max_size;
    msg_content = reinterpret_cast<zmq::msg_t::content_t *> (
      buf + sizeof (atomic_counter_t) + max_size);
    return buf + sizeof (zmq::atomic_counter_t);
}

void zmq::shared_message_memory_allocator::deallocate ()
{
    zmq::atomic_counter_t *c = reinterpret_cast<zmq::atomic_counter_t *> (buf);
    if (buf && !c->sub (1)) {
        std::free (buf);
    }
    release ();
}

unsigned char *zmq::shared_message_memory_allocator::release ()
{
    unsigned char *b = buf;
    buf = NULL;
    bufsize = 0;
    msg_content = NULL;
@ -116,14 +123,14 @@ unsigned char* zmq::shared_message_memory_allocator::release ()

void zmq::shared_message_memory_allocator::inc_ref ()
{
    (reinterpret_cast<zmq::atomic_counter_t *> (buf))->add (1);
}

void zmq::shared_message_memory_allocator::call_dec_ref (void *, void *hint)
{
    zmq_assert (hint);
    unsigned char *buf = static_cast<unsigned char *> (hint);
    zmq::atomic_counter_t *c = reinterpret_cast<zmq::atomic_counter_t *> (buf);

    if (!c->sub (1)) {
        c->~atomic_counter_t ();
@ -138,7 +145,7 @@ std::size_t zmq::shared_message_memory_allocator::size () const
    return bufsize;
}

unsigned char *zmq::shared_message_memory_allocator::data ()
{
    return buf + sizeof (zmq::atomic_counter_t);
}
@ -39,117 +39,93 @@

namespace zmq
{
// Static buffer policy.
class c_single_allocator
{
  public:
    explicit c_single_allocator (std::size_t bufsize_) :
        bufsize (bufsize_),
        buf (static_cast<unsigned char *> (std::malloc (bufsize)))
    {
        alloc_assert (buf);
    }

    ~c_single_allocator () { std::free (buf); }

    unsigned char *allocate () { return buf; }

    void deallocate () {}

    std::size_t size () const { return bufsize; }

    void resize (std::size_t new_size) { bufsize = new_size; }

  private:
    std::size_t bufsize;
    unsigned char *buf;

    c_single_allocator (c_single_allocator const &);
    c_single_allocator &operator= (c_single_allocator const &);
};

// This allocator allocates a reference counted buffer which is used by v2_decoder_t
// to use zero-copy msg::init_data to create messages with memory from this buffer as
// data storage.
//
// The buffer is allocated with a reference count of 1 to make sure that is is alive while
// decoding messages. Otherwise, it is possible that e.g. the first message increases the count
// from zero to one, gets passed to the user application, processed in the user thread and deleted
// which would then deallocate the buffer. The drawback is that the buffer may be allocated longer
// than necessary because it is only deleted when allocate is called the next time.
class shared_message_memory_allocator
{
  public:
    explicit shared_message_memory_allocator (std::size_t bufsize_);

    // Create an allocator for a maximum number of messages
    shared_message_memory_allocator (std::size_t bufsize_,
                                     std::size_t maxMessages);

    ~shared_message_memory_allocator ();

    // Allocate a new buffer
    //
    // This releases the current buffer to be bound to the lifetime of the messages
    // created on this buffer.
    unsigned char *allocate ();

    // force deallocation of buffer.
    void deallocate ();

    // Give up ownership of the buffer. The buffer's lifetime is now coupled to
    // the messages constructed on top of it.
    unsigned char *release ();

    void inc_ref ();

    static void call_dec_ref (void *, void *buffer);

    std::size_t size () const;

    // Return pointer to the first message data byte.
    unsigned char *data ();

    // Return pointer to the first byte of the buffer.
    unsigned char *buffer () { return buf; }

    void resize (std::size_t new_size) { bufsize = new_size; }

    zmq::msg_t::content_t *provide_content () { return msg_content; }

    void advance_content () { msg_content++; }

  private:
    unsigned char *buf;
    std::size_t bufsize;
    std::size_t max_size;
    zmq::msg_t::content_t *msg_content;
    std::size_t maxCounters;
};
}

#endif
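shared_message_memory_allocator lets v2_decoder_t hand out messages whose payload points straight into the receive buffer; the counter stored at the front of the buffer is decremented by call_dec_ref as each message is closed. The same lifetime-by-callback pattern is available to applications through zmq_msg_init_data, where the hint argument carries whatever the free callback needs. A hedged sketch (plain malloc and a non-atomic counter, so single-threaded only; the struct and names are invented for illustration):

    #include <zmq.h>
    #include <cstdlib>
    #include <cstring>
    #include <cassert>

    // A buffer whose lifetime is tied to the messages built on it; the
    // counter plays the role of the embedded atomic_counter_t above.
    struct shared_block
    {
        int refs;
        char data[256];
    };

    static void dec_ref (void *, void *hint)
    {
        shared_block *blk = static_cast<shared_block *> (hint);
        if (--blk->refs == 0)
            std::free (blk);
    }

    int main ()
    {
        shared_block *blk =
          static_cast<shared_block *> (std::malloc (sizeof (shared_block)));
        assert (blk);
        std::strcpy (blk->data, "payload shared by two zero-copy messages");
        blk->refs = 2;

        zmq_msg_t a, b;
        // Both messages reference the same storage; nothing is copied.
        zmq_msg_init_data (&a, blk->data, 7, dec_ref, blk);
        zmq_msg_init_data (&b, blk->data + 8, 6, dec_ref, blk);

        zmq_msg_close (&a); // refs: 2 -> 1
        zmq_msg_close (&b); // refs: 1 -> 0, block freed inside dec_ref
        return 0;
    }

In libzmq itself the counter is atomic because the free callback can run on a different thread than the decoder; the sketch skips that for brevity.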
@ -47,7 +47,7 @@
#include "i_poll_events.hpp"

zmq::devpoll_t::devpoll_t (const zmq::ctx_t &ctx_) :
    ctx (ctx_),
    stopping (false)
{
    devpoll_fd = open ("/dev/poll", O_RDWR);
@ -68,24 +68,24 @@ void zmq::devpoll_t::devpoll_ctl (fd_t fd_, short events_)
}

zmq::devpoll_t::handle_t zmq::devpoll_t::add_fd (fd_t fd_,
                                                 i_poll_events *reactor_)
{
    // If the file descriptor table is too small expand it.
    fd_table_t::size_type sz = fd_table.size ();
    if (sz <= (fd_table_t::size_type) fd_) {
        fd_table.resize (fd_ + 1);
        while (sz != (fd_table_t::size_type) (fd_ + 1)) {
            fd_table[sz].valid = false;
            ++sz;
        }
    }

    zmq_assert (!fd_table[fd_].valid);

    fd_table[fd_].events = 0;
    fd_table[fd_].reactor = reactor_;
    fd_table[fd_].valid = true;
    fd_table[fd_].accepted = false;

    devpoll_ctl (fd_, 0);
    pending_list.push_back (fd_);
@ -98,10 +98,10 @@ zmq::devpoll_t::handle_t zmq::devpoll_t::add_fd (fd_t fd_,

void zmq::devpoll_t::rm_fd (handle_t handle_)
{
    zmq_assert (fd_table[handle_].valid);

    devpoll_ctl (handle_, POLLREMOVE);
    fd_table[handle_].valid = false;

    // Decrease the load metric of the thread.
    adjust_load (-1);
@ -110,29 +110,29 @@ void zmq::devpoll_t::rm_fd (handle_t handle_)
void zmq::devpoll_t::set_pollin (handle_t handle_)
{
    devpoll_ctl (handle_, POLLREMOVE);
    fd_table[handle_].events |= POLLIN;
    devpoll_ctl (handle_, fd_table[handle_].events);
}

void zmq::devpoll_t::reset_pollin (handle_t handle_)
{
    devpoll_ctl (handle_, POLLREMOVE);
    fd_table[handle_].events &= ~((short) POLLIN);
    devpoll_ctl (handle_, fd_table[handle_].events);
}

void zmq::devpoll_t::set_pollout (handle_t handle_)
{
    devpoll_ctl (handle_, POLLREMOVE);
    fd_table[handle_].events |= POLLOUT;
    devpoll_ctl (handle_, fd_table[handle_].events);
}

void zmq::devpoll_t::reset_pollout (handle_t handle_)
{
    devpoll_ctl (handle_, POLLREMOVE);
    fd_table[handle_].events &= ~((short) POLLOUT);
    devpoll_ctl (handle_, fd_table[handle_].events);
}

void zmq::devpoll_t::start ()
@ -153,12 +153,11 @@ int zmq::devpoll_t::max_fds ()
void zmq::devpoll_t::loop ()
{
    while (!stopping) {
        struct pollfd ev_buf[max_io_events];
        struct dvpoll poll_req;

        for (pending_list_t::size_type i = 0; i < pending_list.size (); i++)
            fd_table[pending_list[i]].accepted = true;
        pending_list.clear ();

        // Execute any due timers.
@ -166,7 +165,7 @@ void zmq::devpoll_t::loop ()

        // Wait for events.
        // On Solaris, we can retrieve no more then (OPEN_MAX - 1) events.
        poll_req.dp_fds = &ev_buf[0];
#if defined ZMQ_HAVE_SOLARIS
        poll_req.dp_nfds = std::min ((int) max_io_events, OPEN_MAX - 1);
#else
@ -178,20 +177,19 @@ void zmq::devpoll_t::loop ()
            continue;
        errno_assert (n != -1);

        for (int i = 0; i < n; i++) {
            fd_entry_t *fd_ptr = &fd_table[ev_buf[i].fd];
            if (!fd_ptr->valid || !fd_ptr->accepted)
                continue;
            if (ev_buf[i].revents & (POLLERR | POLLHUP))
                fd_ptr->reactor->in_event ();
            if (!fd_ptr->valid || !fd_ptr->accepted)
                continue;
            if (ev_buf[i].revents & POLLOUT)
                fd_ptr->reactor->out_event ();
            if (!fd_ptr->valid || !fd_ptr->accepted)
                continue;
            if (ev_buf[i].revents & POLLIN)
                fd_ptr->reactor->in_event ();
        }
    }
@ -199,7 +197,7 @@ void zmq::devpoll_t::loop ()

void zmq::devpoll_t::worker_routine (void *arg_)
{
    ((devpoll_t *) arg_)->loop ();
}

#endif
120 src/devpoll.hpp
@ -43,75 +43,71 @@

namespace zmq
{
struct i_poll_events;

// Implements socket polling mechanism using the "/dev/poll" interface.
class devpoll_t : public poller_base_t
{
  public:
    typedef fd_t handle_t;

    devpoll_t (const ctx_t &ctx_);
    ~devpoll_t ();

    // "poller" concept.
    handle_t add_fd (fd_t fd_, zmq::i_poll_events *events_);
    void rm_fd (handle_t handle_);
    void set_pollin (handle_t handle_);
    void reset_pollin (handle_t handle_);
    void set_pollout (handle_t handle_);
    void reset_pollout (handle_t handle_);
    void start ();
    void stop ();

    static int max_fds ();

  private:
    // Main worker thread routine.
    static void worker_routine (void *arg_);

    // Main event loop.
    void loop ();

    // Reference to ZMQ context.
    const ctx_t &ctx;

    // File descriptor referring to "/dev/poll" pseudo-device.
    fd_t devpoll_fd;

    struct fd_entry_t
    {
        short events;
        zmq::i_poll_events *reactor;
        bool valid;
        bool accepted;
    };

    typedef std::vector<fd_entry_t> fd_table_t;
    fd_table_t fd_table;

    typedef std::vector<fd_t> pending_list_t;
    pending_list_t pending_list;

    // Pollset manipulation function.
    void devpoll_ctl (fd_t fd_, short events_);

    // If true, thread is in the process of shutting down.
    bool stopping;

    // Handle of the physical thread doing the I/O work.
    thread_t worker;

    devpoll_t (const devpoll_t &);
    const devpoll_t &operator= (const devpoll_t &);
};

typedef devpoll_t poller_t;
}

#endif
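devpoll_t is one backend of libzmq's "poller" concept: callers register an fd together with an event sink, toggle POLLIN/POLLOUT interest, and the loop turns readiness into in_event ()/out_event () callbacks. A stand-alone sketch of that concept shape built on plain poll () rather than /dev/poll (types and names here are illustrative, not the libzmq ones):

    #include <poll.h>
    #include <unistd.h>
    #include <vector>

    struct i_poll_events
    {
        virtual void in_event () = 0;
        virtual void out_event () = 0;
        virtual ~i_poll_events () {}
    };

    class tiny_poller
    {
      public:
        void add_fd (int fd, i_poll_events *sink)
        {
            pollfd pfd = {fd, 0, 0};
            fds.push_back (pfd);
            sinks.push_back (sink);
        }
        void set_pollin (int fd) { flag (fd, POLLIN, true); }
        void reset_pollin (int fd) { flag (fd, POLLIN, false); }

        // One pass of the event loop: wait, then dispatch callbacks.
        void run_once (int timeout_ms)
        {
            if (poll (&fds[0], fds.size (), timeout_ms) <= 0)
                return;
            for (size_t i = 0; i < fds.size (); i++) {
                if (fds[i].revents & (POLLIN | POLLERR | POLLHUP))
                    sinks[i]->in_event ();
                if (fds[i].revents & POLLOUT)
                    sinks[i]->out_event ();
            }
        }

      private:
        void flag (int fd, short ev, bool on)
        {
            for (size_t i = 0; i < fds.size (); i++)
                if (fds[i].fd == fd)
                    fds[i].events =
                      on ? (fds[i].events | ev) : (fds[i].events & ~ev);
        }
        std::vector<pollfd> fds;
        std::vector<i_poll_events *> sinks;
    };

    struct echo_stdin : i_poll_events
    {
        void in_event ()
        {
            char b[64];
            ssize_t n = read (0, b, sizeof b);
            if (n > 0)
                (void) write (1, b, n);
        }
        void out_event () {}
    };

    int main ()
    {
        tiny_poller p;
        echo_stdin e;
        p.add_fd (0, &e);
        p.set_pollin (0);
        p.run_once (1000); // echo one chunk of stdin within a second
    }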
@ -53,7 +53,7 @@ zmq::dgram_t::~dgram_t ()

void zmq::dgram_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
{
    LIBZMQ_UNUSED (subscribe_to_all_);

    zmq_assert (pipe_);

@ -107,9 +107,7 @@ int zmq::dgram_t::xsend (msg_t *msg_)

        // Expect one more message frame.
        more_out = true;
    } else {
        // dgram messages are two part only, reject part if more is set
        if (msg_->flags () & msg_t::more) {
            errno = EINVAL;
@ -173,5 +171,5 @@ bool zmq::dgram_t::xhas_out ()

const zmq::blob_t &zmq::dgram_t::get_credential () const
{
    return last_in ? last_in->get_credential () : saved_credential;
}
@ -36,46 +36,41 @@

namespace zmq
{
class ctx_t;
class msg_t;
class pipe_t;
class io_thread_t;

class dgram_t : public socket_base_t
{
  public:
    dgram_t (zmq::ctx_t *parent_, uint32_t tid_, int sid);
    ~dgram_t ();

    // Overrides of functions from socket_base_t.
    void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_);
    int xsend (zmq::msg_t *msg_);
    int xrecv (zmq::msg_t *msg_);
    bool xhas_in ();
    bool xhas_out ();
    const blob_t &get_credential () const;
    void xread_activated (zmq::pipe_t *pipe_);
    void xwrite_activated (zmq::pipe_t *pipe_);
    void xpipe_terminated (zmq::pipe_t *pipe_);

  private:
    zmq::pipe_t *pipe;

    zmq::pipe_t *last_in;

    blob_t saved_credential;

    // If true, more outgoing message parts are expected.
    bool more_out;

    dgram_t (const dgram_t &);
    const dgram_t &operator= (const dgram_t &);
};
}

#endif
55 src/dish.cpp
@ -88,7 +88,7 @@ void zmq::dish_t::xhiccuped (pipe_t *pipe_)
    send_subscriptions (pipe_);
}

int zmq::dish_t::xjoin (const char *group_)
{
    std::string group = std::string (group_);

@ -125,7 +125,7 @@ int zmq::dish_t::xjoin (const char *group_)
    return rc;
}

int zmq::dish_t::xleave (const char *group_)
{
    std::string group = std::string (group_);

@ -134,7 +134,8 @@ int zmq::dish_t::xleave (const char *group_)
        return -1;
    }

    subscriptions_t::iterator it =
      std::find (subscriptions.begin (), subscriptions.end (), group);

    if (it == subscriptions.end ()) {
        errno = EINVAL;
@ -186,7 +187,6 @@ int zmq::dish_t::xrecv (msg_t *msg_)
    }

    while (true) {
        // Get a message using fair queueing algorithm.
        int rc = fq.recv (msg_);

@ -196,7 +196,8 @@ int zmq::dish_t::xrecv (msg_t *msg_)
            return -1;

        // Filtering non matching messages
        subscriptions_t::iterator it =
          subscriptions.find (std::string (msg_->group ()));
        if (it != subscriptions.end ())
            return 0;
    }
@ -221,7 +222,8 @@ bool zmq::dish_t::xhas_in ()
        }

        // Filtering non matching messages
        subscriptions_t::iterator it =
          subscriptions.find (std::string (message.group ()));
        if (it != subscriptions.end ()) {
            has_message = true;
            return true;
@ -236,12 +238,13 @@ const zmq::blob_t &zmq::dish_t::get_credential () const

void zmq::dish_t::send_subscriptions (pipe_t *pipe_)
{
    for (subscriptions_t::iterator it = subscriptions.begin ();
         it != subscriptions.end (); ++it) {
        msg_t msg;
        int rc = msg.init_join ();
        errno_assert (rc == 0);

        rc = msg.set_group (it->c_str ());
        errno_assert (rc == 0);

        // Send it to the pipe.
@ -252,9 +255,11 @@ void zmq::dish_t::send_subscriptions (pipe_t *pipe_)
    pipe_->flush ();
}

zmq::dish_session_t::dish_session_t (io_thread_t *io_thread_,
                                     bool connect_,
                                     socket_base_t *socket_,
                                     const options_t &options_,
                                     address_t *addr_) :
    session_base_t (io_thread_, connect_, socket_, options_, addr_),
    state (group)
{
@ -267,12 +272,12 @@ zmq::dish_session_t::~dish_session_t ()
int zmq::dish_session_t::push_msg (msg_t *msg_)
{
    if (state == group) {
        if ((msg_->flags () & msg_t::more) != msg_t::more) {
            errno = EFAULT;
            return -1;
        }

        if (msg_->size () > ZMQ_GROUP_MAX_LENGTH) {
            errno = EFAULT;
            return -1;
        }
@ -283,23 +288,22 @@ int zmq::dish_session_t::push_msg (msg_t *msg_)
        int rc = msg_->init ();
        errno_assert (rc == 0);
        return 0;
    } else {
        const char *group_setting = msg_->group ();
        int rc;
        if (group_setting[0] != 0)
            goto has_group;

        // Set the message group
        rc = msg_->set_group ((char *) group_msg.data (), group_msg.size ());
        errno_assert (rc == 0);

        // We set the group, so we don't need the group_msg anymore
        rc = group_msg.close ();
        errno_assert (rc == 0);
    has_group:
        // Thread safe socket doesn't support multipart messages
        if ((msg_->flags () & msg_t::more) == msg_t::more) {
            errno = EFAULT;
            return -1;
        }
@ -331,19 +335,18 @@ int zmq::dish_session_t::pull_msg (msg_t *msg_)

    if (msg_->is_join ()) {
        rc = command.init_size (group_length + 5);
        errno_assert (rc == 0);
        offset = 5;
        memcpy (command.data (), "\4JOIN", 5);
    } else {
        rc = command.init_size (group_length + 6);
        errno_assert (rc == 0);
        offset = 6;
        memcpy (command.data (), "\5LEAVE", 6);
    }

    command.set_flags (msg_t::command);
    char *command_data = (char *) command.data ();

    // Copy the group
    memcpy (command_data + offset, msg_->group (), group_length);
144 src/dish.hpp
@ -41,85 +41,81 @@

namespace zmq
{
class ctx_t;
class pipe_t;
class io_thread_t;

class dish_t : public socket_base_t
{
  public:
    dish_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
    ~dish_t ();

  protected:
    // Overrides of functions from socket_base_t.
    void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_);
    int xsend (zmq::msg_t *msg_);
    bool xhas_out ();
    int xrecv (zmq::msg_t *msg_);
    bool xhas_in ();
    const blob_t &get_credential () const;
    void xread_activated (zmq::pipe_t *pipe_);
    void xwrite_activated (zmq::pipe_t *pipe_);
    void xhiccuped (pipe_t *pipe_);
    void xpipe_terminated (zmq::pipe_t *pipe_);
    int xjoin (const char *group_);
    int xleave (const char *group_);

  private:
    // Send subscriptions to a pipe
    void send_subscriptions (pipe_t *pipe_);

    // Fair queueing object for inbound pipes.
    fq_t fq;

    // Object for distributing the subscriptions upstream.
    dist_t dist;

    // The repository of subscriptions.
    typedef std::set<std::string> subscriptions_t;
    subscriptions_t subscriptions;

    // If true, 'message' contains a matching message to return on the
    // next recv call.
    bool has_message;
    msg_t message;

    dish_t (const dish_t &);
    const dish_t &operator= (const dish_t &);
};

class dish_session_t : public session_base_t
{
  public:
    dish_session_t (zmq::io_thread_t *io_thread_,
                    bool connect_,
                    zmq::socket_base_t *socket_,
                    const options_t &options_,
                    address_t *addr_);
    ~dish_session_t ();

    // Overrides of the functions from session_base_t.
    int push_msg (msg_t *msg_);
    int pull_msg (msg_t *msg_);
    void reset ();

  private:
    enum
    {
        group,
        body
    } state;

    msg_t group_msg;

    dish_session_t (const dish_session_t &);
    const dish_session_t &operator= (const dish_session_t &);
};
}

#endif
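dish_t and dish_session_t implement the DISH side of the RADIO/DISH pattern: xjoin/xleave manage the group set and the session turns joins into the \4JOIN / \5LEAVE commands seen in pull_msg above. Assuming a libzmq built with the draft API (ZMQ_RADIO, ZMQ_DISH, zmq_join and zmq_msg_set_group were draft at the time of this commit), the user-level flow looks roughly like this:

    #include <zmq.h>
    #include <cstring>
    #include <cstdio>

    int main ()
    {
        void *ctx = zmq_ctx_new ();
        void *radio = zmq_socket (ctx, ZMQ_RADIO);
        void *dish = zmq_socket (ctx, ZMQ_DISH);

        zmq_bind (radio, "inproc://weather");
        zmq_connect (dish, "inproc://weather");
        zmq_join (dish, "temperature"); // ends up in dish_t::xjoin

        // Note: joins propagate asynchronously; a real program would wait
        // or retry before relying on the first publication being delivered.
        zmq_msg_t msg;
        zmq_msg_init_size (&msg, 5);
        memcpy (zmq_msg_data (&msg), "23.5C", 5);
        zmq_msg_set_group (&msg, "temperature");
        zmq_msg_send (&msg, radio, 0);

        zmq_msg_t in;
        zmq_msg_init (&in);
        if (zmq_msg_recv (&in, dish, 0) != -1)
            printf ("[%s] %.5s\n", zmq_msg_group (&in),
                    (char *) zmq_msg_data (&in));

        zmq_msg_close (&in);
        zmq_close (dish);
        zmq_close (radio);
        zmq_ctx_term (ctx);
    }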
23 src/dist.cpp
@ -34,11 +34,7 @@
#include "msg.hpp"
#include "likely.hpp"

zmq::dist_t::dist_t () : matching (0), active (0), eligible (0), more (false)
{
}

@ -56,8 +52,7 @@ void zmq::dist_t::attach (pipe_t *pipe_)
        pipes.push_back (pipe_);
        pipes.swap (eligible, pipes.size () - 1);
        eligible++;
    } else {
        pipes.push_back (pipe_);
        pipes.swap (active, pipes.size () - 1);
        active++;
@ -85,14 +80,14 @@ void zmq::dist_t::reverse_match ()
    pipes_t::size_type prev_matching = matching;

    // Reset matching to 0
    unmatch ();

    // Mark all matching pipes as not matching and vice-versa.
    // To do this, push all pipes that are eligible but not
    // matched - i.e. between "matching" and "eligible" -
    // to the beginning of the queue.
    for (pipes_t::size_type i = prev_matching; i < eligible; ++i) {
        pipes.swap (i, matching++);
    }
}

@ -173,9 +168,9 @@ void zmq::dist_t::distribute (msg_t *msg_)

    if (msg_->is_vsm ()) {
        for (pipes_t::size_type i = 0; i < matching; ++i)
            if (!write (pipes[i], msg_))
                --i; // Retry last write because index will have been swapped
        int rc = msg_->close ();
        errno_assert (rc == 0);
        rc = msg_->init ();
        errno_assert (rc == 0);
@ -189,7 +184,7 @@ void zmq::dist_t::distribute (msg_t *msg_)
    // Push copy of the message to each matching pipe.
    int failed = 0;
    for (pipes_t::size_type i = 0; i < matching; ++i)
        if (!write (pipes[i], msg_)) {
            ++failed;
            --i; // Retry last write because index will have been swapped
        }
@ -226,10 +221,8 @@ bool zmq::dist_t::write (pipe_t *pipe_, msg_t *msg_)
bool zmq::dist_t::check_hwm ()
{
    for (pipes_t::size_type i = 0; i < matching; ++i)
        if (!pipes[i]->check_hwm ())
            return false;

    return true;
}
114
src/dist.hpp
114
src/dist.hpp
@ -37,84 +37,80 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class pipe_t;
|
||||
class msg_t;
|
||||
|
||||
class pipe_t;
|
||||
class msg_t;
|
||||
// Class manages a set of outbound pipes. It sends each messages to
|
||||
// each of them.
|
||||
class dist_t
|
||||
{
|
||||
public:
|
||||
dist_t ();
|
||||
~dist_t ();
|
||||
|
||||
// Class manages a set of outbound pipes. It sends each messages to
|
||||
// each of them.
|
||||
class dist_t
|
||||
{
|
||||
public:
|
||||
// Adds the pipe to the distributor object.
|
||||
void attach (zmq::pipe_t *pipe_);
|
||||
|
||||
dist_t ();
|
||||
~dist_t ();
|
||||
// Activates pipe that have previously reached high watermark.
|
||||
void activated (zmq::pipe_t *pipe_);
|
||||
|
||||
// Adds the pipe to the distributor object.
|
||||
void attach (zmq::pipe_t *pipe_);
|
||||
// Mark the pipe as matching. Subsequent call to send_to_matching
|
||||
// will send message also to this pipe.
|
||||
void match (zmq::pipe_t *pipe_);
|
||||
|
||||
// Activates pipe that have previously reached high watermark.
|
||||
void activated (zmq::pipe_t *pipe_);
|
||||
// Marks all pipes that are not matched as matched and vice-versa.
|
||||
void reverse_match ();
|
||||
|
||||
// Mark the pipe as matching. Subsequent call to send_to_matching
|
||||
// will send message also to this pipe.
|
||||
void match (zmq::pipe_t *pipe_);
|
||||
// Mark all pipes as non-matching.
|
||||
void unmatch ();
|
||||
|
||||
// Marks all pipes that are not matched as matched and vice-versa.
|
||||
void reverse_match();
|
||||
// Removes the pipe from the distributor object.
|
||||
void pipe_terminated (zmq::pipe_t *pipe_);
|
||||
|
||||
// Mark all pipes as non-matching.
|
||||
void unmatch ();
|
||||
// Send the message to the matching outbound pipes.
|
||||
int send_to_matching (zmq::msg_t *msg_);
|
||||
|
||||
// Removes the pipe from the distributor object.
|
||||
void pipe_terminated (zmq::pipe_t *pipe_);
|
||||
// Send the message to all the outbound pipes.
|
||||
int send_to_all (zmq::msg_t *msg_);
|
||||
|
||||
// Send the message to the matching outbound pipes.
|
||||
int send_to_matching (zmq::msg_t *msg_);
|
||||
bool has_out ();
|
||||
|
||||
// Send the message to all the outbound pipes.
|
||||
int send_to_all (zmq::msg_t *msg_);
|
||||
// check HWM of all pipes matching
|
||||
bool check_hwm ();
|
||||
|
||||
bool has_out ();
|
||||
private:
|
||||
// Write the message to the pipe. Make the pipe inactive if writing
|
||||
// fails. In such a case false is returned.
|
||||
bool write (zmq::pipe_t *pipe_, zmq::msg_t *msg_);
|
||||
|
||||
// check HWM of all pipes matching
|
||||
bool check_hwm ();
|
||||
// Put the message to all active pipes.
|
||||
void distribute (zmq::msg_t *msg_);
|
||||
|
||||
private:
|
||||
// List of outbound pipes.
|
||||
typedef array_t<zmq::pipe_t, 2> pipes_t;
|
||||
pipes_t pipes;
|
||||
|
||||
// Write the message to the pipe. Make the pipe inactive if writing
|
||||
// fails. In such a case false is returned.
|
||||
bool write (zmq::pipe_t *pipe_, zmq::msg_t *msg_);
|
||||
// Number of all the pipes to send the next message to.
|
||||
pipes_t::size_type matching;
|
||||
|
||||
// Put the message to all active pipes.
|
||||
void distribute (zmq::msg_t *msg_);
|
||||
// Number of active pipes. All the active pipes are located at the
|
||||
// beginning of the pipes array. These are the pipes the messages
|
||||
// can be sent to at the moment.
|
||||
pipes_t::size_type active;
|
||||
|
||||
// List of outbound pipes.
|
||||
typedef array_t <zmq::pipe_t, 2> pipes_t;
|
||||
pipes_t pipes;
|
||||
// Number of pipes eligible for sending messages to. This includes all
|
||||
// the active pipes plus all the pipes that we can in theory send
|
||||
// messages to (the HWM is not yet reached), but sending a message
|
||||
// to them would result in partial message being delivered, ie. message
|
||||
// with initial parts missing.
|
||||
pipes_t::size_type eligible;
|
||||
|
||||
// Number of all the pipes to send the next message to.
|
||||
pipes_t::size_type matching;
|
||||
|
||||
// Number of active pipes. All the active pipes are located at the
|
||||
// beginning of the pipes array. These are the pipes the messages
|
||||
// can be sent to at the moment.
|
||||
pipes_t::size_type active;
|
||||
|
||||
// Number of pipes eligible for sending messages to. This includes all
|
||||
// the active pipes plus all the pipes that we can in theory send
|
||||
// messages to (the HWM is not yet reached), but sending a message
|
||||
// to them would result in partial message being delivered, ie. message
|
||||
// with initial parts missing.
|
||||
pipes_t::size_type eligible;
|
||||
|
||||
// True if last we are in the middle of a multipart message.
|
||||
bool more;
|
||||
|
||||
dist_t (const dist_t&);
|
||||
const dist_t &operator = (const dist_t&);
|
||||
};
|
||||
// True if last we are in the middle of a multipart message.
|
||||
bool more;
|
||||
|
||||
dist_t (const dist_t &);
|
||||
const dist_t &operator= (const dist_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
223
src/encoder.hpp
223
src/encoder.hpp
@ -47,143 +47,134 @@
namespace zmq
{
// Helper base class for encoders. It implements the state machine that
// fills the outgoing buffer. Derived classes should implement individual
// state machine actions.
template <typename T> class encoder_base_t : public i_encoder
{
  public:
    inline encoder_base_t (size_t bufsize_) :
        write_pos (0),
        to_write (0),
        next (NULL),
        new_msg_flag (false),
        bufsize (bufsize_),
        in_progress (NULL)
    {
        buf = (unsigned char *) malloc (bufsize_);
        alloc_assert (buf);
    }

    // The destructor doesn't have to be virtual. It is made virtual
    // just to keep ICC and code checking tools from complaining.
    inline virtual ~encoder_base_t () { free (buf); }

    // The function returns a batch of binary data. The data
    // are filled to a supplied buffer. If no buffer is supplied (data_
    // points to NULL) decoder object will provide buffer of its own.
    inline size_t encode (unsigned char **data_, size_t size_)
    {
        unsigned char *buffer = !*data_ ? buf : *data_;
        size_t buffersize = !*data_ ? bufsize : size_;

        if (in_progress == NULL)
            return 0;

        size_t pos = 0;
        while (pos < buffersize) {
            // If there are no more data to return, run the state machine.
            // If there are still no data, return what we already have
            // in the buffer.
            if (!to_write) {
                if (new_msg_flag) {
                    int rc = in_progress->close ();
                    errno_assert (rc == 0);
                    rc = in_progress->init ();
                    errno_assert (rc == 0);
                    in_progress = NULL;
                    break;
                }
                (static_cast<T *> (this)->*next) ();
            }

            // If there are no data in the buffer yet and we are able to
            // fill whole buffer in a single go, let's use zero-copy.
            // There's no disadvantage to it as we cannot stuck multiple
            // messages into the buffer anyway. Note that subsequent
            // write(s) are non-blocking, thus each single write writes
            // at most SO_SNDBUF bytes at once not depending on how large
            // is the chunk returned from here.
            // As a consequence, large messages being sent won't block
            // other engines running in the same I/O thread for excessive
            // amounts of time.
            if (!pos && !*data_ && to_write >= buffersize) {
                *data_ = write_pos;
                pos = to_write;
                write_pos = NULL;
                to_write = 0;
                return pos;
            }

            // Copy data to the buffer. If the buffer is full, return.
            size_t to_copy = std::min (to_write, buffersize - pos);
            memcpy (buffer + pos, write_pos, to_copy);
            pos += to_copy;
            write_pos += to_copy;
            to_write -= to_copy;
        }

        *data_ = buffer;
        return pos;
    }

    void load_msg (msg_t *msg_)
    {
        zmq_assert (in_progress == NULL);
        in_progress = msg_;
        (static_cast<T *> (this)->*next) ();
    }

  protected:
    // Prototype of state machine action.
    typedef void (T::*step_t) ();

    // This function should be called from derived class to write the data
    // to the buffer and schedule next state machine action.
    inline void next_step (void *write_pos_,
                           size_t to_write_,
                           step_t next_,
                           bool new_msg_flag_)
    {
        write_pos = (unsigned char *) write_pos_;
        to_write = to_write_;
        next = next_;
        new_msg_flag = new_msg_flag_;
    }

  private:
    // Where to get the data to write from.
    unsigned char *write_pos;

    // How much data to write before next step should be executed.
    size_t to_write;

    // Next step. If set to NULL, it means that associated data stream
    // is dead.
    step_t next;

    bool new_msg_flag;

    // The buffer for encoded data.
    size_t bufsize;
    unsigned char *buf;

    encoder_base_t (const encoder_base_t &);
    void operator= (const encoder_base_t &);

  protected:
    msg_t *in_progress;
};
}

#endif
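The comments in encoder_base_t above describe a small state machine: a derived encoder points write_pos/to_write at the next chunk of bytes and registers the next action through next_step, and encode () drains whatever is scheduled. Not part of this commit, and deliberately not using the real i_encoder/msg_t types: the following is a minimal standalone sketch of that pattern, collapsed into a single class instead of the CRTP template, with a hypothetical one-byte length-prefixed framing chosen purely for illustration.

#include <cstddef>
#include <cstdio>
#include <cstring>

// Toy encoder: each "message" is a length byte followed by the payload.
// A step is a pointer-to-member that schedules the next chunk, mirroring
// the step_t actions concrete encoders plug into the base class above.
class toy_encoder_t
{
  public:
    toy_encoder_t () :
        _write_pos (NULL), _to_write (0), _next (NULL), _header (0), _payload (NULL)
    {
    }

    void load (const char *payload_)
    {
        _payload = payload_;
        _header = (unsigned char) strlen (payload_);
        next_step (&_header, 1, &toy_encoder_t::body_ready);
    }

    // Drain whatever the state machine has scheduled into buf_.
    size_t encode (unsigned char *buf_, size_t size_)
    {
        size_t pos = 0;
        while (pos < size_ && _to_write) {
            size_t n = _to_write < size_ - pos ? _to_write : size_ - pos;
            memcpy (buf_ + pos, _write_pos, n);
            pos += n;
            _write_pos += n;
            _to_write -= n;
            if (!_to_write && _next)
                (this->*_next) ();
        }
        return pos;
    }

  private:
    typedef void (toy_encoder_t::*step_t) ();

    void next_step (const void *pos_, size_t n_, step_t next_)
    {
        _write_pos = (const unsigned char *) pos_;
        _to_write = n_;
        _next = next_;
    }

    void body_ready () { next_step (_payload, strlen (_payload), NULL); }

    const unsigned char *_write_pos;
    size_t _to_write;
    step_t _next;
    unsigned char _header;
    const char *_payload;
};

int main ()
{
    toy_encoder_t enc;
    enc.load ("hello");
    unsigned char buf[16];
    size_t n = enc.encode (buf, sizeof buf);
    printf ("%zu bytes, length prefix %u\n", n, (unsigned) buf[0]);
    return 0;
}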
src/epoll.cpp

@@ -44,9 +44,7 @@
#include "config.hpp"
#include "i_poll_events.hpp"

zmq::epoll_t::epoll_t (const zmq::ctx_t &ctx_) : ctx (ctx_), stopping (false)
{
#ifdef ZMQ_USE_EPOLL_CLOEXEC
    // Setting this option result in sane behaviour when exec() functions
@@ -65,8 +63,9 @@ zmq::epoll_t::~epoll_t ()
    worker.stop ();

    close (epoll_fd);
    for (retired_t::iterator it = retired.begin (); it != retired.end ();
         ++it) {
        LIBZMQ_DELETE (*it);
    }
}

@@ -95,7 +94,7 @@ zmq::epoll_t::handle_t zmq::epoll_t::add_fd (fd_t fd_, i_poll_events *events_)

void zmq::epoll_t::rm_fd (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_DEL, pe->fd, &pe->ev);
    errno_assert (rc != -1);
    pe->fd = retired_fd;
@@ -109,7 +108,7 @@ void zmq::epoll_t::rm_fd (handle_t handle_)

void zmq::epoll_t::set_pollin (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    pe->ev.events |= EPOLLIN;
    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
    errno_assert (rc != -1);
@@ -117,7 +116,7 @@ void zmq::epoll_t::set_pollin (handle_t handle_)

void zmq::epoll_t::reset_pollin (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    pe->ev.events &= ~((short) EPOLLIN);
    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
    errno_assert (rc != -1);
@@ -125,7 +124,7 @@ void zmq::epoll_t::reset_pollin (handle_t handle_)

void zmq::epoll_t::set_pollout (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    pe->ev.events |= EPOLLOUT;
    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
    errno_assert (rc != -1);
@@ -133,7 +132,7 @@ void zmq::epoll_t::set_pollout (handle_t handle_)

void zmq::epoll_t::reset_pollout (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    pe->ev.events &= ~((short) EPOLLOUT);
    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
    errno_assert (rc != -1);
@@ -156,42 +155,42 @@ int zmq::epoll_t::max_fds ()

void zmq::epoll_t::loop ()
{
    epoll_event ev_buf[max_io_events];

    while (!stopping) {
        // Execute any due timers.
        int timeout = (int) execute_timers ();

        // Wait for events.
        int n = epoll_wait (epoll_fd, &ev_buf[0], max_io_events,
                            timeout ? timeout : -1);
        if (n == -1) {
            errno_assert (errno == EINTR);
            continue;
        }

        for (int i = 0; i < n; i++) {
            poll_entry_t *pe = ((poll_entry_t *) ev_buf[i].data.ptr);

            if (pe->fd == retired_fd)
                continue;
            if (ev_buf[i].events & (EPOLLERR | EPOLLHUP))
                pe->events->in_event ();
            if (pe->fd == retired_fd)
                continue;
            if (ev_buf[i].events & EPOLLOUT)
                pe->events->out_event ();
            if (pe->fd == retired_fd)
                continue;
            if (ev_buf[i].events & EPOLLIN)
                pe->events->in_event ();
        }

        // Destroy retired event sources.
        retired_sync.lock ();
        for (retired_t::iterator it = retired.begin (); it != retired.end ();
             ++it) {
            LIBZMQ_DELETE (*it);
        }
        retired.clear ();
        retired_sync.unlock ();
@@ -200,7 +199,7 @@ void zmq::epoll_t::loop ()

void zmq::epoll_t::worker_routine (void *arg_)
{
    ((epoll_t *) arg_)->loop ();
}

#endif
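The loop above follows a common epoll pattern: compute a timer-driven timeout (blocking indefinitely when no timer is due), tolerate EINTR, and route EPOLLERR/EPOLLHUP through the input handler so the engine gets a chance to observe the failure. Not libzmq code: below is a minimal self-contained Linux sketch of the same wait/dispatch shape, watching only stdin and looping a fixed number of times for illustration.

#include <sys/epoll.h>
#include <unistd.h>
#include <cstdio>

int main ()
{
    int ep = epoll_create1 (0);
    if (ep == -1)
        return 1;

    epoll_event ev = {};
    ev.events = EPOLLIN;
    ev.data.fd = STDIN_FILENO;
    if (epoll_ctl (ep, EPOLL_CTL_ADD, STDIN_FILENO, &ev) == -1)
        return 1;

    epoll_event ev_buf[8];
    for (int iter = 0; iter < 3; iter++) {
        int n = epoll_wait (ep, ev_buf, 8, -1); // -1: no pending timers
        if (n == -1)
            continue; // e.g. interrupted by a signal (EINTR)
        for (int i = 0; i < n; i++)
            if (ev_buf[i].events & (EPOLLIN | EPOLLERR | EPOLLHUP)) {
                // Error/hangup is handled on the input path, as in the loop above.
                char buf[256];
                ssize_t r = read (ev_buf[i].data.fd, buf, sizeof buf);
                printf ("readable: %zd bytes\n", r);
            }
    }
    close (ep);
    return 0;
}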
src/epoll.hpp (116 changed lines)
@@ -45,73 +45,69 @@

namespace zmq
{
struct i_poll_events;

// This class implements socket polling mechanism using the Linux-specific
// epoll mechanism.
class epoll_t : public poller_base_t
{
  public:
    typedef void *handle_t;

    epoll_t (const ctx_t &ctx_);
    ~epoll_t ();

    // "poller" concept.
    handle_t add_fd (fd_t fd_, zmq::i_poll_events *events_);
    void rm_fd (handle_t handle_);
    void set_pollin (handle_t handle_);
    void reset_pollin (handle_t handle_);
    void set_pollout (handle_t handle_);
    void reset_pollout (handle_t handle_);
    void start ();
    void stop ();

    static int max_fds ();

  private:
    // Main worker thread routine.
    static void worker_routine (void *arg_);

    // Main event loop.
    void loop ();

    // Reference to ZMQ context.
    const ctx_t &ctx;

    // Main epoll file descriptor
    fd_t epoll_fd;

    struct poll_entry_t
    {
        fd_t fd;
        epoll_event ev;
        zmq::i_poll_events *events;
    };

    // List of retired event sources.
    typedef std::vector<poll_entry_t *> retired_t;
    retired_t retired;

    // If true, thread is in the process of shutting down.
    bool stopping;

    // Handle of the physical thread doing the I/O work.
    thread_t worker;

    // Synchronisation of retired event sources
    mutex_t retired_sync;

    epoll_t (const epoll_t &);
    const epoll_t &operator= (const epoll_t &);
};

typedef epoll_t poller_t;
}

#endif
src/err.cpp (628 changed lines)
@ -34,189 +34,209 @@ const char *zmq::errno_to_string (int errno_)
|
||||
{
|
||||
switch (errno_) {
|
||||
#if defined ZMQ_HAVE_WINDOWS
|
||||
case ENOTSUP:
|
||||
return "Not supported";
|
||||
case EPROTONOSUPPORT:
|
||||
return "Protocol not supported";
|
||||
case ENOBUFS:
|
||||
return "No buffer space available";
|
||||
case ENETDOWN:
|
||||
return "Network is down";
|
||||
case EADDRINUSE:
|
||||
return "Address in use";
|
||||
case EADDRNOTAVAIL:
|
||||
return "Address not available";
|
||||
case ECONNREFUSED:
|
||||
return "Connection refused";
|
||||
case EINPROGRESS:
|
||||
return "Operation in progress";
|
||||
case ENOTSUP:
|
||||
return "Not supported";
|
||||
case EPROTONOSUPPORT:
|
||||
return "Protocol not supported";
|
||||
case ENOBUFS:
|
||||
return "No buffer space available";
|
||||
case ENETDOWN:
|
||||
return "Network is down";
|
||||
case EADDRINUSE:
|
||||
return "Address in use";
|
||||
case EADDRNOTAVAIL:
|
||||
return "Address not available";
|
||||
case ECONNREFUSED:
|
||||
return "Connection refused";
|
||||
case EINPROGRESS:
|
||||
return "Operation in progress";
|
||||
#endif
|
||||
case EFSM:
|
||||
return "Operation cannot be accomplished in current state";
|
||||
case ENOCOMPATPROTO:
|
||||
return "The protocol is not compatible with the socket type";
|
||||
case ETERM:
|
||||
return "Context was terminated";
|
||||
case EMTHREAD:
|
||||
return "No thread available";
|
||||
case EHOSTUNREACH:
|
||||
return "Host unreachable";
|
||||
default:
|
||||
case EFSM:
|
||||
return "Operation cannot be accomplished in current state";
|
||||
case ENOCOMPATPROTO:
|
||||
return "The protocol is not compatible with the socket type";
|
||||
case ETERM:
|
||||
return "Context was terminated";
|
||||
case EMTHREAD:
|
||||
return "No thread available";
|
||||
case EHOSTUNREACH:
|
||||
return "Host unreachable";
|
||||
default:
|
||||
#if defined _MSC_VER
|
||||
#pragma warning (push)
|
||||
#pragma warning (disable:4996)
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable : 4996)
|
||||
#endif
|
||||
return strerror (errno_);
|
||||
return strerror (errno_);
|
||||
#if defined _MSC_VER
|
||||
#pragma warning (pop)
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
void zmq::zmq_abort(const char *errmsg_)
|
||||
void zmq::zmq_abort (const char *errmsg_)
|
||||
{
|
||||
#if defined ZMQ_HAVE_WINDOWS
|
||||
|
||||
// Raise STATUS_FATAL_APP_EXIT.
|
||||
ULONG_PTR extra_info [1];
|
||||
extra_info [0] = (ULONG_PTR) errmsg_;
|
||||
ULONG_PTR extra_info[1];
|
||||
extra_info[0] = (ULONG_PTR) errmsg_;
|
||||
RaiseException (0x40000015, EXCEPTION_NONCONTINUABLE, 1, extra_info);
|
||||
#else
|
||||
(void)errmsg_;
|
||||
print_backtrace();
|
||||
(void) errmsg_;
|
||||
print_backtrace ();
|
||||
abort ();
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef ZMQ_HAVE_WINDOWS
|
||||
|
||||
const char *zmq::wsa_error()
|
||||
const char *zmq::wsa_error ()
|
||||
{
|
||||
return wsa_error_no (WSAGetLastError(), NULL);
|
||||
return wsa_error_no (WSAGetLastError (), NULL);
|
||||
}
|
||||
|
||||
const char *zmq::wsa_error_no (int no_, const char * wsae_wouldblock_string)
|
||||
const char *zmq::wsa_error_no (int no_, const char *wsae_wouldblock_string)
|
||||
{
|
||||
// TODO: It seems that list of Windows socket errors is longer than this.
|
||||
// Investigate whether there's a way to convert it into the string
|
||||
// automatically (wsaError->HRESULT->string?).
|
||||
return
|
||||
(no_ == WSABASEERR) ?
|
||||
"No Error" :
|
||||
(no_ == WSAEINTR) ?
|
||||
"Interrupted system call" :
|
||||
(no_ == WSAEBADF) ?
|
||||
"Bad file number" :
|
||||
(no_ == WSAEACCES) ?
|
||||
"Permission denied" :
|
||||
(no_ == WSAEFAULT) ?
|
||||
"Bad address" :
|
||||
(no_ == WSAEINVAL) ?
|
||||
"Invalid argument" :
|
||||
(no_ == WSAEMFILE) ?
|
||||
"Too many open files" :
|
||||
(no_ == WSAEWOULDBLOCK) ?
|
||||
wsae_wouldblock_string :
|
||||
(no_ == WSAEINPROGRESS) ?
|
||||
"Operation now in progress" :
|
||||
(no_ == WSAEALREADY) ?
|
||||
"Operation already in progress" :
|
||||
(no_ == WSAENOTSOCK) ?
|
||||
"Socket operation on non-socket" :
|
||||
(no_ == WSAEDESTADDRREQ) ?
|
||||
"Destination address required" :
|
||||
(no_ == WSAEMSGSIZE) ?
|
||||
"Message too long" :
|
||||
(no_ == WSAEPROTOTYPE) ?
|
||||
"Protocol wrong type for socket" :
|
||||
(no_ == WSAENOPROTOOPT) ?
|
||||
"Bad protocol option" :
|
||||
(no_ == WSAEPROTONOSUPPORT) ?
|
||||
"Protocol not supported" :
|
||||
(no_ == WSAESOCKTNOSUPPORT) ?
|
||||
"Socket type not supported" :
|
||||
(no_ == WSAEOPNOTSUPP) ?
|
||||
"Operation not supported on socket" :
|
||||
(no_ == WSAEPFNOSUPPORT) ?
|
||||
"Protocol family not supported" :
|
||||
(no_ == WSAEAFNOSUPPORT) ?
|
||||
"Address family not supported by protocol family" :
|
||||
(no_ == WSAEADDRINUSE) ?
|
||||
"Address already in use" :
|
||||
(no_ == WSAEADDRNOTAVAIL) ?
|
||||
"Can't assign requested address" :
|
||||
(no_ == WSAENETDOWN) ?
|
||||
"Network is down" :
|
||||
(no_ == WSAENETUNREACH) ?
|
||||
"Network is unreachable" :
|
||||
(no_ == WSAENETRESET) ?
|
||||
"Net dropped connection or reset" :
|
||||
(no_ == WSAECONNABORTED) ?
|
||||
"Software caused connection abort" :
|
||||
(no_ == WSAECONNRESET) ?
|
||||
"Connection reset by peer" :
|
||||
(no_ == WSAENOBUFS) ?
|
||||
"No buffer space available" :
|
||||
(no_ == WSAEISCONN) ?
|
||||
"Socket is already connected" :
|
||||
(no_ == WSAENOTCONN) ?
|
||||
"Socket is not connected" :
|
||||
(no_ == WSAESHUTDOWN) ?
|
||||
"Can't send after socket shutdown" :
|
||||
(no_ == WSAETOOMANYREFS) ?
|
||||
"Too many references can't splice" :
|
||||
(no_ == WSAETIMEDOUT) ?
|
||||
"Connection timed out" :
|
||||
(no_ == WSAECONNREFUSED) ?
|
||||
"Connection refused" :
|
||||
(no_ == WSAELOOP) ?
|
||||
"Too many levels of symbolic links" :
|
||||
(no_ == WSAENAMETOOLONG) ?
|
||||
"File name too long" :
|
||||
(no_ == WSAEHOSTDOWN) ?
|
||||
"Host is down" :
|
||||
(no_ == WSAEHOSTUNREACH) ?
|
||||
"No Route to Host" :
|
||||
(no_ == WSAENOTEMPTY) ?
|
||||
"Directory not empty" :
|
||||
(no_ == WSAEPROCLIM) ?
|
||||
"Too many processes" :
|
||||
(no_ == WSAEUSERS) ?
|
||||
"Too many users" :
|
||||
(no_ == WSAEDQUOT) ?
|
||||
"Disc Quota Exceeded" :
|
||||
(no_ == WSAESTALE) ?
|
||||
"Stale NFS file handle" :
|
||||
(no_ == WSAEREMOTE) ?
|
||||
"Too many levels of remote in path" :
|
||||
(no_ == WSASYSNOTREADY) ?
|
||||
"Network SubSystem is unavailable" :
|
||||
(no_ == WSAVERNOTSUPPORTED) ?
|
||||
"WINSOCK DLL Version out of range" :
|
||||
(no_ == WSANOTINITIALISED) ?
|
||||
"Successful WSASTARTUP not yet performed" :
|
||||
(no_ == WSAHOST_NOT_FOUND) ?
|
||||
"Host not found" :
|
||||
(no_ == WSATRY_AGAIN) ?
|
||||
"Non-Authoritative Host not found" :
|
||||
(no_ == WSANO_RECOVERY) ?
|
||||
"Non-Recoverable errors: FORMERR REFUSED NOTIMP" :
|
||||
(no_ == WSANO_DATA) ?
|
||||
"Valid name no data record of requested" :
|
||||
"error not defined";
|
||||
return (no_ == WSABASEERR)
|
||||
? "No Error"
|
||||
: (no_ == WSAEINTR)
|
||||
? "Interrupted system call"
|
||||
: (no_ == WSAEBADF)
|
||||
? "Bad file number"
|
||||
: (no_ == WSAEACCES)
|
||||
? "Permission denied"
|
||||
: (no_ == WSAEFAULT)
|
||||
? "Bad address"
|
||||
: (no_ == WSAEINVAL)
|
||||
? "Invalid argument"
|
||||
: (no_ == WSAEMFILE)
|
||||
? "Too many open files"
|
||||
: (no_ == WSAEWOULDBLOCK)
|
||||
? wsae_wouldblock_string
|
||||
: (no_ == WSAEINPROGRESS)
|
||||
? "Operation now in progress"
|
||||
: (no_ == WSAEALREADY)
|
||||
? "Operation already in "
|
||||
"progress"
|
||||
: (no_ == WSAENOTSOCK)
|
||||
? "Socket operation on "
|
||||
"non-socket"
|
||||
: (no_ == WSAEDESTADDRREQ)
|
||||
? "Destination "
|
||||
"address required"
|
||||
: (no_ == WSAEMSGSIZE)
|
||||
? "Message too "
|
||||
"long"
|
||||
: (no_
|
||||
== WSAEPROTOTYPE)
|
||||
? "Protocol "
|
||||
"wrong type "
|
||||
"for socket"
|
||||
: (no_
|
||||
== WSAENOPROTOOPT)
|
||||
? "Bad "
|
||||
"protoco"
|
||||
"l "
|
||||
"option"
|
||||
: (no_
|
||||
== WSAEPROTONOSUPPORT)
|
||||
? "Pro"
|
||||
"toc"
|
||||
"ol "
|
||||
"not"
|
||||
" su"
|
||||
"ppo"
|
||||
"rte"
|
||||
"d"
|
||||
: (no_
|
||||
== WSAESOCKTNOSUPPORT)
|
||||
? "Socket type not supported"
|
||||
: (no_
|
||||
== WSAEOPNOTSUPP)
|
||||
? "Operation not supported on socket"
|
||||
: (no_
|
||||
== WSAEPFNOSUPPORT)
|
||||
? "Protocol family not supported"
|
||||
: (no_
|
||||
== WSAEAFNOSUPPORT)
|
||||
? "Address family not supported by protocol family"
|
||||
: (no_ == WSAEADDRINUSE) ? "Address already in use"
|
||||
: (no_ == WSAEADDRNOTAVAIL) ? "Can't assign requested address"
|
||||
: (no_ == WSAENETDOWN) ? "Network is down"
|
||||
: (no_ == WSAENETUNREACH) ? "Network is unreachable"
|
||||
: (no_ == WSAENETRESET) ? "Net dropped connection or reset"
|
||||
: (no_ == WSAECONNABORTED) ? "Software caused connection abort"
|
||||
: (no_ == WSAECONNRESET) ? "Connection reset by peer"
|
||||
: (no_
|
||||
== WSAENOBUFS)
|
||||
? "No buffer space available"
|
||||
: (no_ == WSAEISCONN) ? "Socket is already connected"
|
||||
: (no_
|
||||
== WSAENOTCONN)
|
||||
? "Socket is not connected"
|
||||
: (no_ == WSAESHUTDOWN) ? "Can't send after socket shutdown"
|
||||
: (no_ == WSAETOOMANYREFS) ? "Too many references can't splice"
|
||||
: (no_ == WSAETIMEDOUT) ? "Connection timed out"
|
||||
: (no_
|
||||
== WSAECONNREFUSED)
|
||||
? "Connection refused"
|
||||
: (no_
|
||||
== WSAELOOP)
|
||||
? "Too many levels of symbolic links"
|
||||
: (no_
|
||||
== WSAENAMETOOLONG)
|
||||
? "File name too long"
|
||||
: (no_ == WSAEHOSTDOWN) ? "Host is down"
|
||||
: (no_
|
||||
== WSAEHOSTUNREACH)
|
||||
? "No Route to Host"
|
||||
: (no_ == WSAENOTEMPTY) ? "Directory not empty"
|
||||
: (no_ == WSAEPROCLIM) ? "Too many processes"
|
||||
: (
|
||||
no_
|
||||
== WSAEUSERS)
|
||||
? "Too many users"
|
||||
: (no_
|
||||
== WSAEDQUOT)
|
||||
? "Disc Quota Exceeded"
|
||||
: (no_
|
||||
== WSAESTALE)
|
||||
? "Stale NFS file handle"
|
||||
: (no_ == WSAEREMOTE) ? "Too many levels of remote in path"
|
||||
: (no_
|
||||
== WSASYSNOTREADY)
|
||||
? "Network SubSystem is unavailable"
|
||||
: (no_ == WSAVERNOTSUPPORTED) ? "WINSOCK DLL Version out of range"
|
||||
: (no_
|
||||
== WSANOTINITIALISED)
|
||||
? "Successful WSASTARTUP not yet performed"
|
||||
: (no_ == WSAHOST_NOT_FOUND) ? "Host not found"
|
||||
: (no_
|
||||
== WSATRY_AGAIN)
|
||||
? "Non-Authoritative Host not found"
|
||||
: (no_ == WSANO_RECOVERY) ? "Non-Recoverable errors: FORMERR REFUSED NOTIMP"
|
||||
: (no_
|
||||
== WSANO_DATA)
|
||||
? "Valid name no data record of requested"
|
||||
: "error not defined";
|
||||
}
|
||||
|
||||
void zmq::win_error (char *buffer_, size_t buffer_size_)
|
||||
{
|
||||
DWORD errcode = GetLastError ();
|
||||
#if defined _WIN32_WCE
|
||||
DWORD rc = FormatMessageW (FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errcode, MAKELANGID(LANG_NEUTRAL,
|
||||
SUBLANG_DEFAULT), (LPWSTR)buffer_, buffer_size_ / sizeof(wchar_t), NULL);
|
||||
DWORD rc = FormatMessageW (
|
||||
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errcode,
|
||||
MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT), (LPWSTR) buffer_,
|
||||
buffer_size_ / sizeof (wchar_t), NULL);
|
||||
#else
|
||||
DWORD rc = FormatMessageA (FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errcode, MAKELANGID(LANG_NEUTRAL,
|
||||
SUBLANG_DEFAULT), buffer_, (DWORD) buffer_size_, NULL);
|
||||
DWORD rc = FormatMessageA (
|
||||
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errcode,
|
||||
MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT), buffer_, (DWORD) buffer_size_,
|
||||
NULL);
|
||||
#endif
|
||||
zmq_assert (rc);
|
||||
}
|
||||
@ -224,158 +244,158 @@ void zmq::win_error (char *buffer_, size_t buffer_size_)
|
||||
int zmq::wsa_error_to_errno (int errcode)
|
||||
{
|
||||
switch (errcode) {
|
||||
// 10004 - Interrupted system call.
|
||||
case WSAEINTR:
|
||||
return EINTR;
|
||||
// 10009 - File handle is not valid.
|
||||
case WSAEBADF:
|
||||
return EBADF;
|
||||
// 10013 - Permission denied.
|
||||
case WSAEACCES:
|
||||
return EACCES;
|
||||
// 10014 - Bad address.
|
||||
case WSAEFAULT:
|
||||
return EFAULT;
|
||||
// 10022 - Invalid argument.
|
||||
case WSAEINVAL:
|
||||
return EINVAL;
|
||||
// 10024 - Too many open files.
|
||||
case WSAEMFILE:
|
||||
return EMFILE;
|
||||
// 10035 - Operation would block.
|
||||
case WSAEWOULDBLOCK:
|
||||
return EBUSY;
|
||||
// 10036 - Operation now in progress.
|
||||
case WSAEINPROGRESS:
|
||||
return EAGAIN;
|
||||
// 10037 - Operation already in progress.
|
||||
case WSAEALREADY:
|
||||
return EAGAIN;
|
||||
// 10038 - Socket operation on non-socket.
|
||||
case WSAENOTSOCK:
|
||||
return ENOTSOCK;
|
||||
// 10039 - Destination address required.
|
||||
case WSAEDESTADDRREQ:
|
||||
return EFAULT;
|
||||
// 10040 - Message too long.
|
||||
case WSAEMSGSIZE:
|
||||
return EMSGSIZE;
|
||||
// 10041 - Protocol wrong type for socket.
|
||||
case WSAEPROTOTYPE:
|
||||
return EFAULT;
|
||||
// 10042 - Bad protocol option.
|
||||
case WSAENOPROTOOPT:
|
||||
return EINVAL;
|
||||
// 10043 - Protocol not supported.
|
||||
case WSAEPROTONOSUPPORT:
|
||||
return EPROTONOSUPPORT;
|
||||
// 10044 - Socket type not supported.
|
||||
case WSAESOCKTNOSUPPORT:
|
||||
return EFAULT;
|
||||
// 10045 - Operation not supported on socket.
|
||||
case WSAEOPNOTSUPP:
|
||||
return EFAULT;
|
||||
// 10046 - Protocol family not supported.
|
||||
case WSAEPFNOSUPPORT:
|
||||
return EPROTONOSUPPORT;
|
||||
// 10047 - Address family not supported by protocol family.
|
||||
case WSAEAFNOSUPPORT:
|
||||
return EAFNOSUPPORT;
|
||||
// 10048 - Address already in use.
|
||||
case WSAEADDRINUSE:
|
||||
return EADDRINUSE;
|
||||
// 10049 - Cannot assign requested address.
|
||||
case WSAEADDRNOTAVAIL:
|
||||
return EADDRNOTAVAIL;
|
||||
// 10050 - Network is down.
|
||||
case WSAENETDOWN:
|
||||
return ENETDOWN;
|
||||
// 10051 - Network is unreachable.
|
||||
case WSAENETUNREACH:
|
||||
return ENETUNREACH;
|
||||
// 10052 - Network dropped connection on reset.
|
||||
case WSAENETRESET:
|
||||
return ENETRESET;
|
||||
// 10053 - Software caused connection abort.
|
||||
case WSAECONNABORTED:
|
||||
return ECONNABORTED;
|
||||
// 10054 - Connection reset by peer.
|
||||
case WSAECONNRESET:
|
||||
return ECONNRESET;
|
||||
// 10055 - No buffer space available.
|
||||
case WSAENOBUFS:
|
||||
return ENOBUFS;
|
||||
// 10056 - Socket is already connected.
|
||||
case WSAEISCONN:
|
||||
return EFAULT;
|
||||
// 10057 - Socket is not connected.
|
||||
case WSAENOTCONN:
|
||||
return ENOTCONN;
|
||||
// 10058 - Can't send after socket shutdown.
|
||||
case WSAESHUTDOWN:
|
||||
return EFAULT;
|
||||
// 10059 - Too many references can't splice.
|
||||
case WSAETOOMANYREFS:
|
||||
return EFAULT;
|
||||
// 10060 - Connection timed out.
|
||||
case WSAETIMEDOUT:
|
||||
return ETIMEDOUT;
|
||||
// 10061 - Connection refused.
|
||||
case WSAECONNREFUSED:
|
||||
return ECONNREFUSED;
|
||||
// 10062 - Too many levels of symbolic links.
|
||||
case WSAELOOP:
|
||||
return EFAULT;
|
||||
// 10063 - File name too long.
|
||||
case WSAENAMETOOLONG:
|
||||
return EFAULT;
|
||||
// 10064 - Host is down.
|
||||
case WSAEHOSTDOWN:
|
||||
return EAGAIN;
|
||||
// 10065 - No route to host.
|
||||
case WSAEHOSTUNREACH:
|
||||
return EHOSTUNREACH;
|
||||
// 10066 - Directory not empty.
|
||||
case WSAENOTEMPTY:
|
||||
return EFAULT;
|
||||
// 10067 - Too many processes.
|
||||
case WSAEPROCLIM:
|
||||
return EFAULT;
|
||||
// 10068 - Too many users.
|
||||
case WSAEUSERS:
|
||||
return EFAULT;
|
||||
// 10069 - Disc Quota Exceeded.
|
||||
case WSAEDQUOT:
|
||||
return EFAULT;
|
||||
// 10070 - Stale NFS file handle.
|
||||
case WSAESTALE:
|
||||
return EFAULT;
|
||||
// 10071 - Too many levels of remote in path.
|
||||
case WSAEREMOTE:
|
||||
return EFAULT;
|
||||
// 10091 - Network SubSystem is unavailable.
|
||||
case WSASYSNOTREADY:
|
||||
return EFAULT;
|
||||
// 10092 - WINSOCK DLL Version out of range.
|
||||
case WSAVERNOTSUPPORTED:
|
||||
return EFAULT;
|
||||
// 10093 - Successful WSASTARTUP not yet performed.
|
||||
case WSANOTINITIALISED:
|
||||
return EFAULT;
|
||||
// 11001 - Host not found.
|
||||
case WSAHOST_NOT_FOUND:
|
||||
return EFAULT;
|
||||
// 11002 - Non-Authoritative Host not found.
|
||||
case WSATRY_AGAIN:
|
||||
return EFAULT;
|
||||
// 11003 - Non-Recoverable errors: FORMERR REFUSED NOTIMP.
|
||||
case WSANO_RECOVERY:
|
||||
return EFAULT;
|
||||
// 11004 - Valid name no data record of requested.
|
||||
case WSANO_DATA:
|
||||
return EFAULT;
|
||||
default:
|
||||
wsa_assert (false);
|
||||
// 10004 - Interrupted system call.
|
||||
case WSAEINTR:
|
||||
return EINTR;
|
||||
// 10009 - File handle is not valid.
|
||||
case WSAEBADF:
|
||||
return EBADF;
|
||||
// 10013 - Permission denied.
|
||||
case WSAEACCES:
|
||||
return EACCES;
|
||||
// 10014 - Bad address.
|
||||
case WSAEFAULT:
|
||||
return EFAULT;
|
||||
// 10022 - Invalid argument.
|
||||
case WSAEINVAL:
|
||||
return EINVAL;
|
||||
// 10024 - Too many open files.
|
||||
case WSAEMFILE:
|
||||
return EMFILE;
|
||||
// 10035 - Operation would block.
|
||||
case WSAEWOULDBLOCK:
|
||||
return EBUSY;
|
||||
// 10036 - Operation now in progress.
|
||||
case WSAEINPROGRESS:
|
||||
return EAGAIN;
|
||||
// 10037 - Operation already in progress.
|
||||
case WSAEALREADY:
|
||||
return EAGAIN;
|
||||
// 10038 - Socket operation on non-socket.
|
||||
case WSAENOTSOCK:
|
||||
return ENOTSOCK;
|
||||
// 10039 - Destination address required.
|
||||
case WSAEDESTADDRREQ:
|
||||
return EFAULT;
|
||||
// 10040 - Message too long.
|
||||
case WSAEMSGSIZE:
|
||||
return EMSGSIZE;
|
||||
// 10041 - Protocol wrong type for socket.
|
||||
case WSAEPROTOTYPE:
|
||||
return EFAULT;
|
||||
// 10042 - Bad protocol option.
|
||||
case WSAENOPROTOOPT:
|
||||
return EINVAL;
|
||||
// 10043 - Protocol not supported.
|
||||
case WSAEPROTONOSUPPORT:
|
||||
return EPROTONOSUPPORT;
|
||||
// 10044 - Socket type not supported.
|
||||
case WSAESOCKTNOSUPPORT:
|
||||
return EFAULT;
|
||||
// 10045 - Operation not supported on socket.
|
||||
case WSAEOPNOTSUPP:
|
||||
return EFAULT;
|
||||
// 10046 - Protocol family not supported.
|
||||
case WSAEPFNOSUPPORT:
|
||||
return EPROTONOSUPPORT;
|
||||
// 10047 - Address family not supported by protocol family.
|
||||
case WSAEAFNOSUPPORT:
|
||||
return EAFNOSUPPORT;
|
||||
// 10048 - Address already in use.
|
||||
case WSAEADDRINUSE:
|
||||
return EADDRINUSE;
|
||||
// 10049 - Cannot assign requested address.
|
||||
case WSAEADDRNOTAVAIL:
|
||||
return EADDRNOTAVAIL;
|
||||
// 10050 - Network is down.
|
||||
case WSAENETDOWN:
|
||||
return ENETDOWN;
|
||||
// 10051 - Network is unreachable.
|
||||
case WSAENETUNREACH:
|
||||
return ENETUNREACH;
|
||||
// 10052 - Network dropped connection on reset.
|
||||
case WSAENETRESET:
|
||||
return ENETRESET;
|
||||
// 10053 - Software caused connection abort.
|
||||
case WSAECONNABORTED:
|
||||
return ECONNABORTED;
|
||||
// 10054 - Connection reset by peer.
|
||||
case WSAECONNRESET:
|
||||
return ECONNRESET;
|
||||
// 10055 - No buffer space available.
|
||||
case WSAENOBUFS:
|
||||
return ENOBUFS;
|
||||
// 10056 - Socket is already connected.
|
||||
case WSAEISCONN:
|
||||
return EFAULT;
|
||||
// 10057 - Socket is not connected.
|
||||
case WSAENOTCONN:
|
||||
return ENOTCONN;
|
||||
// 10058 - Can't send after socket shutdown.
|
||||
case WSAESHUTDOWN:
|
||||
return EFAULT;
|
||||
// 10059 - Too many references can't splice.
|
||||
case WSAETOOMANYREFS:
|
||||
return EFAULT;
|
||||
// 10060 - Connection timed out.
|
||||
case WSAETIMEDOUT:
|
||||
return ETIMEDOUT;
|
||||
// 10061 - Connection refused.
|
||||
case WSAECONNREFUSED:
|
||||
return ECONNREFUSED;
|
||||
// 10062 - Too many levels of symbolic links.
|
||||
case WSAELOOP:
|
||||
return EFAULT;
|
||||
// 10063 - File name too long.
|
||||
case WSAENAMETOOLONG:
|
||||
return EFAULT;
|
||||
// 10064 - Host is down.
|
||||
case WSAEHOSTDOWN:
|
||||
return EAGAIN;
|
||||
// 10065 - No route to host.
|
||||
case WSAEHOSTUNREACH:
|
||||
return EHOSTUNREACH;
|
||||
// 10066 - Directory not empty.
|
||||
case WSAENOTEMPTY:
|
||||
return EFAULT;
|
||||
// 10067 - Too many processes.
|
||||
case WSAEPROCLIM:
|
||||
return EFAULT;
|
||||
// 10068 - Too many users.
|
||||
case WSAEUSERS:
|
||||
return EFAULT;
|
||||
// 10069 - Disc Quota Exceeded.
|
||||
case WSAEDQUOT:
|
||||
return EFAULT;
|
||||
// 10070 - Stale NFS file handle.
|
||||
case WSAESTALE:
|
||||
return EFAULT;
|
||||
// 10071 - Too many levels of remote in path.
|
||||
case WSAEREMOTE:
|
||||
return EFAULT;
|
||||
// 10091 - Network SubSystem is unavailable.
|
||||
case WSASYSNOTREADY:
|
||||
return EFAULT;
|
||||
// 10092 - WINSOCK DLL Version out of range.
|
||||
case WSAVERNOTSUPPORTED:
|
||||
return EFAULT;
|
||||
// 10093 - Successful WSASTARTUP not yet performed.
|
||||
case WSANOTINITIALISED:
|
||||
return EFAULT;
|
||||
// 11001 - Host not found.
|
||||
case WSAHOST_NOT_FOUND:
|
||||
return EFAULT;
|
||||
// 11002 - Non-Authoritative Host not found.
|
||||
case WSATRY_AGAIN:
|
||||
return EFAULT;
|
||||
// 11003 - Non-Recoverable errors: FORMERR REFUSED NOTIMP.
|
||||
case WSANO_RECOVERY:
|
||||
return EFAULT;
|
||||
// 11004 - Valid name no data record of requested.
|
||||
case WSANO_DATA:
|
||||
return EFAULT;
|
||||
default:
|
||||
wsa_assert (false);
|
||||
}
|
||||
// Not reachable
|
||||
return 0;
|
||||
@ -417,9 +437,9 @@ void zmq::print_backtrace (void)
|
||||
|
||||
rc = unw_get_proc_name (&cursor, func_name, 256, &offset);
|
||||
if (rc == -UNW_ENOINFO)
|
||||
strcpy(func_name, "?");
|
||||
strcpy (func_name, "?");
|
||||
|
||||
addr = (void *)(p_info.start_ip + offset);
|
||||
addr = (void *) (p_info.start_ip + offset);
|
||||
|
||||
if (dladdr (addr, &dl_info) && dl_info.dli_fname)
|
||||
file_name = dl_info.dli_fname;
|
||||
@ -433,7 +453,7 @@ void zmq::print_backtrace (void)
|
||||
free (demangled_name);
|
||||
}
|
||||
puts ("");
|
||||
|
||||
|
||||
fflush (stdout);
|
||||
mtx.unlock ();
|
||||
}
|
||||
|
src/err.hpp (158 changed lines)
@ -55,58 +55,60 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
const char *errno_to_string (int errno_);
|
||||
void zmq_abort (const char *errmsg_);
|
||||
void print_backtrace (void);
|
||||
const char *errno_to_string (int errno_);
|
||||
void zmq_abort (const char *errmsg_);
|
||||
void print_backtrace (void);
|
||||
}
|
||||
|
||||
#ifdef ZMQ_HAVE_WINDOWS
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
const char *wsa_error ();
|
||||
const char *wsa_error_no (int no_, const char * wsae_wouldblock_string = "Operation would block");
|
||||
void win_error (char *buffer_, size_t buffer_size_);
|
||||
int wsa_error_to_errno (int errcode);
|
||||
const char *wsa_error ();
|
||||
const char *
|
||||
wsa_error_no (int no_,
|
||||
const char *wsae_wouldblock_string = "Operation would block");
|
||||
void win_error (char *buffer_, size_t buffer_size_);
|
||||
int wsa_error_to_errno (int errcode);
|
||||
}
|
||||
|
||||
// Provides convenient way to check WSA-style errors on Windows.
|
||||
#define wsa_assert(x) \
|
||||
do {\
|
||||
if (unlikely (!(x))) {\
|
||||
const char *errstr = zmq::wsa_error ();\
|
||||
if (errstr != NULL) {\
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", errstr, \
|
||||
__FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort (errstr);\
|
||||
}\
|
||||
}\
|
||||
#define wsa_assert(x) \
|
||||
do { \
|
||||
if (unlikely (!(x))) { \
|
||||
const char *errstr = zmq::wsa_error (); \
|
||||
if (errstr != NULL) { \
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", errstr, \
|
||||
__FILE__, __LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort (errstr); \
|
||||
} \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
// Provides convenient way to assert on WSA-style errors on Windows.
|
||||
#define wsa_assert_no(no) \
|
||||
do {\
|
||||
const char *errstr = zmq::wsa_error_no (no);\
|
||||
if (errstr != NULL) {\
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", errstr, \
|
||||
__FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort (errstr);\
|
||||
}\
|
||||
#define wsa_assert_no(no) \
|
||||
do { \
|
||||
const char *errstr = zmq::wsa_error_no (no); \
|
||||
if (errstr != NULL) { \
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", errstr, \
|
||||
__FILE__, __LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort (errstr); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
// Provides convenient way to check GetLastError-style errors on Windows.
|
||||
#define win_assert(x) \
|
||||
do {\
|
||||
if (unlikely (!(x))) {\
|
||||
char errstr [256];\
|
||||
zmq::win_error (errstr, 256);\
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", errstr, \
|
||||
__FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort (errstr);\
|
||||
}\
|
||||
#define win_assert(x) \
|
||||
do { \
|
||||
if (unlikely (!(x))) { \
|
||||
char errstr[256]; \
|
||||
zmq::win_error (errstr, 256); \
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", errstr, \
|
||||
__FILE__, __LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort (errstr); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
#endif
|
||||
@ -114,60 +116,58 @@ namespace zmq
|
||||
// This macro works in exactly the same way as the normal assert. It is used
|
||||
// in its stead because standard assert on Win32 in broken - it prints nothing
|
||||
// when used within the scope of JNI library.
|
||||
#define zmq_assert(x) \
|
||||
do {\
|
||||
if (unlikely (!(x))) {\
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", #x, \
|
||||
__FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort (#x);\
|
||||
}\
|
||||
#define zmq_assert(x) \
|
||||
do { \
|
||||
if (unlikely (!(x))) { \
|
||||
fprintf (stderr, "Assertion failed: %s (%s:%d)\n", #x, __FILE__, \
|
||||
__LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort (#x); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
// Provides convenient way to check for errno-style errors.
|
||||
#define errno_assert(x) \
|
||||
do {\
|
||||
if (unlikely (!(x))) {\
|
||||
const char *errstr = strerror (errno);\
|
||||
fprintf (stderr, "%s (%s:%d)\n", errstr, __FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort (errstr);\
|
||||
}\
|
||||
#define errno_assert(x) \
|
||||
do { \
|
||||
if (unlikely (!(x))) { \
|
||||
const char *errstr = strerror (errno); \
|
||||
fprintf (stderr, "%s (%s:%d)\n", errstr, __FILE__, __LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort (errstr); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
// Provides convenient way to check for POSIX errors.
|
||||
#define posix_assert(x) \
|
||||
do {\
|
||||
if (unlikely (x)) {\
|
||||
const char *errstr = strerror (x);\
|
||||
fprintf (stderr, "%s (%s:%d)\n", errstr, __FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort (errstr);\
|
||||
}\
|
||||
#define posix_assert(x) \
|
||||
do { \
|
||||
if (unlikely (x)) { \
|
||||
const char *errstr = strerror (x); \
|
||||
fprintf (stderr, "%s (%s:%d)\n", errstr, __FILE__, __LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort (errstr); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
// Provides convenient way to check for errors from getaddrinfo.
|
||||
#define gai_assert(x) \
|
||||
do {\
|
||||
if (unlikely (x)) {\
|
||||
const char *errstr = gai_strerror (x);\
|
||||
fprintf (stderr, "%s (%s:%d)\n", errstr, __FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort (errstr);\
|
||||
}\
|
||||
#define gai_assert(x) \
|
||||
do { \
|
||||
if (unlikely (x)) { \
|
||||
const char *errstr = gai_strerror (x); \
|
||||
fprintf (stderr, "%s (%s:%d)\n", errstr, __FILE__, __LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort (errstr); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
// Provides convenient way to check whether memory allocation have succeeded.
|
||||
#define alloc_assert(x) \
|
||||
do {\
|
||||
if (unlikely (!x)) {\
|
||||
fprintf (stderr, "FATAL ERROR: OUT OF MEMORY (%s:%d)\n",\
|
||||
__FILE__, __LINE__);\
|
||||
fflush (stderr);\
|
||||
zmq::zmq_abort ("FATAL ERROR: OUT OF MEMORY");\
|
||||
}\
|
||||
#define alloc_assert(x) \
|
||||
do { \
|
||||
if (unlikely (!x)) { \
|
||||
fprintf (stderr, "FATAL ERROR: OUT OF MEMORY (%s:%d)\n", __FILE__, \
|
||||
__LINE__); \
|
||||
fflush (stderr); \
|
||||
zmq::zmq_abort ("FATAL ERROR: OUT OF MEMORY"); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
src/fd.hpp (23 changed lines)
@@ -37,16 +37,25 @@
namespace zmq
{
#ifdef ZMQ_HAVE_WINDOWS
#if defined _MSC_VER && _MSC_VER <= 1400
typedef UINT_PTR fd_t;
enum
{
    retired_fd = (fd_t) (~0)
};
#else
typedef SOCKET fd_t;
enum
{
    retired_fd = (fd_t) INVALID_SOCKET
};
#endif
#else
typedef int fd_t;
enum
{
    retired_fd = -1
};
#endif
}
#endif
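retired_fd above is a per-platform sentinel, and epoll_t uses it to mark an entry as dead (rm_fd) without erasing it while the dispatch loop may still reference it; retired entries are collected later. A tiny standalone sketch of that idea follows, using plain int descriptors; the entry_t struct and the literal fd values are hypothetical and only for illustration.

#include <vector>
#include <cstdio>

typedef int fd_t;
enum
{
    retired_fd = -1
};

struct entry_t
{
    fd_t fd;
};

int main ()
{
    std::vector<entry_t> entries;
    entry_t a = {3}, b = {4};
    entries.push_back (a);
    entries.push_back (b);

    entries[0].fd = retired_fd; // "rm_fd": retire the entry, don't erase it yet

    for (size_t i = 0; i < entries.size (); i++) {
        if (entries[i].fd == retired_fd)
            continue; // the dispatch loop simply skips retired entries
        printf ("dispatch fd %d\n", entries[i].fd);
    }
    return 0;
}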
src/fq.cpp (21 changed lines)
@@ -33,11 +33,7 @@
#include "err.hpp"
#include "msg.hpp"

zmq::fq_t::fq_t () : active (0), last_in (NULL), current (0), more (false)
{
}

@@ -93,20 +89,19 @@ int zmq::fq_t::recvpipe (msg_t *msg_, pipe_t **pipe_)

    // Round-robin over the pipes to get the next message.
    while (active > 0) {
        // Try to fetch new message. If we've already read part of the message
        // subsequent part should be immediately available.
        bool fetched = pipes[current]->read (msg_);

        // Note that when message is not fetched, current pipe is deactivated
        // and replaced by another active pipe. Thus we don't have to increase
        // the 'current' pointer.
        if (fetched) {
            if (pipe_)
                *pipe_ = pipes[current];
            more = msg_->flags () & msg_t::more ? true : false;
            if (!more) {
                last_in = pipes[current];
                current = (current + 1) % active;
            }
            return 0;
@@ -142,7 +137,7 @@ bool zmq::fq_t::has_in ()
    // get back to its original value. Otherwise it'll point to the first
    // pipe holding messages, skipping only pipes with no messages available.
    while (active > 0) {
        if (pipes[current]->check_read ())
            return true;

        // Deactivate the pipe.
@@ -157,7 +152,5 @@ bool zmq::fq_t::has_in ()

const zmq::blob_t &zmq::fq_t::get_credential () const
{
    return last_in ? last_in->get_credential () : saved_credential;
}
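fq_t rotates current over the active prefix of the pipes array, advances only after a complete (non-more) message, and swaps drained pipes out of the active region instead of erasing them. Not libzmq code: a standalone sketch of that round-robin-plus-deactivation scheme, with std::deque<int> standing in for pipes and the multipart detail ignored so the rotation happens after every item.

#include <algorithm>
#include <deque>
#include <vector>
#include <cstdio>

struct toy_fq_t
{
    std::vector<std::deque<int> > pipes;
    size_t active;
    size_t current;

    toy_fq_t () : active (0), current (0) {}

    void attach (const std::deque<int> &p)
    {
        pipes.push_back (p);
        std::swap (pipes[active], pipes.back ()); // keep it inside the active prefix
        active++;
    }

    bool recv (int *value_)
    {
        while (active > 0) {
            if (!pipes[current].empty ()) {
                *value_ = pipes[current].front ();
                pipes[current].pop_front ();
                current = (current + 1) % active; // fair: move on to the next pipe
                return true;
            }
            // Deactivate the drained pipe: swap it past the active region.
            active--;
            std::swap (pipes[current], pipes[active]);
            if (current == active)
                current = 0;
        }
        return false;
    }
};

int main ()
{
    toy_fq_t fq;
    std::deque<int> a, b;
    a.push_back (1);
    a.push_back (3);
    b.push_back (2);
    fq.attach (a);
    fq.attach (b);
    int v;
    while (fq.recv (&v))
        printf ("%d ", v); // prints 1 2 3
    printf ("\n");
    return 0;
}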
src/fq.hpp (76 changed lines)
@@ -37,56 +37,52 @@

namespace zmq
{
// Class manages a set of inbound pipes. On receive it performs fair
// queueing so that senders gone berserk won't cause denial of
// service for decent senders.
class fq_t
{
  public:
    fq_t ();
    ~fq_t ();

    void attach (pipe_t *pipe_);
    void activated (pipe_t *pipe_);
    void pipe_terminated (pipe_t *pipe_);

    int recv (msg_t *msg_);
    int recvpipe (msg_t *msg_, pipe_t **pipe_);
    bool has_in ();
    const blob_t &get_credential () const;

  private:
    // Inbound pipes.
    typedef array_t<pipe_t, 1> pipes_t;
    pipes_t pipes;

    // Number of active pipes. All the active pipes are located at the
    // beginning of the pipes array.
    pipes_t::size_type active;

    // Pointer to the last pipe we received message from.
    // NULL when no message has been received or the pipe
    // has terminated.
    pipe_t *last_in;

    // Index of the next bound pipe to read a message from.
    pipes_t::size_type current;

    // If true, part of a multipart message was already received, but
    // there are following parts still waiting in the current pipe.
    bool more;

    // Holds credential after the last_active_pipe has terminated.
    blob_t saved_credential;

    fq_t (const fq_t &);
    const fq_t &operator= (const fq_t &);
};
}

#endif
src/gather.cpp
@@ -68,7 +68,6 @@ int zmq::gather_t::xrecv (msg_t *msg_)

    // Drop any messages with more flag
    while (rc == 0 && msg_->flags () & msg_t::more) {
        // drop all frames of the current multi-frame message
        rc = fq.recvpipe (msg_, NULL);
src/gather.hpp
@@ -36,40 +36,33 @@

namespace zmq
{
class ctx_t;
class pipe_t;
class msg_t;
class io_thread_t;

class gather_t : public socket_base_t
{
  public:
    gather_t (zmq::ctx_t *parent_, uint32_t tid_, int sid_);
    ~gather_t ();

  protected:
    // Overrides of functions from socket_base_t.
    void xattach_pipe (zmq::pipe_t *pipe_, bool subscribe_to_all_);
    int xrecv (zmq::msg_t *msg_);
    bool xhas_in ();
    const blob_t &get_credential () const;
    void xread_activated (zmq::pipe_t *pipe_);
    void xpipe_terminated (zmq::pipe_t *pipe_);

  private:
    // Fair queueing object for inbound pipes.
    fq_t fq;

    gather_t (const gather_t &);
    const gather_t &operator= (const gather_t &);
};
}

#endif
@ -49,19 +49,22 @@ zmq::gssapi_client_t::gssapi_client_t (session_base_t *session_,
|
||||
mechs (),
|
||||
security_context_established (false)
|
||||
{
|
||||
const std::string::size_type service_size = options_.gss_service_principal.size();
|
||||
service_name = static_cast <char *>(malloc(service_size+1));
|
||||
assert(service_name);
|
||||
memcpy(service_name, options_.gss_service_principal.c_str(), service_size+1 );
|
||||
const std::string::size_type service_size =
|
||||
options_.gss_service_principal.size ();
|
||||
service_name = static_cast<char *> (malloc (service_size + 1));
|
||||
assert (service_name);
|
||||
memcpy (service_name, options_.gss_service_principal.c_str (),
|
||||
service_size + 1);
|
||||
|
||||
service_name_type = convert_nametype (options_.gss_service_principal_nt);
|
||||
maj_stat = GSS_S_COMPLETE;
|
||||
if(!options_.gss_principal.empty())
|
||||
{
|
||||
const std::string::size_type principal_size = options_.gss_principal.size();
|
||||
principal_name = static_cast <char *>(malloc(principal_size+1));
|
||||
assert(principal_name);
|
||||
memcpy(principal_name, options_.gss_principal.c_str(), principal_size+1 );
|
||||
if (!options_.gss_principal.empty ()) {
|
||||
const std::string::size_type principal_size =
|
||||
options_.gss_principal.size ();
|
||||
principal_name = static_cast<char *> (malloc (principal_size + 1));
|
||||
assert (principal_name);
|
||||
memcpy (principal_name, options_.gss_principal.c_str (),
|
||||
principal_size + 1);
|
||||
|
||||
gss_OID name_type = convert_nametype (options_.gss_principal_nt);
|
||||
if (acquire_credentials (principal_name, &cred, name_type) != 0)
|
||||
@ -74,16 +77,16 @@ zmq::gssapi_client_t::gssapi_client_t (session_base_t *session_,
|
||||
|
||||
zmq::gssapi_client_t::~gssapi_client_t ()
|
||||
{
|
||||
if(service_name)
|
||||
if (service_name)
|
||||
free (service_name);
|
||||
if(cred)
|
||||
gss_release_cred(&min_stat, &cred);
|
||||
if (cred)
|
||||
gss_release_cred (&min_stat, &cred);
|
||||
}
|
||||
|
||||
int zmq::gssapi_client_t::next_handshake_command (msg_t *msg_)
|
||||
{
|
||||
if (state == send_ready) {
|
||||
int rc = produce_ready(msg_);
|
||||
int rc = produce_ready (msg_);
|
||||
if (rc == 0)
|
||||
state = connected;
|
||||
|
||||
@ -107,8 +110,7 @@ int zmq::gssapi_client_t::next_handshake_command (msg_t *msg_)
|
||||
if (maj_stat == GSS_S_COMPLETE) {
|
||||
security_context_established = true;
|
||||
state = recv_ready;
|
||||
}
|
||||
else
|
||||
} else
|
||||
state = recv_next_token;
|
||||
|
||||
return 0;
|
||||
@ -117,7 +119,7 @@ int zmq::gssapi_client_t::next_handshake_command (msg_t *msg_)
|
||||
int zmq::gssapi_client_t::process_handshake_command (msg_t *msg_)
|
||||
{
|
||||
if (state == recv_ready) {
|
||||
int rc = process_ready(msg_);
|
||||
int rc = process_ready (msg_);
|
||||
if (rc == 0)
|
||||
state = send_ready;
|
||||
|
||||
@ -126,8 +128,7 @@ int zmq::gssapi_client_t::process_handshake_command (msg_t *msg_)
|
||||
|
||||
if (state != recv_next_token) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
@ -151,7 +152,7 @@ int zmq::gssapi_client_t::encode (msg_t *msg_)
|
||||
zmq_assert (state == connected);
|
||||
|
||||
if (do_encryption)
|
||||
return encode_message (msg_);
|
||||
return encode_message (msg_);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -161,14 +162,14 @@ int zmq::gssapi_client_t::decode (msg_t *msg_)
|
||||
zmq_assert (state == connected);
|
||||
|
||||
if (do_encryption)
|
||||
return decode_message (msg_);
|
||||
return decode_message (msg_);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
zmq::mechanism_t::status_t zmq::gssapi_client_t::status () const
|
||||
{
|
||||
return state == connected? mechanism_t::ready: mechanism_t::handshaking;
|
||||
return state == connected ? mechanism_t::ready : mechanism_t::handshaking;
|
||||
}
|
||||
|
||||
int zmq::gssapi_client_t::initialize_context ()
|
||||
@ -180,22 +181,20 @@ int zmq::gssapi_client_t::initialize_context ()
|
||||
// First time through, import service_name into target_name
|
||||
if (target_name == GSS_C_NO_NAME) {
|
||||
send_tok.value = service_name;
|
||||
send_tok.length = strlen(service_name) + 1;
|
||||
OM_uint32 maj = gss_import_name(&min_stat, &send_tok,
|
||||
service_name_type,
|
||||
&target_name);
|
||||
send_tok.length = strlen (service_name) + 1;
|
||||
OM_uint32 maj = gss_import_name (&min_stat, &send_tok,
|
||||
service_name_type, &target_name);
|
||||
|
||||
if (maj != GSS_S_COMPLETE)
|
||||
return -1;
|
||||
}
|
||||
|
||||
maj_stat = gss_init_sec_context(&init_sec_min_stat, cred, &context,
|
||||
target_name, mechs.elements,
|
||||
gss_flags, 0, NULL, token_ptr, NULL,
|
||||
&send_tok, &ret_flags, NULL);
|
||||
maj_stat = gss_init_sec_context (
|
||||
&init_sec_min_stat, cred, &context, target_name, mechs.elements,
|
||||
gss_flags, 0, NULL, token_ptr, NULL, &send_tok, &ret_flags, NULL);
|
||||
|
||||
if (token_ptr != GSS_C_NO_BUFFER)
|
||||
free(recv_tok.value);
|
||||
free (recv_tok.value);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -203,18 +202,18 @@ int zmq::gssapi_client_t::initialize_context ()
|
||||
int zmq::gssapi_client_t::produce_next_token (msg_t *msg_)
|
||||
{
|
||||
if (send_tok.length != 0) { // Server expects another token
|
||||
if (produce_initiate(msg_, send_tok.value, send_tok.length) < 0) {
|
||||
gss_release_buffer(&min_stat, &send_tok);
|
||||
gss_release_name(&min_stat, &target_name);
|
||||
if (produce_initiate (msg_, send_tok.value, send_tok.length) < 0) {
|
||||
gss_release_buffer (&min_stat, &send_tok);
|
||||
gss_release_name (&min_stat, &target_name);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
gss_release_buffer(&min_stat, &send_tok);
|
||||
gss_release_buffer (&min_stat, &send_tok);
|
||||
|
||||
if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED) {
|
||||
gss_release_name(&min_stat, &target_name);
|
||||
gss_release_name (&min_stat, &target_name);
|
||||
if (context != GSS_C_NO_CONTEXT)
|
||||
gss_delete_sec_context(&min_stat, &context, GSS_C_NO_BUFFER);
|
||||
gss_delete_sec_context (&min_stat, &context, GSS_C_NO_BUFFER);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -224,8 +223,8 @@ int zmq::gssapi_client_t::produce_next_token (msg_t *msg_)
|
||||
int zmq::gssapi_client_t::process_next_token (msg_t *msg_)
|
||||
{
|
||||
if (maj_stat == GSS_S_CONTINUE_NEEDED) {
|
||||
if (process_initiate(msg_, &recv_tok.value, recv_tok.length) < 0) {
|
||||
gss_release_name(&min_stat, &target_name);
|
||||
if (process_initiate (msg_, &recv_tok.value, recv_tok.length) < 0) {
|
||||
gss_release_name (&min_stat, &target_name);
|
||||
return -1;
|
||||
}
|
||||
token_ptr = &recv_tok;
|
||||
|
@ -36,57 +36,55 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class msg_t;
|
||||
class session_base_t;
|
||||
|
||||
class msg_t;
|
||||
class session_base_t;
|
||||
class gssapi_client_t : public gssapi_mechanism_base_t
|
||||
{
|
||||
public:
|
||||
gssapi_client_t (session_base_t *session_, const options_t &options_);
|
||||
virtual ~gssapi_client_t ();
|
||||
|
||||
class gssapi_client_t : public gssapi_mechanism_base_t
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
virtual status_t status () const;
|
||||
|
||||
private:
|
||||
enum state_t
|
||||
{
|
||||
public:
|
||||
gssapi_client_t (session_base_t *session_, const options_t &options_);
|
||||
virtual ~gssapi_client_t ();
|
||||
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
virtual status_t status () const;
|
||||
|
||||
private:
|
||||
|
||||
enum state_t {
|
||||
call_next_init,
|
||||
send_next_token,
|
||||
recv_next_token,
|
||||
send_ready,
|
||||
recv_ready,
|
||||
connected
|
||||
};
|
||||
|
||||
// Human-readable principal name of the service we are connecting to
|
||||
char * service_name;
|
||||
|
||||
gss_OID service_name_type;
|
||||
|
||||
// Current FSM state
|
||||
state_t state;
|
||||
|
||||
// Points to either send_tok or recv_tok
|
||||
// during context initialization
|
||||
gss_buffer_desc *token_ptr;
|
||||
|
||||
// The desired underlying mechanism
|
||||
gss_OID_set_desc mechs;
|
||||
|
||||
// True iff client considers the server authenticated
|
||||
bool security_context_established;
|
||||
|
||||
int initialize_context ();
|
||||
int produce_next_token (msg_t *msg_);
|
||||
int process_next_token (msg_t *msg_);
|
||||
call_next_init,
|
||||
send_next_token,
|
||||
recv_next_token,
|
||||
send_ready,
|
||||
recv_ready,
|
||||
connected
|
||||
};
|
||||
|
||||
// Human-readable principal name of the service we are connecting to
|
||||
char *service_name;
|
||||
|
||||
gss_OID service_name_type;
|
||||
|
||||
// Current FSM state
|
||||
state_t state;
|
||||
|
||||
// Points to either send_tok or recv_tok
|
||||
// during context initialization
|
||||
gss_buffer_desc *token_ptr;
|
||||
|
||||
// The desired underlying mechanism
|
||||
gss_OID_set_desc mechs;
|
||||
|
||||
// True iff client considers the server authenticated
|
||||
bool security_context_established;
|
||||
|
||||
int initialize_context ();
|
||||
int produce_next_token (msg_t *msg_);
|
||||
int process_next_token (msg_t *msg_);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -41,8 +41,7 @@
|
||||
#include "wire.hpp"
|
||||
|
||||
zmq::gssapi_mechanism_base_t::gssapi_mechanism_base_t (
|
||||
session_base_t *session_,
|
||||
const options_t &options_) :
|
||||
session_base_t *session_, const options_t &options_) :
|
||||
mechanism_base_t (session_, options_),
|
||||
send_tok (),
|
||||
recv_tok (),
|
||||
@ -62,10 +61,10 @@ zmq::gssapi_mechanism_base_t::gssapi_mechanism_base_t (
|
||||
|
||||
zmq::gssapi_mechanism_base_t::~gssapi_mechanism_base_t ()
|
||||
{
|
||||
if(target_name)
|
||||
gss_release_name(&min_stat, &target_name);
|
||||
if(context)
|
||||
gss_delete_sec_context(&min_stat, &context, GSS_C_NO_BUFFER);
|
||||
if (target_name)
|
||||
gss_release_name (&min_stat, &target_name);
|
||||
if (context)
|
||||
gss_delete_sec_context (&min_stat, &context, GSS_C_NO_BUFFER);
|
||||
}
|
||||
|
||||
int zmq::gssapi_mechanism_base_t::encode_message (msg_t *msg_)
|
||||
@ -81,17 +80,18 @@ int zmq::gssapi_mechanism_base_t::encode_message (msg_t *msg_)
|
||||
if (msg_->flags () & msg_t::command)
|
||||
flags |= 0x02;
|
||||
|
||||
uint8_t *plaintext_buffer = static_cast <uint8_t *>(malloc(msg_->size ()+1));
|
||||
alloc_assert(plaintext_buffer);
|
||||
uint8_t *plaintext_buffer =
|
||||
static_cast<uint8_t *> (malloc (msg_->size () + 1));
|
||||
alloc_assert (plaintext_buffer);
|
||||
|
||||
plaintext_buffer[0] = flags;
|
||||
memcpy (plaintext_buffer+1, msg_->data(), msg_->size());
|
||||
memcpy (plaintext_buffer + 1, msg_->data (), msg_->size ());
|
||||
|
||||
plaintext.value = plaintext_buffer;
|
||||
plaintext.length = msg_->size ()+1;
|
||||
plaintext.length = msg_->size () + 1;
|
||||
|
||||
maj_stat = gss_wrap(&min_stat, context, 1, GSS_C_QOP_DEFAULT,
|
||||
&plaintext, &state, &wrapped);
|
||||
maj_stat = gss_wrap (&min_stat, context, 1, GSS_C_QOP_DEFAULT, &plaintext,
|
||||
&state, &wrapped);
|
||||
|
||||
zmq_assert (maj_stat == GSS_S_COMPLETE);
|
||||
zmq_assert (state);
|
||||
@ -103,14 +103,14 @@ int zmq::gssapi_mechanism_base_t::encode_message (msg_t *msg_)
|
||||
rc = msg_->init_size (8 + 4 + wrapped.length);
|
||||
zmq_assert (rc == 0);
|
||||
|
||||
uint8_t *ptr = static_cast <uint8_t *> (msg_->data ());
|
||||
uint8_t *ptr = static_cast<uint8_t *> (msg_->data ());
|
||||
|
||||
// Add command string
|
||||
memcpy (ptr, "\x07MESSAGE", 8);
|
||||
ptr += 8;
|
||||
|
||||
// Add token length
|
||||
put_uint32 (ptr, static_cast <uint32_t> (wrapped.length));
|
||||
put_uint32 (ptr, static_cast<uint32_t> (wrapped.length));
|
||||
ptr += 4;
|
||||
|
||||
// Add wrapped token value
|
||||
@ -124,7 +124,7 @@ int zmq::gssapi_mechanism_base_t::encode_message (msg_t *msg_)
|
||||
|
||||
int zmq::gssapi_mechanism_base_t::decode_message (msg_t *msg_)
|
||||
{
|
||||
const uint8_t *ptr = static_cast <uint8_t *> (msg_->data ());
|
||||
const uint8_t *ptr = static_cast<uint8_t *> (msg_->data ());
|
||||
size_t bytes_left = msg_->size ();
|
||||
|
||||
int rc = check_basic_command_structure (msg_);
|
||||
@ -134,8 +134,7 @@ int zmq::gssapi_mechanism_base_t::decode_message (msg_t *msg_)
|
||||
// Get command string
|
||||
if (bytes_left < 8 || memcmp (ptr, "\x07MESSAGE", 8)) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
@ -164,12 +163,12 @@ int zmq::gssapi_mechanism_base_t::decode_message (msg_t *msg_)
|
||||
return -1;
|
||||
}
|
||||
// TODO: instead of malloc/memcpy, can we just do: wrapped.value = ptr;
|
||||
const size_t alloc_length = wrapped.length? wrapped.length: 1;
|
||||
wrapped.value = static_cast <char *> (malloc (alloc_length));
|
||||
const size_t alloc_length = wrapped.length ? wrapped.length : 1;
|
||||
wrapped.value = static_cast<char *> (malloc (alloc_length));
|
||||
alloc_assert (wrapped.value);
|
||||
|
||||
if (wrapped.length) {
|
||||
memcpy(wrapped.value, ptr, wrapped.length);
|
||||
memcpy (wrapped.value, ptr, wrapped.length);
|
||||
ptr += wrapped.length;
|
||||
bytes_left -= wrapped.length;
|
||||
}
|
||||
@ -177,38 +176,37 @@ int zmq::gssapi_mechanism_base_t::decode_message (msg_t *msg_)
|
||||
// Unwrap the token value
|
||||
int state;
|
||||
gss_buffer_desc plaintext;
|
||||
maj_stat = gss_unwrap(&min_stat, context, &wrapped, &plaintext,
|
||||
&state, (gss_qop_t *) NULL);
|
||||
maj_stat = gss_unwrap (&min_stat, context, &wrapped, &plaintext, &state,
|
||||
(gss_qop_t *) NULL);
|
||||
|
||||
if (maj_stat != GSS_S_COMPLETE)
|
||||
{
|
||||
if (maj_stat != GSS_S_COMPLETE) {
|
||||
gss_release_buffer (&min_stat, &plaintext);
|
||||
free (wrapped.value);
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
zmq_assert(state);
|
||||
zmq_assert (state);
|
||||
|
||||
// Re-initialize msg_ for plaintext
|
||||
rc = msg_->close ();
|
||||
zmq_assert (rc == 0);
|
||||
|
||||
rc = msg_->init_size (plaintext.length-1);
|
||||
rc = msg_->init_size (plaintext.length - 1);
|
||||
zmq_assert (rc == 0);
|
||||
|
||||
const uint8_t flags = static_cast <char *> (plaintext.value)[0];
|
||||
const uint8_t flags = static_cast<char *> (plaintext.value)[0];
|
||||
if (flags & 0x01)
|
||||
msg_->set_flags (msg_t::more);
|
||||
if (flags & 0x02)
|
||||
msg_->set_flags (msg_t::command);
|
||||
|
||||
memcpy (msg_->data (), static_cast <char *> (plaintext.value)+1, plaintext.length-1);
|
||||
memcpy (msg_->data (), static_cast<char *> (plaintext.value) + 1,
|
||||
plaintext.length - 1);
|
||||
|
||||
gss_release_buffer (&min_stat, &plaintext);
|
||||
free(wrapped.value);
|
||||
free (wrapped.value);
|
||||
|
||||
if (bytes_left > 0) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
@ -221,7 +219,9 @@ int zmq::gssapi_mechanism_base_t::decode_message (msg_t *msg_)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int zmq::gssapi_mechanism_base_t::produce_initiate (msg_t *msg_, void *token_value_, size_t token_length_)
|
||||
int zmq::gssapi_mechanism_base_t::produce_initiate (msg_t *msg_,
|
||||
void *token_value_,
|
||||
size_t token_length_)
|
||||
{
|
||||
zmq_assert (token_value_);
|
||||
zmq_assert (token_length_ <= 0xFFFFFFFFUL);
|
||||
@ -231,14 +231,14 @@ int zmq::gssapi_mechanism_base_t::produce_initiate (msg_t *msg_, void *token_val
|
||||
const int rc = msg_->init_size (command_size);
|
||||
errno_assert (rc == 0);
|
||||
|
||||
uint8_t *ptr = static_cast <uint8_t *> (msg_->data ());
|
||||
uint8_t *ptr = static_cast<uint8_t *> (msg_->data ());
|
||||
|
||||
// Add command string
|
||||
memcpy (ptr, "\x08INITIATE", 9);
|
||||
ptr += 9;
|
||||
|
||||
// Add token length
|
||||
put_uint32 (ptr, static_cast <uint32_t> (token_length_));
|
||||
put_uint32 (ptr, static_cast<uint32_t> (token_length_));
|
||||
ptr += 4;
|
||||
|
||||
// Add token value
|
||||
@ -248,11 +248,13 @@ int zmq::gssapi_mechanism_base_t::produce_initiate (msg_t *msg_, void *token_val
|
||||
return 0;
|
||||
}
|
||||
|
||||
int zmq::gssapi_mechanism_base_t::process_initiate (msg_t *msg_, void **token_value_, size_t &token_length_)
|
||||
int zmq::gssapi_mechanism_base_t::process_initiate (msg_t *msg_,
|
||||
void **token_value_,
|
||||
size_t &token_length_)
|
||||
{
|
||||
zmq_assert (token_value_);
|
||||
|
||||
const uint8_t *ptr = static_cast <uint8_t *> (msg_->data ());
|
||||
const uint8_t *ptr = static_cast<uint8_t *> (msg_->data ());
|
||||
size_t bytes_left = msg_->size ();
|
||||
|
||||
int rc = check_basic_command_structure (msg_);
|
||||
@ -262,8 +264,7 @@ int zmq::gssapi_mechanism_base_t::process_initiate (msg_t *msg_, void **token_va
|
||||
// Get command string
|
||||
if (bytes_left < 9 || memcmp (ptr, "\x08INITIATE", 9)) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
@ -291,11 +292,12 @@ int zmq::gssapi_mechanism_base_t::process_initiate (msg_t *msg_, void **token_va
|
||||
return -1;
|
||||
}
|
||||
|
||||
*token_value_ = static_cast <char *> (malloc (token_length_ ? token_length_ : 1));
|
||||
*token_value_ =
|
||||
static_cast<char *> (malloc (token_length_ ? token_length_ : 1));
|
||||
alloc_assert (*token_value_);
|
||||
|
||||
if (token_length_) {
|
||||
memcpy(*token_value_, ptr, token_length_);
|
||||
memcpy (*token_value_, ptr, token_length_);
|
||||
ptr += token_length_;
|
||||
bytes_left -= token_length_;
|
||||
}
|
||||
@ -329,7 +331,7 @@ int zmq::gssapi_mechanism_base_t::process_ready (msg_t *msg_)
|
||||
return rc;
|
||||
}
|
||||
|
||||
const unsigned char *ptr = static_cast <unsigned char *> (msg_->data ());
|
||||
const unsigned char *ptr = static_cast<unsigned char *> (msg_->data ());
|
||||
size_t bytes_left = msg_->size ();
|
||||
|
||||
int rc = check_basic_command_structure (msg_);
|
||||
@ -338,8 +340,7 @@ int zmq::gssapi_mechanism_base_t::process_ready (msg_t *msg_)
|
||||
|
||||
if (bytes_left < 6 || memcmp (ptr, "\x05READY", 6)) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
@ -347,9 +348,8 @@ int zmq::gssapi_mechanism_base_t::process_ready (msg_t *msg_)
|
||||
bytes_left -= 6;
|
||||
rc = parse_metadata (ptr, bytes_left);
|
||||
if (rc == -1)
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA);
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -363,7 +363,7 @@ const gss_OID zmq::gssapi_mechanism_base_t::convert_nametype (int zmq_nametype)
|
||||
return GSS_C_NT_USER_NAME;
|
||||
case ZMQ_GSSAPI_NT_KRB5_PRINCIPAL:
|
||||
#ifdef GSS_KRB5_NT_PRINCIPAL_NAME
|
||||
return (gss_OID)GSS_KRB5_NT_PRINCIPAL_NAME;
|
||||
return (gss_OID) GSS_KRB5_NT_PRINCIPAL_NAME;
|
||||
#else
|
||||
return GSS_C_NT_USER_NAME;
|
||||
#endif
|
||||
@ -371,7 +371,9 @@ const gss_OID zmq::gssapi_mechanism_base_t::convert_nametype (int zmq_nametype)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int zmq::gssapi_mechanism_base_t::acquire_credentials (char * service_name_, gss_cred_id_t * cred_, gss_OID name_type_)
|
||||
int zmq::gssapi_mechanism_base_t::acquire_credentials (char *service_name_,
|
||||
gss_cred_id_t *cred_,
|
||||
gss_OID name_type_)
|
||||
{
|
||||
OM_uint32 maj_stat;
|
||||
OM_uint32 min_stat;
|
||||
@ -381,20 +383,18 @@ int zmq::gssapi_mechanism_base_t::acquire_credentials (char * service_name_, gss
|
||||
name_buf.value = service_name_;
|
||||
name_buf.length = strlen ((char *) name_buf.value) + 1;
|
||||
|
||||
maj_stat = gss_import_name (&min_stat, &name_buf,
|
||||
name_type_, &server_name);
|
||||
maj_stat = gss_import_name (&min_stat, &name_buf, name_type_, &server_name);
|
||||
|
||||
if (maj_stat != GSS_S_COMPLETE)
|
||||
return -1;
|
||||
|
||||
maj_stat = gss_acquire_cred (&min_stat, server_name, 0,
|
||||
GSS_C_NO_OID_SET, GSS_C_BOTH,
|
||||
cred_, NULL, NULL);
|
||||
maj_stat = gss_acquire_cred (&min_stat, server_name, 0, GSS_C_NO_OID_SET,
|
||||
GSS_C_BOTH, cred_, NULL, NULL);
|
||||
|
||||
if (maj_stat != GSS_S_COMPLETE)
|
||||
return -1;
|
||||
|
||||
gss_release_name(&min_stat, &server_name);
|
||||
gss_release_name (&min_stat, &server_name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
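Aside, not part of the diff: the encode_message ()/decode_message () hunks above only reformat the code that frames GSSAPI MESSAGE commands, but in passing they show the wire layout. A minimal standalone sketch of that framing, assuming put_uint32 writes the 4-byte length big-endian as libzmq's wire.hpp does, and using a plain vector in place of msg_t:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative sketch of the MESSAGE command layout used above:
    //   8 bytes  "\x07MESSAGE"            (command name)
    // + 4 bytes  token length, big-endian (what put_uint32 writes)
    // + N bytes  gss_wrap()-ed token      (flags byte + payload before wrapping)
    static std::vector<uint8_t> frame_message (const uint8_t *wrapped,
                                               size_t wrapped_len)
    {
        static const char cmd[] = "\x07MESSAGE";
        std::vector<uint8_t> frame;
        frame.reserve (8 + 4 + wrapped_len);
        frame.insert (frame.end (), cmd, cmd + 8);
        frame.push_back (static_cast<uint8_t> (wrapped_len >> 24));
        frame.push_back (static_cast<uint8_t> (wrapped_len >> 16));
        frame.push_back (static_cast<uint8_t> (wrapped_len >> 8));
        frame.push_back (static_cast<uint8_t> (wrapped_len));
        frame.insert (frame.end (), wrapped, wrapped + wrapped_len);
        return frame;
    }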
@ -42,91 +42,89 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class msg_t;
|
||||
|
||||
class msg_t;
|
||||
/// Commonalities between clients and servers are captured here.
|
||||
/// For example, clients and servers both need to produce and
|
||||
/// process context-level GSSAPI tokens (via INITIATE commands)
|
||||
/// and per-message GSSAPI tokens (via MESSAGE commands).
|
||||
class gssapi_mechanism_base_t : public virtual mechanism_base_t
|
||||
{
|
||||
public:
|
||||
gssapi_mechanism_base_t (session_base_t *session_,
|
||||
const options_t &options_);
|
||||
virtual ~gssapi_mechanism_base_t () = 0;
|
||||
|
||||
/// Commonalities between clients and servers are captured here.
|
||||
/// For example, clients and servers both need to produce and
|
||||
/// process context-level GSSAPI tokens (via INITIATE commands)
|
||||
/// and per-message GSSAPI tokens (via MESSAGE commands).
|
||||
class gssapi_mechanism_base_t : public virtual mechanism_base_t
|
||||
{
|
||||
public:
|
||||
gssapi_mechanism_base_t (session_base_t *session_,
|
||||
const options_t &options_);
|
||||
virtual ~gssapi_mechanism_base_t () = 0;
|
||||
protected:
|
||||
// Produce a context-level GSSAPI token (INITIATE command)
|
||||
// during security context initialization.
|
||||
int produce_initiate (msg_t *msg_, void *data_, size_t data_len_);
|
||||
|
||||
protected:
|
||||
// Produce a context-level GSSAPI token (INITIATE command)
|
||||
// during security context initialization.
|
||||
int produce_initiate (msg_t *msg_, void *data_, size_t data_len_);
|
||||
// Process a context-level GSSAPI token (INITIATE command)
|
||||
// during security context initialization.
|
||||
int process_initiate (msg_t *msg_, void **data_, size_t &data_len_);
|
||||
|
||||
// Process a context-level GSSAPI token (INITIATE command)
|
||||
// during security context initialization.
|
||||
int process_initiate (msg_t *msg_, void **data_, size_t &data_len_);
|
||||
// Produce a metadata ready msg (READY) to conclude handshake
|
||||
int produce_ready (msg_t *msg_);
|
||||
|
||||
// Produce a metadata ready msg (READY) to conclude handshake
|
||||
int produce_ready (msg_t *msg_);
|
||||
// Process a metadata ready msg (READY)
|
||||
int process_ready (msg_t *msg_);
|
||||
|
||||
// Process a metadata ready msg (READY)
|
||||
int process_ready (msg_t *msg_);
|
||||
// Encode a per-message GSSAPI token (MESSAGE command) using
|
||||
// the established security context.
|
||||
int encode_message (msg_t *msg_);
|
||||
|
||||
// Encode a per-message GSSAPI token (MESSAGE command) using
|
||||
// the established security context.
|
||||
int encode_message (msg_t *msg_);
|
||||
// Decode a per-message GSSAPI token (MESSAGE command) using
|
||||
// the established security context.
|
||||
int decode_message (msg_t *msg_);
|
||||
|
||||
// Decode a per-message GSSAPI token (MESSAGE command) using
|
||||
// the established security context.
|
||||
int decode_message (msg_t *msg_);
|
||||
// Convert ZMQ_GSSAPI_NT values to GSSAPI name_type
|
||||
static const gss_OID convert_nametype (int zmq_name_type_);
|
||||
|
||||
// Convert ZMQ_GSSAPI_NT values to GSSAPI name_type
|
||||
static const gss_OID convert_nametype (int zmq_name_type_);
|
||||
// Acquire security context credentials from the
|
||||
// underlying mechanism.
|
||||
static int acquire_credentials (char *principal_name_,
|
||||
gss_cred_id_t *cred_,
|
||||
gss_OID name_type_);
|
||||
|
||||
// Acquire security context credentials from the
|
||||
// underlying mechanism.
|
||||
static int acquire_credentials (char * principal_name_,
|
||||
gss_cred_id_t * cred_,
|
||||
gss_OID name_type_);
|
||||
protected:
|
||||
// Opaque GSSAPI token for outgoing data
|
||||
gss_buffer_desc send_tok;
|
||||
|
||||
protected:
|
||||
// Opaque GSSAPI token for outgoing data
|
||||
gss_buffer_desc send_tok;
|
||||
// Opaque GSSAPI token for incoming data
|
||||
gss_buffer_desc recv_tok;
|
||||
|
||||
// Opaque GSSAPI token for incoming data
|
||||
gss_buffer_desc recv_tok;
|
||||
// Opaque GSSAPI representation of principal
|
||||
gss_name_t target_name;
|
||||
|
||||
// Opaque GSSAPI representation of principal
|
||||
gss_name_t target_name;
|
||||
// Human-readable principal name
|
||||
char *principal_name;
|
||||
|
||||
// Human-readable principal name
|
||||
char * principal_name;
|
||||
// Status code returned by GSSAPI functions
|
||||
OM_uint32 maj_stat;
|
||||
|
||||
// Status code returned by GSSAPI functions
|
||||
OM_uint32 maj_stat;
|
||||
// Status code returned by the underlying mechanism
|
||||
OM_uint32 min_stat;
|
||||
|
||||
// Status code returned by the underlying mechanism
|
||||
OM_uint32 min_stat;
|
||||
// Status code returned by the underlying mechanism
|
||||
// during context initialization
|
||||
OM_uint32 init_sec_min_stat;
|
||||
|
||||
// Status code returned by the underlying mechanism
|
||||
// during context initialization
|
||||
OM_uint32 init_sec_min_stat;
|
||||
// Flags returned by GSSAPI (ignored)
|
||||
OM_uint32 ret_flags;
|
||||
|
||||
// Flags returned by GSSAPI (ignored)
|
||||
OM_uint32 ret_flags;
|
||||
// Flags returned by GSSAPI (ignored)
|
||||
OM_uint32 gss_flags;
|
||||
|
||||
// Flags returned by GSSAPI (ignored)
|
||||
OM_uint32 gss_flags;
|
||||
// Credentials used to establish security context
|
||||
gss_cred_id_t cred;
|
||||
|
||||
// Credentials used to establish security context
|
||||
gss_cred_id_t cred;
|
||||
|
||||
// Opaque GSSAPI representation of the security context
|
||||
gss_ctx_id_t context;
|
||||
|
||||
// If true, use gss to encrypt messages. If false, only utilize gss for auth.
|
||||
bool do_encryption;
|
||||
};
|
||||
// Opaque GSSAPI representation of the security context
|
||||
gss_ctx_id_t context;
|
||||
|
||||
// If true, use gss to encrypt messages. If false, only utilize gss for auth.
|
||||
bool do_encryption;
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -54,12 +54,13 @@ zmq::gssapi_server_t::gssapi_server_t (session_base_t *session_,
|
||||
security_context_established (false)
|
||||
{
|
||||
maj_stat = GSS_S_CONTINUE_NEEDED;
|
||||
if(!options_.gss_principal.empty())
|
||||
{
|
||||
const std::string::size_type principal_size = options_.gss_principal.size();
|
||||
principal_name = static_cast <char *>(malloc(principal_size+1));
|
||||
assert(principal_name);
|
||||
memcpy(principal_name, options_.gss_principal.c_str(), principal_size+1 );
|
||||
if (!options_.gss_principal.empty ()) {
|
||||
const std::string::size_type principal_size =
|
||||
options_.gss_principal.size ();
|
||||
principal_name = static_cast<char *> (malloc (principal_size + 1));
|
||||
assert (principal_name);
|
||||
memcpy (principal_name, options_.gss_principal.c_str (),
|
||||
principal_size + 1);
|
||||
gss_OID name_type = convert_nametype (options_.gss_principal_nt);
|
||||
if (acquire_credentials (principal_name, &cred, name_type) != 0)
|
||||
maj_stat = GSS_S_FAILURE;
|
||||
@ -68,17 +69,17 @@ zmq::gssapi_server_t::gssapi_server_t (session_base_t *session_,
|
||||
|
||||
zmq::gssapi_server_t::~gssapi_server_t ()
|
||||
{
|
||||
if(cred)
|
||||
gss_release_cred(&min_stat, &cred);
|
||||
if (cred)
|
||||
gss_release_cred (&min_stat, &cred);
|
||||
|
||||
if(target_name)
|
||||
gss_release_name(&min_stat, &target_name);
|
||||
if (target_name)
|
||||
gss_release_name (&min_stat, &target_name);
|
||||
}
|
||||
|
||||
int zmq::gssapi_server_t::next_handshake_command (msg_t *msg_)
|
||||
{
|
||||
if (state == send_ready) {
|
||||
int rc = produce_ready(msg_);
|
||||
int rc = produce_ready (msg_);
|
||||
if (rc == 0)
|
||||
state = recv_ready;
|
||||
|
||||
@ -108,7 +109,7 @@ int zmq::gssapi_server_t::next_handshake_command (msg_t *msg_)
|
||||
int zmq::gssapi_server_t::process_handshake_command (msg_t *msg_)
|
||||
{
|
||||
if (state == recv_ready) {
|
||||
int rc = process_ready(msg_);
|
||||
int rc = process_ready (msg_);
|
||||
if (rc == 0)
|
||||
state = connected;
|
||||
|
||||
@ -117,8 +118,7 @@ int zmq::gssapi_server_t::process_handshake_command (msg_t *msg_)
|
||||
|
||||
if (state != recv_next_token) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
@ -138,7 +138,7 @@ int zmq::gssapi_server_t::process_handshake_command (msg_t *msg_)
|
||||
expecting_zap_reply = true;
|
||||
}
|
||||
}
|
||||
state = expecting_zap_reply? expect_zap_reply: send_ready;
|
||||
state = expecting_zap_reply ? expect_zap_reply : send_ready;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -158,9 +158,9 @@ void zmq::gssapi_server_t::send_zap_request ()
|
||||
{
|
||||
gss_buffer_desc principal;
|
||||
gss_display_name (&min_stat, target_name, &principal, NULL);
|
||||
zap_client_t::send_zap_request ("GSSAPI", 6,
|
||||
reinterpret_cast<const uint8_t *> (principal.value),
|
||||
principal.length);
|
||||
zap_client_t::send_zap_request (
|
||||
"GSSAPI", 6, reinterpret_cast<const uint8_t *> (principal.value),
|
||||
principal.length);
|
||||
|
||||
gss_release_buffer (&min_stat, &principal);
|
||||
}
|
||||
@ -170,7 +170,7 @@ int zmq::gssapi_server_t::encode (msg_t *msg_)
|
||||
zmq_assert (state == connected);
|
||||
|
||||
if (do_encryption)
|
||||
return encode_message (msg_);
|
||||
return encode_message (msg_);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -180,7 +180,7 @@ int zmq::gssapi_server_t::decode (msg_t *msg_)
|
||||
zmq_assert (state == connected);
|
||||
|
||||
if (do_encryption)
|
||||
return decode_message (msg_);
|
||||
return decode_message (msg_);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -199,21 +199,21 @@ int zmq::gssapi_server_t::zap_msg_available ()
|
||||
|
||||
zmq::mechanism_t::status_t zmq::gssapi_server_t::status () const
|
||||
{
|
||||
return state == connected? mechanism_t::ready: mechanism_t::handshaking;
|
||||
return state == connected ? mechanism_t::ready : mechanism_t::handshaking;
|
||||
}
|
||||
|
||||
int zmq::gssapi_server_t::produce_next_token (msg_t *msg_)
|
||||
{
|
||||
if (send_tok.length != 0) { // Client expects another token
|
||||
if (produce_initiate(msg_, send_tok.value, send_tok.length) < 0)
|
||||
if (produce_initiate (msg_, send_tok.value, send_tok.length) < 0)
|
||||
return -1;
|
||||
gss_release_buffer(&min_stat, &send_tok);
|
||||
gss_release_buffer (&min_stat, &send_tok);
|
||||
}
|
||||
|
||||
if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED) {
|
||||
gss_release_name(&min_stat, &target_name);
|
||||
gss_release_name (&min_stat, &target_name);
|
||||
if (context != GSS_C_NO_CONTEXT)
|
||||
gss_delete_sec_context(&min_stat, &context, GSS_C_NO_BUFFER);
|
||||
gss_delete_sec_context (&min_stat, &context, GSS_C_NO_BUFFER);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -223,9 +223,9 @@ int zmq::gssapi_server_t::produce_next_token (msg_t *msg_)
|
||||
int zmq::gssapi_server_t::process_next_token (msg_t *msg_)
|
||||
{
|
||||
if (maj_stat == GSS_S_CONTINUE_NEEDED) {
|
||||
if (process_initiate(msg_, &recv_tok.value, recv_tok.length) < 0) {
|
||||
if (process_initiate (msg_, &recv_tok.value, recv_tok.length) < 0) {
|
||||
if (target_name != GSS_C_NO_NAME)
|
||||
gss_release_name(&min_stat, &target_name);
|
||||
gss_release_name (&min_stat, &target_name);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -235,10 +235,9 @@ int zmq::gssapi_server_t::process_next_token (msg_t *msg_)
|
||||
|
||||
void zmq::gssapi_server_t::accept_context ()
|
||||
{
|
||||
maj_stat = gss_accept_sec_context(&init_sec_min_stat, &context, cred,
|
||||
&recv_tok, GSS_C_NO_CHANNEL_BINDINGS,
|
||||
&target_name, &doid, &send_tok,
|
||||
&ret_flags, NULL, NULL);
|
||||
maj_stat = gss_accept_sec_context (
|
||||
&init_sec_min_stat, &context, cred, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS,
|
||||
&target_name, &doid, &send_tok, &ret_flags, NULL, NULL);
|
||||
|
||||
if (recv_tok.value) {
|
||||
free (recv_tok.value);
|
||||
|
@ -37,57 +37,54 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class msg_t;
|
||||
class session_base_t;
|
||||
|
||||
class msg_t;
|
||||
class session_base_t;
|
||||
class gssapi_server_t : public gssapi_mechanism_base_t, public zap_client_t
|
||||
{
|
||||
public:
|
||||
gssapi_server_t (session_base_t *session_,
|
||||
const std::string &peer_address,
|
||||
const options_t &options_);
|
||||
virtual ~gssapi_server_t ();
|
||||
|
||||
class gssapi_server_t
|
||||
: public gssapi_mechanism_base_t, public zap_client_t
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
virtual int zap_msg_available ();
|
||||
virtual status_t status () const;
|
||||
|
||||
private:
|
||||
enum state_t
|
||||
{
|
||||
public:
|
||||
gssapi_server_t (session_base_t *session_,
|
||||
const std::string &peer_address,
|
||||
const options_t &options_);
|
||||
virtual ~gssapi_server_t ();
|
||||
|
||||
// mechanism implementation
|
||||
virtual int next_handshake_command (msg_t *msg_);
|
||||
virtual int process_handshake_command (msg_t *msg_);
|
||||
virtual int encode (msg_t *msg_);
|
||||
virtual int decode (msg_t *msg_);
|
||||
virtual int zap_msg_available ();
|
||||
virtual status_t status () const;
|
||||
|
||||
private:
|
||||
|
||||
enum state_t {
|
||||
send_next_token,
|
||||
recv_next_token,
|
||||
expect_zap_reply,
|
||||
send_ready,
|
||||
recv_ready,
|
||||
connected
|
||||
};
|
||||
|
||||
session_base_t * const session;
|
||||
|
||||
const std::string peer_address;
|
||||
|
||||
// Current FSM state
|
||||
state_t state;
|
||||
|
||||
// True iff server considers the client authenticated
|
||||
bool security_context_established;
|
||||
|
||||
// The underlying mechanism type (ignored)
|
||||
gss_OID doid;
|
||||
|
||||
void accept_context ();
|
||||
int produce_next_token (msg_t *msg_);
|
||||
int process_next_token (msg_t *msg_);
|
||||
void send_zap_request ();
|
||||
send_next_token,
|
||||
recv_next_token,
|
||||
expect_zap_reply,
|
||||
send_ready,
|
||||
recv_ready,
|
||||
connected
|
||||
};
|
||||
|
||||
session_base_t *const session;
|
||||
|
||||
const std::string peer_address;
|
||||
|
||||
// Current FSM state
|
||||
state_t state;
|
||||
|
||||
// True iff server considers the client authenticated
|
||||
bool security_context_established;
|
||||
|
||||
// The underlying mechanism type (ignored)
|
||||
gss_OID doid;
|
||||
|
||||
void accept_context ();
|
||||
int produce_next_token (msg_t *msg_);
|
||||
int process_next_token (msg_t *msg_);
|
||||
void send_zap_request ();
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -34,31 +34,27 @@

namespace zmq
{
class msg_t;

class msg_t;
// Interface to be implemented by message decoder.

// Interface to be implemented by message decoder.
class i_decoder
{
public:
virtual ~i_decoder () {}

class i_decoder
{
public:
virtual ~i_decoder () {}
virtual void get_buffer (unsigned char **data_, size_t *size_) = 0;

virtual void get_buffer (unsigned char **data_, size_t *size_) = 0;

virtual void resize_buffer(size_t) = 0;
// Decodes data pointed to by data_.
// When a message is decoded, 1 is returned.
// When the decoder needs more data, 0 is returned.
// On error, -1 is returned and errno is set accordingly.
virtual int decode (const unsigned char *data_, size_t size_,
size_t &processed) = 0;

virtual msg_t *msg () = 0;


};
virtual void resize_buffer (size_t) = 0;
// Decodes data pointed to by data_.
// When a message is decoded, 1 is returned.
// When the decoder needs more data, 0 is returned.
// On error, -1 is returned and errno is set accordingly.
virtual int
decode (const unsigned char *data_, size_t size_, size_t &processed) = 0;

virtual msg_t *msg () = 0;
};
}

#endif
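Aside, not part of the diff: the comments in this header state the decode () contract — 1 means a complete message was decoded, 0 means more input is needed, -1 means a protocol error with errno set. A small standalone sketch of a feed loop written against that contract, using a stand-in interface rather than libzmq's real i_decoder:

    #include <cstddef>

    // Stand-in for the interface above (simplified; no msg_t here).
    struct decoder_like_t
    {
        virtual ~decoder_like_t () {}
        // Same contract as i_decoder::decode(): 1 = message complete,
        // 0 = needs more input, -1 = protocol error (errno set).
        virtual int decode (const unsigned char *data_, size_t size_,
                            size_t &processed_) = 0;
    };

    // Drain one network buffer through the decoder, stopping on error.
    // Returns the number of complete messages decoded, or -1 on error.
    static int drain (decoder_like_t &decoder, const unsigned char *buf, size_t n)
    {
        int messages = 0;
        while (n > 0) {
            size_t processed = 0;
            const int rc = decoder.decode (buf, n, processed);
            buf += processed;
            n -= processed;
            if (rc == -1)
                return -1; // error: caller inspects errno
            if (rc == 0)
                break;     // decoder wants more data
            ++messages;    // rc == 1: one message completed
        }
        return messages;
    }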
@@ -34,27 +34,24 @@

namespace zmq
{
// Forward declaration
class msg_t;

// Forward declaration
class msg_t;
// Interface to be implemented by message encoder.

// Interface to be implemented by message encoder.
struct i_encoder
{
virtual ~i_encoder () {}

struct i_encoder
{
virtual ~i_encoder () {}

// The function returns a batch of binary data. The data
// are filled to a supplied buffer. If no buffer is supplied (data_
// is NULL) encoder will provide buffer of its own.
// Function returns 0 when a new message is required.
virtual size_t encode (unsigned char **data_, size_t size) = 0;

// Load a new message into encoder.
virtual void load_msg (msg_t *msg_) = 0;

};
// The function returns a batch of binary data. The data
// are filled to a supplied buffer. If no buffer is supplied (data_
// is NULL) encoder will provide buffer of its own.
// Function returns 0 when a new message is required.
virtual size_t encode (unsigned char **data_, size_t size) = 0;

// Load a new message into encoder.
virtual void load_msg (msg_t *msg_) = 0;
};
}

#endif
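Aside, not part of the diff: the comments in this header describe the encode () contract — it fills (or provides) a buffer and returns 0 once the current message is exhausted and a new one must be loaded with load_msg (). A standalone sketch of a send loop against that contract, with a stand-in interface and a caller-supplied writer:

    #include <cstddef>

    // Stand-in mirroring the interface above; not libzmq's actual class.
    struct encoder_like_t
    {
        virtual ~encoder_like_t () {}
        // Same contract as i_encoder::encode(): produces the next batch of
        // bytes and returns 0 once the loaded message is fully encoded.
        virtual size_t encode (unsigned char **data_, size_t size_) = 0;
        // Load the next message to encode (msg_t in the real interface).
        virtual void load_msg (void *msg_) = 0;
    };

    // Pump one message through the encoder and hand each batch to a writer.
    template <typename Writer>
    static void send_one (encoder_like_t &encoder, void *msg, Writer write)
    {
        encoder.load_msg (msg);
        for (;;) {
            unsigned char *chunk = NULL; // NULL: let the encoder pick a buffer
            const size_t n = encoder.encode (&chunk, 0);
            if (n == 0)
                break;        // message exhausted, load the next one
            write (chunk, n); // e.g. write() the batch to the socket
        }
    }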
@ -32,37 +32,34 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class io_thread_t;
|
||||
|
||||
class io_thread_t;
|
||||
// Abstract interface to be implemented by various engines.
|
||||
|
||||
// Abstract interface to be implemented by various engines.
|
||||
struct i_engine
|
||||
{
|
||||
virtual ~i_engine () {}
|
||||
|
||||
struct i_engine
|
||||
{
|
||||
virtual ~i_engine () {}
|
||||
// Plug the engine to the session.
|
||||
virtual void plug (zmq::io_thread_t *io_thread_,
|
||||
class session_base_t *session_) = 0;
|
||||
|
||||
// Plug the engine to the session.
|
||||
virtual void plug (zmq::io_thread_t *io_thread_,
|
||||
class session_base_t *session_) = 0;
|
||||
// Terminate and deallocate the engine. Note that 'detached'
|
||||
// events are not fired on termination.
|
||||
virtual void terminate () = 0;
|
||||
|
||||
// Terminate and deallocate the engine. Note that 'detached'
|
||||
// events are not fired on termination.
|
||||
virtual void terminate () = 0;
|
||||
// This method is called by the session to signalise that more
|
||||
// messages can be written to the pipe.
|
||||
virtual void restart_input () = 0;
|
||||
|
||||
// This method is called by the session to signalise that more
|
||||
// messages can be written to the pipe.
|
||||
virtual void restart_input () = 0;
|
||||
// This method is called by the session to signalise that there
|
||||
// are messages to send available.
|
||||
virtual void restart_output () = 0;
|
||||
|
||||
// This method is called by the session to signalise that there
|
||||
// are messages to send available.
|
||||
virtual void restart_output () = 0;
|
||||
|
||||
virtual void zap_msg_available () = 0;
|
||||
|
||||
virtual const char * get_endpoint() const = 0;
|
||||
|
||||
};
|
||||
virtual void zap_msg_available () = 0;
|
||||
|
||||
virtual const char *get_endpoint () const = 0;
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -34,27 +34,24 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// Interface to be implemented by mailbox.
|
||||
// Interface to be implemented by mailbox.
|
||||
|
||||
class i_mailbox
|
||||
{
|
||||
public:
|
||||
virtual ~i_mailbox () {}
|
||||
class i_mailbox
|
||||
{
|
||||
public:
|
||||
virtual ~i_mailbox () {}
|
||||
|
||||
virtual void send (const command_t &cmd_) = 0;
|
||||
virtual int recv (command_t *cmd_, int timeout_) = 0;
|
||||
virtual void send (const command_t &cmd_) = 0;
|
||||
virtual int recv (command_t *cmd_, int timeout_) = 0;
|
||||
|
||||
|
||||
#ifdef HAVE_FORK
|
||||
// close the file descriptors in the signaller. This is used in a forked
|
||||
// child process to close the file descriptors so that they do not interfere
|
||||
// with the context in the parent process.
|
||||
virtual void forked () = 0;
|
||||
// close the file descriptors in the signaller. This is used in a forked
|
||||
// child process to close the file descriptors so that they do not interfere
|
||||
// with the context in the parent process.
|
||||
virtual void forked () = 0;
|
||||
#endif
|
||||
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -32,24 +32,22 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// Virtual interface to be exposed by object that want to be notified
|
||||
// about events on file descriptors.
|
||||
|
||||
// Virtual interface to be exposed by object that want to be notified
|
||||
// about events on file descriptors.
|
||||
struct i_poll_events
|
||||
{
|
||||
virtual ~i_poll_events () {}
|
||||
|
||||
struct i_poll_events
|
||||
{
|
||||
virtual ~i_poll_events () {}
|
||||
// Called by I/O thread when file descriptor is ready for reading.
|
||||
virtual void in_event () = 0;
|
||||
|
||||
// Called by I/O thread when file descriptor is ready for reading.
|
||||
virtual void in_event () = 0;
|
||||
|
||||
// Called by I/O thread when file descriptor is ready for writing.
|
||||
virtual void out_event () = 0;
|
||||
|
||||
// Called when timer expires.
|
||||
virtual void timer_event (int id_) = 0;
|
||||
};
|
||||
// Called by I/O thread when file descriptor is ready for writing.
|
||||
virtual void out_event () = 0;
|
||||
|
||||
// Called when timer expires.
|
||||
virtual void timer_event (int id_) = 0;
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -32,8 +32,7 @@
|
||||
#include "io_thread.hpp"
|
||||
#include "err.hpp"
|
||||
|
||||
zmq::io_object_t::io_object_t (io_thread_t *io_thread_) :
|
||||
poller (NULL)
|
||||
zmq::io_object_t::io_object_t (io_thread_t *io_thread_) : poller (NULL)
|
||||
{
|
||||
if (io_thread_)
|
||||
plug (io_thread_);
|
||||
|
@ -38,52 +38,47 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class io_thread_t;
|
||||
|
||||
class io_thread_t;
|
||||
// Simple base class for objects that live in I/O threads.
|
||||
// It makes communication with the poller object easier and
|
||||
// makes defining unneeded event handlers unnecessary.
|
||||
|
||||
// Simple base class for objects that live in I/O threads.
|
||||
// It makes communication with the poller object easier and
|
||||
// makes defining unneeded event handlers unnecessary.
|
||||
class io_object_t : public i_poll_events
|
||||
{
|
||||
public:
|
||||
io_object_t (zmq::io_thread_t *io_thread_ = NULL);
|
||||
~io_object_t ();
|
||||
|
||||
class io_object_t : public i_poll_events
|
||||
{
|
||||
public:
|
||||
// When migrating an object from one I/O thread to another, first
|
||||
// unplug it, then migrate it, then plug it to the new thread.
|
||||
void plug (zmq::io_thread_t *io_thread_);
|
||||
void unplug ();
|
||||
|
||||
io_object_t (zmq::io_thread_t *io_thread_ = NULL);
|
||||
~io_object_t ();
|
||||
protected:
|
||||
typedef poller_t::handle_t handle_t;
|
||||
|
||||
// When migrating an object from one I/O thread to another, first
|
||||
// unplug it, then migrate it, then plug it to the new thread.
|
||||
void plug (zmq::io_thread_t *io_thread_);
|
||||
void unplug ();
|
||||
// Methods to access underlying poller object.
|
||||
handle_t add_fd (fd_t fd_);
|
||||
void rm_fd (handle_t handle_);
|
||||
void set_pollin (handle_t handle_);
|
||||
void reset_pollin (handle_t handle_);
|
||||
void set_pollout (handle_t handle_);
|
||||
void reset_pollout (handle_t handle_);
|
||||
void add_timer (int timout_, int id_);
|
||||
void cancel_timer (int id_);
|
||||
|
||||
protected:
|
||||
// i_poll_events interface implementation.
|
||||
void in_event ();
|
||||
void out_event ();
|
||||
void timer_event (int id_);
|
||||
|
||||
typedef poller_t::handle_t handle_t;
|
||||
|
||||
// Methods to access underlying poller object.
|
||||
handle_t add_fd (fd_t fd_);
|
||||
void rm_fd (handle_t handle_);
|
||||
void set_pollin (handle_t handle_);
|
||||
void reset_pollin (handle_t handle_);
|
||||
void set_pollout (handle_t handle_);
|
||||
void reset_pollout (handle_t handle_);
|
||||
void add_timer (int timout_, int id_);
|
||||
void cancel_timer (int id_);
|
||||
|
||||
// i_poll_events interface implementation.
|
||||
void in_event ();
|
||||
void out_event ();
|
||||
void timer_event (int id_);
|
||||
|
||||
private:
|
||||
|
||||
poller_t *poller;
|
||||
|
||||
io_object_t (const io_object_t&);
|
||||
const io_object_t &operator = (const io_object_t&);
|
||||
};
|
||||
private:
|
||||
poller_t *poller;
|
||||
|
||||
io_object_t (const io_object_t &);
|
||||
const io_object_t &operator= (const io_object_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -51,7 +51,7 @@ zmq::io_thread_t::io_thread_t (ctx_t *ctx_, uint32_t tid_) :
|
||||
|
||||
zmq::io_thread_t::~io_thread_t ()
|
||||
{
|
||||
LIBZMQ_DELETE(poller);
|
||||
LIBZMQ_DELETE (poller);
|
||||
}
|
||||
|
||||
void zmq::io_thread_t::start ()
|
||||
|
@ -40,60 +40,56 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class ctx_t;
|
||||
|
||||
class ctx_t;
|
||||
// Generic part of the I/O thread. Polling-mechanism-specific features
|
||||
// are implemented in separate "polling objects".
|
||||
|
||||
// Generic part of the I/O thread. Polling-mechanism-specific features
|
||||
// are implemented in separate "polling objects".
|
||||
class io_thread_t : public object_t, public i_poll_events
|
||||
{
|
||||
public:
|
||||
io_thread_t (zmq::ctx_t *ctx_, uint32_t tid_);
|
||||
|
||||
class io_thread_t : public object_t, public i_poll_events
|
||||
{
|
||||
public:
|
||||
// Clean-up. If the thread was started, it's necessary to call 'stop'
|
||||
// before invoking destructor. Otherwise the destructor would hang up.
|
||||
~io_thread_t ();
|
||||
|
||||
io_thread_t (zmq::ctx_t *ctx_, uint32_t tid_);
|
||||
// Launch the physical thread.
|
||||
void start ();
|
||||
|
||||
// Clean-up. If the thread was started, it's necessary to call 'stop'
|
||||
// before invoking destructor. Otherwise the destructor would hang up.
|
||||
~io_thread_t ();
|
||||
// Ask underlying thread to stop.
|
||||
void stop ();
|
||||
|
||||
// Launch the physical thread.
|
||||
void start ();
|
||||
// Returns mailbox associated with this I/O thread.
|
||||
mailbox_t *get_mailbox ();
|
||||
|
||||
// Ask underlying thread to stop.
|
||||
void stop ();
|
||||
// i_poll_events implementation.
|
||||
void in_event ();
|
||||
void out_event ();
|
||||
void timer_event (int id_);
|
||||
|
||||
// Returns mailbox associated with this I/O thread.
|
||||
mailbox_t *get_mailbox ();
|
||||
// Used by io_objects to retrieve the associated poller object.
|
||||
poller_t *get_poller ();
|
||||
|
||||
// i_poll_events implementation.
|
||||
void in_event ();
|
||||
void out_event ();
|
||||
void timer_event (int id_);
|
||||
// Command handlers.
|
||||
void process_stop ();
|
||||
|
||||
// Used by io_objects to retrieve the associated poller object.
|
||||
poller_t *get_poller ();
|
||||
// Returns load experienced by the I/O thread.
|
||||
int get_load ();
|
||||
|
||||
// Command handlers.
|
||||
void process_stop ();
|
||||
private:
|
||||
// I/O thread accesses incoming commands via this mailbox.
|
||||
mailbox_t mailbox;
|
||||
|
||||
// Returns load experienced by the I/O thread.
|
||||
int get_load ();
|
||||
// Handle associated with mailbox' file descriptor.
|
||||
poller_t::handle_t mailbox_handle;
|
||||
|
||||
private:
|
||||
|
||||
// I/O thread accesses incoming commands via this mailbox.
|
||||
mailbox_t mailbox;
|
||||
|
||||
// Handle associated with mailbox' file descriptor.
|
||||
poller_t::handle_t mailbox_handle;
|
||||
|
||||
// I/O multiplexing is performed using a poller object.
|
||||
poller_t *poller;
|
||||
|
||||
io_thread_t (const io_thread_t&);
|
||||
const io_thread_t &operator = (const io_thread_t&);
|
||||
};
|
||||
// I/O multiplexing is performed using a poller object.
|
||||
poller_t *poller;
|
||||
|
||||
io_thread_t (const io_thread_t &);
|
||||
const io_thread_t &operator= (const io_thread_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
58
src/ip.cpp
@ -64,9 +64,9 @@ zmq::fd_t zmq::open_socket (int domain_, int type_, int protocol_)
|
||||
return -1;
|
||||
#endif
|
||||
|
||||
// If there's no SOCK_CLOEXEC, let's try the second best option. Note that
|
||||
// race condition can cause socket not to be closed (if fork happens
|
||||
// between socket creation and this point).
|
||||
// If there's no SOCK_CLOEXEC, let's try the second best option. Note that
|
||||
// race condition can cause socket not to be closed (if fork happens
|
||||
// between socket creation and this point).
|
||||
#if !defined ZMQ_HAVE_SOCK_CLOEXEC && defined FD_CLOEXEC
|
||||
rc = fcntl (s, F_SETFD, FD_CLOEXEC);
|
||||
errno_assert (rc != -1);
|
||||
@ -106,7 +106,7 @@ void zmq::unblock_socket (fd_t s_)
|
||||
|
||||
void zmq::enable_ipv4_mapping (fd_t s_)
|
||||
{
|
||||
(void) s_;
|
||||
(void) s_;
|
||||
|
||||
#if defined IPV6_V6ONLY && !defined ZMQ_HAVE_OPENBSD
|
||||
#ifdef ZMQ_HAVE_WINDOWS
|
||||
@ -114,8 +114,8 @@ void zmq::enable_ipv4_mapping (fd_t s_)
|
||||
#else
|
||||
int flag = 0;
|
||||
#endif
|
||||
int rc = setsockopt (s_, IPPROTO_IPV6, IPV6_V6ONLY, (const char*) &flag,
|
||||
sizeof (flag));
|
||||
int rc = setsockopt (s_, IPPROTO_IPV6, IPV6_V6ONLY, (const char *) &flag,
|
||||
sizeof (flag));
|
||||
#ifdef ZMQ_HAVE_WINDOWS
|
||||
wsa_assert (rc != SOCKET_ERROR);
|
||||
#else
|
||||
@ -130,38 +130,36 @@ int zmq::get_peer_ip_address (fd_t sockfd_, std::string &ip_addr_)
|
||||
struct sockaddr_storage ss;
|
||||
|
||||
#if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_WINDOWS
|
||||
int addrlen = static_cast <int> (sizeof ss);
|
||||
int addrlen = static_cast<int> (sizeof ss);
|
||||
#else
|
||||
socklen_t addrlen = sizeof ss;
|
||||
#endif
|
||||
rc = getpeername (sockfd_, (struct sockaddr*) &ss, &addrlen);
|
||||
rc = getpeername (sockfd_, (struct sockaddr *) &ss, &addrlen);
|
||||
#ifdef ZMQ_HAVE_WINDOWS
|
||||
if (rc == SOCKET_ERROR) {
|
||||
const int last_error = WSAGetLastError();
|
||||
wsa_assert (last_error != WSANOTINITIALISED &&
|
||||
last_error != WSAEFAULT &&
|
||||
last_error != WSAEINPROGRESS &&
|
||||
last_error != WSAENOTSOCK);
|
||||
const int last_error = WSAGetLastError ();
|
||||
wsa_assert (last_error != WSANOTINITIALISED && last_error != WSAEFAULT
|
||||
&& last_error != WSAEINPROGRESS
|
||||
&& last_error != WSAENOTSOCK);
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
if (rc == -1) {
|
||||
errno_assert (errno != EBADF &&
|
||||
errno != EFAULT &&
|
||||
errno != ENOTSOCK);
|
||||
errno_assert (errno != EBADF && errno != EFAULT && errno != ENOTSOCK);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
char host [NI_MAXHOST];
|
||||
rc = getnameinfo ((struct sockaddr*) &ss, addrlen, host, sizeof host,
|
||||
NULL, 0, NI_NUMERICHOST);
|
||||
char host[NI_MAXHOST];
|
||||
rc = getnameinfo ((struct sockaddr *) &ss, addrlen, host, sizeof host, NULL,
|
||||
0, NI_NUMERICHOST);
|
||||
if (rc != 0)
|
||||
return 0;
|
||||
|
||||
ip_addr_ = host;
|
||||
|
||||
union {
|
||||
union
|
||||
{
|
||||
struct sockaddr sa;
|
||||
struct sockaddr_storage sa_stor;
|
||||
} u;
|
||||
@ -172,7 +170,9 @@ int zmq::get_peer_ip_address (fd_t sockfd_, std::string &ip_addr_)
|
||||
|
||||
void zmq::set_ip_type_of_service (fd_t s_, int iptos)
|
||||
{
|
||||
int rc = setsockopt(s_, IPPROTO_IP, IP_TOS, reinterpret_cast<const char*>(&iptos), sizeof(iptos));
|
||||
int rc =
|
||||
setsockopt (s_, IPPROTO_IP, IP_TOS,
|
||||
reinterpret_cast<const char *> (&iptos), sizeof (iptos));
|
||||
|
||||
#ifdef ZMQ_HAVE_WINDOWS
|
||||
wsa_assert (rc != SOCKET_ERROR);
|
||||
@ -181,19 +181,14 @@ void zmq::set_ip_type_of_service (fd_t s_, int iptos)
|
||||
#endif
|
||||
|
||||
// Windows and Hurd do not support IPV6_TCLASS
|
||||
#if !defined (ZMQ_HAVE_WINDOWS) && defined (IPV6_TCLASS)
|
||||
rc = setsockopt(
|
||||
s_,
|
||||
IPPROTO_IPV6,
|
||||
IPV6_TCLASS,
|
||||
reinterpret_cast<const char*>(&iptos),
|
||||
sizeof(iptos));
|
||||
#if !defined(ZMQ_HAVE_WINDOWS) && defined(IPV6_TCLASS)
|
||||
rc = setsockopt (s_, IPPROTO_IPV6, IPV6_TCLASS,
|
||||
reinterpret_cast<const char *> (&iptos), sizeof (iptos));
|
||||
|
||||
// If IPv6 is not enabled ENOPROTOOPT will be returned on Linux and
|
||||
// EINVAL on OSX
|
||||
if (rc == -1) {
|
||||
errno_assert (errno == ENOPROTOOPT ||
|
||||
errno == EINVAL);
|
||||
errno_assert (errno == ENOPROTOOPT || errno == EINVAL);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@ -221,7 +216,8 @@ int zmq::set_nosigpipe (fd_t s_)
|
||||
void zmq::bind_to_device (fd_t s_, std::string &bound_device_)
|
||||
{
|
||||
#ifdef ZMQ_HAVE_SO_BINDTODEVICE
|
||||
int rc = setsockopt(s_, SOL_SOCKET, SO_BINDTODEVICE, bound_device_.c_str (), bound_device_.length ());
|
||||
int rc = setsockopt (s_, SOL_SOCKET, SO_BINDTODEVICE,
|
||||
bound_device_.c_str (), bound_device_.length ());
|
||||
|
||||
#ifdef ZMQ_HAVE_WINDOWS
|
||||
wsa_assert (rc != SOCKET_ERROR);
|
||||
|
34
src/ip.hpp
@ -35,30 +35,28 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// Same as socket(2), but allows for transparent tweaking the options.
|
||||
fd_t open_socket (int domain_, int type_, int protocol_);
|
||||
|
||||
// Same as socket(2), but allows for transparent tweaking the options.
|
||||
fd_t open_socket (int domain_, int type_, int protocol_);
|
||||
// Sets the socket into non-blocking mode.
|
||||
void unblock_socket (fd_t s_);
|
||||
|
||||
// Sets the socket into non-blocking mode.
|
||||
void unblock_socket (fd_t s_);
|
||||
// Enable IPv4-mapping of addresses in case it is disabled by default.
|
||||
void enable_ipv4_mapping (fd_t s_);
|
||||
|
||||
// Enable IPv4-mapping of addresses in case it is disabled by default.
|
||||
void enable_ipv4_mapping (fd_t s_);
|
||||
// Returns string representation of peer's address.
|
||||
// Socket sockfd_ must be connected. Returns true iff successful.
|
||||
int get_peer_ip_address (fd_t sockfd_, std::string &ip_addr_);
|
||||
|
||||
// Returns string representation of peer's address.
|
||||
// Socket sockfd_ must be connected. Returns true iff successful.
|
||||
int get_peer_ip_address (fd_t sockfd_, std::string &ip_addr_);
|
||||
// Sets the IP Type-Of-Service for the underlying socket
|
||||
void set_ip_type_of_service (fd_t s_, int iptos);
|
||||
|
||||
// Sets the IP Type-Of-Service for the underlying socket
|
||||
void set_ip_type_of_service (fd_t s_, int iptos);
|
||||
|
||||
// Sets the SO_NOSIGPIPE option for the underlying socket.
|
||||
// Return 0 on success, -1 if the connection has been closed by the peer
|
||||
int set_nosigpipe (fd_t s_);
|
||||
|
||||
// Binds the underlying socket to the given device, eg. VRF or interface
|
||||
void bind_to_device (fd_t s_, std::string &bound_device_);
|
||||
// Sets the SO_NOSIGPIPE option for the underlying socket.
|
||||
// Return 0 on success, -1 if the connection has been closed by the peer
|
||||
int set_nosigpipe (fd_t s_);
|
||||
|
||||
// Binds the underlying socket to the given device, eg. VRF or interface
|
||||
void bind_to_device (fd_t s_, std::string &bound_device_);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -48,7 +48,7 @@ zmq::ipc_address_t::ipc_address_t (const sockaddr *sa, socklen_t sa_len)
|
||||
|
||||
memset (&address, 0, sizeof address);
|
||||
if (sa->sa_family == AF_UNIX)
|
||||
memcpy(&address, sa, sa_len);
|
||||
memcpy (&address, sa, sa_len);
|
||||
}
|
||||
|
||||
zmq::ipc_address_t::~ipc_address_t ()
|
||||
@ -61,7 +61,7 @@ int zmq::ipc_address_t::resolve (const char *path_)
|
||||
errno = ENAMETOOLONG;
|
||||
return -1;
|
||||
}
|
||||
if (path_ [0] == '@' && !path_ [1]) {
|
||||
if (path_[0] == '@' && !path_[1]) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
@ -69,7 +69,7 @@ int zmq::ipc_address_t::resolve (const char *path_)
|
||||
address.sun_family = AF_UNIX;
|
||||
strcpy (address.sun_path, path_);
|
||||
/* Abstract sockets start with '\0' */
|
||||
if (path_ [0] == '@')
|
||||
if (path_[0] == '@')
|
||||
*address.sun_path = '\0';
|
||||
return 0;
|
||||
}
|
||||
@ -83,23 +83,24 @@ int zmq::ipc_address_t::to_string (std::string &addr_)
|
||||
|
||||
std::stringstream s;
|
||||
s << "ipc://";
|
||||
if (!address.sun_path [0] && address.sun_path [1])
|
||||
s << "@" << address.sun_path + 1;
|
||||
if (!address.sun_path[0] && address.sun_path[1])
|
||||
s << "@" << address.sun_path + 1;
|
||||
else
|
||||
s << address.sun_path;
|
||||
s << address.sun_path;
|
||||
addr_ = s.str ();
|
||||
return 0;
|
||||
}
|
||||
|
||||
const sockaddr *zmq::ipc_address_t::addr () const
|
||||
{
|
||||
return (sockaddr*) &address;
|
||||
return (sockaddr *) &address;
|
||||
}
|
||||
|
||||
socklen_t zmq::ipc_address_t::addrlen () const
|
||||
{
|
||||
if (!address.sun_path [0] && address.sun_path [1])
|
||||
return (socklen_t) strlen (address.sun_path + 1) + sizeof (sa_family_t) + 1;
|
||||
if (!address.sun_path[0] && address.sun_path[1])
|
||||
return (socklen_t) strlen (address.sun_path + 1) + sizeof (sa_family_t)
|
||||
+ 1;
|
||||
return (socklen_t) sizeof address;
|
||||
}
|
||||
|
||||
|
@ -39,36 +39,30 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class ipc_address_t
|
||||
{
|
||||
public:
|
||||
ipc_address_t ();
|
||||
ipc_address_t (const sockaddr *sa, socklen_t sa_len);
|
||||
~ipc_address_t ();
|
||||
|
||||
class ipc_address_t
|
||||
{
|
||||
public:
|
||||
// This function sets up the address for UNIX domain transport.
|
||||
int resolve (const char *path_);
|
||||
|
||||
ipc_address_t ();
|
||||
ipc_address_t (const sockaddr *sa, socklen_t sa_len);
|
||||
~ipc_address_t ();
|
||||
// The opposite to resolve()
|
||||
int to_string (std::string &addr_);
|
||||
|
||||
// This function sets up the address for UNIX domain transport.
|
||||
int resolve (const char *path_);
|
||||
const sockaddr *addr () const;
|
||||
socklen_t addrlen () const;
|
||||
|
||||
// The opposite to resolve()
|
||||
int to_string (std::string &addr_);
|
||||
|
||||
const sockaddr *addr () const;
|
||||
socklen_t addrlen () const;
|
||||
|
||||
private:
|
||||
|
||||
struct sockaddr_un address;
|
||||
|
||||
ipc_address_t (const ipc_address_t&);
|
||||
const ipc_address_t &operator = (const ipc_address_t&);
|
||||
};
|
||||
private:
|
||||
struct sockaddr_un address;
|
||||
|
||||
ipc_address_t (const ipc_address_t &);
|
||||
const ipc_address_t &operator= (const ipc_address_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -50,8 +50,10 @@
|
||||
#include <sys/un.h>
|
||||
|
||||
zmq::ipc_connecter_t::ipc_connecter_t (class io_thread_t *io_thread_,
|
||||
class session_base_t *session_, const options_t &options_,
|
||||
const address_t *addr_, bool delayed_start_) :
|
||||
class session_base_t *session_,
|
||||
const options_t &options_,
|
||||
const address_t *addr_,
|
||||
bool delayed_start_) :
|
||||
own_t (io_thread_, options_),
|
||||
io_object_t (io_thread_),
|
||||
addr (addr_),
|
||||
@ -60,12 +62,12 @@ zmq::ipc_connecter_t::ipc_connecter_t (class io_thread_t *io_thread_,
|
||||
delayed_start (delayed_start_),
|
||||
timer_started (false),
|
||||
session (session_),
|
||||
current_reconnect_ivl(options.reconnect_ivl)
|
||||
current_reconnect_ivl (options.reconnect_ivl)
|
||||
{
|
||||
zmq_assert (addr);
|
||||
zmq_assert (addr->protocol == "ipc");
|
||||
addr->to_string (endpoint);
|
||||
socket = session-> get_socket();
|
||||
socket = session->get_socket ();
|
||||
}
|
||||
|
||||
zmq::ipc_connecter_t::~ipc_connecter_t ()
|
||||
@ -118,12 +120,12 @@ void zmq::ipc_connecter_t::out_event ()
|
||||
// Handle the error condition by attempt to reconnect.
|
||||
if (fd == retired_fd) {
|
||||
close ();
|
||||
add_reconnect_timer();
|
||||
add_reconnect_timer ();
|
||||
return;
|
||||
}
|
||||
// Create the engine object for this connection.
|
||||
stream_engine_t *engine = new (std::nothrow)
|
||||
stream_engine_t (fd, options, endpoint);
|
||||
stream_engine_t *engine =
|
||||
new (std::nothrow) stream_engine_t (fd, options, endpoint);
|
||||
alloc_assert (engine);
|
||||
|
||||
// Attach the engine to the corresponding session object.
|
||||
@ -155,12 +157,11 @@ void zmq::ipc_connecter_t::start_connecting ()
|
||||
}
|
||||
|
||||
// Connection establishment may be delayed. Poll for its completion.
|
||||
else
|
||||
if (rc == -1 && errno == EINPROGRESS) {
|
||||
else if (rc == -1 && errno == EINPROGRESS) {
|
||||
handle = add_fd (s);
|
||||
handle_valid = true;
|
||||
set_pollout (handle);
|
||||
socket->event_connect_delayed (endpoint, zmq_errno());
|
||||
socket->event_connect_delayed (endpoint, zmq_errno ());
|
||||
}
|
||||
|
||||
// Handle any other error condition by eventual reconnect.
|
||||
@ -171,9 +172,9 @@ void zmq::ipc_connecter_t::start_connecting ()
|
||||
}
|
||||
}
|
||||
|
||||
void zmq::ipc_connecter_t::add_reconnect_timer()
|
||||
void zmq::ipc_connecter_t::add_reconnect_timer ()
|
||||
{
|
||||
int rc_ivl = get_new_reconnect_ivl();
|
||||
int rc_ivl = get_new_reconnect_ivl ();
|
||||
add_timer (rc_ivl, reconnect_timer_id);
|
||||
socket->event_connect_retried (endpoint, rc_ivl);
|
||||
timer_started = true;
|
||||
@@ -182,17 +183,16 @@ void zmq::ipc_connecter_t::add_reconnect_timer()
int zmq::ipc_connecter_t::get_new_reconnect_ivl ()
{
// The new interval is the current interval + random value.
int this_interval = current_reconnect_ivl +
(generate_random () % options.reconnect_ivl);
int this_interval =
current_reconnect_ivl + (generate_random () % options.reconnect_ivl);

// Only change the current reconnect interval if the maximum reconnect
// interval was set and if it's larger than the reconnect interval.
if (options.reconnect_ivl_max > 0 &&
options.reconnect_ivl_max > options.reconnect_ivl) {

if (options.reconnect_ivl_max > 0
&& options.reconnect_ivl_max > options.reconnect_ivl) {
// Calculate the next interval
current_reconnect_ivl = current_reconnect_ivl * 2;
if(current_reconnect_ivl >= options.reconnect_ivl_max) {
if (current_reconnect_ivl >= options.reconnect_ivl_max) {
current_reconnect_ivl = options.reconnect_ivl_max;
}
}
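The backoff rule that this hunk only re-wraps is easy to restate on its own: each attempt waits the current interval plus random jitter, and the current interval then doubles for the next attempt, capped at reconnect_ivl_max. A minimal standalone sketch of that rule (the struct and field names are illustrative, not part of libzmq's API):

#include <algorithm>
#include <cstdlib>

struct reconnect_backoff_t
{
    int reconnect_ivl;     // base interval in ms, must be > 0
    int reconnect_ivl_max; // 0 disables both the growth and the cap
    int current_ivl;       // start this equal to reconnect_ivl

    int next_interval ()
    {
        // Delay for this attempt: current interval plus random jitter.
        const int this_interval = current_ivl + std::rand () % reconnect_ivl;

        // Grow the interval for the next attempt, bounded by the maximum.
        if (reconnect_ivl_max > 0 && reconnect_ivl_max > reconnect_ivl)
            current_ivl = std::min (current_ivl * 2, reconnect_ivl_max);

        return this_interval;
    }
};

With {100, 3200, 100} the successive delays start around 100-200 ms and roughly double each time until current_ivl reaches the 3200 ms cap.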
@ -212,9 +212,8 @@ int zmq::ipc_connecter_t::open ()
|
||||
unblock_socket (s);
|
||||
|
||||
// Connect to the remote peer.
|
||||
int rc = ::connect (
|
||||
s, addr->resolved.ipc_addr->addr (),
|
||||
addr->resolved.ipc_addr->addrlen ());
|
||||
int rc = ::connect (s, addr->resolved.ipc_addr->addr (),
|
||||
addr->resolved.ipc_addr->addrlen ());
|
||||
|
||||
// Connect was successful immediately.
|
||||
if (rc == 0)
|
||||
@ -251,20 +250,19 @@ zmq::fd_t zmq::ipc_connecter_t::connect ()
|
||||
#else
|
||||
socklen_t len = sizeof (err);
|
||||
#endif
|
||||
int rc = getsockopt (s, SOL_SOCKET, SO_ERROR, (char*) &err, &len);
|
||||
int rc = getsockopt (s, SOL_SOCKET, SO_ERROR, (char *) &err, &len);
|
||||
if (rc == -1) {
|
||||
if (errno == ENOPROTOOPT)
|
||||
errno = 0;
|
||||
err = errno;
|
||||
}
|
||||
if (err != 0) {
|
||||
|
||||
// Assert if the error was caused by 0MQ bug.
|
||||
// Networking problems are OK. No need to assert.
|
||||
errno = err;
|
||||
errno_assert (errno == ECONNREFUSED || errno == ECONNRESET ||
|
||||
errno == ETIMEDOUT || errno == EHOSTUNREACH ||
|
||||
errno == ENETUNREACH || errno == ENETDOWN);
|
||||
errno_assert (errno == ECONNREFUSED || errno == ECONNRESET
|
||||
|| errno == ETIMEDOUT || errno == EHOSTUNREACH
|
||||
|| errno == ENETUNREACH || errno == ENETDOWN);
|
||||
|
||||
return retired_fd;
|
||||
}
|
||||
@ -275,4 +273,3 @@ zmq::fd_t zmq::ipc_connecter_t::connect ()
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -39,97 +39,97 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class io_thread_t;
|
||||
class session_base_t;
|
||||
struct address_t;
|
||||
|
||||
class io_thread_t;
|
||||
class session_base_t;
|
||||
struct address_t;
|
||||
class ipc_connecter_t : public own_t, public io_object_t
|
||||
{
|
||||
public:
|
||||
// If 'delayed_start' is true connecter first waits for a while,
|
||||
// then starts connection process.
|
||||
ipc_connecter_t (zmq::io_thread_t *io_thread_,
|
||||
zmq::session_base_t *session_,
|
||||
const options_t &options_,
|
||||
const address_t *addr_,
|
||||
bool delayed_start_);
|
||||
~ipc_connecter_t ();
|
||||
|
||||
class ipc_connecter_t : public own_t, public io_object_t
|
||||
private:
|
||||
// ID of the timer used to delay the reconnection.
|
||||
enum
|
||||
{
|
||||
public:
|
||||
|
||||
// If 'delayed_start' is true connecter first waits for a while,
|
||||
// then starts connection process.
|
||||
ipc_connecter_t (zmq::io_thread_t *io_thread_,
|
||||
zmq::session_base_t *session_, const options_t &options_,
|
||||
const address_t *addr_, bool delayed_start_);
|
||||
~ipc_connecter_t ();
|
||||
|
||||
private:
|
||||
|
||||
// ID of the timer used to delay the reconnection.
|
||||
enum {reconnect_timer_id = 1};
|
||||
|
||||
// Handlers for incoming commands.
|
||||
void process_plug ();
|
||||
void process_term (int linger_);
|
||||
|
||||
// Handlers for I/O events.
|
||||
void in_event ();
|
||||
void out_event ();
|
||||
void timer_event (int id_);
|
||||
|
||||
// Internal function to start the actual connection establishment.
|
||||
void start_connecting ();
|
||||
|
||||
// Internal function to add a reconnect timer
|
||||
void add_reconnect_timer();
|
||||
|
||||
// Internal function to return a reconnect backoff delay.
|
||||
// Will modify the current_reconnect_ivl used for next call
|
||||
// Returns the currently used interval
|
||||
int get_new_reconnect_ivl ();
|
||||
|
||||
// Open IPC connecting socket. Returns -1 in case of error,
|
||||
// 0 if connect was successful immediately. Returns -1 with
|
||||
// EAGAIN errno if async connect was launched.
|
||||
int open ();
|
||||
|
||||
// Close the connecting socket.
|
||||
int close ();
|
||||
|
||||
// Get the file descriptor of newly created connection. Returns
|
||||
// retired_fd if the connection was unsuccessful.
|
||||
fd_t connect ();
|
||||
|
||||
// Address to connect to. Owned by session_base_t.
|
||||
const address_t *addr;
|
||||
|
||||
// Underlying socket.
|
||||
fd_t s;
|
||||
|
||||
// Handle corresponding to the listening socket.
|
||||
handle_t handle;
|
||||
|
||||
// If true file descriptor is registered with the poller and 'handle'
|
||||
// contains valid value.
|
||||
bool handle_valid;
|
||||
|
||||
// If true, connecter is waiting a while before trying to connect.
|
||||
const bool delayed_start;
|
||||
|
||||
// True iff a timer has been started.
|
||||
bool timer_started;
|
||||
|
||||
// Reference to the session we belong to.
|
||||
zmq::session_base_t *session;
|
||||
|
||||
// Current reconnect ivl, updated for backoff strategy
|
||||
int current_reconnect_ivl;
|
||||
|
||||
// String representation of endpoint to connect to
|
||||
std::string endpoint;
|
||||
|
||||
// Socket
|
||||
zmq::socket_base_t *socket;
|
||||
|
||||
ipc_connecter_t (const ipc_connecter_t&);
|
||||
const ipc_connecter_t &operator = (const ipc_connecter_t&);
|
||||
reconnect_timer_id = 1
|
||||
};
|
||||
|
||||
// Handlers for incoming commands.
|
||||
void process_plug ();
|
||||
void process_term (int linger_);
|
||||
|
||||
// Handlers for I/O events.
|
||||
void in_event ();
|
||||
void out_event ();
|
||||
void timer_event (int id_);
|
||||
|
||||
// Internal function to start the actual connection establishment.
|
||||
void start_connecting ();
|
||||
|
||||
// Internal function to add a reconnect timer
|
||||
void add_reconnect_timer ();
|
||||
|
||||
// Internal function to return a reconnect backoff delay.
|
||||
// Will modify the current_reconnect_ivl used for next call
|
||||
// Returns the currently used interval
|
||||
int get_new_reconnect_ivl ();
|
||||
|
||||
// Open IPC connecting socket. Returns -1 in case of error,
|
||||
// 0 if connect was successful immediately. Returns -1 with
|
||||
// EAGAIN errno if async connect was launched.
|
||||
int open ();
|
||||
|
||||
// Close the connecting socket.
|
||||
int close ();
|
||||
|
||||
// Get the file descriptor of newly created connection. Returns
|
||||
// retired_fd if the connection was unsuccessful.
|
||||
fd_t connect ();
|
||||
|
||||
// Address to connect to. Owned by session_base_t.
|
||||
const address_t *addr;
|
||||
|
||||
// Underlying socket.
|
||||
fd_t s;
|
||||
|
||||
// Handle corresponding to the listening socket.
|
||||
handle_t handle;
|
||||
|
||||
// If true file descriptor is registered with the poller and 'handle'
|
||||
// contains valid value.
|
||||
bool handle_valid;
|
||||
|
||||
// If true, connecter is waiting a while before trying to connect.
|
||||
const bool delayed_start;
|
||||
|
||||
// True iff a timer has been started.
|
||||
bool timer_started;
|
||||
|
||||
// Reference to the session we belong to.
|
||||
zmq::session_base_t *session;
|
||||
|
||||
// Current reconnect ivl, updated for backoff strategy
|
||||
int current_reconnect_ivl;
|
||||
|
||||
// String representation of endpoint to connect to
|
||||
std::string endpoint;
|
||||
|
||||
// Socket
|
||||
zmq::socket_base_t *socket;
|
||||
|
||||
ipc_connecter_t (const ipc_connecter_t &);
|
||||
const ipc_connecter_t &operator= (const ipc_connecter_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -52,42 +52,41 @@
|
||||
#include <sys/stat.h>
|
||||
|
||||
#ifdef ZMQ_HAVE_LOCAL_PEERCRED
|
||||
# include <sys/types.h>
|
||||
# include <sys/ucred.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/ucred.h>
|
||||
#endif
|
||||
#ifdef ZMQ_HAVE_SO_PEERCRED
|
||||
# include <sys/types.h>
|
||||
# include <pwd.h>
|
||||
# include <grp.h>
|
||||
# if defined ZMQ_HAVE_OPENBSD
|
||||
# define ucred sockpeercred
|
||||
# endif
|
||||
#include <sys/types.h>
|
||||
#include <pwd.h>
|
||||
#include <grp.h>
|
||||
#if defined ZMQ_HAVE_OPENBSD
|
||||
#define ucred sockpeercred
|
||||
#endif
|
||||
#endif
|
||||
|
||||
const char *zmq::ipc_listener_t::tmp_env_vars[] = {
|
||||
"TMPDIR",
|
||||
"TEMPDIR",
|
||||
"TMP",
|
||||
0 // Sentinel
|
||||
"TMPDIR", "TEMPDIR", "TMP",
|
||||
0 // Sentinel
|
||||
};
|
||||
|
||||
int zmq::ipc_listener_t::create_wildcard_address(std::string& path_,
|
||||
std::string& file_)
|
||||
int zmq::ipc_listener_t::create_wildcard_address (std::string &path_,
|
||||
std::string &file_)
|
||||
{
|
||||
std::string tmp_path;
|
||||
|
||||
// If TMPDIR, TEMPDIR, or TMP are available and are directories, create
|
||||
// the socket directory there.
|
||||
const char **tmp_env = tmp_env_vars;
|
||||
while ( tmp_path.empty() && *tmp_env != 0 ) {
|
||||
char *tmpdir = getenv(*tmp_env);
|
||||
while (tmp_path.empty () && *tmp_env != 0) {
|
||||
char *tmpdir = getenv (*tmp_env);
|
||||
struct stat statbuf;
|
||||
|
||||
// Confirm it is actually a directory before trying to use
|
||||
if ( tmpdir != 0 && ::stat(tmpdir, &statbuf) == 0 && S_ISDIR(statbuf.st_mode) ) {
|
||||
tmp_path.assign(tmpdir);
|
||||
if ( *(tmp_path.rbegin()) != '/' ) {
|
||||
tmp_path.push_back('/');
|
||||
if (tmpdir != 0 && ::stat (tmpdir, &statbuf) == 0
|
||||
&& S_ISDIR (statbuf.st_mode)) {
|
||||
tmp_path.assign (tmpdir);
|
||||
if (*(tmp_path.rbegin ()) != '/') {
|
||||
tmp_path.push_back ('/');
|
||||
}
|
||||
}
|
||||
|
||||
@ -96,10 +95,10 @@ int zmq::ipc_listener_t::create_wildcard_address(std::string& path_,
|
||||
}
|
||||
|
||||
// Append a directory name
|
||||
tmp_path.append("tmpXXXXXX");
|
||||
tmp_path.append ("tmpXXXXXX");
|
||||
|
||||
// We need room for tmp_path + trailing NUL
|
||||
std::vector<char> buffer(tmp_path.length()+1);
|
||||
std::vector<char> buffer (tmp_path.length () + 1);
|
||||
strcpy (&buffer[0], tmp_path.c_str ());
|
||||
|
||||
#ifdef HAVE_MKDTEMP
|
||||
@@ -121,7 +120,7 @@ int zmq::ipc_listener_t::create_wildcard_address(std::string& path_,
(void) path_;
int fd = mkstemp (&buffer[0]);
if (fd == -1)
return -1;
return -1;
::close (fd);

file_.assign (&buffer[0]);
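The mkdtemp/mkstemp handling above is what backs wildcard IPC endpoints at the public API level: binding to ipc://* makes the listener pick a fresh temporary socket path, and the caller can read the chosen path back with ZMQ_LAST_ENDPOINT. A minimal sketch using only the public API (error handling trimmed):

#include <zmq.h>
#include <cstdio>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *rep = zmq_socket (ctx, ZMQ_REP);

    // '*' asks the IPC listener to create a unique temporary socket file,
    // using the create_wildcard_address () machinery shown above.
    if (zmq_bind (rep, "ipc://*") != 0)
        return 1;

    // Ask which concrete ipc:// path was actually chosen.
    char endpoint[256];
    size_t size = sizeof endpoint;
    zmq_getsockopt (rep, ZMQ_LAST_ENDPOINT, endpoint, &size);
    printf ("bound to %s\n", endpoint);

    zmq_close (rep);
    zmq_ctx_term (ctx);
    return 0;
}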
@ -131,7 +130,8 @@ int zmq::ipc_listener_t::create_wildcard_address(std::string& path_,
|
||||
}
|
||||
|
||||
zmq::ipc_listener_t::ipc_listener_t (io_thread_t *io_thread_,
|
||||
socket_base_t *socket_, const options_t &options_) :
|
||||
socket_base_t *socket_,
|
||||
const options_t &options_) :
|
||||
own_t (io_thread_, options_),
|
||||
io_object_t (io_thread_),
|
||||
has_file (false),
|
||||
@ -166,13 +166,13 @@ void zmq::ipc_listener_t::in_event ()
|
||||
// If connection was reset by the peer in the meantime, just ignore it.
|
||||
// TODO: Handle specific errors like ENFILE/EMFILE etc.
|
||||
if (fd == retired_fd) {
|
||||
socket->event_accept_failed (endpoint, zmq_errno());
|
||||
socket->event_accept_failed (endpoint, zmq_errno ());
|
||||
return;
|
||||
}
|
||||
|
||||
// Create the engine object for this connection.
|
||||
stream_engine_t *engine = new (std::nothrow)
|
||||
stream_engine_t (fd, options, endpoint);
|
||||
stream_engine_t *engine =
|
||||
new (std::nothrow) stream_engine_t (fd, options, endpoint);
|
||||
alloc_assert (engine);
|
||||
|
||||
// Choose I/O thread to run connecter in. Given that we are already
|
||||
@ -181,8 +181,8 @@ void zmq::ipc_listener_t::in_event ()
|
||||
zmq_assert (io_thread);
|
||||
|
||||
// Create and launch a session object.
|
||||
session_base_t *session = session_base_t::create (io_thread, false, socket,
|
||||
options, NULL);
|
||||
session_base_t *session =
|
||||
session_base_t::create (io_thread, false, socket, options, NULL);
|
||||
errno_assert (session);
|
||||
session->inc_seqnum ();
|
||||
launch_child (session);
|
||||
@ -214,8 +214,8 @@ int zmq::ipc_listener_t::set_address (const char *addr_)
|
||||
std::string addr (addr_);
|
||||
|
||||
// Allow wildcard file
|
||||
if (options.use_fd == -1 && addr [0] == '*') {
|
||||
if ( create_wildcard_address(tmp_socket_dirname, addr) < 0 ) {
|
||||
if (options.use_fd == -1 && addr[0] == '*') {
|
||||
if (create_wildcard_address (tmp_socket_dirname, addr) < 0) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -226,19 +226,19 @@ int zmq::ipc_listener_t::set_address (const char *addr_)
|
||||
// working after the first client connects. The user will take care of
|
||||
// cleaning up the file after the service is stopped.
|
||||
if (options.use_fd == -1) {
|
||||
::unlink (addr.c_str());
|
||||
::unlink (addr.c_str ());
|
||||
}
|
||||
filename.clear ();
|
||||
|
||||
// Initialise the address structure.
|
||||
ipc_address_t address;
|
||||
int rc = address.resolve (addr.c_str());
|
||||
int rc = address.resolve (addr.c_str ());
|
||||
if (rc != 0) {
|
||||
if ( !tmp_socket_dirname.empty() ) {
|
||||
if (!tmp_socket_dirname.empty ()) {
|
||||
// We need to preserve errno to return to the user
|
||||
int errno_ = errno;
|
||||
::rmdir(tmp_socket_dirname.c_str ());
|
||||
tmp_socket_dirname.clear();
|
||||
::rmdir (tmp_socket_dirname.c_str ());
|
||||
tmp_socket_dirname.clear ();
|
||||
errno = errno_;
|
||||
}
|
||||
return -1;
|
||||
@ -252,11 +252,11 @@ int zmq::ipc_listener_t::set_address (const char *addr_)
|
||||
// Create a listening socket.
|
||||
s = open_socket (AF_UNIX, SOCK_STREAM, 0);
|
||||
if (s == -1) {
|
||||
if ( !tmp_socket_dirname.empty() ) {
|
||||
if (!tmp_socket_dirname.empty ()) {
|
||||
// We need to preserve errno to return to the user
|
||||
int errno_ = errno;
|
||||
::rmdir(tmp_socket_dirname.c_str ());
|
||||
tmp_socket_dirname.clear();
|
||||
::rmdir (tmp_socket_dirname.c_str ());
|
||||
tmp_socket_dirname.clear ();
|
||||
errno = errno_;
|
||||
}
|
||||
return -1;
|
||||
@ -273,7 +273,7 @@ int zmq::ipc_listener_t::set_address (const char *addr_)
|
||||
goto error;
|
||||
}
|
||||
|
||||
filename.assign (addr.c_str());
|
||||
filename.assign (addr.c_str ());
|
||||
has_file = true;
|
||||
|
||||
socket->event_listening (endpoint, s);
|
||||
@ -298,13 +298,13 @@ int zmq::ipc_listener_t::close ()
|
||||
if (has_file && options.use_fd == -1) {
|
||||
rc = 0;
|
||||
|
||||
if ( rc == 0 && !tmp_socket_dirname.empty() ) {
|
||||
rc = ::rmdir(tmp_socket_dirname.c_str ());
|
||||
tmp_socket_dirname.clear();
|
||||
if (rc == 0 && !tmp_socket_dirname.empty ()) {
|
||||
rc = ::rmdir (tmp_socket_dirname.c_str ());
|
||||
tmp_socket_dirname.clear ();
|
||||
}
|
||||
|
||||
if (rc != 0) {
|
||||
socket->event_close_failed (endpoint, zmq_errno());
|
||||
socket->event_close_failed (endpoint, zmq_errno ());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -317,9 +317,9 @@ int zmq::ipc_listener_t::close ()
|
||||
|
||||
bool zmq::ipc_listener_t::filter (fd_t sock)
|
||||
{
|
||||
if (options.ipc_uid_accept_filters.empty () &&
|
||||
options.ipc_pid_accept_filters.empty () &&
|
||||
options.ipc_gid_accept_filters.empty ())
|
||||
if (options.ipc_uid_accept_filters.empty ()
|
||||
&& options.ipc_pid_accept_filters.empty ()
|
||||
&& options.ipc_gid_accept_filters.empty ())
|
||||
return true;
|
||||
|
||||
struct ucred cred;
|
||||
@ -327,9 +327,12 @@ bool zmq::ipc_listener_t::filter (fd_t sock)
|
||||
|
||||
if (getsockopt (sock, SOL_SOCKET, SO_PEERCRED, &cred, &size))
|
||||
return false;
|
||||
if (options.ipc_uid_accept_filters.find (cred.uid) != options.ipc_uid_accept_filters.end () ||
|
||||
options.ipc_gid_accept_filters.find (cred.gid) != options.ipc_gid_accept_filters.end () ||
|
||||
options.ipc_pid_accept_filters.find (cred.pid) != options.ipc_pid_accept_filters.end ())
|
||||
if (options.ipc_uid_accept_filters.find (cred.uid)
|
||||
!= options.ipc_uid_accept_filters.end ()
|
||||
|| options.ipc_gid_accept_filters.find (cred.gid)
|
||||
!= options.ipc_gid_accept_filters.end ()
|
||||
|| options.ipc_pid_accept_filters.find (cred.pid)
|
||||
!= options.ipc_pid_accept_filters.end ())
|
||||
return true;
|
||||
|
||||
struct passwd *pw;
|
||||
@ -337,8 +340,9 @@ bool zmq::ipc_listener_t::filter (fd_t sock)
|
||||
|
||||
if (!(pw = getpwuid (cred.uid)))
|
||||
return false;
|
||||
for (options_t::ipc_gid_accept_filters_t::const_iterator it = options.ipc_gid_accept_filters.begin ();
|
||||
it != options.ipc_gid_accept_filters.end (); it++) {
|
||||
for (options_t::ipc_gid_accept_filters_t::const_iterator it =
|
||||
options.ipc_gid_accept_filters.begin ();
|
||||
it != options.ipc_gid_accept_filters.end (); it++) {
|
||||
if (!(gr = getgrgid (*it)))
|
||||
continue;
|
||||
for (char **mem = gr->gr_mem; *mem; mem++) {
|
||||
@ -353,8 +357,8 @@ bool zmq::ipc_listener_t::filter (fd_t sock)
|
||||
|
||||
bool zmq::ipc_listener_t::filter (fd_t sock)
|
||||
{
|
||||
if (options.ipc_uid_accept_filters.empty () &&
|
||||
options.ipc_gid_accept_filters.empty ())
|
||||
if (options.ipc_uid_accept_filters.empty ()
|
||||
&& options.ipc_gid_accept_filters.empty ())
|
||||
return true;
|
||||
|
||||
struct xucred cred;
|
||||
@ -364,10 +368,12 @@ bool zmq::ipc_listener_t::filter (fd_t sock)
|
||||
return false;
|
||||
if (cred.cr_version != XUCRED_VERSION)
|
||||
return false;
|
||||
if (options.ipc_uid_accept_filters.find (cred.cr_uid) != options.ipc_uid_accept_filters.end ())
|
||||
if (options.ipc_uid_accept_filters.find (cred.cr_uid)
|
||||
!= options.ipc_uid_accept_filters.end ())
|
||||
return true;
|
||||
for (int i = 0; i < cred.cr_ngroups; i++) {
|
||||
if (options.ipc_gid_accept_filters.find (cred.cr_groups[i]) != options.ipc_gid_accept_filters.end ())
|
||||
if (options.ipc_gid_accept_filters.find (cred.cr_groups[i])
|
||||
!= options.ipc_gid_accept_filters.end ())
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -388,13 +394,14 @@ zmq::fd_t zmq::ipc_listener_t::accept ()
|
||||
fd_t sock = ::accept (s, NULL, NULL);
|
||||
#endif
|
||||
if (sock == -1) {
|
||||
errno_assert (errno == EAGAIN || errno == EWOULDBLOCK ||
|
||||
errno == EINTR || errno == ECONNABORTED || errno == EPROTO ||
|
||||
errno == ENFILE);
|
||||
errno_assert (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR
|
||||
|| errno == ECONNABORTED || errno == EPROTO
|
||||
|| errno == ENFILE);
|
||||
return retired_fd;
|
||||
}
|
||||
|
||||
#if (!defined ZMQ_HAVE_SOCK_CLOEXEC || !defined HAVE_ACCEPT4) && defined FD_CLOEXEC
|
||||
#if (!defined ZMQ_HAVE_SOCK_CLOEXEC || !defined HAVE_ACCEPT4) \
|
||||
&& defined FD_CLOEXEC
|
||||
// Race condition can cause socket not to be closed (if fork happens
|
||||
// between accept and this point).
|
||||
int rc = fcntl (sock, F_SETFD, FD_CLOEXEC);
|
||||
|
@ -41,83 +41,78 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class io_thread_t;
|
||||
class socket_base_t;
|
||||
|
||||
class io_thread_t;
|
||||
class socket_base_t;
|
||||
class ipc_listener_t : public own_t, public io_object_t
|
||||
{
|
||||
public:
|
||||
ipc_listener_t (zmq::io_thread_t *io_thread_,
|
||||
zmq::socket_base_t *socket_,
|
||||
const options_t &options_);
|
||||
~ipc_listener_t ();
|
||||
|
||||
class ipc_listener_t : public own_t, public io_object_t
|
||||
{
|
||||
public:
|
||||
// Set address to listen on.
|
||||
int set_address (const char *addr_);
|
||||
|
||||
ipc_listener_t (zmq::io_thread_t *io_thread_,
|
||||
zmq::socket_base_t *socket_, const options_t &options_);
|
||||
~ipc_listener_t ();
|
||||
// Get the bound address for use with wildcards
|
||||
int get_address (std::string &addr_);
|
||||
|
||||
// Set address to listen on.
|
||||
int set_address (const char *addr_);
|
||||
private:
|
||||
// Handlers for incoming commands.
|
||||
void process_plug ();
|
||||
void process_term (int linger_);
|
||||
|
||||
// Get the bound address for use with wildcards
|
||||
int get_address (std::string &addr_);
|
||||
// Handlers for I/O events.
|
||||
void in_event ();
|
||||
|
||||
private:
|
||||
// Close the listening socket.
|
||||
int close ();
|
||||
|
||||
// Handlers for incoming commands.
|
||||
void process_plug ();
|
||||
void process_term (int linger_);
|
||||
// Create wildcard path address
|
||||
static int create_wildcard_address (std::string &path_, std::string &file_);
|
||||
|
||||
// Handlers for I/O events.
|
||||
void in_event ();
|
||||
// Filter new connections if the OS provides a mechanism to get
|
||||
// the credentials of the peer process. Called from accept().
|
||||
#if defined ZMQ_HAVE_SO_PEERCRED || defined ZMQ_HAVE_LOCAL_PEERCRED
|
||||
bool filter (fd_t sock);
|
||||
#endif
|
||||
|
||||
// Close the listening socket.
|
||||
int close ();
|
||||
// Accept the new connection. Returns the file descriptor of the
|
||||
// newly created connection. The function may return retired_fd
|
||||
// if the connection was dropped while waiting in the listen backlog.
|
||||
fd_t accept ();
|
||||
|
||||
// Create wildcard path address
|
||||
static int create_wildcard_address(std::string& path_,
|
||||
std::string& file_);
|
||||
// True, if the underlying file for UNIX domain socket exists.
|
||||
bool has_file;
|
||||
|
||||
// Filter new connections if the OS provides a mechanism to get
|
||||
// the credentials of the peer process. Called from accept().
|
||||
# if defined ZMQ_HAVE_SO_PEERCRED || defined ZMQ_HAVE_LOCAL_PEERCRED
|
||||
bool filter (fd_t sock);
|
||||
# endif
|
||||
// Name of the temporary directory (if any) that has the
|
||||
// the UNIX domain socket
|
||||
std::string tmp_socket_dirname;
|
||||
|
||||
// Accept the new connection. Returns the file descriptor of the
|
||||
// newly created connection. The function may return retired_fd
|
||||
// if the connection was dropped while waiting in the listen backlog.
|
||||
fd_t accept ();
|
||||
// Name of the file associated with the UNIX domain address.
|
||||
std::string filename;
|
||||
|
||||
// True, if the underlying file for UNIX domain socket exists.
|
||||
bool has_file;
|
||||
// Underlying socket.
|
||||
fd_t s;
|
||||
|
||||
// Name of the temporary directory (if any) that has the
|
||||
// the UNIX domain socket
|
||||
std::string tmp_socket_dirname;
|
||||
// Handle corresponding to the listening socket.
|
||||
handle_t handle;
|
||||
|
||||
// Name of the file associated with the UNIX domain address.
|
||||
std::string filename;
|
||||
// Socket the listener belongs to.
|
||||
zmq::socket_base_t *socket;
|
||||
|
||||
// Underlying socket.
|
||||
fd_t s;
|
||||
// String representation of endpoint to bind to
|
||||
std::string endpoint;
|
||||
|
||||
// Handle corresponding to the listening socket.
|
||||
handle_t handle;
|
||||
|
||||
// Socket the listener belongs to.
|
||||
zmq::socket_base_t *socket;
|
||||
|
||||
// String representation of endpoint to bind to
|
||||
std::string endpoint;
|
||||
|
||||
// Acceptable temporary directory environment variables
|
||||
static const char *tmp_env_vars[];
|
||||
|
||||
ipc_listener_t (const ipc_listener_t&);
|
||||
const ipc_listener_t &operator = (const ipc_listener_t&);
|
||||
};
|
||||
// Acceptable temporary directory environment variables
|
||||
static const char *tmp_env_vars[];
|
||||
|
||||
ipc_listener_t (const ipc_listener_t &);
|
||||
const ipc_listener_t &operator= (const ipc_listener_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
@@ -54,15 +54,13 @@
#define kevent_udata_t void *
#endif

zmq::kqueue_t::kqueue_t (const zmq::ctx_t &ctx_) :
ctx(ctx_),
stopping (false)
zmq::kqueue_t::kqueue_t (const zmq::ctx_t &ctx_) : ctx (ctx_), stopping (false)
{
// Create event queue
kqueue_fd = kqueue ();
errno_assert (kqueue_fd != -1);
#ifdef HAVE_FORK
pid = getpid();
pid = getpid ();
#endif
}

@@ -76,7 +74,7 @@ void zmq::kqueue_t::kevent_add (fd_t fd_, short filter_, void *udata_)
{
struct kevent ev;

EV_SET (&ev, fd_, filter_, EV_ADD, 0, 0, (kevent_udata_t)udata_);
EV_SET (&ev, fd_, filter_, EV_ADD, 0, 0, (kevent_udata_t) udata_);
int rc = kevent (kqueue_fd, &ev, 1, NULL, 0, NULL);
errno_assert (rc != -1);
}
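kevent_add () above shows the basic kqueue registration pattern: fill one struct kevent with EV_SET and hand it to a single kevent () call; waiting for events is another kevent () call with an output list, as loop () does further down in this file. A minimal BSD-only sketch of the same pattern, watching stdin for readability:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <cstdio>

int main ()
{
    int kq = kqueue ();
    if (kq == -1)
        return 1;

    // Register interest: one change record, no output events requested.
    struct kevent change;
    EV_SET (&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent (kq, &change, 1, NULL, 0, NULL) == -1)
        return 1;

    // Wait up to 5 seconds for the registered filter to fire.
    struct kevent event;
    struct timespec timeout = {5, 0};
    int n = kevent (kq, NULL, 0, &event, 1, &timeout);
    if (n > 0 && event.filter == EVFILT_READ)
        printf ("fd %d readable, %ld bytes pending\n", (int) event.ident,
                (long) event.data);

    close (kq);
    return 0;
}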
@ -91,7 +89,7 @@ void zmq::kqueue_t::kevent_delete (fd_t fd_, short filter_)
|
||||
}
|
||||
|
||||
zmq::kqueue_t::handle_t zmq::kqueue_t::add_fd (fd_t fd_,
|
||||
i_poll_events *reactor_)
|
||||
i_poll_events *reactor_)
|
||||
{
|
||||
poll_entry_t *pe = new (std::nothrow) poll_entry_t;
|
||||
alloc_assert (pe);
|
||||
@ -108,7 +106,7 @@ zmq::kqueue_t::handle_t zmq::kqueue_t::add_fd (fd_t fd_,
|
||||
|
||||
void zmq::kqueue_t::rm_fd (handle_t handle_)
|
||||
{
|
||||
poll_entry_t *pe = (poll_entry_t*) handle_;
|
||||
poll_entry_t *pe = (poll_entry_t *) handle_;
|
||||
if (pe->flag_pollin)
|
||||
kevent_delete (pe->fd, EVFILT_READ);
|
||||
if (pe->flag_pollout)
|
||||
@ -121,7 +119,7 @@ void zmq::kqueue_t::rm_fd (handle_t handle_)
|
||||
|
||||
void zmq::kqueue_t::set_pollin (handle_t handle_)
|
||||
{
|
||||
poll_entry_t *pe = (poll_entry_t*) handle_;
|
||||
poll_entry_t *pe = (poll_entry_t *) handle_;
|
||||
if (likely (!pe->flag_pollin)) {
|
||||
pe->flag_pollin = true;
|
||||
kevent_add (pe->fd, EVFILT_READ, pe);
|
||||
@ -130,7 +128,7 @@ void zmq::kqueue_t::set_pollin (handle_t handle_)
|
||||
|
||||
void zmq::kqueue_t::reset_pollin (handle_t handle_)
|
||||
{
|
||||
poll_entry_t *pe = (poll_entry_t*) handle_;
|
||||
poll_entry_t *pe = (poll_entry_t *) handle_;
|
||||
if (likely (pe->flag_pollin)) {
|
||||
pe->flag_pollin = false;
|
||||
kevent_delete (pe->fd, EVFILT_READ);
|
||||
@ -139,7 +137,7 @@ void zmq::kqueue_t::reset_pollin (handle_t handle_)
|
||||
|
||||
void zmq::kqueue_t::set_pollout (handle_t handle_)
|
||||
{
|
||||
poll_entry_t *pe = (poll_entry_t*) handle_;
|
||||
poll_entry_t *pe = (poll_entry_t *) handle_;
|
||||
if (likely (!pe->flag_pollout)) {
|
||||
pe->flag_pollout = true;
|
||||
kevent_add (pe->fd, EVFILT_WRITE, pe);
|
||||
@ -148,11 +146,11 @@ void zmq::kqueue_t::set_pollout (handle_t handle_)
|
||||
|
||||
void zmq::kqueue_t::reset_pollout (handle_t handle_)
|
||||
{
|
||||
poll_entry_t *pe = (poll_entry_t*) handle_;
|
||||
poll_entry_t *pe = (poll_entry_t *) handle_;
|
||||
if (likely (pe->flag_pollout)) {
|
||||
pe->flag_pollout = false;
|
||||
kevent_delete (pe->fd, EVFILT_WRITE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void zmq::kqueue_t::start ()
|
||||
@ -173,17 +171,16 @@ int zmq::kqueue_t::max_fds ()
|
||||
void zmq::kqueue_t::loop ()
|
||||
{
|
||||
while (!stopping) {
|
||||
|
||||
// Execute any due timers.
|
||||
int timeout = (int) execute_timers ();
|
||||
|
||||
// Wait for events.
|
||||
struct kevent ev_buf [max_io_events];
|
||||
struct kevent ev_buf[max_io_events];
|
||||
timespec ts = {timeout / 1000, (timeout % 1000) * 1000000};
|
||||
int n = kevent (kqueue_fd, NULL, 0, &ev_buf [0], max_io_events,
|
||||
timeout ? &ts: NULL);
|
||||
int n = kevent (kqueue_fd, NULL, 0, &ev_buf[0], max_io_events,
|
||||
timeout ? &ts : NULL);
|
||||
#ifdef HAVE_FORK
|
||||
if (unlikely(pid != getpid())) {
|
||||
if (unlikely (pid != getpid ())) {
|
||||
//printf("zmq::kqueue_t::loop aborting on forked child %d\n", (int)getpid());
|
||||
// simply exit the loop in a forked process.
|
||||
return;
|
||||
@ -194,26 +191,27 @@ void zmq::kqueue_t::loop ()
|
||||
continue;
|
||||
}
|
||||
|
||||
for (int i = 0; i < n; i ++) {
|
||||
poll_entry_t *pe = (poll_entry_t*) ev_buf [i].udata;
|
||||
for (int i = 0; i < n; i++) {
|
||||
poll_entry_t *pe = (poll_entry_t *) ev_buf[i].udata;
|
||||
|
||||
if (pe->fd == retired_fd)
|
||||
continue;
|
||||
if (ev_buf [i].flags & EV_EOF)
|
||||
if (ev_buf[i].flags & EV_EOF)
|
||||
pe->reactor->in_event ();
|
||||
if (pe->fd == retired_fd)
|
||||
continue;
|
||||
if (ev_buf [i].filter == EVFILT_WRITE)
|
||||
if (ev_buf[i].filter == EVFILT_WRITE)
|
||||
pe->reactor->out_event ();
|
||||
if (pe->fd == retired_fd)
|
||||
continue;
|
||||
if (ev_buf [i].filter == EVFILT_READ)
|
||||
if (ev_buf[i].filter == EVFILT_READ)
|
||||
pe->reactor->in_event ();
|
||||
}
|
||||
|
||||
// Destroy retired event sources.
|
||||
for (retired_t::iterator it = retired.begin (); it != retired.end (); ++it) {
|
||||
LIBZMQ_DELETE(*it);
|
||||
for (retired_t::iterator it = retired.begin (); it != retired.end ();
|
||||
++it) {
|
||||
LIBZMQ_DELETE (*it);
|
||||
}
|
||||
retired.clear ();
|
||||
}
|
||||
@ -221,7 +219,7 @@ void zmq::kqueue_t::loop ()
|
||||
|
||||
void zmq::kqueue_t::worker_routine (void *arg_)
|
||||
{
|
||||
((kqueue_t*) arg_)->loop ();
|
||||
((kqueue_t *) arg_)->loop ();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
134 src/kqueue.hpp
@ -44,82 +44,78 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
struct i_poll_events;
|
||||
|
||||
struct i_poll_events;
|
||||
// Implements socket polling mechanism using the BSD-specific
|
||||
// kqueue interface.
|
||||
|
||||
// Implements socket polling mechanism using the BSD-specific
|
||||
// kqueue interface.
|
||||
class kqueue_t : public poller_base_t
|
||||
{
|
||||
public:
|
||||
typedef void *handle_t;
|
||||
|
||||
class kqueue_t : public poller_base_t
|
||||
kqueue_t (const ctx_t &ctx_);
|
||||
~kqueue_t ();
|
||||
|
||||
// "poller" concept.
|
||||
handle_t add_fd (fd_t fd_, zmq::i_poll_events *events_);
|
||||
void rm_fd (handle_t handle_);
|
||||
void set_pollin (handle_t handle_);
|
||||
void reset_pollin (handle_t handle_);
|
||||
void set_pollout (handle_t handle_);
|
||||
void reset_pollout (handle_t handle_);
|
||||
void start ();
|
||||
void stop ();
|
||||
|
||||
static int max_fds ();
|
||||
|
||||
private:
|
||||
// Main worker thread routine.
|
||||
static void worker_routine (void *arg_);
|
||||
|
||||
// Main event loop.
|
||||
void loop ();
|
||||
|
||||
// Reference to ZMQ context.
|
||||
const ctx_t &ctx;
|
||||
|
||||
// File descriptor referring to the kernel event queue.
|
||||
fd_t kqueue_fd;
|
||||
|
||||
// Adds the event to the kqueue.
|
||||
void kevent_add (fd_t fd_, short filter_, void *udata_);
|
||||
|
||||
// Deletes the event from the kqueue.
|
||||
void kevent_delete (fd_t fd_, short filter_);
|
||||
|
||||
struct poll_entry_t
|
||||
{
|
||||
public:
|
||||
|
||||
typedef void* handle_t;
|
||||
|
||||
kqueue_t (const ctx_t &ctx_);
|
||||
~kqueue_t ();
|
||||
|
||||
// "poller" concept.
|
||||
handle_t add_fd (fd_t fd_, zmq::i_poll_events *events_);
|
||||
void rm_fd (handle_t handle_);
|
||||
void set_pollin (handle_t handle_);
|
||||
void reset_pollin (handle_t handle_);
|
||||
void set_pollout (handle_t handle_);
|
||||
void reset_pollout (handle_t handle_);
|
||||
void start ();
|
||||
void stop ();
|
||||
|
||||
static int max_fds ();
|
||||
|
||||
private:
|
||||
|
||||
// Main worker thread routine.
|
||||
static void worker_routine (void *arg_);
|
||||
|
||||
// Main event loop.
|
||||
void loop ();
|
||||
|
||||
// Reference to ZMQ context.
|
||||
const ctx_t &ctx;
|
||||
|
||||
// File descriptor referring to the kernel event queue.
|
||||
fd_t kqueue_fd;
|
||||
|
||||
// Adds the event to the kqueue.
|
||||
void kevent_add (fd_t fd_, short filter_, void *udata_);
|
||||
|
||||
// Deletes the event from the kqueue.
|
||||
void kevent_delete (fd_t fd_, short filter_);
|
||||
|
||||
struct poll_entry_t
|
||||
{
|
||||
fd_t fd;
|
||||
bool flag_pollin;
|
||||
bool flag_pollout;
|
||||
zmq::i_poll_events *reactor;
|
||||
};
|
||||
|
||||
// List of retired event sources.
|
||||
typedef std::vector <poll_entry_t*> retired_t;
|
||||
retired_t retired;
|
||||
|
||||
// If true, thread is in the process of shutting down.
|
||||
bool stopping;
|
||||
|
||||
// Handle of the physical thread doing the I/O work.
|
||||
thread_t worker;
|
||||
|
||||
kqueue_t (const kqueue_t&);
|
||||
const kqueue_t &operator = (const kqueue_t&);
|
||||
|
||||
#ifdef HAVE_FORK
|
||||
// the process that created this context. Used to detect forking.
|
||||
pid_t pid;
|
||||
#endif
|
||||
fd_t fd;
|
||||
bool flag_pollin;
|
||||
bool flag_pollout;
|
||||
zmq::i_poll_events *reactor;
|
||||
};
|
||||
|
||||
typedef kqueue_t poller_t;
|
||||
// List of retired event sources.
|
||||
typedef std::vector<poll_entry_t *> retired_t;
|
||||
retired_t retired;
|
||||
|
||||
// If true, thread is in the process of shutting down.
|
||||
bool stopping;
|
||||
|
||||
// Handle of the physical thread doing the I/O work.
|
||||
thread_t worker;
|
||||
|
||||
kqueue_t (const kqueue_t &);
|
||||
const kqueue_t &operator= (const kqueue_t &);
|
||||
|
||||
#ifdef HAVE_FORK
|
||||
// the process that created this context. Used to detect forking.
|
||||
pid_t pid;
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef kqueue_t poller_t;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
24 src/lb.cpp
@ -33,11 +33,7 @@
|
||||
#include "err.hpp"
|
||||
#include "msg.hpp"
|
||||
|
||||
zmq::lb_t::lb_t () :
|
||||
active (0),
|
||||
current (0),
|
||||
more (false),
|
||||
dropping (false)
|
||||
zmq::lb_t::lb_t () : active (0), current (0), more (false), dropping (false)
|
||||
{
|
||||
}
|
||||
|
||||
@ -89,7 +85,6 @@ int zmq::lb_t::sendpipe (msg_t *msg_, pipe_t **pipe_)
|
||||
// Drop the message if required. If we are at the end of the message
|
||||
// switch back to non-dropping mode.
|
||||
if (dropping) {
|
||||
|
||||
more = msg_->flags () & msg_t::more ? true : false;
|
||||
dropping = more;
|
||||
|
||||
@ -101,19 +96,17 @@ int zmq::lb_t::sendpipe (msg_t *msg_, pipe_t **pipe_)
|
||||
}
|
||||
|
||||
while (active > 0) {
|
||||
if (pipes [current]->write (msg_))
|
||||
{
|
||||
if (pipes[current]->write (msg_)) {
|
||||
if (pipe_)
|
||||
*pipe_ = pipes [current];
|
||||
*pipe_ = pipes[current];
|
||||
break;
|
||||
}
|
||||
|
||||
// If send fails for multi-part msg rollback other
|
||||
// parts sent earlier and return EAGAIN.
|
||||
// Application should handle this as suitable
|
||||
if (more)
|
||||
{
|
||||
pipes [current]->rollback ();
|
||||
if (more) {
|
||||
pipes[current]->rollback ();
|
||||
more = 0;
|
||||
errno = EAGAIN;
|
||||
return -1;
|
||||
@ -134,9 +127,9 @@ int zmq::lb_t::sendpipe (msg_t *msg_, pipe_t **pipe_)
|
||||
|
||||
// If it's final part of the message we can flush it downstream and
|
||||
// continue round-robining (load balance).
|
||||
more = msg_->flags () & msg_t::more? true: false;
|
||||
more = msg_->flags () & msg_t::more ? true : false;
|
||||
if (!more) {
|
||||
pipes [current]->flush ();
|
||||
pipes[current]->flush ();
|
||||
|
||||
if (++current >= active)
|
||||
current = 0;
|
||||
@ -157,9 +150,8 @@ bool zmq::lb_t::has_out ()
|
||||
return true;
|
||||
|
||||
while (active > 0) {
|
||||
|
||||
// Check whether a pipe has room for another message.
|
||||
if (pipes [current]->check_write ())
|
||||
if (pipes[current]->check_write ())
|
||||
return true;
|
||||
|
||||
// Deactivate the pipe.
|
||||
|
70 src/lb.hpp
@ -35,54 +35,50 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// This class manages a set of outbound pipes. On send it load balances
|
||||
// messages fairly among the pipes.
|
||||
|
||||
// This class manages a set of outbound pipes. On send it load balances
|
||||
// messages fairly among the pipes.
|
||||
class lb_t
|
||||
{
|
||||
public:
|
||||
lb_t ();
|
||||
~lb_t ();
|
||||
|
||||
class lb_t
|
||||
{
|
||||
public:
|
||||
void attach (pipe_t *pipe_);
|
||||
void activated (pipe_t *pipe_);
|
||||
void pipe_terminated (pipe_t *pipe_);
|
||||
|
||||
lb_t ();
|
||||
~lb_t ();
|
||||
int send (msg_t *msg_);
|
||||
|
||||
void attach (pipe_t *pipe_);
|
||||
void activated (pipe_t *pipe_);
|
||||
void pipe_terminated (pipe_t *pipe_);
|
||||
// Sends a message and stores the pipe that was used in pipe_.
|
||||
// It is possible for this function to return success but keep pipe_
|
||||
// unset if the rest of a multipart message to a terminated pipe is
|
||||
// being dropped. For the first frame, this will never happen.
|
||||
int sendpipe (msg_t *msg_, pipe_t **pipe_);
|
||||
|
||||
int send (msg_t *msg_);
|
||||
bool has_out ();
|
||||
|
||||
// Sends a message and stores the pipe that was used in pipe_.
|
||||
// It is possible for this function to return success but keep pipe_
|
||||
// unset if the rest of a multipart message to a terminated pipe is
|
||||
// being dropped. For the first frame, this will never happen.
|
||||
int sendpipe (msg_t *msg_, pipe_t **pipe_);
|
||||
private:
|
||||
// List of outbound pipes.
|
||||
typedef array_t<pipe_t, 2> pipes_t;
|
||||
pipes_t pipes;
|
||||
|
||||
bool has_out ();
|
||||
// Number of active pipes. All the active pipes are located at the
|
||||
// beginning of the pipes array.
|
||||
pipes_t::size_type active;
|
||||
|
||||
private:
|
||||
// Points to the last pipe that the most recent message was sent to.
|
||||
pipes_t::size_type current;
|
||||
|
||||
// List of outbound pipes.
|
||||
typedef array_t <pipe_t, 2> pipes_t;
|
||||
pipes_t pipes;
|
||||
// True if last we are in the middle of a multipart message.
|
||||
bool more;
|
||||
|
||||
// Number of active pipes. All the active pipes are located at the
|
||||
// beginning of the pipes array.
|
||||
pipes_t::size_type active;
|
||||
|
||||
// Points to the last pipe that the most recent message was sent to.
|
||||
pipes_t::size_type current;
|
||||
|
||||
// True if last we are in the middle of a multipart message.
|
||||
bool more;
|
||||
|
||||
// True if we are dropping current message.
|
||||
bool dropping;
|
||||
|
||||
lb_t (const lb_t&);
|
||||
const lb_t &operator = (const lb_t&);
|
||||
};
|
||||
// True if we are dropping current message.
|
||||
bool dropping;
|
||||
|
||||
lb_t (const lb_t &);
|
||||
const lb_t &operator= (const lb_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -3,10 +3,11 @@
/* 0MQ Internal Use */
/******************************************************************************/

#define LIBZMQ_UNUSED(object) (void)object
#define LIBZMQ_DELETE(p_object) {\
delete p_object; \
p_object = 0; \
}
#define LIBZMQ_UNUSED(object) (void) object
#define LIBZMQ_DELETE(p_object) \
{ \
delete p_object; \
p_object = 0; \
}

/******************************************************************************/

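For reference, the reformatted LIBZMQ_DELETE still expands to a plain block that deletes the pointer and then nulls it, which makes a repeated delete of the same variable harmless. A standalone illustration (widget_t is an invented type for this example):

#define LIBZMQ_DELETE(p_object)                                                \
    {                                                                          \
        delete p_object;                                                       \
        p_object = 0;                                                          \
    }

struct widget_t
{
};

int main ()
{
    widget_t *w = new widget_t;
    LIBZMQ_DELETE (w); // deletes the object and resets w to 0
    LIBZMQ_DELETE (w); // deleting a null pointer is a no-op
    return 0;
}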
@ -42,51 +42,47 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class mailbox_t : public i_mailbox
|
||||
{
|
||||
public:
|
||||
mailbox_t ();
|
||||
~mailbox_t ();
|
||||
|
||||
class mailbox_t : public i_mailbox
|
||||
{
|
||||
public:
|
||||
fd_t get_fd () const;
|
||||
void send (const command_t &cmd_);
|
||||
int recv (command_t *cmd_, int timeout_);
|
||||
|
||||
mailbox_t ();
|
||||
~mailbox_t ();
|
||||
|
||||
fd_t get_fd () const;
|
||||
void send (const command_t &cmd_);
|
||||
int recv (command_t *cmd_, int timeout_);
|
||||
|
||||
bool valid () const;
|
||||
bool valid () const;
|
||||
|
||||
#ifdef HAVE_FORK
|
||||
// close the file descriptors in the signaller. This is used in a forked
|
||||
// child process to close the file descriptors so that they do not interfere
|
||||
// with the context in the parent process.
|
||||
void forked () { signaler.forked (); }
|
||||
// close the file descriptors in the signaller. This is used in a forked
|
||||
// child process to close the file descriptors so that they do not interfere
|
||||
// with the context in the parent process.
|
||||
void forked () { signaler.forked (); }
|
||||
#endif
|
||||
|
||||
private:
|
||||
private:
|
||||
// The pipe to store actual commands.
|
||||
typedef ypipe_t<command_t, command_pipe_granularity> cpipe_t;
|
||||
cpipe_t cpipe;
|
||||
|
||||
// The pipe to store actual commands.
|
||||
typedef ypipe_t <command_t, command_pipe_granularity> cpipe_t;
|
||||
cpipe_t cpipe;
|
||||
// Signaler to pass signals from writer thread to reader thread.
|
||||
signaler_t signaler;
|
||||
|
||||
// Signaler to pass signals from writer thread to reader thread.
|
||||
signaler_t signaler;
|
||||
// There's only one thread receiving from the mailbox, but there
|
||||
// is arbitrary number of threads sending. Given that ypipe requires
|
||||
// synchronised access on both of its endpoints, we have to synchronise
|
||||
// the sending side.
|
||||
mutex_t sync;
|
||||
|
||||
// There's only one thread receiving from the mailbox, but there
|
||||
// is arbitrary number of threads sending. Given that ypipe requires
|
||||
// synchronised access on both of its endpoints, we have to synchronise
|
||||
// the sending side.
|
||||
mutex_t sync;
|
||||
|
||||
// True if the underlying pipe is active, ie. when we are allowed to
|
||||
// read commands from it.
|
||||
bool active;
|
||||
|
||||
// Disable copying of mailbox_t object.
|
||||
mailbox_t (const mailbox_t&);
|
||||
const mailbox_t &operator = (const mailbox_t&);
|
||||
};
|
||||
// True if the underlying pipe is active, ie. when we are allowed to
|
||||
// read commands from it.
|
||||
bool active;
|
||||
|
||||
// Disable copying of mailbox_t object.
|
||||
mailbox_t (const mailbox_t &);
|
||||
const mailbox_t &operator= (const mailbox_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -32,8 +32,7 @@
|
||||
#include "clock.hpp"
|
||||
#include "err.hpp"
|
||||
|
||||
zmq::mailbox_safe_t::mailbox_safe_t (mutex_t* sync_) :
|
||||
sync (sync_)
|
||||
zmq::mailbox_safe_t::mailbox_safe_t (mutex_t *sync_) : sync (sync_)
|
||||
{
|
||||
// Get the pipe into passive state. That way, if the users starts by
|
||||
// polling on the associated file descriptor it will get woken up when
|
||||
@ -52,23 +51,23 @@ zmq::mailbox_safe_t::~mailbox_safe_t ()
|
||||
sync->unlock ();
|
||||
}
|
||||
|
||||
void zmq::mailbox_safe_t::add_signaler (signaler_t* signaler)
|
||||
void zmq::mailbox_safe_t::add_signaler (signaler_t *signaler)
|
||||
{
|
||||
signalers.push_back(signaler);
|
||||
signalers.push_back (signaler);
|
||||
}
|
||||
|
||||
void zmq::mailbox_safe_t::remove_signaler (signaler_t* signaler)
|
||||
void zmq::mailbox_safe_t::remove_signaler (signaler_t *signaler)
|
||||
{
|
||||
std::vector<signaler_t*>::iterator it = signalers.begin();
|
||||
std::vector<signaler_t *>::iterator it = signalers.begin ();
|
||||
|
||||
// TODO: make a copy of array and signal outside the lock
|
||||
for (; it != signalers.end(); ++it){
|
||||
for (; it != signalers.end (); ++it) {
|
||||
if (*it == signaler)
|
||||
break;
|
||||
break;
|
||||
}
|
||||
|
||||
if (it != signalers.end())
|
||||
signalers.erase(it);
|
||||
if (it != signalers.end ())
|
||||
signalers.erase (it);
|
||||
}
|
||||
|
||||
void zmq::mailbox_safe_t::clear_signalers ()
|
||||
@ -84,8 +83,9 @@ void zmq::mailbox_safe_t::send (const command_t &cmd_)
|
||||
|
||||
if (!ok) {
|
||||
cond_var.broadcast ();
|
||||
for (std::vector<signaler_t*>::iterator it = signalers.begin(); it != signalers.end(); ++it){
|
||||
(*it)->send();
|
||||
for (std::vector<signaler_t *>::iterator it = signalers.begin ();
|
||||
it != signalers.end (); ++it) {
|
||||
(*it)->send ();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -44,51 +44,47 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class mailbox_safe_t : public i_mailbox
|
||||
{
|
||||
public:
|
||||
mailbox_safe_t (mutex_t *sync_);
|
||||
~mailbox_safe_t ();
|
||||
|
||||
class mailbox_safe_t : public i_mailbox
|
||||
{
|
||||
public:
|
||||
void send (const command_t &cmd_);
|
||||
int recv (command_t *cmd_, int timeout_);
|
||||
|
||||
mailbox_safe_t (mutex_t* sync_);
|
||||
~mailbox_safe_t ();
|
||||
|
||||
void send (const command_t &cmd_);
|
||||
int recv (command_t *cmd_, int timeout_);
|
||||
|
||||
// Add signaler to mailbox which will be called when a message is ready
|
||||
void add_signaler (signaler_t* signaler);
|
||||
void remove_signaler (signaler_t* signaler);
|
||||
void clear_signalers ();
|
||||
// Add signaler to mailbox which will be called when a message is ready
|
||||
void add_signaler (signaler_t *signaler);
|
||||
void remove_signaler (signaler_t *signaler);
|
||||
void clear_signalers ();
|
||||
|
||||
#ifdef HAVE_FORK
|
||||
// close the file descriptors in the signaller. This is used in a forked
|
||||
// child process to close the file descriptors so that they do not interfere
|
||||
// with the context in the parent process.
|
||||
void forked ()
|
||||
{
|
||||
// TODO: call fork on the condition variable
|
||||
}
|
||||
// close the file descriptors in the signaller. This is used in a forked
|
||||
// child process to close the file descriptors so that they do not interfere
|
||||
// with the context in the parent process.
|
||||
void forked ()
|
||||
{
|
||||
// TODO: call fork on the condition variable
|
||||
}
|
||||
#endif
|
||||
|
||||
private:
|
||||
private:
|
||||
// The pipe to store actual commands.
|
||||
typedef ypipe_t<command_t, command_pipe_granularity> cpipe_t;
|
||||
cpipe_t cpipe;
|
||||
|
||||
// The pipe to store actual commands.
|
||||
typedef ypipe_t <command_t, command_pipe_granularity> cpipe_t;
|
||||
cpipe_t cpipe;
|
||||
// Condition variable to pass signals from writer thread to reader thread.
|
||||
condition_variable_t cond_var;
|
||||
|
||||
// Condition variable to pass signals from writer thread to reader thread.
|
||||
condition_variable_t cond_var;
|
||||
// Synchronize access to the mailbox from receivers and senders
|
||||
mutex_t *sync;
|
||||
|
||||
// Synchronize access to the mailbox from receivers and senders
|
||||
mutex_t* sync;
|
||||
|
||||
std::vector <zmq::signaler_t* > signalers;
|
||||
|
||||
// Disable copying of mailbox_t object.
|
||||
mailbox_safe_t (const mailbox_safe_t&);
|
||||
const mailbox_safe_t &operator = (const mailbox_safe_t&);
|
||||
};
|
||||
std::vector<zmq::signaler_t *> signalers;
|
||||
|
||||
// Disable copying of mailbox_t object.
|
||||
mailbox_safe_t (const mailbox_safe_t &);
|
||||
const mailbox_safe_t &operator= (const mailbox_safe_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -37,8 +37,7 @@
|
||||
#include "wire.hpp"
|
||||
#include "session_base.hpp"
|
||||
|
||||
zmq::mechanism_t::mechanism_t (const options_t &options_) :
|
||||
options (options_)
|
||||
zmq::mechanism_t::mechanism_t (const options_t &options_) : options (options_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -48,7 +47,7 @@ zmq::mechanism_t::~mechanism_t ()
|
||||
|
||||
void zmq::mechanism_t::set_peer_routing_id (const void *id_ptr, size_t id_size)
|
||||
{
|
||||
routing_id.set (static_cast <const unsigned char*> (id_ptr), id_size);
|
||||
routing_id.set (static_cast<const unsigned char *> (id_ptr), id_size);
|
||||
}
|
||||
|
||||
void zmq::mechanism_t::peer_routing_id (msg_t *msg_)
|
||||
@ -61,7 +60,7 @@ void zmq::mechanism_t::peer_routing_id (msg_t *msg_)
|
||||
|
||||
void zmq::mechanism_t::set_user_id (const void *data_, size_t size_)
|
||||
{
|
||||
user_id.set (static_cast <const unsigned char*> (data_), size_);
|
||||
user_id.set (static_cast<const unsigned char *> (data_), size_);
|
||||
zap_properties.ZMQ_MAP_INSERT_OR_EMPLACE (
|
||||
ZMQ_MSG_PROPERTY_USER_ID, std::string ((char *) data_, size_));
|
||||
}
|
||||
@ -73,14 +72,12 @@ const zmq::blob_t &zmq::mechanism_t::get_user_id () const
|
||||
|
||||
const char *zmq::mechanism_t::socket_type_string (int socket_type) const
|
||||
{
|
||||
static const char *names [] = {"PAIR", "PUB", "SUB", "REQ", "REP",
|
||||
"DEALER", "ROUTER", "PULL", "PUSH",
|
||||
"XPUB", "XSUB", "STREAM",
|
||||
"SERVER", "CLIENT",
|
||||
"RADIO", "DISH",
|
||||
"GATHER", "SCATTER", "DGRAM"};
|
||||
static const char *names[] = {
|
||||
"PAIR", "PUB", "SUB", "REQ", "REP", "DEALER", "ROUTER",
|
||||
"PULL", "PUSH", "XPUB", "XSUB", "STREAM", "SERVER", "CLIENT",
|
||||
"RADIO", "DISH", "GATHER", "SCATTER", "DGRAM"};
|
||||
zmq_assert (socket_type >= 0 && socket_type <= 18);
|
||||
return names [socket_type];
|
||||
return names[socket_type];
|
||||
}
|
||||
|
||||
static size_t property_len (size_t name_len, size_t value_len)
|
||||
@@ -105,11 +102,11 @@ size_t zmq::mechanism_t::add_property (unsigned char *ptr,
const size_t total_len = ::property_len (name_len, value_len);
zmq_assert (total_len <= ptr_capacity);

*ptr++ = static_cast <unsigned char> (name_len);
*ptr++ = static_cast<unsigned char> (name_len);
memcpy (ptr, name, name_len);
ptr += name_len;
zmq_assert (value_len <= 0x7FFFFFFF);
put_uint32 (ptr, static_cast <uint32_t> (value_len));
put_uint32 (ptr, static_cast<uint32_t> (value_len));
ptr += 4;
memcpy (ptr, value, value_len);

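The body of add_property () defines the whole property layout used in ZMTP metadata: a one-byte name length, the name bytes, a four-byte value length written by put_uint32 () in network byte order, then the value bytes. A self-contained sketch of the same framing, assuming the caller has already checked that the name fits in one byte and that the output buffer is large enough:

#include <cstring>
#include <cstddef>

// [1-byte name length][name][4-byte value length, big-endian][value]
static size_t encode_property (unsigned char *out,
                               const char *name,
                               const void *value,
                               size_t value_len)
{
    const size_t name_len = strlen (name);
    unsigned char *ptr = out;

    *ptr++ = (unsigned char) name_len;
    memcpy (ptr, name, name_len);
    ptr += name_len;

    // Length prefix in network byte order, as put_uint32 () does.
    *ptr++ = (unsigned char) ((value_len >> 24) & 0xff);
    *ptr++ = (unsigned char) ((value_len >> 16) & 0xff);
    *ptr++ = (unsigned char) ((value_len >> 8) & 0xff);
    *ptr++ = (unsigned char) (value_len & 0xff);

    memcpy (ptr, value, value_len);
    ptr += value_len;

    return (size_t) (ptr - out);
}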
@ -131,28 +128,26 @@ size_t zmq::mechanism_t::add_basic_properties (unsigned char *buf,
|
||||
|
||||
// Add socket type property
|
||||
const char *socket_type = socket_type_string (options.type);
|
||||
ptr += add_property (ptr, buf_capacity,
|
||||
ZMTP_PROPERTY_SOCKET_TYPE, socket_type,
|
||||
strlen (socket_type));
|
||||
ptr += add_property (ptr, buf_capacity, ZMTP_PROPERTY_SOCKET_TYPE,
|
||||
socket_type, strlen (socket_type));
|
||||
|
||||
// Add identity (aka routing id) property
|
||||
if (options.type == ZMQ_REQ || options.type == ZMQ_DEALER
|
||||
|| options.type == ZMQ_ROUTER)
|
||||
ptr += add_property (ptr, buf_capacity - (ptr - buf),
|
||||
ZMTP_PROPERTY_IDENTITY, options.routing_id,
|
||||
options.routing_id_size);
|
||||
ptr +=
|
||||
add_property (ptr, buf_capacity - (ptr - buf), ZMTP_PROPERTY_IDENTITY,
|
||||
options.routing_id, options.routing_id_size);
|
||||
|
||||
return ptr - buf;
|
||||
}
|
||||
|
||||
size_t zmq::mechanism_t::basic_properties_len() const
|
||||
size_t zmq::mechanism_t::basic_properties_len () const
|
||||
{
|
||||
const char *socket_type = socket_type_string (options.type);
|
||||
return property_len (ZMTP_PROPERTY_SOCKET_TYPE, strlen (socket_type))
|
||||
+ ((options.type == ZMQ_REQ || options.type == ZMQ_DEALER
|
||||
|| options.type == ZMQ_ROUTER)
|
||||
? property_len (ZMTP_PROPERTY_IDENTITY,
|
||||
options.routing_id_size)
|
||||
? property_len (ZMTP_PROPERTY_IDENTITY, options.routing_id_size)
|
||||
: 0);
|
||||
}
|
||||
|
||||
@ -169,8 +164,8 @@ void zmq::mechanism_t::make_command_with_basic_properties (
|
||||
memcpy (ptr, prefix, prefix_len);
|
||||
ptr += prefix_len;
|
||||
|
||||
add_basic_properties (
|
||||
ptr, command_size - (ptr - (unsigned char *) msg_->data ()));
|
||||
add_basic_properties (ptr, command_size
|
||||
- (ptr - (unsigned char *) msg_->data ()));
|
||||
}
|
||||
|
||||
int zmq::mechanism_t::parse_metadata (const unsigned char *ptr_,
|
||||
@ -180,7 +175,7 @@ int zmq::mechanism_t::parse_metadata (const unsigned char *ptr_,
|
||||
size_t bytes_left = length_;
|
||||
|
||||
while (bytes_left > 1) {
|
||||
const size_t name_length = static_cast <size_t> (*ptr_);
|
||||
const size_t name_length = static_cast<size_t> (*ptr_);
|
||||
ptr_ += 1;
|
||||
bytes_left -= 1;
|
||||
if (bytes_left < name_length)
|
||||
@ -192,7 +187,7 @@ int zmq::mechanism_t::parse_metadata (const unsigned char *ptr_,
|
||||
if (bytes_left < 4)
|
||||
break;
|
||||
|
||||
const size_t value_length = static_cast <size_t> (get_uint32 (ptr_));
|
||||
const size_t value_length = static_cast<size_t> (get_uint32 (ptr_));
|
||||
ptr_ += 4;
|
||||
bytes_left -= 4;
|
||||
if (bytes_left < value_length)
|
||||
@ -204,25 +199,23 @@ int zmq::mechanism_t::parse_metadata (const unsigned char *ptr_,
|
||||
|
||||
if (name == ZMTP_PROPERTY_IDENTITY && options.recv_routing_id)
|
||||
set_peer_routing_id (value, value_length);
|
||||
else
|
||||
if (name == ZMTP_PROPERTY_SOCKET_TYPE) {
|
||||
else if (name == ZMTP_PROPERTY_SOCKET_TYPE) {
|
||||
const std::string socket_type ((char *) value, value_length);
|
||||
if (!check_socket_type (socket_type)) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
const int rc = property (name, value, value_length);
|
||||
if (rc == -1)
|
||||
return -1;
|
||||
}
|
||||
if (zap_flag)
|
||||
zap_properties.ZMQ_MAP_INSERT_OR_EMPLACE (
|
||||
name, std::string ((char *) value, value_length));
|
||||
name, std::string ((char *) value, value_length));
|
||||
else
|
||||
zmtp_properties.ZMQ_MAP_INSERT_OR_EMPLACE (
|
||||
name, std::string ((char *) value, value_length));
|
||||
name, std::string ((char *) value, value_length));
|
||||
}
|
||||
if (bytes_left > 0) {
|
||||
errno = EPROTO;
|
||||
@ -231,15 +224,16 @@ int zmq::mechanism_t::parse_metadata (const unsigned char *ptr_,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int zmq::mechanism_t::property (const std::string& /* name_ */,
|
||||
const void * /* value_ */, size_t /* length_ */)
|
||||
int zmq::mechanism_t::property (const std::string & /* name_ */,
|
||||
const void * /* value_ */,
|
||||
size_t /* length_ */)
|
||||
{
|
||||
// Default implementation does not check
|
||||
// property values and returns 0 to signal success.
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool zmq::mechanism_t::check_socket_type (const std::string& type_) const
|
||||
bool zmq::mechanism_t::check_socket_type (const std::string &type_) const
|
||||
{
|
||||
switch (options.type) {
|
||||
case ZMQ_REQ:
|
||||
|
@ -37,116 +37,108 @@

namespace zmq
{
class msg_t;
class session_base_t;

// Abstract class representing security mechanism.
// Different mechanism extends this class.

class mechanism_t
{
  public:
    enum status_t
    {
        handshaking,
        ready,
        error
    };

    mechanism_t (const options_t &options_);

    virtual ~mechanism_t ();

    // Prepare next handshake command that is to be sent to the peer.
    virtual int next_handshake_command (msg_t *msg_) = 0;

    // Process the handshake command received from the peer.
    virtual int process_handshake_command (msg_t *msg_) = 0;

    virtual int encode (msg_t *) { return 0; }

    virtual int decode (msg_t *) { return 0; }

    // Notifies mechanism about availability of ZAP message.
    virtual int zap_msg_available () { return 0; }

    // Returns the status of this mechanism.
    virtual status_t status () const = 0;

    void set_peer_routing_id (const void *id_ptr, size_t id_size);

    void peer_routing_id (msg_t *msg_);

    void set_user_id (const void *user_id, size_t size);

    const blob_t &get_user_id () const;

    const metadata_t::dict_t &get_zmtp_properties () { return zmtp_properties; }

    const metadata_t::dict_t &get_zap_properties () { return zap_properties; }

  protected:
    // Only used to identify the socket for the Socket-Type
    // property in the wire protocol.
    const char *socket_type_string (int socket_type) const;

    static size_t add_property (unsigned char *ptr,
                                size_t ptr_capacity,
                                const char *name,
                                const void *value,
                                size_t value_len);
    static size_t property_len (const char *name, size_t value_len);

    size_t add_basic_properties (unsigned char *ptr, size_t ptr_capacity) const;
    size_t basic_properties_len () const;

    void make_command_with_basic_properties (msg_t *msg_,
                                             const char *prefix,
                                             size_t prefix_len) const;

    // Parses a metadata.
    // Metadata consists of a list of properties consisting of
    // name and value as size-specified strings.
    // Returns 0 on success and -1 on error, in which case errno is set.
    int parse_metadata (const unsigned char *ptr_,
                        size_t length,
                        bool zap_flag = false);

    // This is called by parse_property method whenever it
    // parses a new property. The function should return 0
    // on success and -1 on error, in which case it should
    // set errno. Signaling error prevents parser from
    // parsing remaining data.
    // Derived classes are supposed to override this
    // method to handle custom processing.
    virtual int
    property (const std::string &name_, const void *value_, size_t length_);

    // Properties received from ZMTP peer.
    metadata_t::dict_t zmtp_properties;

    // Properties received from ZAP server.
    metadata_t::dict_t zap_properties;

    options_t options;

  private:
    blob_t routing_id;

    blob_t user_id;

    // Returns true iff socket associated with the mechanism
    // is compatible with a given socket type 'type_'.
    bool check_socket_type (const std::string &type_) const;
};
}

#endif
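The parse_metadata () and add_property () declarations above describe the property layout only in comments. The following standalone sketch is not part of this commit; it shows that layout under the assumption that it follows the ZMTP 3.x convention of a 1-octet name length, the name, a 4-octet value length in network byte order, and the value. The encode_property name, the buffer and the sample property are illustrative.

#include <cassert>
#include <cstddef>
#include <cstring>

static size_t encode_property (unsigned char *ptr,
                               size_t capacity,
                               const char *name,
                               const void *value,
                               size_t value_len)
{
    //  Layout: 1-octet name length, name, 4-octet value length (big endian), value.
    const size_t name_len = strlen (name);
    const size_t total = 1 + name_len + 4 + value_len;
    assert (name_len <= 255 && total <= capacity);

    *ptr++ = (unsigned char) name_len;
    memcpy (ptr, name, name_len);
    ptr += name_len;

    *ptr++ = (unsigned char) ((value_len >> 24) & 0xff);
    *ptr++ = (unsigned char) ((value_len >> 16) & 0xff);
    *ptr++ = (unsigned char) ((value_len >> 8) & 0xff);
    *ptr++ = (unsigned char) (value_len & 0xff);

    memcpy (ptr, value, value_len);
    return total;
}

int main ()
{
    unsigned char buf[64];
    //  "Socket-Type" is 11 octets, "REQ" is 3, so 1 + 11 + 4 + 3 = 19 octets in total.
    assert (encode_property (buf, sizeof buf, "Socket-Type", "REQ", 3) == 19);
    return 0;
}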
@ -37,7 +37,6 @@ zmq::mechanism_base_t::mechanism_base_t (session_base_t *const session_,
    mechanism_t (options_),
    session (session_)
{

}

int zmq::mechanism_base_t::check_basic_command_structure (msg_t *msg_)
@ -64,7 +63,7 @@ void zmq::mechanism_base_t::handle_error_reason (const char *error_reason,
    }
}

bool zmq::mechanism_base_t::zap_required() const
bool zmq::mechanism_base_t::zap_required () const
{
    return !options.zap_domain.empty ();
}
@ -44,9 +44,10 @@ class mechanism_base_t : public mechanism_t

    int check_basic_command_structure (msg_t *msg_);

    void handle_error_reason (const char *error_reason, size_t error_reason_len);
    void handle_error_reason (const char *error_reason,
                              size_t error_reason_len);

    bool zap_required() const;
    bool zap_required () const;
};
}
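zap_required () above only returns true once a ZAP domain has been set on the socket. A usage sketch, not part of this commit, of the public option that feeds options.zap_domain; the endpoint and domain string are illustrative.

#include <zmq.h>
#include <cassert>
#include <cstring>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *server = zmq_socket (ctx, ZMQ_REP);

    //  A non-empty ZMQ_ZAP_DOMAIN is exactly what zap_required () checks for.
    const char domain[] = "global";
    int rc = zmq_setsockopt (server, ZMQ_ZAP_DOMAIN, domain, strlen (domain));
    assert (rc == 0);

    rc = zmq_bind (server, "tcp://127.0.0.1:5560");
    assert (rc == 0);

    zmq_close (server);
    zmq_ctx_term (ctx);
    return 0;
}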
@ -30,24 +30,20 @@
#include "precompiled.hpp"
#include "metadata.hpp"

zmq::metadata_t::metadata_t (const dict_t &dict) :
    ref_cnt (1),
    dict (dict)
zmq::metadata_t::metadata_t (const dict_t &dict) : ref_cnt (1), dict (dict)
{
}

const char *zmq::metadata_t::get (const std::string &property) const
{
    dict_t::const_iterator it = dict.find (property);
    if (it == dict.end())
    {
    if (it == dict.end ()) {
        /** \todo remove this when support for the deprecated name "Identity" is dropped */
        if (property == "Identity")
            return get (ZMQ_MSG_PROPERTY_ROUTING_ID);

        return NULL;
    }
    else
    } else
        return it->second.c_str ();
}
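A usage sketch, not part of this commit: application code reaches metadata_t::get () through the public zmq_msg_gets () call, which returns NULL when a property is absent; the deprecated "Identity" name above is simply mapped to the routing-id property. The ROUTER socket and the endpoint address below are illustrative.

#include <zmq.h>
#include <cstdio>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *sock = zmq_socket (ctx, ZMQ_ROUTER);
    zmq_bind (sock, "tcp://127.0.0.1:5555");

    zmq_msg_t msg;
    zmq_msg_init (&msg);
    if (zmq_msg_recv (&msg, sock, 0) >= 0) {
        //  NULL means the peer did not supply the property.
        const char *socket_type = zmq_msg_gets (&msg, "Socket-Type");
        if (socket_type)
            printf ("peer socket type: %s\n", socket_type);
    }

    zmq_msg_close (&msg);
    zmq_close (sock);
    zmq_ctx_term (ctx);
    return 0;
}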
@ -37,34 +37,33 @@

namespace zmq
{
class metadata_t
{
  public:
    typedef std::map <std::string, std::string> dict_t;
class metadata_t
{
  public:
    typedef std::map<std::string, std::string> dict_t;

    metadata_t (const dict_t &dict);
    metadata_t (const dict_t &dict);

    // Returns pointer to property value or NULL if
    // property is not found.
    const char *get (const std::string &property) const;
    // Returns pointer to property value or NULL if
    // property is not found.
    const char *get (const std::string &property) const;

    void add_ref ();
    void add_ref ();

    // Drop reference. Returns true iff the reference
    // counter drops to zero.
    bool drop_ref ();
    // Drop reference. Returns true iff the reference
    // counter drops to zero.
    bool drop_ref ();

  private:
    metadata_t(const metadata_t&);
    metadata_t & operator=(const metadata_t&);
  private:
    metadata_t (const metadata_t &);
    metadata_t &operator= (const metadata_t &);

    // Reference counter.
    atomic_counter_t ref_cnt;

    // Dictionary holding metadata.
    dict_t dict;
};
    // Reference counter.
    atomic_counter_t ref_cnt;

    // Dictionary holding metadata.
    dict_t dict;
};
}

#endif
182 src/msg.cpp
@ -43,38 +43,31 @@
|
||||
// Check whether the sizes of public representation of the message (zmq_msg_t)
|
||||
// and private representation of the message (zmq::msg_t) match.
|
||||
|
||||
typedef char zmq_msg_size_check
|
||||
[2 * ((sizeof (zmq::msg_t) == sizeof (zmq_msg_t)) != 0) - 1];
|
||||
typedef char
|
||||
zmq_msg_size_check[2 * ((sizeof (zmq::msg_t) == sizeof (zmq_msg_t)) != 0)
|
||||
- 1];
|
||||
|
||||
bool zmq::msg_t::check () const
|
||||
{
|
||||
return u.base.type >= type_min && u.base.type <= type_max;
|
||||
return u.base.type >= type_min && u.base.type <= type_max;
|
||||
}
|
||||
|
||||
int zmq::msg_t::init (void* data_, size_t size_,
|
||||
msg_free_fn* ffn_, void* hint,
|
||||
content_t* content_)
|
||||
int zmq::msg_t::init (
|
||||
void *data_, size_t size_, msg_free_fn *ffn_, void *hint, content_t *content_)
|
||||
{
|
||||
if (size_ < max_vsm_size) {
|
||||
int const rc = init_size(size_);
|
||||
int const rc = init_size (size_);
|
||||
|
||||
if (rc != -1)
|
||||
{
|
||||
memcpy(data(), data_, size_);
|
||||
if (rc != -1) {
|
||||
memcpy (data (), data_, size_);
|
||||
return 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else if(content_)
|
||||
{
|
||||
return init_external_storage(content_, data_, size_, ffn_, hint);
|
||||
}
|
||||
else
|
||||
{
|
||||
return init_data(data_, size_, ffn_, hint);
|
||||
} else if (content_) {
|
||||
return init_external_storage (content_, data_, size_, ffn_, hint);
|
||||
} else {
|
||||
return init_data (data_, size_, ffn_, hint);
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,8 +91,7 @@ int zmq::msg_t::init_size (size_t size_)
|
||||
u.vsm.size = (unsigned char) size_;
|
||||
u.vsm.group[0] = '\0';
|
||||
u.vsm.routing_id = 0;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
u.lmsg.metadata = NULL;
|
||||
u.lmsg.type = type_lmsg;
|
||||
u.lmsg.flags = 0;
|
||||
@ -107,7 +99,7 @@ int zmq::msg_t::init_size (size_t size_)
|
||||
u.lmsg.routing_id = 0;
|
||||
u.lmsg.content = NULL;
|
||||
if (sizeof (content_t) + size_ > size_)
|
||||
u.lmsg.content = (content_t*) malloc (sizeof (content_t) + size_);
|
||||
u.lmsg.content = (content_t *) malloc (sizeof (content_t) + size_);
|
||||
if (unlikely (!u.lmsg.content)) {
|
||||
errno = ENOMEM;
|
||||
return -1;
|
||||
@ -122,11 +114,14 @@ int zmq::msg_t::init_size (size_t size_)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int zmq::msg_t::init_external_storage(content_t* content_, void* data_, size_t size_,
|
||||
msg_free_fn *ffn_, void* hint_)
|
||||
int zmq::msg_t::init_external_storage (content_t *content_,
|
||||
void *data_,
|
||||
size_t size_,
|
||||
msg_free_fn *ffn_,
|
||||
void *hint_)
|
||||
{
|
||||
zmq_assert(NULL != data_);
|
||||
zmq_assert(NULL != content_);
|
||||
zmq_assert (NULL != data_);
|
||||
zmq_assert (NULL != content_);
|
||||
|
||||
u.zclmsg.metadata = NULL;
|
||||
u.zclmsg.type = type_zclmsg;
|
||||
@ -139,13 +134,15 @@ int zmq::msg_t::init_external_storage(content_t* content_, void* data_, size_t s
|
||||
u.zclmsg.content->size = size_;
|
||||
u.zclmsg.content->ffn = ffn_;
|
||||
u.zclmsg.content->hint = hint_;
|
||||
new (&u.zclmsg.content->refcnt) zmq::atomic_counter_t();
|
||||
new (&u.zclmsg.content->refcnt) zmq::atomic_counter_t ();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int zmq::msg_t::init_data (void *data_, size_t size_,
|
||||
msg_free_fn *ffn_, void *hint_)
|
||||
int zmq::msg_t::init_data (void *data_,
|
||||
size_t size_,
|
||||
msg_free_fn *ffn_,
|
||||
void *hint_)
|
||||
{
|
||||
// If data is NULL and size is not 0, a segfault
|
||||
// would occur once the data is accessed
|
||||
@ -160,14 +157,13 @@ int zmq::msg_t::init_data (void *data_, size_t size_,
|
||||
u.cmsg.size = size_;
|
||||
u.cmsg.group[0] = '\0';
|
||||
u.cmsg.routing_id = 0;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
u.lmsg.metadata = NULL;
|
||||
u.lmsg.type = type_lmsg;
|
||||
u.lmsg.flags = 0;
|
||||
u.lmsg.group[0] = '\0';
|
||||
u.lmsg.routing_id = 0;
|
||||
u.lmsg.content = (content_t*) malloc (sizeof (content_t));
|
||||
u.lmsg.content = (content_t *) malloc (sizeof (content_t));
|
||||
if (!u.lmsg.content) {
|
||||
errno = ENOMEM;
|
||||
return -1;
|
||||
@ -180,7 +176,6 @@ int zmq::msg_t::init_data (void *data_, size_t size_,
|
||||
new (&u.lmsg.content->refcnt) zmq::atomic_counter_t ();
|
||||
}
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
int zmq::msg_t::init_delimiter ()
|
||||
@ -222,44 +217,40 @@ int zmq::msg_t::close ()
|
||||
}
|
||||
|
||||
if (u.base.type == type_lmsg) {
|
||||
|
||||
// If the content is not shared, or if it is shared and the reference
|
||||
// count has dropped to zero, deallocate it.
|
||||
if (!(u.lmsg.flags & msg_t::shared) ||
|
||||
!u.lmsg.content->refcnt.sub (1)) {
|
||||
|
||||
if (!(u.lmsg.flags & msg_t::shared)
|
||||
|| !u.lmsg.content->refcnt.sub (1)) {
|
||||
// We used "placement new" operator to initialize the reference
|
||||
// counter so we call the destructor explicitly now.
|
||||
u.lmsg.content->refcnt.~atomic_counter_t ();
|
||||
|
||||
if (u.lmsg.content->ffn)
|
||||
u.lmsg.content->ffn (u.lmsg.content->data,
|
||||
u.lmsg.content->hint);
|
||||
u.lmsg.content->hint);
|
||||
free (u.lmsg.content);
|
||||
}
|
||||
}
|
||||
|
||||
if (is_zcmsg())
|
||||
{
|
||||
zmq_assert(u.zclmsg.content->ffn);
|
||||
if (is_zcmsg ()) {
|
||||
zmq_assert (u.zclmsg.content->ffn);
|
||||
|
||||
// If the content is not shared, or if it is shared and the reference
|
||||
// count has dropped to zero, deallocate it.
|
||||
if (!(u.zclmsg.flags & msg_t::shared) ||
|
||||
!u.zclmsg.content->refcnt.sub (1)) {
|
||||
|
||||
if (!(u.zclmsg.flags & msg_t::shared)
|
||||
|| !u.zclmsg.content->refcnt.sub (1)) {
|
||||
// We used "placement new" operator to initialize the reference
|
||||
// counter so we call the destructor explicitly now.
|
||||
u.zclmsg.content->refcnt.~atomic_counter_t ();
|
||||
|
||||
u.zclmsg.content->ffn (u.zclmsg.content->data,
|
||||
u.zclmsg.content->hint);
|
||||
u.zclmsg.content->hint);
|
||||
}
|
||||
}
|
||||
|
||||
if (u.base.metadata != NULL) {
|
||||
if (u.base.metadata->drop_ref ()) {
|
||||
LIBZMQ_DELETE(u.base.metadata);
|
||||
LIBZMQ_DELETE (u.base.metadata);
|
||||
}
|
||||
u.base.metadata = NULL;
|
||||
}
|
||||
@ -303,8 +294,7 @@ int zmq::msg_t::copy (msg_t &src_)
|
||||
if (unlikely (rc < 0))
|
||||
return rc;
|
||||
|
||||
if (src_.u.base.type == type_lmsg ) {
|
||||
|
||||
if (src_.u.base.type == type_lmsg) {
|
||||
// One reference is added to shared messages. Non-shared messages
|
||||
// are turned into shared messages and reference count is set to 2.
|
||||
if (src_.u.lmsg.flags & msg_t::shared)
|
||||
@ -315,15 +305,14 @@ int zmq::msg_t::copy (msg_t &src_)
|
||||
}
|
||||
}
|
||||
|
||||
if (src_.is_zcmsg()) {
|
||||
|
||||
if (src_.is_zcmsg ()) {
|
||||
// One reference is added to shared messages. Non-shared messages
|
||||
// are turned into shared messages and reference count is set to 2.
|
||||
if (src_.u.zclmsg.flags & msg_t::shared)
|
||||
src_.refcnt()->add (1);
|
||||
src_.refcnt ()->add (1);
|
||||
else {
|
||||
src_.u.zclmsg.flags |= msg_t::shared;
|
||||
src_.refcnt()->set (2);
|
||||
src_.refcnt ()->set (2);
|
||||
}
|
||||
}
|
||||
if (src_.u.base.metadata != NULL)
|
||||
@ -332,7 +321,6 @@ int zmq::msg_t::copy (msg_t &src_)
|
||||
*this = src_;
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
void *zmq::msg_t::data ()
|
||||
@ -341,17 +329,17 @@ void *zmq::msg_t::data ()
|
||||
zmq_assert (check ());
|
||||
|
||||
switch (u.base.type) {
|
||||
case type_vsm:
|
||||
return u.vsm.data;
|
||||
case type_lmsg:
|
||||
return u.lmsg.content->data;
|
||||
case type_cmsg:
|
||||
return u.cmsg.data;
|
||||
case type_zclmsg:
|
||||
return u.zclmsg.content->data;
|
||||
default:
|
||||
zmq_assert (false);
|
||||
return NULL;
|
||||
case type_vsm:
|
||||
return u.vsm.data;
|
||||
case type_lmsg:
|
||||
return u.lmsg.content->data;
|
||||
case type_cmsg:
|
||||
return u.cmsg.data;
|
||||
case type_zclmsg:
|
||||
return u.zclmsg.content->data;
|
||||
default:
|
||||
zmq_assert (false);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -361,17 +349,17 @@ size_t zmq::msg_t::size () const
|
||||
zmq_assert (check ());
|
||||
|
||||
switch (u.base.type) {
|
||||
case type_vsm:
|
||||
return u.vsm.size;
|
||||
case type_lmsg:
|
||||
return u.lmsg.content->size;
|
||||
case type_zclmsg:
|
||||
return u.zclmsg.content->size;
|
||||
case type_cmsg:
|
||||
return u.cmsg.size;
|
||||
default:
|
||||
zmq_assert (false);
|
||||
return 0;
|
||||
case type_vsm:
|
||||
return u.vsm.size;
|
||||
case type_lmsg:
|
||||
return u.lmsg.content->size;
|
||||
case type_zclmsg:
|
||||
return u.zclmsg.content->size;
|
||||
case type_cmsg:
|
||||
return u.cmsg.size;
|
||||
default:
|
||||
zmq_assert (false);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -407,7 +395,7 @@ void zmq::msg_t::reset_metadata ()
|
||||
{
|
||||
if (u.base.metadata) {
|
||||
if (u.base.metadata->drop_ref ()) {
|
||||
LIBZMQ_DELETE(u.base.metadata);
|
||||
LIBZMQ_DELETE (u.base.metadata);
|
||||
}
|
||||
u.base.metadata = NULL;
|
||||
}
|
||||
@ -438,17 +426,17 @@ bool zmq::msg_t::is_cmsg () const
|
||||
return u.base.type == type_cmsg;
|
||||
}
|
||||
|
||||
bool zmq::msg_t::is_zcmsg() const
|
||||
bool zmq::msg_t::is_zcmsg () const
|
||||
{
|
||||
return u.base.type == type_zclmsg;
|
||||
}
|
||||
|
||||
bool zmq::msg_t::is_join() const
|
||||
bool zmq::msg_t::is_join () const
|
||||
{
|
||||
return u.base.type == type_join;
|
||||
}
|
||||
|
||||
bool zmq::msg_t::is_leave() const
|
||||
bool zmq::msg_t::is_leave () const
|
||||
{
|
||||
return u.base.type == type_leave;
|
||||
}
|
||||
@ -466,11 +454,11 @@ void zmq::msg_t::add_refs (int refs_)
|
||||
|
||||
// VSMs, CMSGS and delimiters can be copied straight away. The only
|
||||
// message type that needs special care are long messages.
|
||||
if (u.base.type == type_lmsg || is_zcmsg() ) {
|
||||
if (u.base.type == type_lmsg || is_zcmsg ()) {
|
||||
if (u.base.flags & msg_t::shared)
|
||||
refcnt()->add (refs_);
|
||||
refcnt ()->add (refs_);
|
||||
else {
|
||||
refcnt()->set (refs_ + 1);
|
||||
refcnt ()->set (refs_ + 1);
|
||||
u.base.flags |= msg_t::shared;
|
||||
}
|
||||
}
|
||||
@ -488,13 +476,14 @@ bool zmq::msg_t::rm_refs (int refs_)
|
||||
return true;
|
||||
|
||||
// If there's only one reference close the message.
|
||||
if ( (u.base.type != type_zclmsg && u.base.type != type_lmsg) || !(u.base.flags & msg_t::shared)) {
|
||||
if ((u.base.type != type_zclmsg && u.base.type != type_lmsg)
|
||||
|| !(u.base.flags & msg_t::shared)) {
|
||||
close ();
|
||||
return false;
|
||||
}
|
||||
|
||||
// The only message type that needs special care are long and zcopy messages.
|
||||
if (u.base.type == type_lmsg && !u.lmsg.content->refcnt.sub(refs_)) {
|
||||
if (u.base.type == type_lmsg && !u.lmsg.content->refcnt.sub (refs_)) {
|
||||
// We used "placement new" operator to initialize the reference
|
||||
// counter so we call the destructor explicitly now.
|
||||
u.lmsg.content->refcnt.~atomic_counter_t ();
|
||||
@ -506,10 +495,11 @@ bool zmq::msg_t::rm_refs (int refs_)
|
||||
return false;
|
||||
}
|
||||
|
||||
if (is_zcmsg() && !u.zclmsg.content->refcnt.sub(refs_)) {
|
||||
if (is_zcmsg () && !u.zclmsg.content->refcnt.sub (refs_)) {
|
||||
// storage for rfcnt is provided externally
|
||||
if (u.zclmsg.content->ffn) {
|
||||
u.zclmsg.content->ffn(u.zclmsg.content->data, u.zclmsg.content->hint);
|
||||
u.zclmsg.content->ffn (u.zclmsg.content->data,
|
||||
u.zclmsg.content->hint);
|
||||
}
|
||||
|
||||
return false;
|
||||
@ -539,20 +529,19 @@ int zmq::msg_t::reset_routing_id ()
|
||||
return 0;
|
||||
}
|
||||
|
||||
const char * zmq::msg_t::group ()
|
||||
const char *zmq::msg_t::group ()
|
||||
{
|
||||
return u.base.group;
|
||||
}
|
||||
|
||||
int zmq::msg_t::set_group (const char * group_)
|
||||
int zmq::msg_t::set_group (const char *group_)
|
||||
{
|
||||
return set_group (group_, strlen (group_));
|
||||
}
|
||||
|
||||
int zmq::msg_t::set_group (const char * group_, size_t length_)
|
||||
int zmq::msg_t::set_group (const char *group_, size_t length_)
|
||||
{
|
||||
if (length_> ZMQ_GROUP_MAX_LENGTH)
|
||||
{
|
||||
if (length_ > ZMQ_GROUP_MAX_LENGTH) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
@ -563,16 +552,15 @@ int zmq::msg_t::set_group (const char * group_, size_t length_)
|
||||
return 0;
|
||||
}
|
||||
|
||||
zmq::atomic_counter_t *zmq::msg_t::refcnt()
|
||||
zmq::atomic_counter_t *zmq::msg_t::refcnt ()
|
||||
{
|
||||
switch(u.base.type)
|
||||
{
|
||||
switch (u.base.type) {
|
||||
case type_lmsg:
|
||||
return &u.lmsg.content->refcnt;
|
||||
case type_zclmsg:
|
||||
return &u.zclmsg.content->refcnt;
|
||||
default:
|
||||
zmq_assert(false);
|
||||
zmq_assert (false);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
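The lmsg and zclmsg paths in msg.cpp above share a reference-counted content_t and release the user buffer through the stored ffn callback once the last reference is dropped. A usage sketch, not part of this commit, of the corresponding public call zmq_msg_init_data (); the payload and the my_free helper are illustrative.

#include <zmq.h>
#include <cstdlib>
#include <cstring>

static void my_free (void *data, void *hint)
{
    (void) hint;
    free (data); //  runs exactly once, when the last reference is dropped
}

int main ()
{
    const char payload[] = "hello";
    void *buf = malloc (sizeof payload);
    memcpy (buf, payload, sizeof payload);

    zmq_msg_t msg;
    if (zmq_msg_init_data (&msg, buf, sizeof payload, my_free, NULL) != 0)
        return 1;

    zmq_msg_t copy;
    zmq_msg_init (&copy);
    zmq_msg_copy (&copy, &msg); //  shares the buffer and bumps the refcount

    zmq_msg_close (&msg);  //  refcount 2 -> 1, my_free not called yet
    zmq_msg_close (&copy); //  refcount 1 -> 0, my_free runs here
    return 0;
}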
425 src/msg.hpp
@ -42,227 +42,232 @@
|
||||
// Signature for free function to deallocate the message content.
|
||||
// Note that it has to be declared as "C" so that it is the same as
|
||||
// zmq_free_fn defined in zmq.h.
|
||||
extern "C"
|
||||
{
|
||||
typedef void (msg_free_fn) (void *data, void *hint);
|
||||
extern "C" {
|
||||
typedef void(msg_free_fn) (void *data, void *hint);
|
||||
}
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
// Note that this structure needs to be explicitly constructed
|
||||
// (init functions) and destructed (close function).
|
||||
|
||||
// Note that this structure needs to be explicitly constructed
|
||||
// (init functions) and destructed (close function).
|
||||
|
||||
class msg_t
|
||||
class msg_t
|
||||
{
|
||||
public:
|
||||
// Shared message buffer. Message data are either allocated in one
|
||||
// continuous block along with this structure - thus avoiding one
|
||||
// malloc/free pair or they are stored in user-supplied memory.
|
||||
// In the latter case, ffn member stores pointer to the function to be
|
||||
// used to deallocate the data. If the buffer is actually shared (there
|
||||
// are at least 2 references to it) refcount member contains number of
|
||||
// references.
|
||||
struct content_t
|
||||
{
|
||||
public:
|
||||
|
||||
// Shared message buffer. Message data are either allocated in one
|
||||
// continuous block along with this structure - thus avoiding one
|
||||
// malloc/free pair or they are stored in user-supplied memory.
|
||||
// In the latter case, ffn member stores pointer to the function to be
|
||||
// used to deallocate the data. If the buffer is actually shared (there
|
||||
// are at least 2 references to it) refcount member contains number of
|
||||
// references.
|
||||
struct content_t
|
||||
{
|
||||
void *data;
|
||||
size_t size;
|
||||
msg_free_fn *ffn;
|
||||
void *hint;
|
||||
zmq::atomic_counter_t refcnt;
|
||||
};
|
||||
|
||||
// Message flags.
|
||||
enum
|
||||
{
|
||||
more = 1, // Followed by more parts
|
||||
command = 2, // Command frame (see ZMTP spec)
|
||||
credential = 32,
|
||||
routing_id = 64,
|
||||
shared = 128
|
||||
};
|
||||
|
||||
bool check () const;
|
||||
int init();
|
||||
|
||||
int init (void* data, size_t size_,
|
||||
msg_free_fn* ffn_, void* hint,
|
||||
content_t* content_ = NULL);
|
||||
|
||||
int init_size (size_t size_);
|
||||
int init_data (void *data_, size_t size_, msg_free_fn *ffn_,
|
||||
void *hint_);
|
||||
int init_external_storage(content_t* content_, void *data_, size_t size_,
|
||||
msg_free_fn *ffn_, void *hint_);
|
||||
int init_delimiter ();
|
||||
int init_join ();
|
||||
int init_leave ();
|
||||
int close ();
|
||||
int move (msg_t &src_);
|
||||
int copy (msg_t &src_);
|
||||
void *data ();
|
||||
size_t size () const;
|
||||
unsigned char flags () const;
|
||||
void set_flags (unsigned char flags_);
|
||||
void reset_flags (unsigned char flags_);
|
||||
metadata_t *metadata () const;
|
||||
void set_metadata (metadata_t *metadata_);
|
||||
void reset_metadata ();
|
||||
bool is_routing_id () const;
|
||||
bool is_credential () const;
|
||||
bool is_delimiter () const;
|
||||
bool is_join () const;
|
||||
bool is_leave () const;
|
||||
bool is_vsm () const;
|
||||
bool is_cmsg () const;
|
||||
bool is_zcmsg() const;
|
||||
uint32_t get_routing_id ();
|
||||
int set_routing_id (uint32_t routing_id_);
|
||||
int reset_routing_id ();
|
||||
const char * group ();
|
||||
int set_group (const char* group_);
|
||||
int set_group (const char*, size_t length);
|
||||
|
||||
// After calling this function you can copy the message in POD-style
|
||||
// refs_ times. No need to call copy.
|
||||
void add_refs (int refs_);
|
||||
|
||||
// Removes references previously added by add_refs. If the number of
|
||||
// references drops to 0, the message is closed and false is returned.
|
||||
bool rm_refs (int refs_);
|
||||
|
||||
// Size in bytes of the largest message that is still copied around
|
||||
// rather than being reference-counted.
|
||||
enum { msg_t_size = 64 };
|
||||
enum { max_vsm_size = msg_t_size - (sizeof (metadata_t *) +
|
||||
3 +
|
||||
16 +
|
||||
sizeof (uint32_t))};
|
||||
private:
|
||||
zmq::atomic_counter_t* refcnt();
|
||||
|
||||
// Different message types.
|
||||
enum type_t
|
||||
{
|
||||
type_min = 101,
|
||||
// VSM messages store the content in the message itself
|
||||
type_vsm = 101,
|
||||
// LMSG messages store the content in malloc-ed memory
|
||||
type_lmsg = 102,
|
||||
// Delimiter messages are used in envelopes
|
||||
type_delimiter = 103,
|
||||
// CMSG messages point to constant data
|
||||
type_cmsg = 104,
|
||||
|
||||
// zero-copy LMSG message for v2_decoder
|
||||
type_zclmsg = 105,
|
||||
|
||||
// Join message for radio_dish
|
||||
type_join = 106,
|
||||
|
||||
// Leave message for radio_dish
|
||||
type_leave = 107,
|
||||
|
||||
type_max = 107
|
||||
};
|
||||
|
||||
// Note that fields shared between different message types are not
|
||||
// moved to the parent class (msg_t). This way we get tighter packing
|
||||
// of the data. Shared fields can be accessed via 'base' member of
|
||||
// the union.
|
||||
union {
|
||||
struct {
|
||||
metadata_t *metadata;
|
||||
unsigned char unused [msg_t_size - (sizeof (metadata_t *) +
|
||||
2 +
|
||||
16 +
|
||||
sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group [16];
|
||||
uint32_t routing_id;
|
||||
} base;
|
||||
struct {
|
||||
metadata_t *metadata;
|
||||
unsigned char data [max_vsm_size];
|
||||
unsigned char size;
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group [16];
|
||||
uint32_t routing_id;
|
||||
} vsm;
|
||||
struct {
|
||||
metadata_t *metadata;
|
||||
content_t *content;
|
||||
unsigned char unused [msg_t_size - (sizeof (metadata_t *) +
|
||||
sizeof (content_t*) +
|
||||
2 +
|
||||
16 +
|
||||
sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group [16];
|
||||
uint32_t routing_id;
|
||||
} lmsg;
|
||||
struct {
|
||||
metadata_t *metadata;
|
||||
content_t *content;
|
||||
unsigned char unused [msg_t_size - (sizeof (metadata_t *) +
|
||||
sizeof (content_t*) +
|
||||
2 +
|
||||
16 +
|
||||
sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group [16];
|
||||
uint32_t routing_id;
|
||||
} zclmsg;
|
||||
struct {
|
||||
metadata_t *metadata;
|
||||
void* data;
|
||||
size_t size;
|
||||
unsigned char unused [msg_t_size - (sizeof (metadata_t *) +
|
||||
sizeof (void*) +
|
||||
sizeof (size_t) +
|
||||
2 +
|
||||
16 +
|
||||
sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group [16];
|
||||
uint32_t routing_id;
|
||||
} cmsg;
|
||||
struct {
|
||||
metadata_t *metadata;
|
||||
unsigned char unused [msg_t_size - (sizeof (metadata_t *) +
|
||||
2 +
|
||||
16 +
|
||||
sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group [16];
|
||||
uint32_t routing_id;
|
||||
} delimiter;
|
||||
} u;
|
||||
void *data;
|
||||
size_t size;
|
||||
msg_free_fn *ffn;
|
||||
void *hint;
|
||||
zmq::atomic_counter_t refcnt;
|
||||
};
|
||||
|
||||
inline int close_and_return (zmq::msg_t *msg, int echo)
|
||||
// Message flags.
|
||||
enum
|
||||
{
|
||||
// Since we abort on close failure we preserve errno for success case.
|
||||
int err = errno;
|
||||
const int rc = msg->close ();
|
||||
errno_assert (rc == 0);
|
||||
errno = err;
|
||||
return echo;
|
||||
}
|
||||
more = 1, // Followed by more parts
|
||||
command = 2, // Command frame (see ZMTP spec)
|
||||
credential = 32,
|
||||
routing_id = 64,
|
||||
shared = 128
|
||||
};
|
||||
|
||||
inline int close_and_return (zmq::msg_t msg [], int count, int echo)
|
||||
bool check () const;
|
||||
int init ();
|
||||
|
||||
int init (void *data,
|
||||
size_t size_,
|
||||
msg_free_fn *ffn_,
|
||||
void *hint,
|
||||
content_t *content_ = NULL);
|
||||
|
||||
int init_size (size_t size_);
|
||||
int init_data (void *data_, size_t size_, msg_free_fn *ffn_, void *hint_);
|
||||
int init_external_storage (content_t *content_,
|
||||
void *data_,
|
||||
size_t size_,
|
||||
msg_free_fn *ffn_,
|
||||
void *hint_);
|
||||
int init_delimiter ();
|
||||
int init_join ();
|
||||
int init_leave ();
|
||||
int close ();
|
||||
int move (msg_t &src_);
|
||||
int copy (msg_t &src_);
|
||||
void *data ();
|
||||
size_t size () const;
|
||||
unsigned char flags () const;
|
||||
void set_flags (unsigned char flags_);
|
||||
void reset_flags (unsigned char flags_);
|
||||
metadata_t *metadata () const;
|
||||
void set_metadata (metadata_t *metadata_);
|
||||
void reset_metadata ();
|
||||
bool is_routing_id () const;
|
||||
bool is_credential () const;
|
||||
bool is_delimiter () const;
|
||||
bool is_join () const;
|
||||
bool is_leave () const;
|
||||
bool is_vsm () const;
|
||||
bool is_cmsg () const;
|
||||
bool is_zcmsg () const;
|
||||
uint32_t get_routing_id ();
|
||||
int set_routing_id (uint32_t routing_id_);
|
||||
int reset_routing_id ();
|
||||
const char *group ();
|
||||
int set_group (const char *group_);
|
||||
int set_group (const char *, size_t length);
|
||||
|
||||
// After calling this function you can copy the message in POD-style
|
||||
// refs_ times. No need to call copy.
|
||||
void add_refs (int refs_);
|
||||
|
||||
// Removes references previously added by add_refs. If the number of
|
||||
// references drops to 0, the message is closed and false is returned.
|
||||
bool rm_refs (int refs_);
|
||||
|
||||
// Size in bytes of the largest message that is still copied around
|
||||
// rather than being reference-counted.
|
||||
enum
|
||||
{
|
||||
for (int i = 0; i < count; i++)
|
||||
close_and_return (&msg [i], 0);
|
||||
return echo;
|
||||
}
|
||||
msg_t_size = 64
|
||||
};
|
||||
enum
|
||||
{
|
||||
max_vsm_size =
|
||||
msg_t_size - (sizeof (metadata_t *) + 3 + 16 + sizeof (uint32_t))
|
||||
};
|
||||
|
||||
private:
|
||||
zmq::atomic_counter_t *refcnt ();
|
||||
|
||||
// Different message types.
|
||||
enum type_t
|
||||
{
|
||||
type_min = 101,
|
||||
// VSM messages store the content in the message itself
|
||||
type_vsm = 101,
|
||||
// LMSG messages store the content in malloc-ed memory
|
||||
type_lmsg = 102,
|
||||
// Delimiter messages are used in envelopes
|
||||
type_delimiter = 103,
|
||||
// CMSG messages point to constant data
|
||||
type_cmsg = 104,
|
||||
|
||||
// zero-copy LMSG message for v2_decoder
|
||||
type_zclmsg = 105,
|
||||
|
||||
// Join message for radio_dish
|
||||
type_join = 106,
|
||||
|
||||
// Leave message for radio_dish
|
||||
type_leave = 107,
|
||||
|
||||
type_max = 107
|
||||
};
|
||||
|
||||
// Note that fields shared between different message types are not
|
||||
// moved to the parent class (msg_t). This way we get tighter packing
|
||||
// of the data. Shared fields can be accessed via 'base' member of
|
||||
// the union.
|
||||
union
|
||||
{
|
||||
struct
|
||||
{
|
||||
metadata_t *metadata;
|
||||
unsigned char
|
||||
unused[msg_t_size
|
||||
- (sizeof (metadata_t *) + 2 + 16 + sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group[16];
|
||||
uint32_t routing_id;
|
||||
} base;
|
||||
struct
|
||||
{
|
||||
metadata_t *metadata;
|
||||
unsigned char data[max_vsm_size];
|
||||
unsigned char size;
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group[16];
|
||||
uint32_t routing_id;
|
||||
} vsm;
|
||||
struct
|
||||
{
|
||||
metadata_t *metadata;
|
||||
content_t *content;
|
||||
unsigned char unused[msg_t_size
|
||||
- (sizeof (metadata_t *) + sizeof (content_t *)
|
||||
+ 2 + 16 + sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group[16];
|
||||
uint32_t routing_id;
|
||||
} lmsg;
|
||||
struct
|
||||
{
|
||||
metadata_t *metadata;
|
||||
content_t *content;
|
||||
unsigned char unused[msg_t_size
|
||||
- (sizeof (metadata_t *) + sizeof (content_t *)
|
||||
+ 2 + 16 + sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group[16];
|
||||
uint32_t routing_id;
|
||||
} zclmsg;
|
||||
struct
|
||||
{
|
||||
metadata_t *metadata;
|
||||
void *data;
|
||||
size_t size;
|
||||
unsigned char
|
||||
unused[msg_t_size
|
||||
- (sizeof (metadata_t *) + sizeof (void *)
|
||||
+ sizeof (size_t) + 2 + 16 + sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group[16];
|
||||
uint32_t routing_id;
|
||||
} cmsg;
|
||||
struct
|
||||
{
|
||||
metadata_t *metadata;
|
||||
unsigned char
|
||||
unused[msg_t_size
|
||||
- (sizeof (metadata_t *) + 2 + 16 + sizeof (uint32_t))];
|
||||
unsigned char type;
|
||||
unsigned char flags;
|
||||
char group[16];
|
||||
uint32_t routing_id;
|
||||
} delimiter;
|
||||
} u;
|
||||
};
|
||||
|
||||
inline int close_and_return (zmq::msg_t *msg, int echo)
|
||||
{
|
||||
// Since we abort on close failure we preserve errno for success case.
|
||||
int err = errno;
|
||||
const int rc = msg->close ();
|
||||
errno_assert (rc == 0);
|
||||
errno = err;
|
||||
return echo;
|
||||
}
|
||||
|
||||
inline int close_and_return (zmq::msg_t msg[], int count, int echo)
|
||||
{
|
||||
for (int i = 0; i < count; i++)
|
||||
close_and_return (&msg[i], 0);
|
||||
return echo;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
184 src/mtrie.cpp
@ -38,25 +38,20 @@
|
||||
#include "macros.hpp"
|
||||
#include "mtrie.hpp"
|
||||
|
||||
zmq::mtrie_t::mtrie_t () :
|
||||
pipes (0),
|
||||
min (0),
|
||||
count (0),
|
||||
live_nodes (0)
|
||||
zmq::mtrie_t::mtrie_t () : pipes (0), min (0), count (0), live_nodes (0)
|
||||
{
|
||||
}
|
||||
|
||||
zmq::mtrie_t::~mtrie_t ()
|
||||
{
|
||||
LIBZMQ_DELETE(pipes);
|
||||
LIBZMQ_DELETE (pipes);
|
||||
|
||||
if (count == 1) {
|
||||
zmq_assert (next.node);
|
||||
LIBZMQ_DELETE(next.node);
|
||||
}
|
||||
else if (count > 1) {
|
||||
LIBZMQ_DELETE (next.node);
|
||||
} else if (count > 1) {
|
||||
for (unsigned short i = 0; i != count; ++i) {
|
||||
LIBZMQ_DELETE(next.table[i]);
|
||||
LIBZMQ_DELETE (next.table[i]);
|
||||
}
|
||||
free (next.table);
|
||||
}
|
||||
@ -67,8 +62,9 @@ bool zmq::mtrie_t::add (unsigned char *prefix_, size_t size_, pipe_t *pipe_)
|
||||
return add_helper (prefix_, size_, pipe_);
|
||||
}
|
||||
|
||||
bool zmq::mtrie_t::add_helper (unsigned char *prefix_, size_t size_,
|
||||
pipe_t *pipe_)
|
||||
bool zmq::mtrie_t::add_helper (unsigned char *prefix_,
|
||||
size_t size_,
|
||||
pipe_t *pipe_)
|
||||
{
|
||||
// We are at the node corresponding to the prefix. We are done.
|
||||
if (!size_) {
|
||||
@ -83,49 +79,42 @@ bool zmq::mtrie_t::add_helper (unsigned char *prefix_, size_t size_,
|
||||
|
||||
unsigned char c = *prefix_;
|
||||
if (c < min || c >= min + count) {
|
||||
|
||||
// The character is out of range of currently handled
|
||||
// characters. We have to extend the table.
|
||||
if (!count) {
|
||||
min = c;
|
||||
count = 1;
|
||||
next.node = NULL;
|
||||
}
|
||||
else
|
||||
if (count == 1) {
|
||||
} else if (count == 1) {
|
||||
unsigned char oldc = min;
|
||||
mtrie_t *oldp = next.node;
|
||||
count = (min < c ? c - min : min - c) + 1;
|
||||
next.table = (mtrie_t**)
|
||||
malloc (sizeof (mtrie_t*) * count);
|
||||
next.table = (mtrie_t **) malloc (sizeof (mtrie_t *) * count);
|
||||
alloc_assert (next.table);
|
||||
for (unsigned short i = 0; i != count; ++i)
|
||||
next.table [i] = 0;
|
||||
next.table[i] = 0;
|
||||
min = std::min (min, c);
|
||||
next.table [oldc - min] = oldp;
|
||||
}
|
||||
else
|
||||
if (min < c) {
|
||||
next.table[oldc - min] = oldp;
|
||||
} else if (min < c) {
|
||||
// The new character is above the current character range.
|
||||
unsigned short old_count = count;
|
||||
count = c - min + 1;
|
||||
next.table = (mtrie_t**) realloc (next.table,
|
||||
sizeof (mtrie_t*) * count);
|
||||
next.table =
|
||||
(mtrie_t **) realloc (next.table, sizeof (mtrie_t *) * count);
|
||||
alloc_assert (next.table);
|
||||
for (unsigned short i = old_count; i != count; i++)
|
||||
next.table [i] = NULL;
|
||||
}
|
||||
else {
|
||||
next.table[i] = NULL;
|
||||
} else {
|
||||
// The new character is below the current character range.
|
||||
unsigned short old_count = count;
|
||||
count = (min + old_count) - c;
|
||||
next.table = (mtrie_t**) realloc (next.table,
|
||||
sizeof (mtrie_t*) * count);
|
||||
next.table =
|
||||
(mtrie_t **) realloc (next.table, sizeof (mtrie_t *) * count);
|
||||
alloc_assert (next.table);
|
||||
memmove (next.table + min - c, next.table,
|
||||
old_count * sizeof (mtrie_t*));
|
||||
old_count * sizeof (mtrie_t *));
|
||||
for (unsigned short i = 0; i != min - c; i++)
|
||||
next.table [i] = NULL;
|
||||
next.table[i] = NULL;
|
||||
min = c;
|
||||
}
|
||||
}
|
||||
@ -138,31 +127,38 @@ bool zmq::mtrie_t::add_helper (unsigned char *prefix_, size_t size_,
|
||||
++live_nodes;
|
||||
}
|
||||
return next.node->add_helper (prefix_ + 1, size_ - 1, pipe_);
|
||||
}
|
||||
else {
|
||||
if (!next.table [c - min]) {
|
||||
next.table [c - min] = new (std::nothrow) mtrie_t;
|
||||
alloc_assert (next.table [c - min]);
|
||||
} else {
|
||||
if (!next.table[c - min]) {
|
||||
next.table[c - min] = new (std::nothrow) mtrie_t;
|
||||
alloc_assert (next.table[c - min]);
|
||||
++live_nodes;
|
||||
}
|
||||
return next.table [c - min]->add_helper (prefix_ + 1, size_ - 1, pipe_);
|
||||
return next.table[c - min]->add_helper (prefix_ + 1, size_ - 1, pipe_);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void zmq::mtrie_t::rm (pipe_t *pipe_,
|
||||
void (*func_) (unsigned char *data_, size_t size_, void *arg_),
|
||||
void *arg_, bool call_on_uniq_)
|
||||
void (*func_) (unsigned char *data_,
|
||||
size_t size_,
|
||||
void *arg_),
|
||||
void *arg_,
|
||||
bool call_on_uniq_)
|
||||
{
|
||||
unsigned char *buff = NULL;
|
||||
rm_helper (pipe_, &buff, 0, 0, func_, arg_, call_on_uniq_);
|
||||
free (buff);
|
||||
}
|
||||
|
||||
void zmq::mtrie_t::rm_helper (pipe_t *pipe_, unsigned char **buff_,
|
||||
size_t buffsize_, size_t maxbuffsize_,
|
||||
void (*func_) (unsigned char *data_, size_t size_, void *arg_),
|
||||
void *arg_, bool call_on_uniq_)
|
||||
void zmq::mtrie_t::rm_helper (pipe_t *pipe_,
|
||||
unsigned char **buff_,
|
||||
size_t buffsize_,
|
||||
size_t maxbuffsize_,
|
||||
void (*func_) (unsigned char *data_,
|
||||
size_t size_,
|
||||
void *arg_),
|
||||
void *arg_,
|
||||
bool call_on_uniq_)
|
||||
{
|
||||
// Remove the subscription from this node.
|
||||
if (pipes && pipes->erase (pipe_)) {
|
||||
@ -171,14 +167,14 @@ void zmq::mtrie_t::rm_helper (pipe_t *pipe_, unsigned char **buff_,
|
||||
}
|
||||
|
||||
if (pipes->empty ()) {
|
||||
LIBZMQ_DELETE(pipes);
|
||||
LIBZMQ_DELETE (pipes);
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust the buffer.
|
||||
if (buffsize_ >= maxbuffsize_) {
|
||||
maxbuffsize_ = buffsize_ + 256;
|
||||
*buff_ = (unsigned char*) realloc (*buff_, maxbuffsize_);
|
||||
*buff_ = (unsigned char *) realloc (*buff_, maxbuffsize_);
|
||||
alloc_assert (*buff_);
|
||||
}
|
||||
|
||||
@ -188,14 +184,14 @@ void zmq::mtrie_t::rm_helper (pipe_t *pipe_, unsigned char **buff_,
|
||||
|
||||
// If there's one subnode (optimisation).
|
||||
if (count == 1) {
|
||||
(*buff_) [buffsize_] = min;
|
||||
(*buff_)[buffsize_] = min;
|
||||
buffsize_++;
|
||||
next.node->rm_helper (pipe_, buff_, buffsize_, maxbuffsize_,
|
||||
func_, arg_, call_on_uniq_);
|
||||
next.node->rm_helper (pipe_, buff_, buffsize_, maxbuffsize_, func_,
|
||||
arg_, call_on_uniq_);
|
||||
|
||||
// Prune the node if it was made redundant by the removal
|
||||
if (next.node->is_redundant ()) {
|
||||
LIBZMQ_DELETE(next.node);
|
||||
LIBZMQ_DELETE (next.node);
|
||||
count = 0;
|
||||
--live_nodes;
|
||||
zmq_assert (live_nodes == 0);
|
||||
@ -210,19 +206,18 @@ void zmq::mtrie_t::rm_helper (pipe_t *pipe_, unsigned char **buff_,
|
||||
// New max non-null character in the node table after the removal
|
||||
unsigned char new_max = min;
|
||||
for (unsigned short c = 0; c != count; c++) {
|
||||
(*buff_) [buffsize_] = min + c;
|
||||
if (next.table [c]) {
|
||||
next.table [c]->rm_helper (pipe_, buff_, buffsize_ + 1,
|
||||
maxbuffsize_, func_, arg_, call_on_uniq_);
|
||||
(*buff_)[buffsize_] = min + c;
|
||||
if (next.table[c]) {
|
||||
next.table[c]->rm_helper (pipe_, buff_, buffsize_ + 1, maxbuffsize_,
|
||||
func_, arg_, call_on_uniq_);
|
||||
|
||||
// Prune redundant nodes from the mtrie
|
||||
if (next.table [c]->is_redundant ()) {
|
||||
LIBZMQ_DELETE(next.table[c]);
|
||||
if (next.table[c]->is_redundant ()) {
|
||||
LIBZMQ_DELETE (next.table[c]);
|
||||
|
||||
zmq_assert (live_nodes > 0);
|
||||
--live_nodes;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// The node is not redundant, so it's a candidate for being
|
||||
// the new min/max node.
|
||||
//
|
||||
@ -247,22 +242,19 @@ void zmq::mtrie_t::rm_helper (pipe_t *pipe_, unsigned char **buff_,
|
||||
count = 0;
|
||||
}
|
||||
// Compact the node table if possible
|
||||
else
|
||||
if (live_nodes == 1) {
|
||||
else if (live_nodes == 1) {
|
||||
// If there's only one live node in the table we can
|
||||
// switch to using the more compact single-node
|
||||
// representation
|
||||
zmq_assert (new_min == new_max);
|
||||
zmq_assert (new_min >= min && new_min < min + count);
|
||||
mtrie_t *node = next.table [new_min - min];
|
||||
mtrie_t *node = next.table[new_min - min];
|
||||
zmq_assert (node);
|
||||
free (next.table);
|
||||
next.node = node;
|
||||
count = 1;
|
||||
min = new_min;
|
||||
}
|
||||
else
|
||||
if (new_min > min || new_max < min + count - 1) {
|
||||
} else if (new_min > min || new_max < min + count - 1) {
|
||||
zmq_assert (new_max - new_min + 1 > 1);
|
||||
|
||||
mtrie_t **old_table = next.table;
|
||||
@ -272,11 +264,11 @@ void zmq::mtrie_t::rm_helper (pipe_t *pipe_, unsigned char **buff_,
|
||||
zmq_assert (new_max - new_min + 1 < count);
|
||||
|
||||
count = new_max - new_min + 1;
|
||||
next.table = (mtrie_t**) malloc (sizeof (mtrie_t*) * count);
|
||||
next.table = (mtrie_t **) malloc (sizeof (mtrie_t *) * count);
|
||||
alloc_assert (next.table);
|
||||
|
||||
memmove (next.table, old_table + (new_min - min),
|
||||
sizeof (mtrie_t*) * count);
|
||||
sizeof (mtrie_t *) * count);
|
||||
free (old_table);
|
||||
|
||||
min = new_min;
|
||||
@ -288,15 +280,16 @@ bool zmq::mtrie_t::rm (unsigned char *prefix_, size_t size_, pipe_t *pipe_)
|
||||
return rm_helper (prefix_, size_, pipe_);
|
||||
}
|
||||
|
||||
bool zmq::mtrie_t::rm_helper (unsigned char *prefix_, size_t size_,
|
||||
pipe_t *pipe_)
|
||||
bool zmq::mtrie_t::rm_helper (unsigned char *prefix_,
|
||||
size_t size_,
|
||||
pipe_t *pipe_)
|
||||
{
|
||||
if (!size_) {
|
||||
if (pipes) {
|
||||
pipes_t::size_type erased = pipes->erase (pipe_);
|
||||
zmq_assert (erased == 1);
|
||||
if (pipes->empty ()) {
|
||||
LIBZMQ_DELETE(pipes);
|
||||
LIBZMQ_DELETE (pipes);
|
||||
}
|
||||
}
|
||||
return !pipes;
|
||||
@ -306,8 +299,7 @@ bool zmq::mtrie_t::rm_helper (unsigned char *prefix_, size_t size_,
|
||||
if (!count || c < min || c >= min + count)
|
||||
return false;
|
||||
|
||||
mtrie_t *next_node =
|
||||
count == 1 ? next.node : next.table [c - min];
|
||||
mtrie_t *next_node = count == 1 ? next.node : next.table[c - min];
|
||||
|
||||
if (!next_node)
|
||||
return false;
|
||||
@ -315,7 +307,7 @@ bool zmq::mtrie_t::rm_helper (unsigned char *prefix_, size_t size_,
|
||||
bool ret = next_node->rm_helper (prefix_ + 1, size_ - 1, pipe_);
|
||||
|
||||
if (next_node->is_redundant ()) {
|
||||
LIBZMQ_DELETE(next_node);
|
||||
LIBZMQ_DELETE (next_node);
|
||||
zmq_assert (count > 0);
|
||||
|
||||
if (count == 1) {
|
||||
@ -323,9 +315,8 @@ bool zmq::mtrie_t::rm_helper (unsigned char *prefix_, size_t size_,
|
||||
count = 0;
|
||||
--live_nodes;
|
||||
zmq_assert (live_nodes == 0);
|
||||
}
|
||||
else {
|
||||
next.table [c - min] = 0;
|
||||
} else {
|
||||
next.table[c - min] = 0;
|
||||
zmq_assert (live_nodes > 1);
|
||||
--live_nodes;
|
||||
|
||||
@ -336,47 +327,43 @@ bool zmq::mtrie_t::rm_helper (unsigned char *prefix_, size_t size_,
|
||||
// representation
|
||||
unsigned short i;
|
||||
for (i = 0; i < count; ++i)
|
||||
if (next.table [i])
|
||||
if (next.table[i])
|
||||
break;
|
||||
|
||||
zmq_assert (i < count);
|
||||
min += i;
|
||||
count = 1;
|
||||
mtrie_t *oldp = next.table [i];
|
||||
mtrie_t *oldp = next.table[i];
|
||||
free (next.table);
|
||||
next.node = oldp;
|
||||
}
|
||||
else
|
||||
if (c == min) {
|
||||
} else if (c == min) {
|
||||
// We can compact the table "from the left"
|
||||
unsigned short i;
|
||||
for (i = 1; i < count; ++i)
|
||||
if (next.table [i])
|
||||
if (next.table[i])
|
||||
break;
|
||||
|
||||
zmq_assert (i < count);
|
||||
min += i;
|
||||
count -= i;
|
||||
mtrie_t **old_table = next.table;
|
||||
next.table = (mtrie_t**) malloc (sizeof (mtrie_t*) * count);
|
||||
next.table = (mtrie_t **) malloc (sizeof (mtrie_t *) * count);
|
||||
alloc_assert (next.table);
|
||||
memmove (next.table, old_table + i, sizeof (mtrie_t*) * count);
|
||||
memmove (next.table, old_table + i, sizeof (mtrie_t *) * count);
|
||||
free (old_table);
|
||||
}
|
||||
else
|
||||
if (c == min + count - 1) {
|
||||
} else if (c == min + count - 1) {
|
||||
// We can compact the table "from the right"
|
||||
unsigned short i;
|
||||
for (i = 1; i < count; ++i)
|
||||
if (next.table [count - 1 - i])
|
||||
if (next.table[count - 1 - i])
|
||||
break;
|
||||
|
||||
zmq_assert (i < count);
|
||||
count -= i;
|
||||
mtrie_t **old_table = next.table;
|
||||
next.table = (mtrie_t**) malloc (sizeof (mtrie_t*) * count);
|
||||
next.table = (mtrie_t **) malloc (sizeof (mtrie_t *) * count);
|
||||
alloc_assert (next.table);
|
||||
memmove (next.table, old_table, sizeof (mtrie_t*) * count);
|
||||
memmove (next.table, old_table, sizeof (mtrie_t *) * count);
|
||||
free (old_table);
|
||||
}
|
||||
}
|
||||
@ -385,16 +372,17 @@ bool zmq::mtrie_t::rm_helper (unsigned char *prefix_, size_t size_,
|
||||
return ret;
|
||||
}
|
||||
|
||||
void zmq::mtrie_t::match (unsigned char *data_, size_t size_,
|
||||
void (*func_) (pipe_t *pipe_, void *arg_), void *arg_)
|
||||
void zmq::mtrie_t::match (unsigned char *data_,
|
||||
size_t size_,
|
||||
void (*func_) (pipe_t *pipe_, void *arg_),
|
||||
void *arg_)
|
||||
{
|
||||
mtrie_t *current = this;
|
||||
while (true) {
|
||||
|
||||
// Signal the pipes attached to this node.
|
||||
if (current->pipes) {
|
||||
for (pipes_t::iterator it = current->pipes->begin ();
|
||||
it != current->pipes->end (); ++it)
|
||||
it != current->pipes->end (); ++it)
|
||||
func_ (*it, arg_);
|
||||
}
|
||||
|
||||
@ -408,7 +396,7 @@ void zmq::mtrie_t::match (unsigned char *data_, size_t size_,
|
||||
|
||||
// If there's one subnode (optimisation).
|
||||
if (current->count == 1) {
|
||||
if (data_ [0] != current->min)
|
||||
if (data_[0] != current->min)
|
||||
break;
|
||||
current = current->next.node;
|
||||
data_++;
|
||||
@ -417,12 +405,12 @@ void zmq::mtrie_t::match (unsigned char *data_, size_t size_,
|
||||
}
|
||||
|
||||
// If there are multiple subnodes.
|
||||
if (data_ [0] < current->min || data_ [0] >=
|
||||
current->min + current->count)
|
||||
if (data_[0] < current->min
|
||||
|| data_[0] >= current->min + current->count)
|
||||
break;
|
||||
if (!current->next.table [data_ [0] - current->min])
|
||||
if (!current->next.table[data_[0] - current->min])
|
||||
break;
|
||||
current = current->next.table [data_ [0] - current->min];
|
||||
current = current->next.table[data_[0] - current->min];
|
||||
data_++;
|
||||
size_--;
|
||||
}
|
||||
|
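mtrie_t::match () above signals every pipe whose subscribed prefix is a prefix of the published data. The following illustration is not part of this commit and shows only that matching rule, using a flat container instead of the trie; the sample topics and pipe ids are made up.

#include <cassert>
#include <set>
#include <string>
#include <utility>

typedef std::set<std::pair<std::string, int> > subs_t; //  (prefix, pipe id)

static std::set<int> match (const subs_t &subs, const std::string &data)
{
    std::set<int> hit;
    for (subs_t::const_iterator it = subs.begin (); it != subs.end (); ++it)
        //  A subscription matches when its prefix equals the start of the data.
        if (data.compare (0, it->first.size (), it->first) == 0)
            hit.insert (it->second);
    return hit;
}

int main ()
{
    subs_t subs;
    subs.insert (std::make_pair (std::string ("A"), 1));
    subs.insert (std::make_pair (std::string ("A.B"), 2));
    subs.insert (std::make_pair (std::string ("C"), 3));

    std::set<int> hit = match (subs, "A.B.topic");
    assert (hit.count (1) && hit.count (2) && !hit.count (3));
    return 0;
}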
111 src/mtrie.hpp
@ -37,66 +37,67 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class pipe_t;
|
||||
|
||||
class pipe_t;
|
||||
// Multi-trie. Each node in the trie is a set of pointers to pipes.
|
||||
|
||||
// Multi-trie. Each node in the trie is a set of pointers to pipes.
|
||||
class mtrie_t
|
||||
{
|
||||
public:
|
||||
mtrie_t ();
|
||||
~mtrie_t ();
|
||||
|
||||
class mtrie_t
|
||||
// Add key to the trie. Returns true if it's a new subscription
|
||||
// rather than a duplicate.
|
||||
bool add (unsigned char *prefix_, size_t size_, zmq::pipe_t *pipe_);
|
||||
|
||||
// Remove all subscriptions for a specific peer from the trie.
|
||||
// The call_on_uniq_ flag controls if the callback is invoked
|
||||
// when there are no subscriptions left on some topics or on
|
||||
// every removal.
|
||||
void rm (zmq::pipe_t *pipe_,
|
||||
void (*func_) (unsigned char *data_, size_t size_, void *arg_),
|
||||
void *arg_,
|
||||
bool call_on_uniq_);
|
||||
|
||||
// Remove specific subscription from the trie. Return true is it was
|
||||
// actually removed rather than de-duplicated.
|
||||
bool rm (unsigned char *prefix_, size_t size_, zmq::pipe_t *pipe_);
|
||||
|
||||
// Signal all the matching pipes.
|
||||
void match (unsigned char *data_,
|
||||
size_t size_,
|
||||
void (*func_) (zmq::pipe_t *pipe_, void *arg_),
|
||||
void *arg_);
|
||||
|
||||
private:
|
||||
bool add_helper (unsigned char *prefix_, size_t size_, zmq::pipe_t *pipe_);
|
||||
void
|
||||
rm_helper (zmq::pipe_t *pipe_,
|
||||
unsigned char **buff_,
|
||||
size_t buffsize_,
|
||||
size_t maxbuffsize_,
|
||||
void (*func_) (unsigned char *data_, size_t size_, void *arg_),
|
||||
void *arg_,
|
||||
bool call_on_uniq_);
|
||||
bool rm_helper (unsigned char *prefix_, size_t size_, zmq::pipe_t *pipe_);
|
||||
bool is_redundant () const;
|
||||
|
||||
typedef std::set<zmq::pipe_t *> pipes_t;
|
||||
pipes_t *pipes;
|
||||
|
||||
unsigned char min;
|
||||
unsigned short count;
|
||||
unsigned short live_nodes;
|
||||
union
|
||||
{
|
||||
public:
|
||||
|
||||
mtrie_t ();
|
||||
~mtrie_t ();
|
||||
|
||||
// Add key to the trie. Returns true if it's a new subscription
|
||||
// rather than a duplicate.
|
||||
bool add (unsigned char *prefix_, size_t size_, zmq::pipe_t *pipe_);
|
||||
|
||||
// Remove all subscriptions for a specific peer from the trie.
|
||||
// The call_on_uniq_ flag controls if the callback is invoked
|
||||
// when there are no subscriptions left on some topics or on
|
||||
// every removal.
|
||||
void rm (zmq::pipe_t *pipe_,
|
||||
void (*func_) (unsigned char *data_, size_t size_, void *arg_),
|
||||
void *arg_, bool call_on_uniq_);
|
||||
|
||||
// Remove specific subscription from the trie. Return true is it was
|
||||
// actually removed rather than de-duplicated.
|
||||
bool rm (unsigned char *prefix_, size_t size_, zmq::pipe_t *pipe_);
|
||||
|
||||
// Signal all the matching pipes.
|
||||
void match (unsigned char *data_, size_t size_,
|
||||
void (*func_) (zmq::pipe_t *pipe_, void *arg_), void *arg_);
|
||||
|
||||
private:
|
||||
|
||||
bool add_helper (unsigned char *prefix_, size_t size_,
|
||||
zmq::pipe_t *pipe_);
|
||||
void rm_helper (zmq::pipe_t *pipe_, unsigned char **buff_,
|
||||
size_t buffsize_, size_t maxbuffsize_,
|
||||
void (*func_) (unsigned char *data_, size_t size_, void *arg_),
|
||||
void *arg_, bool call_on_uniq_);
|
||||
bool rm_helper (unsigned char *prefix_, size_t size_,
|
||||
zmq::pipe_t *pipe_);
|
||||
bool is_redundant () const;
|
||||
|
||||
typedef std::set <zmq::pipe_t*> pipes_t;
|
||||
pipes_t *pipes;
|
||||
|
||||
unsigned char min;
|
||||
unsigned short count;
|
||||
unsigned short live_nodes;
|
||||
union {
|
||||
class mtrie_t *node;
|
||||
class mtrie_t **table;
|
||||
} next;
|
||||
|
||||
mtrie_t (const mtrie_t&);
|
||||
const mtrie_t &operator = (const mtrie_t&);
|
||||
};
|
||||
class mtrie_t *node;
|
||||
class mtrie_t **table;
|
||||
} next;
|
||||
|
||||
mtrie_t (const mtrie_t &);
|
||||
const mtrie_t &operator= (const mtrie_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
224 src/mutex.hpp
@ -40,49 +40,31 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class mutex_t
|
||||
{
|
||||
public:
|
||||
inline mutex_t () { InitializeCriticalSection (&cs); }
|
||||
|
||||
class mutex_t
|
||||
inline ~mutex_t () { DeleteCriticalSection (&cs); }
|
||||
|
||||
inline void lock () { EnterCriticalSection (&cs); }
|
||||
|
||||
inline bool try_lock ()
|
||||
{
|
||||
public:
|
||||
inline mutex_t ()
|
||||
{
|
||||
InitializeCriticalSection (&cs);
|
||||
}
|
||||
return (TryEnterCriticalSection (&cs)) ? true : false;
|
||||
}
|
||||
|
||||
inline ~mutex_t ()
|
||||
{
|
||||
DeleteCriticalSection (&cs);
|
||||
}
|
||||
inline void unlock () { LeaveCriticalSection (&cs); }
|
||||
|
||||
inline void lock ()
|
||||
{
|
||||
EnterCriticalSection (&cs);
|
||||
}
|
||||
inline CRITICAL_SECTION *get_cs () { return &cs; }
|
||||
|
||||
inline bool try_lock ()
|
||||
{
|
||||
return (TryEnterCriticalSection (&cs)) ? true : false;
|
||||
}
|
||||
|
||||
inline void unlock ()
|
||||
{
|
||||
LeaveCriticalSection (&cs);
|
||||
}
|
||||
|
||||
inline CRITICAL_SECTION* get_cs()
|
||||
{
|
||||
return &cs;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
CRITICAL_SECTION cs;
|
||||
|
||||
// Disable copy construction and assignment.
|
||||
mutex_t (const mutex_t&);
|
||||
void operator = (const mutex_t&);
|
||||
};
|
||||
private:
|
||||
CRITICAL_SECTION cs;
|
||||
|
||||
// Disable copy construction and assignment.
|
||||
mutex_t (const mutex_t &);
|
||||
void operator= (const mutex_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#else
|
||||
@ -91,68 +73,62 @@ namespace zmq
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
|
||||
class mutex_t
|
||||
class mutex_t
|
||||
{
|
||||
public:
|
||||
inline mutex_t ()
|
||||
{
|
||||
public:
|
||||
inline mutex_t ()
|
||||
{
|
||||
int rc = pthread_mutexattr_init(&attr);
|
||||
posix_assert (rc);
|
||||
int rc = pthread_mutexattr_init (&attr);
|
||||
posix_assert (rc);
|
||||
|
||||
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
|
||||
posix_assert (rc);
|
||||
rc = pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
|
||||
posix_assert (rc);
|
||||
|
||||
rc = pthread_mutex_init (&mutex, &attr);
|
||||
posix_assert (rc);
|
||||
}
|
||||
rc = pthread_mutex_init (&mutex, &attr);
|
||||
posix_assert (rc);
|
||||
}
|
||||
|
||||
inline ~mutex_t ()
|
||||
{
|
||||
int rc = pthread_mutex_destroy (&mutex);
|
||||
posix_assert (rc);
|
||||
inline ~mutex_t ()
|
||||
{
|
||||
int rc = pthread_mutex_destroy (&mutex);
|
||||
posix_assert (rc);
|
||||
|
||||
rc = pthread_mutexattr_destroy (&attr);
|
||||
posix_assert (rc);
|
||||
}
|
||||
rc = pthread_mutexattr_destroy (&attr);
|
||||
posix_assert (rc);
|
||||
}
|
||||
|
||||
inline void lock ()
|
||||
{
|
||||
int rc = pthread_mutex_lock (&mutex);
|
||||
posix_assert (rc);
|
||||
}
|
||||
inline void lock ()
|
||||
{
|
||||
int rc = pthread_mutex_lock (&mutex);
|
||||
posix_assert (rc);
|
||||
}
|
||||
|
||||
inline bool try_lock ()
|
||||
{
|
||||
int rc = pthread_mutex_trylock (&mutex);
|
||||
if (rc == EBUSY)
|
||||
return false;
|
||||
inline bool try_lock ()
|
||||
{
|
||||
int rc = pthread_mutex_trylock (&mutex);
|
||||
if (rc == EBUSY)
|
||||
return false;
|
||||
|
||||
posix_assert (rc);
|
||||
return true;
|
||||
}
|
||||
posix_assert (rc);
|
||||
return true;
|
||||
}
|
||||
|
||||
inline void unlock ()
|
||||
{
|
||||
int rc = pthread_mutex_unlock (&mutex);
|
||||
posix_assert (rc);
|
||||
}
|
||||
inline void unlock ()
|
||||
{
|
||||
int rc = pthread_mutex_unlock (&mutex);
|
||||
posix_assert (rc);
|
||||
}
|
||||
|
||||
inline pthread_mutex_t* get_mutex()
|
||||
{
|
||||
return &mutex;
|
||||
}
|
||||
inline pthread_mutex_t *get_mutex () { return &mutex; }
|
||||
|
||||
private:
|
||||
|
||||
pthread_mutex_t mutex;
|
||||
pthread_mutexattr_t attr;
|
||||
|
||||
// Disable copy construction and assignment.
|
||||
mutex_t (const mutex_t&);
|
||||
const mutex_t &operator = (const mutex_t&);
|
||||
};
|
||||
private:
|
||||
pthread_mutex_t mutex;
|
||||
pthread_mutexattr_t attr;
|
||||
|
||||
// Disable copy construction and assignment.
|
||||
mutex_t (const mutex_t &);
|
||||
const mutex_t &operator= (const mutex_t &);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
@ -160,54 +136,42 @@ namespace zmq
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
struct scoped_lock_t
|
||||
struct scoped_lock_t
|
||||
{
|
||||
scoped_lock_t (mutex_t &mutex_) : mutex (mutex_) { mutex.lock (); }
|
||||
|
||||
~scoped_lock_t () { mutex.unlock (); }
|
||||
|
||||
private:
|
||||
mutex_t &mutex;
|
||||
|
||||
// Disable copy construction and assignment.
|
||||
scoped_lock_t (const scoped_lock_t &);
|
||||
const scoped_lock_t &operator= (const scoped_lock_t &);
|
||||
};
|
||||
|
||||
|
||||
struct scoped_optional_lock_t
|
||||
{
|
||||
scoped_optional_lock_t (mutex_t *mutex_) : mutex (mutex_)
|
||||
{
|
||||
scoped_lock_t (mutex_t& mutex_)
|
||||
: mutex (mutex_)
|
||||
{
|
||||
mutex.lock ();
|
||||
}
|
||||
if (mutex != NULL)
|
||||
mutex->lock ();
|
||||
}
|
||||
|
||||
~scoped_lock_t ()
|
||||
{
|
||||
mutex.unlock ();
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
mutex_t& mutex;
|
||||
|
||||
// Disable copy construction and assignment.
|
||||
scoped_lock_t (const scoped_lock_t&);
|
||||
const scoped_lock_t &operator = (const scoped_lock_t&);
|
||||
};
|
||||
|
||||
|
||||
struct scoped_optional_lock_t
|
||||
~scoped_optional_lock_t ()
|
||||
{
|
||||
scoped_optional_lock_t (mutex_t* mutex_)
|
||||
: mutex (mutex_)
|
||||
{
|
||||
if(mutex != NULL)
|
||||
mutex->lock ();
|
||||
}
|
||||
|
||||
~scoped_optional_lock_t ()
|
||||
{
|
||||
if(mutex != NULL)
|
||||
mutex->unlock ();
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
mutex_t* mutex;
|
||||
|
||||
// Disable copy construction and assignment.
|
||||
scoped_optional_lock_t (const scoped_lock_t&);
|
||||
const scoped_optional_lock_t &operator = (const scoped_lock_t&);
|
||||
};
|
||||
if (mutex != NULL)
|
||||
mutex->unlock ();
|
||||
}
|
||||
private:
mutex_t *mutex;

// Disable copy construction and assignment.
scoped_optional_lock_t (const scoped_lock_t &);
const scoped_optional_lock_t &operator= (const scoped_lock_t &);
};
}

#endif
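
A minimal usage sketch of the RAII guards in this hunk, assuming the header is mutex.hpp and using an illustrative shared counter that is not part of the diff:

#include "mutex.hpp"   // assumed header name for the classes shown above

static zmq::mutex_t counter_sync;
static int shared_counter = 0;   // illustrative state, not from the diff

void increment_counter ()
{
    // The lock is taken here and released automatically when 'guard'
    // goes out of scope, including on early return.
    zmq::scoped_lock_t guard (counter_sync);
    ++shared_counter;
}

void increment_if_guarded (zmq::mutex_t *sync_or_null)
{
    // scoped_optional_lock_t accepts NULL, in which case it locks nothing.
    zmq::scoped_optional_lock_t guard (sync_or_null);
    ++shared_counter;
}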
@ -9,88 +9,90 @@
|
||||
#include "session_base.hpp"
|
||||
#include "v2_protocol.hpp"
|
||||
|
||||
zmq::norm_engine_t::norm_engine_t(io_thread_t* parent_,
|
||||
const options_t& options_)
|
||||
: io_object_t(parent_), zmq_session(NULL), options(options_),
|
||||
norm_instance(NORM_INSTANCE_INVALID), norm_session(NORM_SESSION_INVALID),
|
||||
is_sender(false), is_receiver(false),
|
||||
zmq_encoder(0), norm_tx_stream(NORM_OBJECT_INVALID),
|
||||
tx_first_msg(true), tx_more_bit(false),
|
||||
zmq_output_ready(false), norm_tx_ready(false),
|
||||
tx_index(0), tx_len(0),
|
||||
zmq_input_ready(false)
|
||||
zmq::norm_engine_t::norm_engine_t (io_thread_t *parent_,
|
||||
const options_t &options_) :
|
||||
io_object_t (parent_),
|
||||
zmq_session (NULL),
|
||||
options (options_),
|
||||
norm_instance (NORM_INSTANCE_INVALID),
|
||||
norm_session (NORM_SESSION_INVALID),
|
||||
is_sender (false),
|
||||
is_receiver (false),
|
||||
zmq_encoder (0),
|
||||
norm_tx_stream (NORM_OBJECT_INVALID),
|
||||
tx_first_msg (true),
|
||||
tx_more_bit (false),
|
||||
zmq_output_ready (false),
|
||||
norm_tx_ready (false),
|
||||
tx_index (0),
|
||||
tx_len (0),
|
||||
zmq_input_ready (false)
|
||||
{
|
||||
int rc = tx_msg.init();
|
||||
errno_assert(0 == rc);
|
||||
int rc = tx_msg.init ();
|
||||
errno_assert (0 == rc);
|
||||
}
|
||||
|
||||
zmq::norm_engine_t::~norm_engine_t()
|
||||
zmq::norm_engine_t::~norm_engine_t ()
|
||||
{
|
||||
shutdown(); // in case it was not already called
|
||||
shutdown (); // in case it was not already called
|
||||
}
|
||||
|
||||
|
||||
int zmq::norm_engine_t::init(const char* network_, bool send, bool recv)
|
||||
int zmq::norm_engine_t::init (const char *network_, bool send, bool recv)
|
||||
{
|
||||
// Parse the "network_" address int "iface", "addr", and "port"
|
||||
// norm endpoint format: [id,][<iface>;]<addr>:<port>
|
||||
// First, look for optional local NormNodeId
|
||||
// (default NORM_NODE_ANY causes NORM to use host IP addr for NormNodeId)
|
||||
NormNodeId localId = NORM_NODE_ANY;
|
||||
const char* ifacePtr = strchr(network_, ',');
|
||||
if (NULL != ifacePtr)
|
||||
{
|
||||
const char *ifacePtr = strchr (network_, ',');
|
||||
if (NULL != ifacePtr) {
|
||||
size_t idLen = ifacePtr - network_;
|
||||
if (idLen > 31) idLen = 31;
|
||||
if (idLen > 31)
|
||||
idLen = 31;
|
||||
char idText[32];
|
||||
strncpy(idText, network_, idLen);
|
||||
strncpy (idText, network_, idLen);
|
||||
idText[idLen] = '\0';
|
||||
localId = (NormNodeId)atoi(idText);
|
||||
localId = (NormNodeId) atoi (idText);
|
||||
ifacePtr++;
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
ifacePtr = network_;
|
||||
}
|
||||
|
||||
// Second, look for optional multicast ifaceName
|
||||
char ifaceName[256];
|
||||
const char* addrPtr = strchr(ifacePtr, ';');
|
||||
if (NULL != addrPtr)
|
||||
{
|
||||
const char *addrPtr = strchr (ifacePtr, ';');
|
||||
if (NULL != addrPtr) {
|
||||
size_t ifaceLen = addrPtr - ifacePtr;
|
||||
if (ifaceLen > 255) ifaceLen = 255; // return error instead?
|
||||
strncpy(ifaceName, ifacePtr, ifaceLen);
|
||||
if (ifaceLen > 255)
|
||||
ifaceLen = 255; // return error instead?
|
||||
strncpy (ifaceName, ifacePtr, ifaceLen);
|
||||
ifaceName[ifaceLen] = '\0';
|
||||
ifacePtr = ifaceName;
|
||||
addrPtr++;
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
addrPtr = ifacePtr;
|
||||
ifacePtr = NULL;
|
||||
}
|
||||
|
||||
// Finally, parse IP address and port number
|
||||
const char* portPtr = strrchr(addrPtr, ':');
|
||||
if (NULL == portPtr)
|
||||
{
|
||||
const char *portPtr = strrchr (addrPtr, ':');
|
||||
if (NULL == portPtr) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
char addr[256];
|
||||
size_t addrLen = portPtr - addrPtr;
|
||||
if (addrLen > 255) addrLen = 255;
|
||||
strncpy(addr, addrPtr, addrLen);
|
||||
if (addrLen > 255)
|
||||
addrLen = 255;
|
||||
strncpy (addr, addrPtr, addrLen);
|
||||
addr[addrLen] = '\0';
|
||||
portPtr++;
|
||||
unsigned short portNumber = atoi(portPtr);
|
||||
unsigned short portNumber = atoi (portPtr);
|
||||
|
||||
if (NORM_INSTANCE_INVALID == norm_instance)
{
if (NORM_INSTANCE_INVALID == (norm_instance = NormCreateInstance()))
{
if (NORM_INSTANCE_INVALID == norm_instance) {
if (NORM_INSTANCE_INVALID == (norm_instance = NormCreateInstance ())) {
// errno set by whatever caused NormCreateInstance() to fail
return -1;
}
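
For reference, the endpoint syntax parsed above is [id,][<iface>;]<addr>:<port>; the strings below are an illustrative sketch of inputs this parser accepts (the concrete values are assumptions, not taken from the diff):

// Illustrative NORM endpoint strings for norm_engine_t::init():
const char *ep_addr_only = "224.1.2.3:5556";         // <addr>:<port>
const char *ep_with_iface = "eth0;224.1.2.3:5556";   // <iface>;<addr>:<port>
const char *ep_with_id = "2,eth0;224.1.2.3:5556";    // <id>,<iface>;<addr>:<port>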
@ -103,46 +105,44 @@ int zmq::norm_engine_t::init(const char* network_, bool send, bool recv)
|
||||
// c) Randomize and implement a NORM session layer
|
||||
// conflict detection/resolution protocol
|
||||
|
||||
norm_session = NormCreateSession(norm_instance, addr, portNumber, localId);
|
||||
if (NORM_SESSION_INVALID == norm_session)
|
||||
{
|
||||
norm_session = NormCreateSession (norm_instance, addr, portNumber, localId);
|
||||
if (NORM_SESSION_INVALID == norm_session) {
|
||||
int savedErrno = errno;
|
||||
NormDestroyInstance(norm_instance);
|
||||
NormDestroyInstance (norm_instance);
|
||||
norm_instance = NORM_INSTANCE_INVALID;
|
||||
errno = savedErrno;
|
||||
return -1;
|
||||
}
|
||||
// There's many other useful NORM options that could be applied here
|
||||
if (NormIsUnicastAddress(addr))
|
||||
{
|
||||
NormSetDefaultUnicastNack(norm_session, true);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (NormIsUnicastAddress (addr)) {
|
||||
NormSetDefaultUnicastNack (norm_session, true);
|
||||
} else {
|
||||
// These only apply for multicast sessions
|
||||
//NormSetTTL(norm_session, options.multicast_hops); // ZMQ default is 1
|
||||
NormSetTTL(norm_session, 255); // since the ZMQ_MULTICAST_HOPS socket option isn't well-supported
|
||||
NormSetRxPortReuse(norm_session, true); // port reuse doesn't work for non-connected unicast
|
||||
NormSetLoopback(norm_session, true); // needed when multicast users on same machine
|
||||
if (NULL != ifacePtr)
|
||||
{
|
||||
NormSetTTL (
|
||||
norm_session,
|
||||
255); // since the ZMQ_MULTICAST_HOPS socket option isn't well-supported
|
||||
NormSetRxPortReuse (
|
||||
norm_session,
|
||||
true); // port reuse doesn't work for non-connected unicast
|
||||
NormSetLoopback (norm_session,
|
||||
true); // needed when multicast users on same machine
|
||||
if (NULL != ifacePtr) {
|
||||
// Note a bad interface may not be caught until sender or receiver start
|
||||
// (Since sender/receiver is not yet started, this always succeeds here)
|
||||
NormSetMulticastInterface(norm_session, ifacePtr);
|
||||
NormSetMulticastInterface (norm_session, ifacePtr);
|
||||
}
|
||||
}
|
||||
|
||||
if (recv)
|
||||
{
|
||||
if (recv) {
|
||||
// The alternative NORM_SYNC_CURRENT here would provide "instant"
|
||||
// receiver sync to the sender's _current_ message transmission.
|
||||
// NORM_SYNC_STREAM tries to get everything the sender has cached/buffered
|
||||
NormSetDefaultSyncPolicy(norm_session, NORM_SYNC_STREAM);
|
||||
if (!NormStartReceiver(norm_session, 2*1024*1024))
|
||||
{
|
||||
NormSetDefaultSyncPolicy (norm_session, NORM_SYNC_STREAM);
|
||||
if (!NormStartReceiver (norm_session, 2 * 1024 * 1024)) {
|
||||
// errno set by whatever failed
|
||||
int savedErrno = errno;
|
||||
NormDestroyInstance(norm_instance); // session gets closed, too
|
||||
NormDestroyInstance (norm_instance); // session gets closed, too
|
||||
norm_session = NORM_SESSION_INVALID;
|
||||
norm_instance = NORM_INSTANCE_INVALID;
|
||||
errno = savedErrno;
|
||||
@ -151,29 +151,29 @@ int zmq::norm_engine_t::init(const char* network_, bool send, bool recv)
|
||||
is_receiver = true;
|
||||
}
|
||||
|
||||
if (send)
|
||||
{
|
||||
if (send) {
|
||||
// Pick a random sender instance id (aka norm sender session id)
|
||||
NormSessionId instanceId = NormGetRandomSessionId();
|
||||
NormSessionId instanceId = NormGetRandomSessionId ();
|
||||
// TBD - provide "options" for some NORM sender parameters
|
||||
if (!NormStartSender(norm_session, instanceId, 2*1024*1024, 1400, 16, 4))
|
||||
{
|
||||
if (!NormStartSender (norm_session, instanceId, 2 * 1024 * 1024, 1400,
|
||||
16, 4)) {
|
||||
// errno set by whatever failed
|
||||
int savedErrno = errno;
|
||||
NormDestroyInstance(norm_instance); // session gets closed, too
|
||||
NormDestroyInstance (norm_instance); // session gets closed, too
|
||||
norm_session = NORM_SESSION_INVALID;
|
||||
norm_instance = NORM_INSTANCE_INVALID;
|
||||
errno = savedErrno;
|
||||
return -1;
|
||||
}
|
||||
NormSetCongestionControl(norm_session, true);
|
||||
NormSetCongestionControl (norm_session, true);
|
||||
norm_tx_ready = true;
|
||||
is_sender = true;
|
||||
if (NORM_OBJECT_INVALID == (norm_tx_stream = NormStreamOpen(norm_session, 2*1024*1024)))
|
||||
{
|
||||
if (NORM_OBJECT_INVALID
|
||||
== (norm_tx_stream =
|
||||
NormStreamOpen (norm_session, 2 * 1024 * 1024))) {
|
||||
// errno set by whatever failed
|
||||
int savedErrno = errno;
|
||||
NormDestroyInstance(norm_instance); // session gets closed, too
|
||||
NormDestroyInstance (norm_instance); // session gets closed, too
|
||||
norm_session = NORM_SESSION_INVALID;
|
||||
norm_instance = NORM_INSTANCE_INVALID;
|
||||
errno = savedErrno;
|
||||
@ -185,100 +185,95 @@ int zmq::norm_engine_t::init(const char* network_, bool send, bool recv)
|
||||
//NormSetDebugLevel(3);
|
||||
//NormOpenDebugLog(norm_instance, "normLog.txt");
|
||||
|
||||
return 0; // no error
|
||||
} // end zmq::norm_engine_t::init()
|
||||
return 0; // no error
|
||||
} // end zmq::norm_engine_t::init()
|
||||
|
||||
void zmq::norm_engine_t::shutdown()
|
||||
void zmq::norm_engine_t::shutdown ()
|
||||
{
|
||||
// TBD - implement a more graceful shutdown option
|
||||
if (is_receiver)
|
||||
{
|
||||
NormStopReceiver(norm_session);
|
||||
if (is_receiver) {
|
||||
NormStopReceiver (norm_session);
|
||||
|
||||
// delete any active NormRxStreamState
|
||||
rx_pending_list.Destroy();
|
||||
rx_ready_list.Destroy();
|
||||
msg_ready_list.Destroy();
|
||||
rx_pending_list.Destroy ();
|
||||
rx_ready_list.Destroy ();
|
||||
msg_ready_list.Destroy ();
|
||||
|
||||
is_receiver = false;
|
||||
}
|
||||
if (is_sender)
|
||||
{
|
||||
NormStopSender(norm_session);
|
||||
if (is_sender) {
|
||||
NormStopSender (norm_session);
|
||||
is_sender = false;
|
||||
}
|
||||
if (NORM_SESSION_INVALID != norm_session)
|
||||
{
|
||||
NormDestroySession(norm_session);
|
||||
if (NORM_SESSION_INVALID != norm_session) {
|
||||
NormDestroySession (norm_session);
|
||||
norm_session = NORM_SESSION_INVALID;
|
||||
}
|
||||
if (NORM_INSTANCE_INVALID != norm_instance)
|
||||
{
|
||||
NormStopInstance(norm_instance);
|
||||
NormDestroyInstance(norm_instance);
|
||||
if (NORM_INSTANCE_INVALID != norm_instance) {
|
||||
NormStopInstance (norm_instance);
|
||||
NormDestroyInstance (norm_instance);
|
||||
norm_instance = NORM_INSTANCE_INVALID;
|
||||
}
|
||||
} // end zmq::norm_engine_t::shutdown()
|
||||
} // end zmq::norm_engine_t::shutdown()
|
||||
|
||||
void zmq::norm_engine_t::plug (io_thread_t* io_thread_, session_base_t *session_)
|
||||
void zmq::norm_engine_t::plug (io_thread_t *io_thread_,
|
||||
session_base_t *session_)
|
||||
{
|
||||
// TBD - we may assign the NORM engine to an io_thread in the future???
|
||||
zmq_session = session_;
|
||||
if (is_sender) zmq_output_ready = true;
|
||||
if (is_receiver) zmq_input_ready = true;
|
||||
if (is_sender)
|
||||
zmq_output_ready = true;
|
||||
if (is_receiver)
|
||||
zmq_input_ready = true;
|
||||
|
||||
fd_t normDescriptor = NormGetDescriptor(norm_instance);
|
||||
norm_descriptor_handle = add_fd(normDescriptor);
|
||||
fd_t normDescriptor = NormGetDescriptor (norm_instance);
|
||||
norm_descriptor_handle = add_fd (normDescriptor);
|
||||
// Set POLLIN for notification of pending NormEvents
|
||||
set_pollin(norm_descriptor_handle);
|
||||
set_pollin (norm_descriptor_handle);
|
||||
|
||||
if (is_sender) send_data();
|
||||
if (is_sender)
|
||||
send_data ();
|
||||
|
||||
} // end zmq::norm_engine_t::init()
|
||||
} // end zmq::norm_engine_t::init()
|
||||
|
||||
void zmq::norm_engine_t::unplug()
|
||||
void zmq::norm_engine_t::unplug ()
|
||||
{
|
||||
rm_fd(norm_descriptor_handle);
|
||||
rm_fd (norm_descriptor_handle);
|
||||
|
||||
zmq_session = NULL;
|
||||
} // end zmq::norm_engine_t::unplug()
|
||||
} // end zmq::norm_engine_t::unplug()
|
||||
|
||||
void zmq::norm_engine_t::terminate()
|
||||
void zmq::norm_engine_t::terminate ()
|
||||
{
|
||||
unplug();
|
||||
shutdown();
|
||||
unplug ();
|
||||
shutdown ();
|
||||
delete this;
|
||||
}
|
||||
|
||||
void zmq::norm_engine_t::restart_output()
|
||||
void zmq::norm_engine_t::restart_output ()
|
||||
{
|
||||
// There's new message data available from the session
|
||||
zmq_output_ready = true;
|
||||
if (norm_tx_ready) send_data();
|
||||
if (norm_tx_ready)
|
||||
send_data ();
|
||||
|
||||
} // end zmq::norm_engine_t::restart_output()
|
||||
} // end zmq::norm_engine_t::restart_output()
|
||||
|
||||
void zmq::norm_engine_t::send_data()
|
||||
void zmq::norm_engine_t::send_data ()
|
||||
{
|
||||
// Here we write as much as is available or we can
|
||||
while (zmq_output_ready && norm_tx_ready)
|
||||
{
|
||||
if (0 == tx_len)
|
||||
{
|
||||
while (zmq_output_ready && norm_tx_ready) {
|
||||
if (0 == tx_len) {
|
||||
// Our tx_buffer needs data to send
|
||||
// Get more data from encoder
|
||||
size_t space = BUFFER_SIZE;
|
||||
unsigned char* bufPtr = (unsigned char*)tx_buffer;
|
||||
tx_len = zmq_encoder.encode(&bufPtr, space);
|
||||
if (0 == tx_len)
|
||||
{
|
||||
if (tx_first_msg)
|
||||
{
|
||||
unsigned char *bufPtr = (unsigned char *) tx_buffer;
|
||||
tx_len = zmq_encoder.encode (&bufPtr, space);
|
||||
if (0 == tx_len) {
|
||||
if (tx_first_msg) {
|
||||
// We don't need to mark eom/flush until a message is sent
|
||||
tx_first_msg = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
// A prior message was completely written to stream, so
|
||||
// mark end-of-message and possibly flush (to force packet transmission,
|
||||
// even if it's not a full segment so message gets delivered quickly)
|
||||
@ -286,16 +281,15 @@ void zmq::norm_engine_t::send_data()
|
||||
// Note NORM_FLUSH_ACTIVE makes NORM fairly chatty for low duty cycle messaging
|
||||
// but makes sure content is delivered quickly. Positive acknowledgements
|
||||
// with flush override would make NORM more succinct here
|
||||
NormStreamFlush(norm_tx_stream, true, NORM_FLUSH_ACTIVE);
|
||||
NormStreamFlush (norm_tx_stream, true, NORM_FLUSH_ACTIVE);
|
||||
}
|
||||
// Need to pull and load a new message to send
|
||||
if (-1 == zmq_session->pull_msg(&tx_msg))
|
||||
{
|
||||
if (-1 == zmq_session->pull_msg (&tx_msg)) {
|
||||
// We need to wait for "restart_output()" to be called by ZMQ
|
||||
zmq_output_ready = false;
|
||||
break;
|
||||
}
|
||||
zmq_encoder.load_msg(&tx_msg);
|
||||
zmq_encoder.load_msg (&tx_msg);
|
||||
// Should we write message size header for NORM to use? Or expect NORM
|
||||
// receiver to decode ZMQ message framing format(s)?
|
||||
// OK - we need to use a byte to denote when the ZMQ frame is the _first_
|
||||
@ -304,71 +298,68 @@ void zmq::norm_engine_t::send_data()
|
||||
// I.e.,If more_flag _was_ false previously, this is the first
|
||||
// frame of a ZMQ message.
|
||||
if (tx_more_bit)
|
||||
tx_buffer[0] = (char)0xff; // this is not first frame of message
|
||||
tx_buffer[0] =
|
||||
(char) 0xff; // this is not first frame of message
|
||||
else
|
||||
tx_buffer[0] = 0x00; // this is first frame of message
|
||||
tx_more_bit = (0 != (tx_msg.flags() & msg_t::more));
|
||||
tx_buffer[0] = 0x00; // this is first frame of message
|
||||
tx_more_bit = (0 != (tx_msg.flags () & msg_t::more));
|
||||
// Go ahead and get a first chunk of the message
|
||||
bufPtr++;
|
||||
space--;
|
||||
tx_len = 1 + zmq_encoder.encode(&bufPtr, space);
|
||||
tx_len = 1 + zmq_encoder.encode (&bufPtr, space);
|
||||
tx_index = 0;
|
||||
}
|
||||
}
|
||||
// Do we have data in our tx_buffer pending
|
||||
if (tx_index < tx_len)
|
||||
{
|
||||
if (tx_index < tx_len) {
|
||||
// We have data in our tx_buffer to send, so write it to the stream
|
||||
tx_index += NormStreamWrite(norm_tx_stream, tx_buffer + tx_index, tx_len - tx_index);
|
||||
if (tx_index < tx_len)
|
||||
{
|
||||
tx_index += NormStreamWrite (norm_tx_stream, tx_buffer + tx_index,
|
||||
tx_len - tx_index);
|
||||
if (tx_index < tx_len) {
|
||||
// NORM stream buffer full, wait for NORM_TX_QUEUE_VACANCY
|
||||
norm_tx_ready = false;
|
||||
break;
|
||||
}
|
||||
tx_len = 0; // all buffered data was written
|
||||
tx_len = 0; // all buffered data was written
|
||||
}
|
||||
} // end while (zmq_output_ready && norm_tx_ready)
|
||||
} // end zmq::norm_engine_t::send_data()
|
||||
} // end while (zmq_output_ready && norm_tx_ready)
|
||||
} // end zmq::norm_engine_t::send_data()
|
||||
|
||||
void zmq::norm_engine_t::in_event()
|
||||
void zmq::norm_engine_t::in_event ()
|
||||
{
|
||||
// This means a NormEvent is pending, so call NormGetNextEvent() and handle
|
||||
NormEvent event;
|
||||
if (!NormGetNextEvent(norm_instance, &event))
|
||||
{
|
||||
if (!NormGetNextEvent (norm_instance, &event)) {
|
||||
// NORM has died before we unplugged?!
|
||||
zmq_assert(false);
|
||||
zmq_assert (false);
|
||||
return;
|
||||
}
|
||||
|
||||
switch(event.type)
|
||||
{
|
||||
switch (event.type) {
|
||||
case NORM_TX_QUEUE_VACANCY:
|
||||
case NORM_TX_QUEUE_EMPTY:
|
||||
if (!norm_tx_ready)
|
||||
{
|
||||
if (!norm_tx_ready) {
|
||||
norm_tx_ready = true;
|
||||
send_data();
|
||||
send_data ();
|
||||
}
|
||||
break;
|
||||
|
||||
case NORM_RX_OBJECT_NEW:
|
||||
//break;
|
||||
case NORM_RX_OBJECT_UPDATED:
|
||||
recv_data(event.object);
|
||||
recv_data (event.object);
|
||||
break;
|
||||
|
||||
case NORM_RX_OBJECT_ABORTED:
|
||||
{
|
||||
NormRxStreamState* rxState = (NormRxStreamState*)NormObjectGetUserData(event.object);
|
||||
if (NULL != rxState)
|
||||
{
|
||||
case NORM_RX_OBJECT_ABORTED: {
|
||||
NormRxStreamState *rxState =
|
||||
(NormRxStreamState *) NormObjectGetUserData (event.object);
|
||||
if (NULL != rxState) {
|
||||
// Remove the state from the list it's in
|
||||
// This is now unnecessary since deletion takes care of list removal
|
||||
// but in the interest of being clear ...
|
||||
NormRxStreamState::List* list = rxState->AccessList();
|
||||
if (NULL != list) list->Remove(*rxState);
|
||||
NormRxStreamState::List *list = rxState->AccessList ();
|
||||
if (NULL != list)
|
||||
list->Remove (*rxState);
|
||||
}
|
||||
delete rxState;
|
||||
break;
|
||||
@ -382,278 +373,262 @@ void zmq::norm_engine_t::in_event()
|
||||
// user configurable timeout here to wait some amount of time
|
||||
// after this event to declare the remote sender truly dead
|
||||
// and delete its state???
|
||||
NormNodeDelete(event.sender);
|
||||
NormNodeDelete (event.sender);
|
||||
break;
|
||||
|
||||
default:
|
||||
// We ignore some NORM events
|
||||
break;
|
||||
}
|
||||
} // zmq::norm_engine_t::in_event()
|
||||
} // zmq::norm_engine_t::in_event()
|
||||
|
||||
void zmq::norm_engine_t::restart_input()
|
||||
void zmq::norm_engine_t::restart_input ()
|
||||
{
|
||||
// TBD - should we check/assert that zmq_input_ready was false???
|
||||
zmq_input_ready = true;
|
||||
// Process any pending received messages
|
||||
if (!msg_ready_list.IsEmpty())
|
||||
recv_data(NORM_OBJECT_INVALID);
|
||||
if (!msg_ready_list.IsEmpty ())
|
||||
recv_data (NORM_OBJECT_INVALID);
|
||||
|
||||
} // end zmq::norm_engine_t::restart_input()
|
||||
} // end zmq::norm_engine_t::restart_input()
|
||||
|
||||
void zmq::norm_engine_t::recv_data(NormObjectHandle object)
|
||||
void zmq::norm_engine_t::recv_data (NormObjectHandle object)
|
||||
{
|
||||
if (NORM_OBJECT_INVALID != object)
|
||||
{
|
||||
if (NORM_OBJECT_INVALID != object) {
|
||||
// Call result of NORM_RX_OBJECT_UPDATED notification
|
||||
// This is a rx_ready indication for a new or existing rx stream
|
||||
// First, determine if this is a stream we already know
|
||||
zmq_assert(NORM_OBJECT_STREAM == NormObjectGetType(object));
|
||||
zmq_assert (NORM_OBJECT_STREAM == NormObjectGetType (object));
|
||||
// Since there can be multiple senders (publishers), we keep
|
||||
// state for each separate rx stream.
|
||||
NormRxStreamState* rxState = (NormRxStreamState*)NormObjectGetUserData(object);
|
||||
if (NULL == rxState)
|
||||
{
|
||||
NormRxStreamState *rxState =
|
||||
(NormRxStreamState *) NormObjectGetUserData (object);
|
||||
if (NULL == rxState) {
|
||||
// This is a new stream, so create rxState with zmq decoder, etc
|
||||
rxState = new (std::nothrow) NormRxStreamState(object, options.maxmsgsize);
|
||||
errno_assert(rxState);
|
||||
rxState =
|
||||
new (std::nothrow) NormRxStreamState (object, options.maxmsgsize);
|
||||
errno_assert (rxState);
|
||||
|
||||
if (!rxState->Init())
|
||||
{
|
||||
errno_assert(false);
|
||||
if (!rxState->Init ()) {
|
||||
errno_assert (false);
|
||||
delete rxState;
|
||||
return;
|
||||
}
|
||||
NormObjectSetUserData(object, rxState);
|
||||
}
|
||||
else if (!rxState->IsRxReady())
|
||||
{
|
||||
NormObjectSetUserData (object, rxState);
|
||||
} else if (!rxState->IsRxReady ()) {
|
||||
// Existing non-ready stream, so remove from pending
|
||||
// list to be promoted to rx_ready_list ...
|
||||
rx_pending_list.Remove(*rxState);
|
||||
rx_pending_list.Remove (*rxState);
|
||||
}
|
||||
if (!rxState->IsRxReady())
|
||||
{
|
||||
if (!rxState->IsRxReady ()) {
|
||||
// TBD - prepend up front for immediate service?
|
||||
rxState->SetRxReady(true);
|
||||
rx_ready_list.Append(*rxState);
|
||||
rxState->SetRxReady (true);
|
||||
rx_ready_list.Append (*rxState);
|
||||
}
|
||||
}
|
||||
// This loop repeats until we've read all data available from "rx ready" inbound streams
|
||||
// and pushed any accumulated messages we can up to the zmq session.
|
||||
while (!rx_ready_list.IsEmpty() || (zmq_input_ready && !msg_ready_list.IsEmpty()))
|
||||
{
|
||||
while (!rx_ready_list.IsEmpty ()
|
||||
|| (zmq_input_ready && !msg_ready_list.IsEmpty ())) {
|
||||
// Iterate through our rx_ready streams, reading data into the decoder
|
||||
// (This services incoming "rx ready" streams in a round-robin fashion)
|
||||
NormRxStreamState::List::Iterator iterator(rx_ready_list);
|
||||
NormRxStreamState* rxState;
|
||||
while (NULL != (rxState = iterator.GetNextItem()))
|
||||
{
|
||||
switch(rxState->Decode())
|
||||
{
|
||||
case 1: // msg completed
|
||||
NormRxStreamState::List::Iterator iterator (rx_ready_list);
|
||||
NormRxStreamState *rxState;
|
||||
while (NULL != (rxState = iterator.GetNextItem ())) {
|
||||
switch (rxState->Decode ()) {
|
||||
case 1: // msg completed
|
||||
// Complete message decoded, move this stream to msg_ready_list
|
||||
// to push the message up to the session below. Note the stream
|
||||
// will be returned to the "rx_ready_list" after that's done
|
||||
rx_ready_list.Remove(*rxState);
|
||||
msg_ready_list.Append(*rxState);
|
||||
rx_ready_list.Remove (*rxState);
|
||||
msg_ready_list.Append (*rxState);
|
||||
continue;
|
||||
|
||||
case -1: // decoding error (shouldn't happen w/ NORM, but ...)
|
||||
// We need to re-sync this stream (decoder buffer was reset)
|
||||
rxState->SetSync(false);
|
||||
rxState->SetSync (false);
|
||||
break;
|
||||
|
||||
default: // 0 - need more data
|
||||
default: // 0 - need more data
|
||||
break;
|
||||
}
|
||||
// Get more data from this stream
|
||||
NormObjectHandle stream = rxState->GetStreamHandle();
|
||||
NormObjectHandle stream = rxState->GetStreamHandle ();
|
||||
// First, make sure we're in sync ...
|
||||
while (!rxState->InSync())
|
||||
{
|
||||
while (!rxState->InSync ()) {
|
||||
// seek NORM message start
|
||||
if (!NormStreamSeekMsgStart(stream))
|
||||
{
|
||||
if (!NormStreamSeekMsgStart (stream)) {
|
||||
// Need to wait for more data
|
||||
break;
|
||||
}
|
||||
// read message 'flag' byte to see if this it's a 'final' frame
|
||||
char syncFlag;
|
||||
unsigned int numBytes = 1;
|
||||
if (!NormStreamRead(stream, &syncFlag, &numBytes))
|
||||
{
|
||||
if (!NormStreamRead (stream, &syncFlag, &numBytes)) {
|
||||
// broken stream (shouldn't happen after seek msg start?)
|
||||
zmq_assert(false);
|
||||
zmq_assert (false);
|
||||
continue;
|
||||
}
|
||||
if (0 == numBytes)
|
||||
{
|
||||
if (0 == numBytes) {
|
||||
// This probably shouldn't happen either since we found msg start
|
||||
// Need to wait for more data
|
||||
break;
|
||||
}
|
||||
if (0 == syncFlag) rxState->SetSync(true);
|
||||
if (0 == syncFlag)
|
||||
rxState->SetSync (true);
|
||||
// else keep seeking ...
|
||||
} // end while(!rxState->InSync())
|
||||
if (!rxState->InSync())
|
||||
{
|
||||
} // end while(!rxState->InSync())
|
||||
if (!rxState->InSync ()) {
|
||||
// Need more data for this stream, so remove from "rx ready"
|
||||
// list and iterate to next "rx ready" stream
|
||||
rxState->SetRxReady(false);
|
||||
rxState->SetRxReady (false);
|
||||
// Move from rx_ready_list to rx_pending_list
|
||||
rx_ready_list.Remove(*rxState);
|
||||
rx_pending_list.Append(*rxState);
|
||||
rx_ready_list.Remove (*rxState);
|
||||
rx_pending_list.Append (*rxState);
|
||||
continue;
|
||||
}
|
||||
// Now we're actually ready to read data from the NORM stream to the zmq_decoder
|
||||
// the underlying zmq_decoder->get_buffer() call sets how much is needed.
|
||||
unsigned int numBytes = rxState->GetBytesNeeded();
|
||||
if (!NormStreamRead(stream, rxState->AccessBuffer(), &numBytes))
|
||||
{
|
||||
unsigned int numBytes = rxState->GetBytesNeeded ();
|
||||
if (!NormStreamRead (stream, rxState->AccessBuffer (), &numBytes)) {
|
||||
// broken NORM stream, so re-sync
|
||||
rxState->Init(); // TBD - check result
|
||||
rxState->Init (); // TBD - check result
|
||||
// This will retry syncing, and getting data from this stream
|
||||
// since we don't increment the "it" iterator
|
||||
continue;
|
||||
}
|
||||
rxState->IncrementBufferCount(numBytes);
|
||||
if (0 == numBytes)
|
||||
{
|
||||
rxState->IncrementBufferCount (numBytes);
|
||||
if (0 == numBytes) {
|
||||
// All the data available has been read
|
||||
// Need to wait for NORM_RX_OBJECT_UPDATED for this stream
|
||||
rxState->SetRxReady(false);
|
||||
rxState->SetRxReady (false);
|
||||
// Move from rx_ready_list to rx_pending_list
|
||||
rx_ready_list.Remove(*rxState);
|
||||
rx_pending_list.Append(*rxState);
|
||||
rx_ready_list.Remove (*rxState);
|
||||
rx_pending_list.Append (*rxState);
|
||||
}
|
||||
} // end while(NULL != (rxState = iterator.GetNextItem()))
|
||||
} // end while(NULL != (rxState = iterator.GetNextItem()))
|
||||
|
||||
if (zmq_input_ready)
|
||||
{
|
||||
if (zmq_input_ready) {
|
||||
// At this point, we've made a pass through the "rx_ready" stream list
|
||||
// Now make a pass through the "msg_pending" list (if the zmq session
|
||||
// ready for more input). This may possibly return streams back to
|
||||
// the "rx ready" stream list after their pending message is handled
|
||||
NormRxStreamState::List::Iterator iterator(msg_ready_list);
|
||||
NormRxStreamState* rxState;
|
||||
while (NULL != (rxState = iterator.GetNextItem()))
|
||||
{
|
||||
msg_t* msg = rxState->AccessMsg();
|
||||
int rc = zmq_session->push_msg(msg);
|
||||
if (-1 == rc)
|
||||
{
|
||||
if (EAGAIN == errno)
|
||||
{
|
||||
NormRxStreamState::List::Iterator iterator (msg_ready_list);
|
||||
NormRxStreamState *rxState;
|
||||
while (NULL != (rxState = iterator.GetNextItem ())) {
|
||||
msg_t *msg = rxState->AccessMsg ();
|
||||
int rc = zmq_session->push_msg (msg);
|
||||
if (-1 == rc) {
|
||||
if (EAGAIN == errno) {
|
||||
// need to wait until session calls "restart_input()"
|
||||
zmq_input_ready = false;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
// session rejected message?
|
||||
// TBD - handle this better
|
||||
zmq_assert(false);
|
||||
zmq_assert (false);
|
||||
}
|
||||
}
|
||||
// else message was accepted.
|
||||
msg_ready_list.Remove(*rxState);
|
||||
if (rxState->IsRxReady()) // Move back to "rx_ready" list to read more data
|
||||
rx_ready_list.Append(*rxState);
|
||||
else // Move back to "rx_pending" list until NORM_RX_OBJECT_UPDATED
|
||||
msg_ready_list.Append(*rxState);
|
||||
} // end while(NULL != (rxState = iterator.GetNextItem()))
|
||||
} // end if (zmq_input_ready)
|
||||
} // end while ((!rx_ready_list.empty() || (zmq_input_ready && !msg_ready_list.empty()))
|
||||
msg_ready_list.Remove (*rxState);
|
||||
if (
|
||||
rxState
|
||||
->IsRxReady ()) // Move back to "rx_ready" list to read more data
|
||||
rx_ready_list.Append (*rxState);
|
||||
else // Move back to "rx_pending" list until NORM_RX_OBJECT_UPDATED
|
||||
msg_ready_list.Append (*rxState);
|
||||
} // end while(NULL != (rxState = iterator.GetNextItem()))
|
||||
} // end if (zmq_input_ready)
|
||||
} // end while ((!rx_ready_list.empty() || (zmq_input_ready && !msg_ready_list.empty()))
|
||||
|
||||
// Alert zmq of the messages we have pushed up
|
||||
zmq_session->flush();
|
||||
zmq_session->flush ();
|
||||
|
||||
} // end zmq::norm_engine_t::recv_data()
|
||||
} // end zmq::norm_engine_t::recv_data()
|
||||
|
||||
zmq::norm_engine_t::NormRxStreamState::NormRxStreamState(NormObjectHandle normStream,
|
||||
int64_t maxMsgSize)
|
||||
: norm_stream(normStream), max_msg_size(maxMsgSize),
|
||||
in_sync(false), rx_ready(false), zmq_decoder(NULL), skip_norm_sync(false),
|
||||
buffer_ptr(NULL), buffer_size(0), buffer_count(0),
|
||||
prev(NULL), next(NULL), list(NULL)
|
||||
zmq::norm_engine_t::NormRxStreamState::NormRxStreamState (
|
||||
NormObjectHandle normStream, int64_t maxMsgSize) :
|
||||
norm_stream (normStream),
|
||||
max_msg_size (maxMsgSize),
|
||||
in_sync (false),
|
||||
rx_ready (false),
|
||||
zmq_decoder (NULL),
|
||||
skip_norm_sync (false),
|
||||
buffer_ptr (NULL),
|
||||
buffer_size (0),
|
||||
buffer_count (0),
|
||||
prev (NULL),
|
||||
next (NULL),
|
||||
list (NULL)
|
||||
{
|
||||
}
|
||||
|
||||
zmq::norm_engine_t::NormRxStreamState::~NormRxStreamState()
|
||||
zmq::norm_engine_t::NormRxStreamState::~NormRxStreamState ()
|
||||
{
|
||||
if (NULL != zmq_decoder)
|
||||
{
|
||||
if (NULL != zmq_decoder) {
|
||||
delete zmq_decoder;
|
||||
zmq_decoder = NULL;
|
||||
}
|
||||
if (NULL != list)
|
||||
{
|
||||
list->Remove(*this);
|
||||
if (NULL != list) {
|
||||
list->Remove (*this);
|
||||
list = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
bool zmq::norm_engine_t::NormRxStreamState::Init()
|
||||
bool zmq::norm_engine_t::NormRxStreamState::Init ()
|
||||
{
|
||||
in_sync = false;
|
||||
skip_norm_sync = false;
|
||||
if (NULL != zmq_decoder) delete zmq_decoder;
|
||||
if (NULL != zmq_decoder)
|
||||
delete zmq_decoder;
|
||||
// Note "in_batch_size" comes from config.h
|
||||
zmq_decoder = new (std::nothrow) v2_decoder_t (in_batch_size, max_msg_size);
|
||||
alloc_assert (zmq_decoder);
|
||||
if (NULL != zmq_decoder)
|
||||
{
|
||||
if (NULL != zmq_decoder) {
|
||||
buffer_count = 0;
|
||||
buffer_size = 0;
|
||||
zmq_decoder->get_buffer(&buffer_ptr, &buffer_size);
|
||||
zmq_decoder->get_buffer (&buffer_ptr, &buffer_size);
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::Init()
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::Init()
|
||||
|
||||
// This decodes any pending data sitting in our stream decoder buffer
|
||||
// It returns 1 upon message completion, -1 on error, 0 if more data is needed
|
||||
int zmq::norm_engine_t::NormRxStreamState::Decode()
|
||||
int zmq::norm_engine_t::NormRxStreamState::Decode ()
|
||||
{
|
||||
// If we have pending bytes to decode, process those first
|
||||
while (buffer_count > 0)
|
||||
{
|
||||
while (buffer_count > 0) {
|
||||
// There's pending data for the decoder to decode
|
||||
size_t processed = 0;
|
||||
|
||||
// This a bit of a kludgy approach used to weed
|
||||
// out the NORM ZMQ message transport "syncFlag" byte
|
||||
// from the ZMQ message stream being decoded (but it works!)
|
||||
if (skip_norm_sync)
|
||||
{
|
||||
if (skip_norm_sync) {
|
||||
buffer_ptr++;
|
||||
buffer_count--;
|
||||
skip_norm_sync = false;
|
||||
}
|
||||
|
||||
int rc = zmq_decoder->decode(buffer_ptr, buffer_count, processed);
|
||||
int rc = zmq_decoder->decode (buffer_ptr, buffer_count, processed);
|
||||
buffer_ptr += processed;
|
||||
buffer_count -= processed;
|
||||
switch (rc)
|
||||
{
|
||||
switch (rc) {
|
||||
case 1:
|
||||
// msg completed
|
||||
if (0 == buffer_count)
|
||||
{
|
||||
if (0 == buffer_count) {
|
||||
buffer_size = 0;
|
||||
zmq_decoder->get_buffer(&buffer_ptr, &buffer_size);
|
||||
zmq_decoder->get_buffer (&buffer_ptr, &buffer_size);
|
||||
}
|
||||
skip_norm_sync = true;
|
||||
return 1;
|
||||
case -1:
|
||||
// decoder error (reset decoder and state variables)
|
||||
in_sync = false;
|
||||
skip_norm_sync = false; // will get consumed by norm sync check
|
||||
Init();
|
||||
skip_norm_sync = false; // will get consumed by norm sync check
|
||||
Init ();
|
||||
break;
|
||||
|
||||
case 0:
|
||||
@ -664,33 +639,32 @@ int zmq::norm_engine_t::NormRxStreamState::Decode()
|
||||
// Reset buffer pointer/count for next read
|
||||
buffer_count = 0;
|
||||
buffer_size = 0;
|
||||
zmq_decoder->get_buffer(&buffer_ptr, &buffer_size);
|
||||
return 0; // need more data
|
||||
zmq_decoder->get_buffer (&buffer_ptr, &buffer_size);
|
||||
return 0; // need more data
|
||||
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::Decode()
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::Decode()
|
||||
|
||||
zmq::norm_engine_t::NormRxStreamState::List::List()
|
||||
: head(NULL), tail(NULL)
|
||||
zmq::norm_engine_t::NormRxStreamState::List::List () : head (NULL), tail (NULL)
|
||||
{
|
||||
}
|
||||
|
||||
zmq::norm_engine_t::NormRxStreamState::List::~List()
|
||||
zmq::norm_engine_t::NormRxStreamState::List::~List ()
|
||||
{
|
||||
Destroy();
|
||||
Destroy ();
|
||||
}
|
||||
|
||||
void zmq::norm_engine_t::NormRxStreamState::List::Destroy()
|
||||
void zmq::norm_engine_t::NormRxStreamState::List::Destroy ()
|
||||
{
|
||||
NormRxStreamState* item = head;
|
||||
while (NULL != item)
|
||||
{
|
||||
Remove(*item);
|
||||
NormRxStreamState *item = head;
|
||||
while (NULL != item) {
|
||||
Remove (*item);
|
||||
delete item;
|
||||
item = head;
|
||||
}
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Destroy()
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Destroy()
|
||||
|
||||
void zmq::norm_engine_t::NormRxStreamState::List::Append(NormRxStreamState& item)
|
||||
void zmq::norm_engine_t::NormRxStreamState::List::Append (
|
||||
NormRxStreamState &item)
|
||||
{
|
||||
item.prev = tail;
|
||||
if (NULL != tail)
|
||||
@ -700,33 +674,37 @@ void zmq::norm_engine_t::NormRxStreamState::List::Append(NormRxStreamState& item
|
||||
item.next = NULL;
|
||||
tail = &item;
|
||||
item.list = this;
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Append()
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Append()
|
||||
|
||||
void zmq::norm_engine_t::NormRxStreamState::List::Remove(NormRxStreamState& item)
|
||||
void zmq::norm_engine_t::NormRxStreamState::List::Remove (
|
||||
NormRxStreamState &item)
|
||||
{
|
||||
if (NULL != item.prev)
|
||||
item.prev->next = item.next;
|
||||
else
|
||||
head = item.next;
|
||||
if (NULL != item.next)
|
||||
item.next ->prev = item.prev;
|
||||
item.next->prev = item.prev;
|
||||
else
|
||||
tail = item.prev;
|
||||
item.prev = item.next = NULL;
|
||||
item.list = NULL;
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Remove()
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Remove()
|
||||
|
||||
zmq::norm_engine_t::NormRxStreamState::List::Iterator::Iterator(const List& list)
|
||||
: next_item(list.head)
|
||||
zmq::norm_engine_t::NormRxStreamState::List::Iterator::Iterator (
|
||||
const List &list) :
|
||||
next_item (list.head)
|
||||
{
|
||||
}
|
||||
|
||||
zmq::norm_engine_t::NormRxStreamState* zmq::norm_engine_t::NormRxStreamState::List::Iterator::GetNextItem()
|
||||
zmq::norm_engine_t::NormRxStreamState *
|
||||
zmq::norm_engine_t::NormRxStreamState::List::Iterator::GetNextItem ()
|
||||
{
|
||||
NormRxStreamState* nextItem = next_item;
|
||||
if (NULL != nextItem) next_item = nextItem->next;
|
||||
NormRxStreamState *nextItem = next_item;
|
||||
if (NULL != nextItem)
|
||||
next_item = nextItem->next;
|
||||
return nextItem;
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Iterator::GetNextItem()
|
||||
} // end zmq::norm_engine_t::NormRxStreamState::List::Iterator::GetNextItem()
|
||||
|
||||
const char *zmq::norm_engine_t::get_endpoint () const
|
||||
{
|
||||
|
@ -14,175 +14,170 @@
|
||||
|
||||
namespace zmq
|
||||
{
|
||||
class io_thread_t;
|
||||
class session_base_t;
|
||||
class io_thread_t;
|
||||
class session_base_t;
|
||||
|
||||
class norm_engine_t : public io_object_t, public i_engine
|
||||
class norm_engine_t : public io_object_t, public i_engine
|
||||
{
|
||||
public:
|
||||
norm_engine_t (zmq::io_thread_t *parent_, const options_t &options_);
|
||||
~norm_engine_t ();
|
||||
|
||||
// create NORM instance, session, etc
|
||||
int init (const char *network_, bool send, bool recv);
|
||||
void shutdown ();
|
||||
|
||||
// i_engine interface implementation.
|
||||
// Plug the engine to the session.
|
||||
virtual void plug (zmq::io_thread_t *io_thread_,
|
||||
class session_base_t *session_);
|
||||
|
||||
// Terminate and deallocate the engine. Note that 'detached'
|
||||
// events are not fired on termination.
|
||||
virtual void terminate ();
|
||||
|
||||
// This method is called by the session to signalise that more
|
||||
// messages can be written to the pipe.
|
||||
virtual void restart_input ();
|
||||
|
||||
// This method is called by the session to signalise that there
|
||||
// are messages to send available.
|
||||
virtual void restart_output ();
|
||||
|
||||
virtual void zap_msg_available (){};
|
||||
|
||||
virtual const char *get_endpoint () const;
|
||||
|
||||
// i_poll_events interface implementation.
|
||||
// (we only need in_event() for NormEvent notification)
|
||||
// (i.e., don't have any output events or timers (yet))
|
||||
void in_event ();
|
||||
|
||||
private:
|
||||
void unplug ();
|
||||
void send_data ();
|
||||
void recv_data (NormObjectHandle stream);
|
||||
|
||||
|
||||
enum
|
||||
{
|
||||
public:
|
||||
norm_engine_t (zmq::io_thread_t *parent_, const options_t &options_);
|
||||
~norm_engine_t ();
|
||||
BUFFER_SIZE = 2048
|
||||
};
|
||||
|
||||
// create NORM instance, session, etc
|
||||
int init(const char* network_, bool send, bool recv);
|
||||
void shutdown();
|
||||
// Used to keep track of streams from multiple senders
|
||||
class NormRxStreamState
|
||||
{
|
||||
public:
|
||||
NormRxStreamState (NormObjectHandle normStream, int64_t maxMsgSize);
|
||||
~NormRxStreamState ();
|
||||
|
||||
// i_engine interface implementation.
|
||||
// Plug the engine to the session.
|
||||
virtual void plug (zmq::io_thread_t *io_thread_,
|
||||
class session_base_t *session_);
|
||||
NormObjectHandle GetStreamHandle () const { return norm_stream; }
|
||||
|
||||
// Terminate and deallocate the engine. Note that 'detached'
|
||||
// events are not fired on termination.
|
||||
virtual void terminate ();
|
||||
bool Init ();
|
||||
|
||||
// This method is called by the session to signalise that more
|
||||
// messages can be written to the pipe.
|
||||
virtual void restart_input ();
|
||||
void SetRxReady (bool state) { rx_ready = state; }
|
||||
bool IsRxReady () const { return rx_ready; }
|
||||
|
||||
// This method is called by the session to signalise that there
|
||||
// are messages to send available.
|
||||
virtual void restart_output ();
|
||||
void SetSync (bool state) { in_sync = state; }
|
||||
bool InSync () const { return in_sync; }
|
||||
|
||||
virtual void zap_msg_available () {};
|
||||
// These are used to feed data to decoder
|
||||
// and its underlying "msg" buffer
|
||||
char *AccessBuffer () { return (char *) (buffer_ptr + buffer_count); }
|
||||
size_t GetBytesNeeded () const { return (buffer_size - buffer_count); }
|
||||
void IncrementBufferCount (size_t count) { buffer_count += count; }
|
||||
msg_t *AccessMsg () { return zmq_decoder->msg (); }
|
||||
// This invokes the decoder "decode" method
|
||||
// returning 0 if more data is needed,
|
||||
// 1 if the message is complete, If an error
|
||||
// occurs the 'sync' is dropped and the
|
||||
// decoder re-initialized
|
||||
int Decode ();
|
||||
|
||||
virtual const char *get_endpoint () const;
|
||||
class List
|
||||
{
|
||||
public:
|
||||
List ();
|
||||
~List ();
|
||||
|
||||
// i_poll_events interface implementation.
|
||||
// (we only need in_event() for NormEvent notification)
|
||||
// (i.e., don't have any output events or timers (yet))
|
||||
void in_event ();
|
||||
void Append (NormRxStreamState &item);
|
||||
void Remove (NormRxStreamState &item);
|
||||
|
||||
private:
|
||||
void unplug();
|
||||
void send_data();
|
||||
void recv_data(NormObjectHandle stream);
|
||||
bool IsEmpty () const { return (NULL == head); }
|
||||
|
||||
void Destroy ();
|
||||
|
||||
enum {BUFFER_SIZE = 2048};
|
||||
|
||||
// Used to keep track of streams from multiple senders
|
||||
class NormRxStreamState
|
||||
class Iterator
|
||||
{
|
||||
public:
|
||||
NormRxStreamState(NormObjectHandle normStream,
|
||||
int64_t maxMsgSize);
|
||||
~NormRxStreamState();
|
||||
public:
|
||||
Iterator (const List &list);
|
||||
NormRxStreamState *GetNextItem ();
|
||||
|
||||
NormObjectHandle GetStreamHandle() const
|
||||
{return norm_stream;}
|
||||
private:
|
||||
NormRxStreamState *next_item;
|
||||
};
|
||||
friend class Iterator;
|
||||
|
||||
bool Init();
|
||||
private:
|
||||
NormRxStreamState *head;
|
||||
NormRxStreamState *tail;
|
||||
|
||||
void SetRxReady(bool state)
|
||||
{rx_ready = state;}
|
||||
bool IsRxReady() const
|
||||
{return rx_ready;}
|
||||
}; // end class zmq::norm_engine_t::NormRxStreamState::List
|
||||
|
||||
void SetSync(bool state)
|
||||
{in_sync = state;}
|
||||
bool InSync() const
|
||||
{return in_sync;}
|
||||
friend class List;
|
||||
|
||||
// These are used to feed data to decoder
|
||||
// and its underlying "msg" buffer
|
||||
char* AccessBuffer()
|
||||
{return (char*)(buffer_ptr + buffer_count);}
|
||||
size_t GetBytesNeeded() const
|
||||
{return (buffer_size - buffer_count);}
|
||||
void IncrementBufferCount(size_t count)
|
||||
{buffer_count += count;}
|
||||
msg_t* AccessMsg()
|
||||
{return zmq_decoder->msg();}
|
||||
// This invokes the decoder "decode" method
|
||||
// returning 0 if more data is needed,
|
||||
// 1 if the message is complete, If an error
|
||||
// occurs the 'sync' is dropped and the
|
||||
// decoder re-initialized
|
||||
int Decode();
|
||||
|
||||
class List
|
||||
{
|
||||
public:
|
||||
List();
|
||||
~List();
|
||||
|
||||
void Append(NormRxStreamState& item);
|
||||
void Remove(NormRxStreamState& item);
|
||||
|
||||
bool IsEmpty() const
|
||||
{return (NULL == head);}
|
||||
|
||||
void Destroy();
|
||||
|
||||
class Iterator
|
||||
{
|
||||
public:
|
||||
Iterator(const List& list);
|
||||
NormRxStreamState* GetNextItem();
|
||||
private:
|
||||
NormRxStreamState* next_item;
|
||||
};
|
||||
friend class Iterator;
|
||||
|
||||
private:
|
||||
NormRxStreamState* head;
|
||||
NormRxStreamState* tail;
|
||||
|
||||
}; // end class zmq::norm_engine_t::NormRxStreamState::List
|
||||
|
||||
friend class List;
|
||||
|
||||
List* AccessList()
|
||||
{return list;}
|
||||
List *AccessList () { return list; }
|
||||
|
||||
|
||||
private:
|
||||
NormObjectHandle norm_stream;
|
||||
int64_t max_msg_size;
|
||||
bool in_sync;
|
||||
bool rx_ready;
|
||||
v2_decoder_t* zmq_decoder;
|
||||
bool skip_norm_sync;
|
||||
unsigned char* buffer_ptr;
|
||||
size_t buffer_size;
|
||||
size_t buffer_count;
|
||||
private:
|
||||
NormObjectHandle norm_stream;
|
||||
int64_t max_msg_size;
|
||||
bool in_sync;
|
||||
bool rx_ready;
|
||||
v2_decoder_t *zmq_decoder;
|
||||
bool skip_norm_sync;
|
||||
unsigned char *buffer_ptr;
|
||||
size_t buffer_size;
|
||||
size_t buffer_count;
|
||||
|
||||
NormRxStreamState* prev;
|
||||
NormRxStreamState* next;
|
||||
NormRxStreamState::List* list;
|
||||
NormRxStreamState *prev;
|
||||
NormRxStreamState *next;
|
||||
NormRxStreamState::List *list;
|
||||
|
||||
}; // end class zmq::norm_engine_t::NormRxStreamState
|
||||
}; // end class zmq::norm_engine_t::NormRxStreamState
|
||||
|
||||
session_base_t* zmq_session;
|
||||
options_t options;
|
||||
NormInstanceHandle norm_instance;
|
||||
handle_t norm_descriptor_handle;
|
||||
NormSessionHandle norm_session;
|
||||
bool is_sender;
|
||||
bool is_receiver;
|
||||
// Sender state
|
||||
msg_t tx_msg;
|
||||
v2_encoder_t zmq_encoder; // for tx messages (we use v2 for now)
|
||||
NormObjectHandle norm_tx_stream;
|
||||
bool tx_first_msg;
|
||||
bool tx_more_bit;
|
||||
bool zmq_output_ready; // zmq has msg(s) to send
|
||||
bool norm_tx_ready; // norm has tx queue vacancy
|
||||
// TBD - maybe don't need buffer if can access zmq message buffer directly?
|
||||
char tx_buffer[BUFFER_SIZE];
|
||||
unsigned int tx_index;
|
||||
unsigned int tx_len;
|
||||
session_base_t *zmq_session;
|
||||
options_t options;
|
||||
NormInstanceHandle norm_instance;
|
||||
handle_t norm_descriptor_handle;
|
||||
NormSessionHandle norm_session;
|
||||
bool is_sender;
|
||||
bool is_receiver;
|
||||
// Sender state
|
||||
msg_t tx_msg;
|
||||
v2_encoder_t zmq_encoder; // for tx messages (we use v2 for now)
|
||||
NormObjectHandle norm_tx_stream;
|
||||
bool tx_first_msg;
|
||||
bool tx_more_bit;
|
||||
bool zmq_output_ready; // zmq has msg(s) to send
|
||||
bool norm_tx_ready; // norm has tx queue vacancy
|
||||
// TBD - maybe don't need buffer if can access zmq message buffer directly?
|
||||
char tx_buffer[BUFFER_SIZE];
|
||||
unsigned int tx_index;
|
||||
unsigned int tx_len;
|
||||
|
||||
// Receiver state
|
||||
// Lists of norm rx streams from remote senders
|
||||
bool zmq_input_ready; // zmq ready to receive msg(s)
|
||||
NormRxStreamState::List rx_pending_list; // rx streams waiting for data reception
|
||||
NormRxStreamState::List rx_ready_list; // rx streams ready for NormStreamRead()
|
||||
NormRxStreamState::List msg_ready_list; // rx streams w/ msg ready for push to zmq
|
||||
// Receiver state
|
||||
// Lists of norm rx streams from remote senders
|
||||
bool zmq_input_ready; // zmq ready to receive msg(s)
|
||||
NormRxStreamState::List
|
||||
rx_pending_list; // rx streams waiting for data reception
|
||||
NormRxStreamState::List
|
||||
rx_ready_list; // rx streams ready for NormStreamRead()
|
||||
NormRxStreamState::List
|
||||
msg_ready_list; // rx streams w/ msg ready for push to zmq
|
||||
|
||||
|
||||
}; // end class norm_engine_t
}; // end class norm_engine_t
}

#endif // ZMQ_HAVE_NORM
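
A minimal sketch of the one-byte framing convention that send_data() and NormRxStreamState::Decode() rely on: the byte written in front of each chunk is 0x00 when the following frame starts a new ZMQ message and 0xff otherwise, and the receiver only declares itself in sync after seeing a zero flag. The helper names are illustrative, not part of the diff:

// Illustrative helpers mirroring the framing described above.
inline char norm_sync_flag (bool previous_frame_had_more)
{
    // 0x00 marks the first frame of a ZMQ message, 0xff marks a continuation.
    return previous_frame_had_more ? (char) 0xff : (char) 0x00;
}

inline bool norm_frame_starts_message (char sync_flag)
{
    // The receiver (re)gains sync only on a frame whose flag byte is zero.
    return 0 == sync_flag;
}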
@ -64,25 +64,23 @@ int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (zap_required() && !zap_reply_received) {
|
||||
if (zap_required () && !zap_reply_received) {
|
||||
if (zap_request_sent) {
|
||||
errno = EAGAIN;
|
||||
return -1;
|
||||
}
|
||||
int rc = session->zap_connect();
|
||||
if (rc == -1)
|
||||
{
|
||||
session->get_socket()->event_handshake_failed_no_detail (
|
||||
session->get_endpoint(),
|
||||
EFAULT);
|
||||
int rc = session->zap_connect ();
|
||||
if (rc == -1) {
|
||||
session->get_socket ()->event_handshake_failed_no_detail (
|
||||
session->get_endpoint (), EFAULT);
|
||||
return -1;
|
||||
}
|
||||
send_zap_request ();
|
||||
zap_request_sent = true;
|
||||
|
||||
// TODO actually, it is quite unlikely that we can read the ZAP
|
||||
// TODO actually, it is quite unlikely that we can read the ZAP
|
||||
// reply already, but removing this has some strange side-effect
|
||||
// (probably because the pipe's in_active flag is true until a read
|
||||
// (probably because the pipe's in_active flag is true until a read
|
||||
// is attempted)
|
||||
rc = receive_and_process_zap_reply ();
|
||||
if (rc != 0)
|
||||
@ -120,26 +118,23 @@ int zmq::null_mechanism_t::process_handshake_command (msg_t *msg_)
|
||||
{
|
||||
if (ready_command_received || error_command_received) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
|
||||
const unsigned char *cmd_data =
|
||||
static_cast <unsigned char *> (msg_->data ());
|
||||
static_cast<unsigned char *> (msg_->data ());
|
||||
const size_t data_size = msg_->size ();
|
||||
|
||||
int rc = 0;
|
||||
if (data_size >= 6 && !memcmp (cmd_data, "\5READY", 6))
|
||||
rc = process_ready_command (cmd_data, data_size);
|
||||
else
|
||||
if (data_size >= 6 && !memcmp (cmd_data, "\5ERROR", 6))
|
||||
else if (data_size >= 6 && !memcmp (cmd_data, "\5ERROR", 6))
|
||||
rc = process_error_command (cmd_data, data_size);
|
||||
else {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
|
||||
errno = EPROTO;
|
||||
rc = -1;
|
||||
}
|
||||
@ -153,15 +148,15 @@ int zmq::null_mechanism_t::process_handshake_command (msg_t *msg_)
|
||||
return rc;
|
||||
}
|
||||
|
||||
int zmq::null_mechanism_t::process_ready_command (
|
||||
const unsigned char *cmd_data, size_t data_size)
|
||||
int zmq::null_mechanism_t::process_ready_command (const unsigned char *cmd_data,
|
||||
size_t data_size)
|
||||
{
|
||||
ready_command_received = true;
|
||||
return parse_metadata (cmd_data + 6, data_size - 6);
|
||||
}
|
||||
|
||||
int zmq::null_mechanism_t::process_error_command (
|
||||
const unsigned char *cmd_data, size_t data_size)
|
||||
int zmq::null_mechanism_t::process_error_command (const unsigned char *cmd_data,
|
||||
size_t data_size)
|
||||
{
|
||||
if (data_size < 7) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
@ -171,7 +166,7 @@ int zmq::null_mechanism_t::process_error_command (
|
||||
errno = EPROTO;
|
||||
return -1;
|
||||
}
|
||||
const size_t error_reason_len = static_cast <size_t> (cmd_data [6]);
|
||||
const size_t error_reason_len = static_cast<size_t> (cmd_data[6]);
|
||||
if (error_reason_len > data_size - 7) {
|
||||
session->get_socket ()->event_handshake_failed_protocol (
|
||||
session->get_endpoint (),
|
||||
@ -200,15 +195,13 @@ int zmq::null_mechanism_t::zap_msg_available ()
|
||||
|
||||
zmq::mechanism_t::status_t zmq::null_mechanism_t::status () const
|
||||
{
|
||||
const bool command_sent =
|
||||
ready_command_sent || error_command_sent;
|
||||
const bool command_sent = ready_command_sent || error_command_sent;
|
||||
const bool command_received =
|
||||
ready_command_received || error_command_received;
|
||||
ready_command_received || error_command_received;
|
||||
|
||||
if (ready_command_sent && ready_command_received)
|
||||
return mechanism_t::ready;
|
||||
else
|
||||
if (command_sent && command_received)
|
||||
else if (command_sent && command_received)
|
||||
return error;
|
||||
else
|
||||
return handshaking;
|
||||
|
@ -36,42 +36,36 @@
|
||||
|
||||
namespace zmq
{
class msg_t;
class session_base_t;

class msg_t;
class session_base_t;
class null_mechanism_t : public zap_client_t
{
public:
null_mechanism_t (session_base_t *session_,
const std::string &peer_address,
const options_t &options_);
virtual ~null_mechanism_t ();

class null_mechanism_t : public zap_client_t
{
public:
// mechanism implementation
virtual int next_handshake_command (msg_t *msg_);
virtual int process_handshake_command (msg_t *msg_);
virtual int zap_msg_available ();
virtual status_t status () const;

null_mechanism_t (session_base_t *session_,
const std::string &peer_address,
const options_t &options_);
virtual ~null_mechanism_t ();
private:
bool ready_command_sent;
bool error_command_sent;
bool ready_command_received;
bool error_command_received;
bool zap_request_sent;
bool zap_reply_received;

// mechanism implementation
virtual int next_handshake_command (msg_t *msg_);
virtual int process_handshake_command (msg_t *msg_);
virtual int zap_msg_available ();
virtual status_t status () const;

private:

bool ready_command_sent;
bool error_command_sent;
bool ready_command_received;
bool error_command_received;
bool zap_request_sent;
bool zap_reply_received;

int process_ready_command (
const unsigned char *cmd_data, size_t data_size);
int process_error_command (
const unsigned char *cmd_data, size_t data_size);

void send_zap_request ();
};
int process_ready_command (const unsigned char *cmd_data, size_t data_size);
int process_error_command (const unsigned char *cmd_data, size_t data_size);

void send_zap_request ();
};
}

#endif
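
The commands handled by process_handshake_command() above are length-prefixed ASCII names ("\5READY", "\5ERROR") followed by command-specific data. A hedged sketch of that dispatch, with an illustrative helper that is not part of the diff:

#include <cstddef>
#include <cstring>

// Illustrative classification of a NULL-mechanism command body.
enum null_cmd_t { CMD_READY, CMD_ERROR, CMD_UNKNOWN };

inline null_cmd_t classify_null_command (const unsigned char *cmd_data, size_t data_size)
{
    if (data_size >= 6 && 0 == memcmp (cmd_data, "\5READY", 6))
        return CMD_READY;     // metadata starts at cmd_data + 6
    if (data_size >= 6 && 0 == memcmp (cmd_data, "\5ERROR", 6))
        return CMD_ERROR;     // 1-byte error reason length sits at cmd_data[6]
    return CMD_UNKNOWN;       // upstream code reports a protocol error
}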