Mirror of https://github.com/zeromq/libzmq.git
Problem: inconsistent naming style for private data members, which conflicts with the naming of local variables and member functions
Solution: apply and check the _lower_case naming style for private data members
This commit is contained in:
parent 06cfd0d8ad
commit e3c73d9881

.clang-tidy  (16 changed lines)
@@ -270,9 +270,9 @@ CheckOptions:
   - key: readability-identifier-naming.LocalVariableSuffix
     value: ''
 # - key: readability-identifier-naming.MemberCase
-# value: aNy_CasE
+# value: lower_case
 # - key: readability-identifier-naming.MemberPrefix
-# value: ''
+# value: '_'
 # - key: readability-identifier-naming.MemberSuffix
 # value: ''
 # - key: readability-identifier-naming.MethodCase
@@ -299,12 +299,12 @@ CheckOptions:
 # value: ''
   - key: readability-identifier-naming.ParameterSuffix
     value: '_'
-# - key: readability-identifier-naming.PrivateMemberCase
-# value: aNy_CasE
-# - key: readability-identifier-naming.PrivateMemberPrefix
-# value: ''
-# - key: readability-identifier-naming.PrivateMemberSuffix
-# value: ''
+  - key: readability-identifier-naming.PrivateMemberCase
+    value: lower_case
+  - key: readability-identifier-naming.PrivateMemberPrefix
+    value: '_'
+  - key: readability-identifier-naming.PrivateMemberSuffix
+    value: ''
 # - key: readability-identifier-naming.PrivateMethodCase
 # value: aNy_CasE
 # - key: readability-identifier-naming.PrivateMethodPrefix
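Below is a minimal sketch (not part of the commit) of code that satisfies the PrivateMember* options enabled above, assuming clang-tidy runs with the readability-identifier-naming check:

// Hypothetical example, not from the libzmq sources.
class session_t
{
  public:
    void set_timeout (int timeout_) { _timeout = timeout_; } // ParameterSuffix '_'
    int timeout () const { return _timeout; }                // plain lower_case method

  private:
    int _timeout; // PrivateMemberPrefix '_' + PrivateMemberCase lower_case
};

With the three name classes kept distinct, a private member can no longer shadow or collide with a local variable or a member function of the same spelling, which is exactly the problem the commit message describes.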
src/array.hpp

@@ -51,18 +51,18 @@ namespace zmq
 template <int ID = 0> class array_item_t
 {
   public:
-    inline array_item_t () : array_index (-1) {}
+    inline array_item_t () : _array_index (-1) {}

     // The destructor doesn't have to be virtual. It is made virtual
     // just to keep ICC and code checking tools from complaining.
     inline virtual ~array_item_t () {}

-    inline void set_array_index (int index_) { array_index = index_; }
+    inline void set_array_index (int index_) { _array_index = index_; }

-    inline int get_array_index () { return array_index; }
+    inline int get_array_index () { return _array_index; }

   private:
-    int array_index;
+    int _array_index;

     array_item_t (const array_item_t &);
     const array_item_t &operator= (const array_item_t &);
@@ -81,17 +81,17 @@ template <typename T, int ID = 0> class array_t

     inline ~array_t () {}

-    inline size_type size () { return items.size (); }
+    inline size_type size () { return _items.size (); }

-    inline bool empty () { return items.empty (); }
+    inline bool empty () { return _items.empty (); }

-    inline T *&operator[] (size_type index_) { return items[index_]; }
+    inline T *&operator[] (size_type index_) { return _items[index_]; }

     inline void push_back (T *item_)
     {
         if (item_)
-            ((item_t *) item_)->set_array_index ((int) items.size ());
-        items.push_back (item_);
+            ((item_t *) item_)->set_array_index ((int) _items.size ());
+        _items.push_back (item_);
     }

     inline void erase (T *item_)
@@ -101,22 +101,22 @@ template <typename T, int ID = 0> class array_t

     inline void erase (size_type index_)
     {
-        if (items.back ())
-            ((item_t *) items.back ())->set_array_index ((int) index_);
-        items[index_] = items.back ();
-        items.pop_back ();
+        if (_items.back ())
+            ((item_t *) _items.back ())->set_array_index ((int) index_);
+        _items[index_] = _items.back ();
+        _items.pop_back ();
     }

     inline void swap (size_type index1_, size_type index2_)
     {
-        if (items[index1_])
-            ((item_t *) items[index1_])->set_array_index ((int) index2_);
-        if (items[index2_])
-            ((item_t *) items[index2_])->set_array_index ((int) index1_);
-        std::swap (items[index1_], items[index2_]);
+        if (_items[index1_])
+            ((item_t *) _items[index1_])->set_array_index ((int) index2_);
+        if (_items[index2_])
+            ((item_t *) _items[index2_])->set_array_index ((int) index1_);
+        std::swap (_items[index1_], _items[index2_]);
     }

-    inline void clear () { items.clear (); }
+    inline void clear () { _items.clear (); }

     inline size_type index (T *item_)
     {
@@ -125,7 +125,7 @@ template <typename T, int ID = 0> class array_t

   private:
     typedef std::vector<T *> items_t;
-    items_t items;
+    items_t _items;

     array_t (const array_t &);
     const array_t &operator= (const array_t &);
src/atomic_counter.hpp

@@ -90,33 +90,33 @@ class atomic_counter_t
   public:
     typedef uint32_t integer_t;

-    inline atomic_counter_t (integer_t value_ = 0) : value (value_) {}
+    inline atomic_counter_t (integer_t value_ = 0) : _value (value_) {}

     inline ~atomic_counter_t () {}

-    // Set counter value (not thread-safe).
-    inline void set (integer_t value_) { value = value_; }
+    // Set counter _value (not thread-safe).
+    inline void set (integer_t value_) { _value = value_; }

-    // Atomic addition. Returns the old value.
+    // Atomic addition. Returns the old _value.
     inline integer_t add (integer_t increment_)
     {
         integer_t old_value;

 #if defined ZMQ_ATOMIC_COUNTER_WINDOWS
-        old_value = InterlockedExchangeAdd ((LONG *) &value, increment_);
+        old_value = InterlockedExchangeAdd ((LONG *) &_value, increment_);
 #elif defined ZMQ_ATOMIC_COUNTER_INTRINSIC
-        old_value = __atomic_fetch_add (&value, increment_, __ATOMIC_ACQ_REL);
+        old_value = __atomic_fetch_add (&_value, increment_, __ATOMIC_ACQ_REL);
 #elif defined ZMQ_ATOMIC_COUNTER_CXX11
-        old_value = value.fetch_add (increment_, std::memory_order_acq_rel);
+        old_value = _value.fetch_add (increment_, std::memory_order_acq_rel);
 #elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H
-        integer_t new_value = atomic_add_32_nv (&value, increment_);
+        integer_t new_value = atomic_add_32_nv (&_value, increment_);
         old_value = new_value - increment_;
 #elif defined ZMQ_ATOMIC_COUNTER_TILE
-        old_value = arch_atomic_add (&value, increment_);
+        old_value = arch_atomic_add (&_value, increment_);
 #elif defined ZMQ_ATOMIC_COUNTER_X86
         __asm__ volatile("lock; xadd %0, %1 \n\t"
-                         : "=r"(old_value), "=m"(value)
-                         : "0"(increment_), "m"(value)
+                         : "=r"(old_value), "=m"(_value)
+                         : "0"(increment_), "m"(_value)
                          : "cc", "memory");
 #elif defined ZMQ_ATOMIC_COUNTER_ARM
         integer_t flag, tmp;
@@ -128,13 +128,13 @@ class atomic_counter_t
                          " bne 1b\n\t"
                          " dmb sy\n\t"
                          : "=&r"(old_value), "=&r"(flag), "=&r"(tmp),
-                           "+Qo"(value)
-                         : "Ir"(increment_), "r"(&value)
+                           "+Qo"(_value)
+                         : "Ir"(increment_), "r"(&_value)
                          : "cc");
 #elif defined ZMQ_ATOMIC_COUNTER_MUTEX
         sync.lock ();
-        old_value = value;
-        value += increment_;
+        old_value = _value;
+        _value += increment_;
         sync.unlock ();
 #else
 #error atomic_counter is not implemented for this platform
@@ -147,26 +147,27 @@ class atomic_counter_t
     {
 #if defined ZMQ_ATOMIC_COUNTER_WINDOWS
         LONG delta = -((LONG) decrement_);
-        integer_t old = InterlockedExchangeAdd ((LONG *) &value, delta);
+        integer_t old = InterlockedExchangeAdd ((LONG *) &_value, delta);
         return old - decrement_ != 0;
 #elif defined ZMQ_ATOMIC_COUNTER_INTRINSIC
         integer_t nv =
-          __atomic_sub_fetch (&value, decrement_, __ATOMIC_ACQ_REL);
+          __atomic_sub_fetch (&_value, decrement_, __ATOMIC_ACQ_REL);
         return nv != 0;
 #elif defined ZMQ_ATOMIC_COUNTER_CXX11
-        integer_t old = value.fetch_sub (decrement_, std::memory_order_acq_rel);
+        integer_t old =
+          _value.fetch_sub (decrement_, std::memory_order_acq_rel);
         return old - decrement_ != 0;
 #elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H
         int32_t delta = -((int32_t) decrement_);
-        integer_t nv = atomic_add_32_nv (&value, delta);
+        integer_t nv = atomic_add_32_nv (&_value, delta);
         return nv != 0;
 #elif defined ZMQ_ATOMIC_COUNTER_TILE
         int32_t delta = -((int32_t) decrement_);
-        integer_t nv = arch_atomic_add (&value, delta);
+        integer_t nv = arch_atomic_add (&_value, delta);
         return nv != 0;
 #elif defined ZMQ_ATOMIC_COUNTER_X86
         integer_t oldval = -decrement_;
-        volatile integer_t *val = &value;
+        volatile integer_t *val = &_value;
         __asm__ volatile("lock; xaddl %0,%1"
                          : "=r"(oldval), "=m"(*val)
                          : "0"(oldval), "m"(*val)
@@ -182,14 +183,14 @@ class atomic_counter_t
                          " bne 1b\n\t"
                          " dmb sy\n\t"
                          : "=&r"(old_value), "=&r"(flag), "=&r"(tmp),
-                           "+Qo"(value)
-                         : "Ir"(decrement_), "r"(&value)
+                           "+Qo"(_value)
+                         : "Ir"(decrement_), "r"(&_value)
                          : "cc");
         return old_value - decrement != 0;
 #elif defined ZMQ_ATOMIC_COUNTER_MUTEX
         sync.lock ();
-        value -= decrement_;
-        bool result = value ? true : false;
+        _value -= decrement_;
+        bool result = _value ? true : false;
         sync.unlock ();
         return result;
 #else
@@ -197,13 +198,13 @@ class atomic_counter_t
 #endif
     }

-    inline integer_t get () const { return value; }
+    inline integer_t get () const { return _value; }

   private:
 #if defined ZMQ_ATOMIC_COUNTER_CXX11
-    std::atomic<integer_t> value;
+    std::atomic<integer_t> _value;
 #else
-    volatile integer_t value;
+    volatile integer_t _value;
 #endif

 #if defined ZMQ_ATOMIC_COUNTER_MUTEX
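For orientation, a small usage sketch (not part of the diff) of the counter interface whose bodies appear above; it assumes libzmq's internal header src/atomic_counter.hpp is on the include path:

// Hypothetical usage, not from the commit.
#include "atomic_counter.hpp"

int main ()
{
    zmq::atomic_counter_t refs (1); // _value starts at 1
    refs.add (1);                   // atomic add; returns the old value (1)
    bool nonzero = refs.sub (1);    // true: the counter is still non-zero
    return (nonzero && refs.get () == 1) ? 0 : 1;
}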
src/atomic_ptr.hpp

@@ -67,27 +67,27 @@
 namespace zmq
 {
 #if !defined ZMQ_ATOMIC_PTR_CXX11
-inline void *atomic_xchg_ptr (void **ptr,
+inline void *atomic_xchg_ptr (void **ptr_,
                               void *const val_
 #if defined ZMQ_ATOMIC_PTR_MUTEX
                               ,
-                              mutex_t &sync
+                              mutex_t &_sync
 #endif
 )
 {
 #if defined ZMQ_ATOMIC_PTR_WINDOWS
-    return InterlockedExchangePointer ((PVOID *) ptr, val_);
+    return InterlockedExchangePointer ((PVOID *) ptr_, val_);
 #elif defined ZMQ_ATOMIC_PTR_INTRINSIC
-    return __atomic_exchange_n (ptr, val_, __ATOMIC_ACQ_REL);
+    return __atomic_exchange_n (ptr_, val_, __ATOMIC_ACQ_REL);
 #elif defined ZMQ_ATOMIC_PTR_ATOMIC_H
-    return atomic_swap_ptr (ptr, val_);
+    return atomic_swap_ptr (ptr_, val_);
 #elif defined ZMQ_ATOMIC_PTR_TILE
-    return arch_atomic_exchange (ptr, val_);
+    return arch_atomic_exchange (ptr_, val_);
 #elif defined ZMQ_ATOMIC_PTR_X86
     void *old;
     __asm__ volatile("lock; xchg %0, %2"
-                     : "=r"(old), "=m"(*ptr)
-                     : "m"(*ptr), "0"(val_));
+                     : "=r"(old), "=m"(*ptr_)
+                     : "m"(*ptr_), "0"(val_));
     return old;
 #elif defined ZMQ_ATOMIC_PTR_ARM
     void *old;
@@ -98,15 +98,15 @@ inline void *atomic_xchg_ptr (void **ptr,
                      " teq %0, #0\n\t"
                      " bne 1b\n\t"
                      " dmb sy\n\t"
-                     : "=&r"(flag), "=&r"(old), "+Qo"(*ptr)
-                     : "r"(ptr), "r"(val_)
+                     : "=&r"(flag), "=&r"(old), "+Qo"(*ptr_)
+                     : "r"(ptr_), "r"(val_)
                      : "cc");
     return old;
 #elif defined ZMQ_ATOMIC_PTR_MUTEX
-    sync.lock ();
-    void *old = *ptr;
-    *ptr = val_;
-    sync.unlock ();
+    _sync.lock ();
+    void *old = *ptr_;
+    *ptr_ = val_;
+    _sync.unlock ();
     return old;
 #else
 #error atomic_ptr is not implemented for this platform
@@ -118,7 +118,7 @@ inline void *atomic_cas (void *volatile *ptr_,
                          void *val_
 #if defined ZMQ_ATOMIC_PTR_MUTEX
                          ,
-                         mutex_t &sync
+                         mutex_t &_sync
 #endif
 )
 {
@@ -158,11 +158,11 @@ inline void *atomic_cas (void *volatile *ptr_,
                      : "cc");
     return old;
 #elif defined ZMQ_ATOMIC_PTR_MUTEX
-    sync.lock ();
+    _sync.lock ();
     void *old = *ptr_;
     if (*ptr_ == cmp_)
         *ptr_ = val_;
-    sync.unlock ();
+    _sync.unlock ();
     return old;
 #else
 #error atomic_ptr is not implemented for this platform
@@ -176,7 +176,7 @@ template <typename T> class atomic_ptr_t
 {
   public:
     // Initialise atomic pointer
-    inline atomic_ptr_t () { ptr = NULL; }
+    inline atomic_ptr_t () { _ptr = NULL; }

     // Destroy atomic pointer
     inline ~atomic_ptr_t () {}
@@ -184,19 +184,19 @@ template <typename T> class atomic_ptr_t
     // Set value of atomic pointer in a non-threadsafe way
     // Use this function only when you are sure that at most one
     // thread is accessing the pointer at the moment.
-    inline void set (T *ptr_) { this->ptr = ptr_; }
+    inline void set (T *ptr_) { _ptr = ptr_; }

     // Perform atomic 'exchange pointers' operation. Pointer is set
-    // to the 'val' value. Old value is returned.
+    // to the 'val_' value. Old value is returned.
     inline T *xchg (T *val_)
     {
 #if defined ZMQ_ATOMIC_PTR_CXX11
-        return ptr.exchange (val_, std::memory_order_acq_rel);
+        return _ptr.exchange (val_, std::memory_order_acq_rel);
 #else
-        return (T *) atomic_xchg_ptr ((void **) &ptr, val_
+        return (T *) atomic_xchg_ptr ((void **) &_ptr, val_
 #if defined ZMQ_ATOMIC_PTR_MUTEX
                                       ,
-                                      sync
+                                      _sync
 #endif
         );
 #endif
@@ -204,18 +204,18 @@ template <typename T> class atomic_ptr_t

     // Perform atomic 'compare and swap' operation on the pointer.
     // The pointer is compared to 'cmp' argument and if they are
-    // equal, its value is set to 'val'. Old value of the pointer
+    // equal, its value is set to 'val_'. Old value of the pointer
     // is returned.
     inline T *cas (T *cmp_, T *val_)
     {
 #if defined ZMQ_ATOMIC_PTR_CXX11
-        ptr.compare_exchange_strong (cmp_, val_, std::memory_order_acq_rel);
+        _ptr.compare_exchange_strong (cmp_, val_, std::memory_order_acq_rel);
         return cmp_;
 #else
-        return (T *) atomic_cas ((void **) &ptr, cmp_, val_
+        return (T *) atomic_cas ((void **) &_ptr, cmp_, val_
 #if defined ZMQ_ATOMIC_PTR_MUTEX
                                  ,
-                                 sync
+                                 _sync
 #endif
         );
 #endif
@@ -223,13 +223,13 @@ template <typename T> class atomic_ptr_t

   private:
 #if defined ZMQ_ATOMIC_PTR_CXX11
-    std::atomic<T *> ptr;
+    std::atomic<T *> _ptr;
 #else
-    volatile T *ptr;
+    volatile T *_ptr;
 #endif

 #if defined ZMQ_ATOMIC_PTR_MUTEX
-    mutex_t sync;
+    mutex_t _sync;
 #endif

 #if !defined ZMQ_ATOMIC_PTR_CXX11
@@ -240,19 +240,19 @@ template <typename T> class atomic_ptr_t

 struct atomic_value_t
 {
-    atomic_value_t (const int value_) : value (value_) {}
+    atomic_value_t (const int value_) : _value (value_) {}

-    atomic_value_t (const atomic_value_t &src_) : value (src_.load ()) {}
+    atomic_value_t (const atomic_value_t &src_) : _value (src_.load ()) {}

     void store (const int value_)
     {
 #if defined ZMQ_ATOMIC_PTR_CXX11
-        value.store (value_, std::memory_order_release);
+        _value.store (value_, std::memory_order_release);
 #else
-        atomic_xchg_ptr ((void **) &value, (void *) (ptrdiff_t) value_
+        atomic_xchg_ptr ((void **) &_value, (void *) (ptrdiff_t) value_
 #if defined ZMQ_ATOMIC_PTR_MUTEX
                          ,
-                         sync
+                         _sync
 #endif
         );
 #endif
@@ -261,15 +261,15 @@ struct atomic_value_t
     int load () const
     {
 #if defined ZMQ_ATOMIC_PTR_CXX11
-        return value.load (std::memory_order_acquire);
+        return _value.load (std::memory_order_acquire);
 #else
-        return (int) (ptrdiff_t) atomic_cas ((void **) &value, 0, 0
+        return (int) (ptrdiff_t) atomic_cas ((void **) &_value, 0, 0
 #if defined ZMQ_ATOMIC_PTR_MUTEX
                                              ,
 #if defined __SUNPRO_CC
-                                             const_cast<mutex_t &> (sync)
+                                             const_cast<mutex_t &> (_sync)
 #else
-                                             sync
+                                             _sync
 #endif
 #endif
         );
@@ -278,13 +278,13 @@ struct atomic_value_t

   private:
 #if defined ZMQ_ATOMIC_PTR_CXX11
-    std::atomic<int> value;
+    std::atomic<int> _value;
 #else
-    volatile ptrdiff_t value;
+    volatile ptrdiff_t _value;
 #endif

 #if defined ZMQ_ATOMIC_PTR_MUTEX
-    mutable mutex_t sync;
+    mutable mutex_t _sync;
 #endif

   private:
src/blob.hpp  (92 changed lines)

@@ -71,26 +71,26 @@ struct reference_tag_t
 struct blob_t
 {
     // Creates an empty blob_t.
-    blob_t () : data_ (0), size_ (0), owned_ (true) {}
+    blob_t () : _data (0), _size (0), _owned (true) {}

     // Creates a blob_t of a given size, with uninitialized content.
     explicit blob_t (const size_t size_) :
-        data_ (static_cast<unsigned char *> (malloc (size_))),
-        size_ (size_),
-        owned_ (true)
+        _data (static_cast<unsigned char *> (malloc (size_))),
+        _size (size_),
+        _owned (true)
     {
-        alloc_assert (data_);
+        alloc_assert (_data);
     }

     // Creates a blob_t of a given size, an initializes content by copying
     // from another buffer.
     blob_t (const unsigned char *const data_, const size_t size_) :
-        data_ (static_cast<unsigned char *> (malloc (size_))),
-        size_ (size_),
-        owned_ (true)
+        _data (static_cast<unsigned char *> (malloc (size_))),
+        _size (size_),
+        _owned (true)
     {
-        alloc_assert (this->data_);
-        memcpy (this->data_, data_, size_);
+        alloc_assert (_data);
+        memcpy (_data, data_, size_);
     }

     // Creates a blob_t for temporary use that only references a
@@ -98,65 +98,65 @@ struct blob_t
     // Use with caution and ensure that the blob_t will not outlive
     // the referenced data.
     blob_t (unsigned char *const data_, const size_t size_, reference_tag_t) :
-        data_ (data_),
-        size_ (size_),
-        owned_ (false)
+        _data (data_),
+        _size (size_),
+        _owned (false)
     {
     }

     // Returns the size of the blob_t.
-    size_t size () const { return size_; }
+    size_t size () const { return _size; }

     // Returns a pointer to the data of the blob_t.
-    const unsigned char *data () const { return data_; }
+    const unsigned char *data () const { return _data; }

     // Returns a pointer to the data of the blob_t.
-    unsigned char *data () { return data_; }
+    unsigned char *data () { return _data; }

     // Defines an order relationship on blob_t.
     bool operator< (blob_t const &other_) const
     {
         int cmpres =
-          memcmp (data_, other_.data_, std::min (size_, other_.size_));
-        return cmpres < 0 || (cmpres == 0 && size_ < other_.size_);
+          memcmp (_data, other_._data, std::min (_size, other_._size));
+        return cmpres < 0 || (cmpres == 0 && _size < other_._size);
     }

     // Sets a blob_t to a deep copy of another blob_t.
     void set_deep_copy (blob_t const &other_)
     {
         clear ();
-        data_ = static_cast<unsigned char *> (malloc (other_.size_));
-        alloc_assert (data_);
-        size_ = other_.size_;
-        owned_ = true;
-        memcpy (data_, other_.data_, size_);
+        _data = static_cast<unsigned char *> (malloc (other_._size));
+        alloc_assert (_data);
+        _size = other_._size;
+        _owned = true;
+        memcpy (_data, other_._data, _size);
     }

     // Sets a blob_t to a copy of a given buffer.
     void set (const unsigned char *const data_, const size_t size_)
     {
         clear ();
-        this->data_ = static_cast<unsigned char *> (malloc (size_));
-        alloc_assert (this->data_);
-        this->size_ = size_;
-        owned_ = true;
-        memcpy (this->data_, data_, size_);
+        _data = static_cast<unsigned char *> (malloc (size_));
+        alloc_assert (_data);
+        _size = size_;
+        _owned = true;
+        memcpy (_data, data_, size_);
     }

     // Empties a blob_t.
     void clear ()
     {
-        if (owned_) {
-            free (data_);
+        if (_owned) {
+            free (_data);
         }
-        data_ = 0;
-        size_ = 0;
+        _data = 0;
+        _size = 0;
     }

     ~blob_t ()
     {
-        if (owned_) {
-            free (data_);
+        if (_owned) {
+            free (_data);
         }
     }

@@ -165,25 +165,25 @@ struct blob_t
     blob_t &operator= (const blob_t &) = delete;

     blob_t (blob_t &&other_) :
-        data_ (other_.data_),
-        size_ (other_.size_),
-        owned_ (other_.owned_)
+        _data (other_._data),
+        _size (other_._size),
+        _owned (other_._owned)
     {
-        other_.owned_ = false;
+        other_._owned = false;
     }
     blob_t &operator= (blob_t &&other_)
     {
         if (this != &other_) {
             clear ();
-            data_ = other_.data_;
-            size_ = other_.size_;
-            owned_ = other_.owned_;
-            other_.owned_ = false;
+            _data = other_._data;
+            _size = other_._size;
+            _owned = other_._owned;
+            other_._owned = false;
         }
         return *this;
     }
 #else
-    blob_t (const blob_t &other) : owned_ (false) { set_deep_copy (other); }
+    blob_t (const blob_t &other) : _owned (false) { set_deep_copy (other); }
     blob_t &operator= (const blob_t &other)
     {
         if (this != &other) {
@@ -195,9 +195,9 @@ struct blob_t
 #endif

   private:
-    unsigned char *data_;
-    size_t size_;
-    bool owned_;
+    unsigned char *_data;
+    size_t _size;
+    bool _owned;
 };
 }
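A short usage sketch (not part of the diff) of the blob_t interface shown above, assuming src/blob.hpp is on the include path:

// Hypothetical usage, not from the commit.
#include "blob.hpp"

int main ()
{
    const unsigned char raw[4] = {'p', 'e', 'e', 'r'};
    zmq::blob_t id (raw, sizeof raw); // deep copy: allocates and owns _data
    zmq::blob_t copy;
    copy.set_deep_copy (id);          // clear (), then reallocate and memcpy
    // operator< defines a total order, so equal blobs compare neither way
    return (!(copy < id) && !(id < copy)) ? 0 : 1;
}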
src/client.cpp

@@ -49,8 +49,8 @@ void zmq::client_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)

     zmq_assert (pipe_);

-    fq.attach (pipe_);
-    lb.attach (pipe_);
+    _fq.attach (pipe_);
+    _lb.attach (pipe_);
 }

 int zmq::client_t::xsend (msg_t *msg_)
@@ -60,24 +60,24 @@ int zmq::client_t::xsend (msg_t *msg_)
         errno = EINVAL;
         return -1;
     }
-    return lb.sendpipe (msg_, NULL);
+    return _lb.sendpipe (msg_, NULL);
 }

 int zmq::client_t::xrecv (msg_t *msg_)
 {
-    int rc = fq.recvpipe (msg_, NULL);
+    int rc = _fq.recvpipe (msg_, NULL);

     // Drop any messages with more flag
     while (rc == 0 && msg_->flags () & msg_t::more) {
         // drop all frames of the current multi-frame message
-        rc = fq.recvpipe (msg_, NULL);
+        rc = _fq.recvpipe (msg_, NULL);

         while (rc == 0 && msg_->flags () & msg_t::more)
-            rc = fq.recvpipe (msg_, NULL);
+            rc = _fq.recvpipe (msg_, NULL);

         // get the new message
         if (rc == 0)
-            rc = fq.recvpipe (msg_, NULL);
+            rc = _fq.recvpipe (msg_, NULL);
     }

     return rc;
@@ -85,31 +85,31 @@ int zmq::client_t::xrecv (msg_t *msg_)

 bool zmq::client_t::xhas_in ()
 {
-    return fq.has_in ();
+    return _fq.has_in ();
 }

 bool zmq::client_t::xhas_out ()
 {
-    return lb.has_out ();
+    return _lb.has_out ();
 }

 const zmq::blob_t &zmq::client_t::get_credential () const
 {
-    return fq.get_credential ();
+    return _fq.get_credential ();
 }

 void zmq::client_t::xread_activated (pipe_t *pipe_)
 {
-    fq.activated (pipe_);
+    _fq.activated (pipe_);
 }

 void zmq::client_t::xwrite_activated (pipe_t *pipe_)
 {
-    lb.activated (pipe_);
+    _lb.activated (pipe_);
 }

 void zmq::client_t::xpipe_terminated (pipe_t *pipe_)
 {
-    fq.pipe_terminated (pipe_);
-    lb.pipe_terminated (pipe_);
+    _fq.pipe_terminated (pipe_);
+    _lb.pipe_terminated (pipe_);
 }
src/client.hpp

@@ -62,8 +62,8 @@ class client_t : public socket_base_t
   private:
     // Messages are fair-queued from inbound pipes. And load-balanced to
     // the outbound pipes.
-    fq_t fq;
-    lb_t lb;
+    fq_t _fq;
+    lb_t _lb;

     client_t (const client_t &);
     const client_t &operator= (const client_t &);
src/clock.cpp

@@ -127,11 +127,11 @@ const uint64_t usecs_per_sec = 1000000;
 const uint64_t nsecs_per_usec = 1000;

 zmq::clock_t::clock_t () :
-    last_tsc (rdtsc ()),
+    _last_tsc (rdtsc ()),
 #ifdef ZMQ_HAVE_WINDOWS
-    last_time (static_cast<uint64_t> ((*my_get_tick_count64) ()))
+    _last_time (static_cast<uint64_t> ((*my_get_tick_count64) ()))
 #else
-    last_time (now_us () / usecs_per_msec)
+    _last_time (now_us () / usecs_per_msec)
 #endif
 {
 }
@@ -218,16 +218,16 @@ uint64_t zmq::clock_t::now_ms ()
     // If TSC haven't jumped back (in case of migration to a different
     // CPU core) and if not too much time elapsed since last measurement,
     // we can return cached time value.
-    if (likely (tsc - last_tsc <= (clock_precision / 2) && tsc >= last_tsc))
-        return last_time;
+    if (likely (tsc - _last_tsc <= (clock_precision / 2) && tsc >= _last_tsc))
+        return _last_time;

-    last_tsc = tsc;
+    _last_tsc = tsc;
 #ifdef ZMQ_HAVE_WINDOWS
-    last_time = static_cast<uint64_t> ((*my_get_tick_count64) ());
+    _last_time = static_cast<uint64_t> ((*my_get_tick_count64) ());
 #else
-    last_time = now_us () / usecs_per_msec;
+    _last_time = now_us () / usecs_per_msec;
 #endif
-    return last_time;
+    return _last_time;
 }

 uint64_t zmq::clock_t::rdtsc ()
src/clock.hpp

@@ -67,10 +67,10 @@ class clock_t

   private:
     // TSC timestamp of when last time measurement was made.
-    uint64_t last_tsc;
+    uint64_t _last_tsc;

     // Physical time corresponding to the TSC above (in milliseconds).
-    uint64_t last_time;
+    uint64_t _last_time;

     clock_t (const clock_t &);
     const clock_t &operator= (const clock_t &);
src/condition_variable.hpp

@@ -93,13 +93,13 @@ namespace zmq
 class condition_variable_t
 {
   public:
-    inline condition_variable_t () { InitializeConditionVariable (&cv); }
+    inline condition_variable_t () { InitializeConditionVariable (&_cv); }

     inline ~condition_variable_t () {}

     inline int wait (mutex_t *mutex_, int timeout_)
     {
-        int rc = SleepConditionVariableCS (&cv, mutex_->get_cs (), timeout_);
+        int rc = SleepConditionVariableCS (&_cv, mutex_->get_cs (), timeout_);

         if (rc != 0)
             return 0;
@@ -113,10 +113,10 @@ class condition_variable_t
         return -1;
     }

-    inline void broadcast () { WakeAllConditionVariable (&cv); }
+    inline void broadcast () { WakeAllConditionVariable (&_cv); }

   private:
-    CONDITION_VARIABLE cv;
+    CONDITION_VARIABLE _cv;

     // Disable copy construction and assignment.
     condition_variable_t (const condition_variable_t &);
@@ -132,13 +132,13 @@ class condition_variable_t

     inline int wait (mutex_t *mutex_, int timeout_)
     {
-        std::unique_lock<std::mutex> lck (mtx); // lock mtx
-        mutex_->unlock ();                      // unlock mutex_
+        std::unique_lock<std::mutex> lck (_mtx); // lock mtx
+        mutex_->unlock ();                       // unlock mutex_
         int res = 0;
         if (timeout_ == -1) {
-            cv.wait (
+            _cv.wait (
               lck); // unlock mtx and wait cv.notify_all(), lock mtx after cv.notify_all()
-        } else if (cv.wait_for (lck, std::chrono::milliseconds (timeout_))
+        } else if (_cv.wait_for (lck, std::chrono::milliseconds (timeout_))
                    == std::cv_status::timeout) {
             // time expired
             errno = EAGAIN;
@@ -151,13 +151,13 @@ class condition_variable_t

     inline void broadcast ()
     {
-        std::unique_lock<std::mutex> lck (mtx); // lock mtx
-        cv.notify_all ();
+        std::unique_lock<std::mutex> lck (_mtx); // lock mtx
+        _cv.notify_all ();
     }

   private:
-    std::condition_variable cv;
-    std::mutex mtx;
+    std::condition_variable _cv;
+    std::mutex _mtx;

     // Disable copy construction and assignment.
     condition_variable_t (const condition_variable_t &);
@@ -182,9 +182,9 @@ class condition_variable_t

     inline ~condition_variable_t ()
     {
-        scoped_lock_t l (m_listenersMutex);
-        for (size_t i = 0; i < m_listeners.size (); i++) {
-            semDelete (m_listeners[i]);
+        scoped_lock_t l (_listenersMutex);
+        for (size_t i = 0; i < _listeners.size (); i++) {
+            semDelete (_listeners[i]);
         }
     }

@@ -198,8 +198,8 @@ class condition_variable_t

         SEM_ID sem = semBCreate (SEM_Q_PRIORITY, SEM_EMPTY);
         {
-            scoped_lock_t l (m_listenersMutex);
-            m_listeners.push_back (sem);
+            scoped_lock_t l (_listenersMutex);
+            _listeners.push_back (sem);
         }
         mutex_->unlock ();

@@ -213,11 +213,11 @@ class condition_variable_t
         }

         {
-            scoped_lock_t l (m_listenersMutex);
+            scoped_lock_t l (_listenersMutex);
             // remove sem from listeners
-            for (size_t i = 0; i < m_listeners.size (); i++) {
-                if (m_listeners[i] == sem) {
-                    m_listeners.erase (m_listeners.begin () + i);
+            for (size_t i = 0; i < _listeners.size (); i++) {
+                if (_listeners[i] == sem) {
+                    _listeners.erase (_listeners.begin () + i);
                     break;
                 }
             }
@@ -238,15 +238,15 @@ class condition_variable_t

     inline void broadcast ()
     {
-        scoped_lock_t l (m_listenersMutex);
-        for (size_t i = 0; i < m_listeners.size (); i++) {
-            semGive (m_listeners[i]);
+        scoped_lock_t l (_listenersMutex);
+        for (size_t i = 0; i < _listeners.size (); i++) {
+            semGive (_listeners[i]);
         }
     }

   private:
-    mutex_t m_listenersMutex;
-    std::vector<SEM_ID> m_listeners;
+    mutex_t _listenersMutex;
+    std::vector<SEM_ID> _listeners;

     // Disable copy construction and assignment.
     condition_variable_t (const condition_variable_t &);
@@ -276,13 +276,13 @@ class condition_variable_t
 #if !defined(ZMQ_HAVE_OSX) && !defined(ANDROID_LEGACY)
         pthread_condattr_setclock (&attr, CLOCK_MONOTONIC);
 #endif
-        int rc = pthread_cond_init (&cond, &attr);
+        int rc = pthread_cond_init (&_cond, &attr);
         posix_assert (rc);
     }

     inline ~condition_variable_t ()
     {
-        int rc = pthread_cond_destroy (&cond);
+        int rc = pthread_cond_destroy (&_cond);
         posix_assert (rc);
     }

@@ -309,15 +309,16 @@ class condition_variable_t
             }
 #ifdef ZMQ_HAVE_OSX
             rc = pthread_cond_timedwait_relative_np (
-              &cond, mutex_->get_mutex (), &timeout);
+              &_cond, mutex_->get_mutex (), &timeout);
 #elif defined(ANDROID_LEGACY)
             rc = pthread_cond_timedwait_monotonic_np (
-              &cond, mutex_->get_mutex (), &timeout);
+              &_cond, mutex_->get_mutex (), &timeout);
 #else
-            rc = pthread_cond_timedwait (&cond, mutex_->get_mutex (), &timeout);
+            rc =
+              pthread_cond_timedwait (&_cond, mutex_->get_mutex (), &timeout);
 #endif
         } else
-            rc = pthread_cond_wait (&cond, mutex_->get_mutex ());
+            rc = pthread_cond_wait (&_cond, mutex_->get_mutex ());

         if (rc == 0)
             return 0;
@@ -333,12 +334,12 @@ class condition_variable_t

     inline void broadcast ()
     {
-        int rc = pthread_cond_broadcast (&cond);
+        int rc = pthread_cond_broadcast (&_cond);
         posix_assert (rc);
     }

   private:
-    pthread_cond_t cond;
+    pthread_cond_t _cond;

     // Disable copy construction and assignment.
     condition_variable_t (const condition_variable_t &);
src/ctx.cpp  (340 changed lines)

@@ -66,25 +66,25 @@ int clipped_maxsocket (int max_requested_)
 }

 zmq::ctx_t::ctx_t () :
-    tag (ZMQ_CTX_TAG_VALUE_GOOD),
-    starting (true),
-    terminating (false),
-    reaper (NULL),
-    slot_count (0),
-    slots (NULL),
-    max_sockets (clipped_maxsocket (ZMQ_MAX_SOCKETS_DFLT)),
-    max_msgsz (INT_MAX),
-    io_thread_count (ZMQ_IO_THREADS_DFLT),
-    blocky (true),
-    ipv6 (false),
-    zero_copy (true)
+    _tag (ZMQ_CTX_TAG_VALUE_GOOD),
+    _starting (true),
+    _terminating (false),
+    _reaper (NULL),
+    _slot_count (0),
+    _slots (NULL),
+    _max_sockets (clipped_maxsocket (ZMQ_MAX_SOCKETS_DFLT)),
+    _max_msgsz (INT_MAX),
+    _io_thread_count (ZMQ_IO_THREADS_DFLT),
+    _blocky (true),
+    _ipv6 (false),
+    _zero_copy (true)
 {
 #ifdef HAVE_FORK
-    pid = getpid ();
+    _pid = getpid ();
 #endif
 #ifdef ZMQ_HAVE_VMCI
-    vmci_fd = -1;
-    vmci_family = -1;
+    _vmci_fd = -1;
+    _vmci_family = -1;
 #endif

     // Initialise crypto library, if needed.
@@ -93,54 +93,54 @@ zmq::ctx_t::ctx_t () :

 bool zmq::ctx_t::check_tag ()
 {
-    return tag == ZMQ_CTX_TAG_VALUE_GOOD;
+    return _tag == ZMQ_CTX_TAG_VALUE_GOOD;
 }

 zmq::ctx_t::~ctx_t ()
 {
-    // Check that there are no remaining sockets.
-    zmq_assert (sockets.empty ());
+    // Check that there are no remaining _sockets.
+    zmq_assert (_sockets.empty ());

     // Ask I/O threads to terminate. If stop signal wasn't sent to I/O
     // thread subsequent invocation of destructor would hang-up.
-    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
-        io_threads[i]->stop ();
+    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
+        _io_threads[i]->stop ();
     }

     // Wait till I/O threads actually terminate.
-    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
-        LIBZMQ_DELETE (io_threads[i]);
+    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
+        LIBZMQ_DELETE (_io_threads[i]);
     }

     // Deallocate the reaper thread object.
-    LIBZMQ_DELETE (reaper);
+    LIBZMQ_DELETE (_reaper);

     // Deallocate the array of mailboxes. No special work is
     // needed as mailboxes themselves were deallocated with their
     // corresponding io_thread/socket objects.
-    free (slots);
+    free (_slots);

     // De-initialise crypto library, if needed.
     zmq::random_close ();

     // Remove the tag, so that the object is considered dead.
-    tag = ZMQ_CTX_TAG_VALUE_BAD;
+    _tag = ZMQ_CTX_TAG_VALUE_BAD;
 }

 bool zmq::ctx_t::valid () const
 {
-    return term_mailbox.valid ();
+    return _term_mailbox.valid ();
 }

 int zmq::ctx_t::terminate ()
 {
-    slot_sync.lock ();
+    _slot_sync.lock ();

-    bool save_terminating = terminating;
-    terminating = false;
+    bool save_terminating = _terminating;
+    _terminating = false;

     // Connect up any pending inproc connections, otherwise we will hang
-    pending_connections_t copy = pending_connections;
+    pending_connections_t copy = _pending_connections;
     for (pending_connections_t::iterator p = copy.begin (); p != copy.end ();
          ++p) {
         zmq::socket_base_t *s = create_socket (ZMQ_PAIR);
@@ -149,57 +149,57 @@ int zmq::ctx_t::terminate ()
         s->bind (p->first.c_str ());
         s->close ();
     }
-    terminating = save_terminating;
+    _terminating = save_terminating;

-    if (!starting) {
+    if (!_starting) {
 #ifdef HAVE_FORK
-        if (pid != getpid ()) {
+        if (_pid != getpid ()) {
             // we are a forked child process. Close all file descriptors
             // inherited from the parent.
-            for (sockets_t::size_type i = 0; i != sockets.size (); i++)
-                sockets[i]->get_mailbox ()->forked ();
+            for (sockets_t::size_type i = 0; i != _sockets.size (); i++)
+                _sockets[i]->get_mailbox ()->forked ();

-            term_mailbox.forked ();
+            _term_mailbox.forked ();
         }
 #endif

         // Check whether termination was already underway, but interrupted and now
         // restarted.
-        bool restarted = terminating;
-        terminating = true;
+        bool restarted = _terminating;
+        _terminating = true;

         // First attempt to terminate the context.
         if (!restarted) {
             // First send stop command to sockets so that any blocking calls
             // can be interrupted. If there are no sockets we can ask reaper
             // thread to stop.
-            for (sockets_t::size_type i = 0; i != sockets.size (); i++)
-                sockets[i]->stop ();
-            if (sockets.empty ())
-                reaper->stop ();
+            for (sockets_t::size_type i = 0; i != _sockets.size (); i++)
+                _sockets[i]->stop ();
+            if (_sockets.empty ())
+                _reaper->stop ();
         }
-        slot_sync.unlock ();
+        _slot_sync.unlock ();

         // Wait till reaper thread closes all the sockets.
         command_t cmd;
-        int rc = term_mailbox.recv (&cmd, -1);
+        int rc = _term_mailbox.recv (&cmd, -1);
         if (rc == -1 && errno == EINTR)
             return -1;
         errno_assert (rc == 0);
         zmq_assert (cmd.type == command_t::done);
-        slot_sync.lock ();
-        zmq_assert (sockets.empty ());
+        _slot_sync.lock ();
+        zmq_assert (_sockets.empty ());
     }
-    slot_sync.unlock ();
+    _slot_sync.unlock ();

 #ifdef ZMQ_HAVE_VMCI
-    vmci_sync.lock ();
+    _vmci_sync.lock ();

-    VMCISock_ReleaseAFValueFd (vmci_fd);
-    vmci_family = -1;
-    vmci_fd = -1;
+    VMCISock_ReleaseAFValueFd (_vmci_fd);
+    _vmci_family = -1;
+    _vmci_fd = -1;

-    vmci_sync.unlock ();
+    _vmci_sync.unlock ();
 #endif

     // Deallocate the resources.
@@ -210,18 +210,18 @@ int zmq::ctx_t::terminate ()

 int zmq::ctx_t::shutdown ()
 {
-    scoped_lock_t locker (slot_sync);
+    scoped_lock_t locker (_slot_sync);

-    if (!starting && !terminating) {
-        terminating = true;
+    if (!_starting && !_terminating) {
+        _terminating = true;

         // Send stop command to sockets so that any blocking calls
         // can be interrupted. If there are no sockets we can ask reaper
         // thread to stop.
-        for (sockets_t::size_type i = 0; i != sockets.size (); i++)
-            sockets[i]->stop ();
-        if (sockets.empty ())
-            reaper->stop ();
+        for (sockets_t::size_type i = 0; i != _sockets.size (); i++)
+            _sockets[i]->stop ();
+        if (_sockets.empty ())
+            _reaper->stop ();
     }

     return 0;
@@ -232,23 +232,23 @@ int zmq::ctx_t::set (int option_, int optval_)
     int rc = 0;
     if (option_ == ZMQ_MAX_SOCKETS && optval_ >= 1
         && optval_ == clipped_maxsocket (optval_)) {
-        scoped_lock_t locker (opt_sync);
-        max_sockets = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _max_sockets = optval_;
     } else if (option_ == ZMQ_IO_THREADS && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        io_thread_count = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _io_thread_count = optval_;
     } else if (option_ == ZMQ_IPV6 && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        ipv6 = (optval_ != 0);
+        scoped_lock_t locker (_opt_sync);
+        _ipv6 = (optval_ != 0);
     } else if (option_ == ZMQ_BLOCKY && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        blocky = (optval_ != 0);
+        scoped_lock_t locker (_opt_sync);
+        _blocky = (optval_ != 0);
     } else if (option_ == ZMQ_MAX_MSGSZ && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        max_msgsz = optval_ < INT_MAX ? optval_ : INT_MAX;
+        scoped_lock_t locker (_opt_sync);
+        _max_msgsz = optval_ < INT_MAX ? optval_ : INT_MAX;
     } else if (option_ == ZMQ_ZERO_COPY_RECV && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        zero_copy = (optval_ != 0);
+        scoped_lock_t locker (_opt_sync);
+        _zero_copy = (optval_ != 0);
     } else {
         rc = thread_ctx_t::set (option_, optval_);
     }
@@ -259,21 +259,21 @@ int zmq::ctx_t::get (int option_)
 {
     int rc = 0;
     if (option_ == ZMQ_MAX_SOCKETS)
-        rc = max_sockets;
+        rc = _max_sockets;
     else if (option_ == ZMQ_SOCKET_LIMIT)
         rc = clipped_maxsocket (65535);
     else if (option_ == ZMQ_IO_THREADS)
-        rc = io_thread_count;
+        rc = _io_thread_count;
     else if (option_ == ZMQ_IPV6)
-        rc = ipv6;
+        rc = _ipv6;
     else if (option_ == ZMQ_BLOCKY)
-        rc = blocky;
+        rc = _blocky;
     else if (option_ == ZMQ_MAX_MSGSZ)
-        rc = max_msgsz;
+        rc = _max_msgsz;
     else if (option_ == ZMQ_MSG_T_SIZE)
         rc = sizeof (zmq_msg_t);
     else if (option_ == ZMQ_ZERO_COPY_RECV) {
-        rc = zero_copy;
+        rc = _zero_copy;
     } else {
         errno = EINVAL;
         rc = -1;
@@ -285,36 +285,36 @@ bool zmq::ctx_t::start ()
 {
     // Initialise the array of mailboxes. Additional three slots are for
     // zmq_ctx_term thread and reaper thread.
-    opt_sync.lock ();
-    int mazmq = max_sockets;
-    int ios = io_thread_count;
-    opt_sync.unlock ();
-    slot_count = mazmq + ios + 2;
-    slots =
-      static_cast<i_mailbox **> (malloc (sizeof (i_mailbox *) * slot_count));
-    if (!slots) {
+    _opt_sync.lock ();
+    int mazmq = _max_sockets;
+    int ios = _io_thread_count;
+    _opt_sync.unlock ();
+    _slot_count = mazmq + ios + 2;
+    _slots =
+      static_cast<i_mailbox **> (malloc (sizeof (i_mailbox *) * _slot_count));
+    if (!_slots) {
         errno = ENOMEM;
         goto fail;
     }

     // Initialise the infrastructure for zmq_ctx_term thread.
-    slots[term_tid] = &term_mailbox;
+    _slots[term_tid] = &_term_mailbox;

     // Create the reaper thread.
-    reaper = new (std::nothrow) reaper_t (this, reaper_tid);
-    if (!reaper) {
+    _reaper = new (std::nothrow) reaper_t (this, reaper_tid);
+    if (!_reaper) {
         errno = ENOMEM;
         goto fail_cleanup_slots;
     }
-    if (!reaper->get_mailbox ()->valid ())
+    if (!_reaper->get_mailbox ()->valid ())
         goto fail_cleanup_reaper;
-    slots[reaper_tid] = reaper->get_mailbox ();
-    reaper->start ();
+    _slots[reaper_tid] = _reaper->get_mailbox ();
+    _reaper->start ();

     // Create I/O thread objects and launch them.
-    for (int32_t i = static_cast<int32_t> (slot_count) - 1;
+    for (int32_t i = static_cast<int32_t> (_slot_count) - 1;
          i >= static_cast<int32_t> (2); i--) {
-        slots[i] = NULL;
+        _slots[i] = NULL;
     }

     for (int i = 2; i != ios + 2; i++) {
@@ -327,28 +327,28 @@ bool zmq::ctx_t::start ()
             delete io_thread;
             goto fail_cleanup_reaper;
         }
-        io_threads.push_back (io_thread);
-        slots[i] = io_thread->get_mailbox ();
+        _io_threads.push_back (io_thread);
+        _slots[i] = io_thread->get_mailbox ();
         io_thread->start ();
     }

     // In the unused part of the slot array, create a list of empty slots.
-    for (int32_t i = static_cast<int32_t> (slot_count) - 1;
+    for (int32_t i = static_cast<int32_t> (_slot_count) - 1;
          i >= static_cast<int32_t> (ios) + 2; i--) {
-        empty_slots.push_back (i);
+        _empty_slots.push_back (i);
     }

-    starting = false;
+    _starting = false;
     return true;

 fail_cleanup_reaper:
-    reaper->stop ();
-    delete reaper;
-    reaper = NULL;
+    _reaper->stop ();
+    delete _reaper;
+    _reaper = NULL;

 fail_cleanup_slots:
-    free (slots);
-    slots = NULL;
+    free (_slots);
+    _slots = NULL;

 fail:
     return false;
@@ -356,28 +356,28 @@ fail:

 zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
 {
-    scoped_lock_t locker (slot_sync);
+    scoped_lock_t locker (_slot_sync);

-    if (unlikely (starting)) {
+    if (unlikely (_starting)) {
         if (!start ())
             return NULL;
     }

     // Once zmq_ctx_term() was called, we can't create new sockets.
-    if (terminating) {
+    if (_terminating) {
         errno = ETERM;
         return NULL;
     }

     // If max_sockets limit was reached, return error.
-    if (empty_slots.empty ()) {
+    if (_empty_slots.empty ()) {
         errno = EMFILE;
         return NULL;
     }

     // Choose a slot for the socket.
-    uint32_t slot = empty_slots.back ();
-    empty_slots.pop_back ();
+    uint32_t slot = _empty_slots.back ();
+    _empty_slots.pop_back ();

     // Generate new unique socket ID.
     int sid = (static_cast<int> (max_socket_id.add (1))) + 1;
@@ -385,41 +385,41 @@ zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
     // Create the socket and register its mailbox.
     socket_base_t *s = socket_base_t::create (type_, this, slot, sid);
     if (!s) {
-        empty_slots.push_back (slot);
+        _empty_slots.push_back (slot);
         return NULL;
     }
-    sockets.push_back (s);
-    slots[slot] = s->get_mailbox ();
+    _sockets.push_back (s);
+    _slots[slot] = s->get_mailbox ();

     return s;
 }

 void zmq::ctx_t::destroy_socket (class socket_base_t *socket_)
 {
-    scoped_lock_t locker (slot_sync);
+    scoped_lock_t locker (_slot_sync);

     // Free the associated thread slot.
     uint32_t tid = socket_->get_tid ();
-    empty_slots.push_back (tid);
-    slots[tid] = NULL;
+    _empty_slots.push_back (tid);
+    _slots[tid] = NULL;

     // Remove the socket from the list of sockets.
-    sockets.erase (socket_);
+    _sockets.erase (socket_);

     // If zmq_ctx_term() was already called and there are no more socket
     // we can ask reaper thread to terminate.
-    if (terminating && sockets.empty ())
-        reaper->stop ();
+    if (_terminating && _sockets.empty ())
+        _reaper->stop ();
 }

 zmq::object_t *zmq::ctx_t::get_reaper ()
 {
-    return reaper;
+    return _reaper;
 }

 zmq::thread_ctx_t::thread_ctx_t () :
-    thread_priority (ZMQ_THREAD_PRIORITY_DFLT),
-    thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT)
+    _thread_priority (ZMQ_THREAD_PRIORITY_DFLT),
+    _thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT)
 {
 }

@@ -429,13 +429,13 @@ void zmq::thread_ctx_t::start_thread (thread_t &thread_,
 {
     static unsigned int nthreads_started = 0;

-    thread_.setSchedulingParameters (thread_priority, thread_sched_policy,
-                                     thread_affinity_cpus);
+    thread_.setSchedulingParameters (_thread_priority, _thread_sched_policy,
+                                     _thread_affinity_cpus);
     thread_.start (tfn_, arg_);
 #ifndef ZMQ_HAVE_ANDROID
     std::ostringstream s;
-    if (!thread_name_prefix.empty ())
-        s << thread_name_prefix << "/";
+    if (!_thread_name_prefix.empty ())
+        s << _thread_name_prefix << "/";
     s << "ZMQbg/" << nthreads_started;
     thread_.setThreadName (s.str ().c_str ());
 #endif
@@ -446,16 +446,16 @@ int zmq::thread_ctx_t::set (int option_, int optval_)
 {
     int rc = 0;
     if (option_ == ZMQ_THREAD_SCHED_POLICY && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        thread_sched_policy = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _thread_sched_policy = optval_;
     } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_ADD && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        thread_affinity_cpus.insert (optval_);
+        scoped_lock_t locker (_opt_sync);
+        _thread_affinity_cpus.insert (optval_);
     } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_REMOVE && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        std::set<int>::iterator it = thread_affinity_cpus.find (optval_);
-        if (it != thread_affinity_cpus.end ()) {
-            thread_affinity_cpus.erase (it);
+        scoped_lock_t locker (_opt_sync);
+        std::set<int>::iterator it = _thread_affinity_cpus.find (optval_);
+        if (it != _thread_affinity_cpus.end ()) {
+            _thread_affinity_cpus.erase (it);
         } else {
             errno = EINVAL;
             rc = -1;
@@ -463,11 +463,11 @@ int zmq::thread_ctx_t::set (int option_, int optval_)
     } else if (option_ == ZMQ_THREAD_NAME_PREFIX && optval_ >= 0) {
         std::ostringstream s;
         s << optval_;
-        scoped_lock_t locker (opt_sync);
-        thread_name_prefix = s.str ();
+        scoped_lock_t locker (_opt_sync);
+        _thread_name_prefix = s.str ();
     } else if (option_ == ZMQ_THREAD_PRIORITY && optval_ >= 0) {
-        scoped_lock_t locker (opt_sync);
-        thread_priority = optval_;
+        scoped_lock_t locker (_opt_sync);
+        _thread_priority = optval_;
     } else {
         errno = EINVAL;
         rc = -1;
@@ -477,23 +477,23 @@ int zmq::thread_ctx_t::set (int option_, int optval_)

 void zmq::ctx_t::send_command (uint32_t tid_, const command_t &command_)
 {
-    slots[tid_]->send (command_);
+    _slots[tid_]->send (command_);
 }

 zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
 {
-    if (io_threads.empty ())
+    if (_io_threads.empty ())
         return NULL;

     // Find the I/O thread with minimum load.
     int min_load = -1;
     io_thread_t *selected_io_thread = NULL;
-    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
+    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
         if (!affinity_ || (affinity_ & (uint64_t (1) << i))) {
-            int load = io_threads[i]->get_load ();
+            int load = _io_threads[i]->get_load ();
             if (selected_io_thread == NULL || load < min_load) {
                 min_load = load;
-                selected_io_thread = io_threads[i];
+                selected_io_thread = _io_threads[i];
             }
         }
     }
@@ -503,10 +503,10 @@ zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
 int zmq::ctx_t::register_endpoint (const char *addr_,
                                    const endpoint_t &endpoint_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);

     const bool inserted =
-      endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), endpoint_)
+      _endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), endpoint_)
         .second;
     if (!inserted) {
         errno = EADDRINUSE;
@@ -518,30 +518,30 @@ int zmq::ctx_t::register_endpoint (const char *addr_,
 int zmq::ctx_t::unregister_endpoint (const std::string &addr_,
                                      socket_base_t *socket_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);

-    const endpoints_t::iterator it = endpoints.find (addr_);
-    if (it == endpoints.end () || it->second.socket != socket_) {
+    const endpoints_t::iterator it = _endpoints.find (addr_);
+    if (it == _endpoints.end () || it->second.socket != socket_) {
         errno = ENOENT;
         return -1;
     }

     // Remove endpoint.
-    endpoints.erase (it);
+    _endpoints.erase (it);

     return 0;
 }

 void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);

-    endpoints_t::iterator it = endpoints.begin ();
-    while (it != endpoints.end ()) {
+    endpoints_t::iterator it = _endpoints.begin ();
+    while (it != _endpoints.end ()) {
         if (it->second.socket == socket_) {
             endpoints_t::iterator to_erase = it;
             ++it;
-            endpoints.erase (to_erase);
+            _endpoints.erase (to_erase);
             continue;
         }
         ++it;
@@ -550,10 +550,10 @@ void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_)

 zmq::endpoint_t zmq::ctx_t::find_endpoint (const char *addr_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);

-    endpoints_t::iterator it = endpoints.find (addr_);
-    if (it == endpoints.end ()) {
+    endpoints_t::iterator it = _endpoints.find (addr_);
+    if (it == _endpoints.end ()) {
         errno = ECONNREFUSED;
         endpoint_t empty = {NULL, options_t ()};
         return empty;
@@ -573,17 +573,17 @@ void zmq::ctx_t::pend_connection (const std::string &addr_,
                                   const endpoint_t &endpoint_,
                                   pipe_t **pipes_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);

     const pending_connection_t pending_connection = {endpoint_, pipes_[0],
                                                      pipes_[1]};

-    endpoints_t::iterator it = endpoints.find (addr_);
-    if (it == endpoints.end ()) {
+    endpoints_t::iterator it = _endpoints.find (addr_);
+    if (it == _endpoints.end ()) {
         // Still no bind.
         endpoint_.socket->inc_seqnum ();
-        pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_,
-                                                       pending_connection);
+        _pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_,
+                                                        pending_connection);
     } else {
         // Bind has happened in the mean time, connect directly
         connect_inproc_sockets (it->second.socket, it->second.options,
@@ -594,16 +594,16 @@ void zmq::ctx_t::pend_connection (const std::string &addr_,
 void zmq::ctx_t::connect_pending (const char *addr_,
                                   zmq::socket_base_t *bind_socket_)
 {
-    scoped_lock_t locker (endpoints_sync);
+    scoped_lock_t locker (_endpoints_sync);

     std::pair<pending_connections_t::iterator, pending_connections_t::iterator>
-      pending = pending_connections.equal_range (addr_);
+      pending = _pending_connections.equal_range (addr_);
     for (pending_connections_t::iterator p = pending.first; p != pending.second;
          ++p)
-        connect_inproc_sockets (bind_socket_, endpoints[addr_].options,
+        connect_inproc_sockets (bind_socket_, _endpoints[addr_].options,
                                 p->second, bind_side);

-    pending_connections.erase (pending.first, pending.second);
+    _pending_connections.erase (pending.first, pending.second);
 }

 void zmq::ctx_t::connect_inproc_sockets (
@@ -682,20 +682,20 @@ void zmq::ctx_t::connect_inproc_sockets (

 int zmq::ctx_t::get_vmci_socket_family ()
 {
-    zmq::scoped_lock_t locker (vmci_sync);
+    zmq::scoped_lock_t locker (_vmci_sync);

-    if (vmci_fd == -1) {
-        vmci_family = VMCISock_GetAFValueFd (&vmci_fd);
+    if (_vmci_fd == -1) {
+        _vmci_family = VMCISock_GetAFValueFd (&_vmci_fd);

-        if (vmci_fd != -1) {
+        if (_vmci_fd != -1) {
 #ifdef FD_CLOEXEC
-            int rc = fcntl (vmci_fd, F_SETFD, FD_CLOEXEC);
+            int rc = fcntl (_vmci_fd, F_SETFD, FD_CLOEXEC);
             errno_assert (rc != -1);
 #endif
         }
     }

-    return vmci_family;
+    return _vmci_family;
 }

 #endif
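For reference, a small sketch (not part of the diff) of the public calls that reach the ctx_t::set/get paths above:

// Hypothetical usage, not from the commit; uses only the public libzmq API.
#include <zmq.h>
#include <assert.h>

int main ()
{
    void *ctx = zmq_ctx_new ();
    zmq_ctx_set (ctx, ZMQ_IO_THREADS, 2); // stored in _io_thread_count
    zmq_ctx_set (ctx, ZMQ_IPV6, 1);       // stored in _ipv6
    assert (zmq_ctx_get (ctx, ZMQ_IO_THREADS) == 2);
    zmq_ctx_term (ctx);
    return 0;
}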
58
src/ctx.hpp
58
src/ctx.hpp
@ -73,14 +73,14 @@ class thread_ctx_t

protected:
// Synchronisation of access to context options.
mutex_t opt_sync;
mutex_t _opt_sync;

private:
// Thread parameters.
int thread_priority;
int thread_sched_policy;
std::set<int> thread_affinity_cpus;
std::string thread_name_prefix;
int _thread_priority;
int _thread_sched_policy;
std::set<int> _thread_affinity_cpus;
std::string _thread_name_prefix;
};

// Context object encapsulates all the global state associated with
@ -165,84 +165,84 @@ class ctx_t : public thread_ctx_t
};

// Used to check whether the object is a context.
uint32_t tag;
uint32_t _tag;

// Sockets belonging to this context. We need the list so that
// we can notify the sockets when zmq_ctx_term() is called.
// The sockets will return ETERM then.
typedef array_t<socket_base_t> sockets_t;
sockets_t sockets;
sockets_t _sockets;

// List of unused thread slots.
typedef std::vector<uint32_t> empty_slots_t;
empty_slots_t empty_slots;
empty_slots_t _empty_slots;

// If true, zmq_init has been called but no socket has been created
// yet. Launching of I/O threads is delayed.
bool starting;
bool _starting;

// If true, zmq_ctx_term was already called.
bool terminating;
bool _terminating;

// Synchronisation of accesses to global slot-related data:
// sockets, empty_slots, terminating. It also synchronises
// access to zombie sockets as such (as opposed to slots) and provides
// a memory barrier to ensure that all CPU cores see the same data.
mutex_t slot_sync;
mutex_t _slot_sync;

// The reaper thread.
zmq::reaper_t *reaper;
zmq::reaper_t *_reaper;

// I/O threads.
typedef std::vector<zmq::io_thread_t *> io_threads_t;
io_threads_t io_threads;
io_threads_t _io_threads;

// Array of pointers to mailboxes for both application and I/O threads.
uint32_t slot_count;
i_mailbox **slots;
uint32_t _slot_count;
i_mailbox **_slots;

// Mailbox for zmq_ctx_term thread.
mailbox_t term_mailbox;
mailbox_t _term_mailbox;

// List of inproc endpoints within this context.
typedef std::map<std::string, endpoint_t> endpoints_t;
endpoints_t endpoints;
endpoints_t _endpoints;

// List of inproc connection endpoints pending a bind
typedef std::multimap<std::string, pending_connection_t>
pending_connections_t;
pending_connections_t pending_connections;
pending_connections_t _pending_connections;

// Synchronisation of access to the list of inproc endpoints.
mutex_t endpoints_sync;
mutex_t _endpoints_sync;

// Maximum socket ID.
static atomic_counter_t max_socket_id;

// Maximum number of sockets that can be opened at the same time.
int max_sockets;
int _max_sockets;

// Maximum allowed message size
int max_msgsz;
int _max_msgsz;

// Number of I/O threads to launch.
int io_thread_count;
int _io_thread_count;

// Does context wait (possibly forever) on termination?
bool blocky;
bool _blocky;

// Is IPv6 enabled on this context?
bool ipv6;
bool _ipv6;

// Should we use zero copy message decoding in this context?
bool zero_copy;
bool _zero_copy;

ctx_t (const ctx_t &);
const ctx_t &operator= (const ctx_t &);

#ifdef HAVE_FORK
// the process that created this context. Used to detect forking.
pid_t pid;
pid_t _pid;
#endif
enum side
{
@ -256,9 +256,9 @@ class ctx_t : public thread_ctx_t
side side_);

#ifdef ZMQ_HAVE_VMCI
int vmci_fd;
int vmci_family;
mutex_t vmci_sync;
int _vmci_fd;
int _vmci_family;
mutex_t _vmci_sync;
#endif
};
}
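The pattern across all of these hunks is uniform: every private (and protected) data member gains a leading underscore and stays lower_case, while member functions and locals are untouched. A minimal sketch of the resulting convention, using a hypothetical counter class rather than any real libzmq type:

    // Sketch of the naming convention this commit applies
    // (hypothetical example class, not part of libzmq).
    class counter_t
    {
      public:
        counter_t () : _count (0) {}

        // Member functions and locals stay plain lower_case, so they
        // can no longer shadow or collide with the data member.
        void add (int increment_) { _count += increment_; }
        int count () const { return _count; }

      private:
        // Private data member: '_' prefix + lower_case.
        int _count;
    };

Parameters in the diffs keep their trailing underscore (msg_, pipe_, index_), so members, locals and parameters each occupy a distinct lexical space.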
@ -44,10 +44,10 @@ zmq::curve_client_t::curve_client_t (session_base_t *session_,
mechanism_base_t (session_, options_),
curve_mechanism_base_t (
session_, options_, "CurveZMQMESSAGEC", "CurveZMQMESSAGES"),
state (send_hello),
tools (options_.curve_public_key,
options_.curve_secret_key,
options_.curve_server_key)
_state (send_hello),
_tools (options_.curve_public_key,
options_.curve_secret_key,
options_.curve_server_key)
{
}

@ -59,16 +59,16 @@ int zmq::curve_client_t::next_handshake_command (msg_t *msg_)
{
int rc = 0;

switch (state) {
switch (_state) {
case send_hello:
rc = produce_hello (msg_);
if (rc == 0)
state = expect_welcome;
_state = expect_welcome;
break;
case send_initiate:
rc = produce_initiate (msg_);
if (rc == 0)
state = expect_ready;
_state = expect_ready;
break;
default:
errno = EAGAIN;
@ -111,21 +111,21 @@ int zmq::curve_client_t::process_handshake_command (msg_t *msg_)

int zmq::curve_client_t::encode (msg_t *msg_)
{
zmq_assert (state == connected);
zmq_assert (_state == connected);
return curve_mechanism_base_t::encode (msg_);
}

int zmq::curve_client_t::decode (msg_t *msg_)
{
zmq_assert (state == connected);
zmq_assert (_state == connected);
return curve_mechanism_base_t::decode (msg_);
}

zmq::mechanism_t::status_t zmq::curve_client_t::status () const
{
if (state == connected)
if (_state == connected)
return mechanism_t::ready;
if (state == error_received)
if (_state == error_received)
return mechanism_t::error;
else
return mechanism_t::handshaking;
@ -136,7 +136,7 @@ int zmq::curve_client_t::produce_hello (msg_t *msg_)
int rc = msg_->init_size (200);
errno_assert (rc == 0);

rc = tools.produce_hello (msg_->data (), cn_nonce);
rc = _tools.produce_hello (msg_->data (), cn_nonce);
if (rc == -1) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC);
@ -157,7 +157,7 @@ int zmq::curve_client_t::produce_hello (msg_t *msg_)
int zmq::curve_client_t::process_welcome (const uint8_t *msg_data_,
size_t msg_size_)
{
int rc = tools.process_welcome (msg_data_, msg_size_, cn_precom);
int rc = _tools.process_welcome (msg_data_, msg_size_, cn_precom);

if (rc == -1) {
session->get_socket ()->event_handshake_failed_protocol (
@ -167,7 +167,7 @@ int zmq::curve_client_t::process_welcome (const uint8_t *msg_data_,
return -1;
}

state = send_initiate;
_state = send_initiate;

return 0;
}
@ -185,8 +185,8 @@ int zmq::curve_client_t::produce_initiate (msg_t *msg_)
int rc = msg_->init_size (msg_size);
errno_assert (rc == 0);

rc = tools.produce_initiate (msg_->data (), msg_size, cn_nonce,
metadata_plaintext, metadata_length);
rc = _tools.produce_initiate (msg_->data (), msg_size, cn_nonce,
metadata_plaintext, metadata_length);

free (metadata_plaintext);

@ -248,7 +248,7 @@ int zmq::curve_client_t::process_ready (const uint8_t *msg_data_,
free (ready_plaintext);

if (rc == 0)
state = connected;
_state = connected;
else {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA);
@ -261,7 +261,7 @@ int zmq::curve_client_t::process_error (const uint8_t *msg_data_,
int zmq::curve_client_t::process_error (const uint8_t *msg_data_,
size_t msg_size_)
{
if (state != expect_welcome && state != expect_ready) {
if (_state != expect_welcome && _state != expect_ready) {
session->get_socket ()->event_handshake_failed_protocol (
session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
errno = EPROTO;
@ -284,7 +284,7 @@ int zmq::curve_client_t::process_error (const uint8_t *msg_data_,
}
const char *error_reason = reinterpret_cast<const char *> (msg_data_) + 7;
handle_error_reason (error_reason, error_reason_len);
state = error_received;
_state = error_received;
return 0;
}
@ -66,10 +66,10 @@ class curve_client_t : public curve_mechanism_base_t
};

// Current FSM state
state_t state;
state_t _state;

// CURVE protocol tools
curve_client_tools_t tools;
curve_client_tools_t _tools;

int produce_hello (msg_t *msg_);
int process_welcome (const uint8_t *cmd_data_, size_t data_size_);
@ -49,10 +49,10 @@ zmq::curve_server_t::curve_server_t (session_base_t *session_,
{
int rc;
// Fetch our secret key from socket options
memcpy (secret_key, options_.curve_secret_key, crypto_box_SECRETKEYBYTES);
memcpy (_secret_key, options_.curve_secret_key, crypto_box_SECRETKEYBYTES);

// Generate short-term key pair
rc = crypto_box_keypair (cn_public, cn_secret);
rc = crypto_box_keypair (_cn_public, _cn_secret);
zmq_assert (rc == 0);
}

@ -171,7 +171,7 @@ int zmq::curve_server_t::process_hello (msg_t *msg_)
}

// Save client's short-term public key (C')
memcpy (cn_client, hello + 80, 32);
memcpy (_cn_client, hello + 80, 32);

uint8_t hello_nonce[crypto_box_NONCEBYTES];
uint8_t hello_plaintext[crypto_box_ZEROBYTES + 64];
@ -186,7 +186,7 @@ int zmq::curve_server_t::process_hello (msg_t *msg_)

// Open Box [64 * %x0](C'->S)
rc = crypto_box_open (hello_plaintext, hello_box, sizeof hello_box,
hello_nonce, cn_client, secret_key);
hello_nonce, _cn_client, _secret_key);
if (rc != 0) {
// CURVE I: cannot open client HELLO -- wrong server key?
session->get_socket ()->event_handshake_failed_protocol (
@ -212,16 +212,16 @@ int zmq::curve_server_t::produce_welcome (msg_t *msg_)

// Generate cookie = Box [C' + s'](t)
memset (cookie_plaintext, 0, crypto_secretbox_ZEROBYTES);
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES, cn_client, 32);
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32, cn_secret, 32);
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES, _cn_client, 32);
memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32, _cn_secret, 32);

// Generate fresh cookie key
randombytes (cookie_key, crypto_secretbox_KEYBYTES);
randombytes (_cookie_key, crypto_secretbox_KEYBYTES);

// Encrypt using symmetric cookie key
int rc =
crypto_secretbox (cookie_ciphertext, cookie_plaintext,
sizeof cookie_plaintext, cookie_nonce, cookie_key);
sizeof cookie_plaintext, cookie_nonce, _cookie_key);
zmq_assert (rc == 0);

uint8_t welcome_nonce[crypto_box_NONCEBYTES];
@ -235,15 +235,15 @@ int zmq::curve_server_t::produce_welcome (msg_t *msg_)

// Create 144-byte Box [S' + cookie](S->C')
memset (welcome_plaintext, 0, crypto_box_ZEROBYTES);
memcpy (welcome_plaintext + crypto_box_ZEROBYTES, cn_public, 32);
memcpy (welcome_plaintext + crypto_box_ZEROBYTES, _cn_public, 32);
memcpy (welcome_plaintext + crypto_box_ZEROBYTES + 32, cookie_nonce + 8,
16);
memcpy (welcome_plaintext + crypto_box_ZEROBYTES + 48,
cookie_ciphertext + crypto_secretbox_BOXZEROBYTES, 80);

rc = crypto_box (welcome_ciphertext, welcome_plaintext,
sizeof welcome_plaintext, welcome_nonce, cn_client,
secret_key);
sizeof welcome_plaintext, welcome_nonce, _cn_client,
_secret_key);

// TODO I think we should change this back to zmq_assert (rc == 0);
// as it was before https://github.com/zeromq/libzmq/pull/1832
@ -301,7 +301,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
memcpy (cookie_nonce + 8, initiate + 9, 16);

rc = crypto_secretbox_open (cookie_plaintext, cookie_box, sizeof cookie_box,
cookie_nonce, cookie_key);
cookie_nonce, _cookie_key);
if (rc != 0) {
// CURVE I: cannot open client INITIATE cookie
session->get_socket ()->event_handshake_failed_protocol (
@ -311,9 +311,9 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
}

// Check cookie plain text is as expected [C' + s']
if (memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES, cn_client, 32)
if (memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES, _cn_client, 32)
|| memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32,
cn_secret, 32)) {
_cn_secret, 32)) {
// TODO this case is very hard to test, as it would require a modified
// client that knows the server's secret temporary cookie key

@ -340,7 +340,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
cn_peer_nonce = get_uint64 (initiate + 105);

rc = crypto_box_open (initiate_plaintext, initiate_box, clen,
initiate_nonce, cn_client, cn_secret);
initiate_nonce, _cn_client, _cn_secret);
if (rc != 0) {
// CURVE I: cannot open client INITIATE
session->get_socket ()->event_handshake_failed_protocol (
@ -365,7 +365,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
16);

rc = crypto_box_open (vouch_plaintext, vouch_box, sizeof vouch_box,
vouch_nonce, client_key, cn_secret);
vouch_nonce, client_key, _cn_secret);
if (rc != 0) {
// CURVE I: cannot open client INITIATE vouch
session->get_socket ()->event_handshake_failed_protocol (
@ -375,7 +375,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
}

// What we decrypted must be the client's short-term public key
if (memcmp (vouch_plaintext + crypto_box_ZEROBYTES, cn_client, 32)) {
if (memcmp (vouch_plaintext + crypto_box_ZEROBYTES, _cn_client, 32)) {
// TODO this case is very hard to test, as it would require a modified
// client that knows the server's secret short-term key

@ -387,7 +387,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_)
}

// Precompute connection secret from client key
rc = crypto_box_beforenm (cn_precom, cn_client, cn_secret);
rc = crypto_box_beforenm (cn_precom, _cn_client, _cn_secret);
zmq_assert (rc == 0);

// Given this is a backward-incompatible change, it's behind a socket
@ -59,19 +59,19 @@ class curve_server_t : public zap_client_common_handshake_t,

private:
// Our secret key (s)
uint8_t secret_key[crypto_box_SECRETKEYBYTES];
uint8_t _secret_key[crypto_box_SECRETKEYBYTES];

// Our short-term public key (S')
uint8_t cn_public[crypto_box_PUBLICKEYBYTES];
uint8_t _cn_public[crypto_box_PUBLICKEYBYTES];

// Our short-term secret key (s')
uint8_t cn_secret[crypto_box_SECRETKEYBYTES];
uint8_t _cn_secret[crypto_box_SECRETKEYBYTES];

// Client's short-term public key (C')
uint8_t cn_client[crypto_box_PUBLICKEYBYTES];
uint8_t _cn_client[crypto_box_PUBLICKEYBYTES];

// Key used to produce cookie
uint8_t cookie_key[crypto_secretbox_KEYBYTES];
uint8_t _cookie_key[crypto_secretbox_KEYBYTES];

int process_hello (msg_t *msg_);
int produce_welcome (msg_t *msg_);
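The renamed `_state` member drives a simple handshake state machine: send_hello → expect_welcome → send_initiate → expect_ready → connected, with error_received as the terminal failure state. A reduced, self-contained sketch of that flow — not libzmq code, with all message production and crypto elided:

    // Reduced sketch of the curve_client_t handshake FSM.
    class handshake_fsm_t
    {
      public:
        handshake_fsm_t () : _state (send_hello) {}

        // The engine asks for the next outbound handshake command.
        void on_send ()
        {
            if (_state == send_hello)
                _state = expect_welcome; // HELLO written
            else if (_state == send_initiate)
                _state = expect_ready; // INITIATE written
        }

        // A handshake command arrived from the peer.
        void on_receive ()
        {
            if (_state == expect_welcome)
                _state = send_initiate; // WELCOME processed
            else if (_state == expect_ready)
                _state = connected; // READY processed
        }

        bool ready () const { return _state == connected; }

      private:
        enum state_t
        {
            send_hello,
            expect_welcome,
            send_initiate,
            expect_ready,
            connected,
            error_received
        } _state;
    };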
@ -59,18 +59,18 @@ template <> class dbuffer_t<msg_t>
{
public:
inline dbuffer_t () :
back (&storage[0]),
front (&storage[1]),
has_msg (false)
_back (&_storage[0]),
_front (&_storage[1]),
_has_msg (false)
{
back->init ();
front->init ();
_back->init ();
_front->init ();
}

inline ~dbuffer_t ()
{
back->close ();
front->close ();
_back->close ();
_front->close ();
}

inline void write (const msg_t &value_)
@ -78,15 +78,15 @@ template <> class dbuffer_t<msg_t>
msg_t &xvalue = const_cast<msg_t &> (value_);

zmq_assert (xvalue.check ());
back->move (xvalue); // cannot just overwrite, might leak
_back->move (xvalue); // cannot just overwrite, might leak

zmq_assert (back->check ());
zmq_assert (_back->check ());

if (sync.try_lock ()) {
std::swap (back, front);
has_msg = true;
if (_sync.try_lock ()) {
std::swap (_back, _front);
_has_msg = true;

sync.unlock ();
_sync.unlock ();
}
}

@ -96,16 +96,16 @@ template <> class dbuffer_t<msg_t>
return false;

{
scoped_lock_t lock (sync);
if (!has_msg)
scoped_lock_t lock (_sync);
if (!_has_msg)
return false;

zmq_assert (front->check ());
zmq_assert (_front->check ());

*value_ = *front;
front->init (); // avoid double free
*value_ = *_front;
_front->init (); // avoid double free

has_msg = false;
_has_msg = false;
return true;
}
}
@ -113,24 +113,24 @@ template <> class dbuffer_t<msg_t>

inline bool check_read ()
{
scoped_lock_t lock (sync);
scoped_lock_t lock (_sync);

return has_msg;
return _has_msg;
}

inline bool probe (bool (*fn_) (const msg_t &))
{
scoped_lock_t lock (sync);
return (*fn_) (*front);
scoped_lock_t lock (_sync);
return (*fn_) (*_front);
}

private:
msg_t storage[2];
msg_t *back, *front;
msg_t _storage[2];
msg_t *_back, *_front;

mutex_t sync;
bool has_msg;
mutex_t _sync;
bool _has_msg;

// Disable copying of dbuffer.
dbuffer_t (const dbuffer_t &);
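dbuffer_t is a single-writer, single-reader exchange slot: the writer always deposits into `_back`, then swaps `_back` and `_front` only if it can take the mutex without blocking, so the writing thread never stalls on the reader. A generic sketch of the same double-buffer idea, assuming std::mutex for the try_lock (libzmq uses its own mutex_t):

    #include <mutex>
    #include <utility>

    // Sketch: the writer never blocks; the reader sees the most
    // recently published value.
    template <typename T> class dbuffer_sketch_t
    {
      public:
        dbuffer_sketch_t () :
            _back (&_storage[0]), _front (&_storage[1]), _has_value (false)
        {
        }

        void write (const T &value_)
        {
            *_back = value_; // always lands in the back buffer
            if (_sync.try_lock ()) { // publish only if the reader isn't busy
                std::swap (_back, _front);
                _has_value = true;
                _sync.unlock ();
            }
        }

        bool read (T *value_)
        {
            std::lock_guard<std::mutex> lock (_sync);
            if (!_has_value)
                return false;
            *value_ = *_front;
            _has_value = false;
            return true;
        }

      private:
        T _storage[2];
        T *_back, *_front;
        std::mutex _sync;
        bool _has_value;
    };

If the try_lock fails, the value simply stays in `_back` and is superseded by the next write — exactly the "latest value wins" semantics the real class wants for msg_t.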
@ -35,7 +35,7 @@

zmq::dealer_t::dealer_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_),
probe_router (false)
_probe_router (false)
{
options.type = ZMQ_DEALER;
}
@ -50,7 +50,7 @@ void zmq::dealer_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)

zmq_assert (pipe_);

if (probe_router) {
if (_probe_router) {
msg_t probe_msg;
int rc = probe_msg.init ();
errno_assert (rc == 0);
@ -65,8 +65,8 @@ void zmq::dealer_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
errno_assert (rc == 0);
}

fq.attach (pipe_);
lb.attach (pipe_);
_fq.attach (pipe_);
_lb.attach (pipe_);
}

int zmq::dealer_t::xsetsockopt (int option_,
@ -81,7 +81,7 @@ int zmq::dealer_t::xsetsockopt (int option_,
switch (option_) {
case ZMQ_PROBE_ROUTER:
if (is_int && value >= 0) {
probe_router = (value != 0);
_probe_router = (value != 0);
return 0;
}
break;
@ -106,42 +106,42 @@ int zmq::dealer_t::xrecv (msg_t *msg_)

bool zmq::dealer_t::xhas_in ()
{
return fq.has_in ();
return _fq.has_in ();
}

bool zmq::dealer_t::xhas_out ()
{
return lb.has_out ();
return _lb.has_out ();
}

const zmq::blob_t &zmq::dealer_t::get_credential () const
{
return fq.get_credential ();
return _fq.get_credential ();
}

void zmq::dealer_t::xread_activated (pipe_t *pipe_)
{
fq.activated (pipe_);
_fq.activated (pipe_);
}

void zmq::dealer_t::xwrite_activated (pipe_t *pipe_)
{
lb.activated (pipe_);
_lb.activated (pipe_);
}

void zmq::dealer_t::xpipe_terminated (pipe_t *pipe_)
{
fq.pipe_terminated (pipe_);
lb.pipe_terminated (pipe_);
_fq.pipe_terminated (pipe_);
_lb.pipe_terminated (pipe_);
}

int zmq::dealer_t::sendpipe (msg_t *msg_, pipe_t **pipe_)
{
return lb.sendpipe (msg_, pipe_);
return _lb.sendpipe (msg_, pipe_);
}

int zmq::dealer_t::recvpipe (msg_t *msg_, pipe_t **pipe_)
{
return fq.recvpipe (msg_, pipe_);
return _fq.recvpipe (msg_, pipe_);
}
@ -69,11 +69,11 @@ class dealer_t : public socket_base_t
private:
// Messages are fair-queued from inbound pipes. And load-balanced to
// the outbound pipes.
fq_t fq;
lb_t lb;
fq_t _fq;
lb_t _lb;

// if true, send an empty message to every connected router peer
bool probe_router;
bool _probe_router;

dealer_t (const dealer_t &);
const dealer_t &operator= (const dealer_t &);
@ -58,22 +58,22 @@ class decoder_base_t : public i_decoder
{
public:
explicit decoder_base_t (const size_t buf_size_) :
next (NULL),
read_pos (NULL),
to_read (0),
allocator (buf_size_)
_next (NULL),
_read_pos (NULL),
_to_read (0),
_allocator (buf_size_)
{
buf = allocator.allocate ();
_buf = _allocator.allocate ();
}

// The destructor doesn't have to be virtual. It is made virtual
// just to keep ICC and code checking tools from complaining.
virtual ~decoder_base_t () { allocator.deallocate (); }
virtual ~decoder_base_t () { _allocator.deallocate (); }

// Returns a buffer to be filled with binary data.
void get_buffer (unsigned char **data_, std::size_t *size_)
{
buf = allocator.allocate ();
_buf = _allocator.allocate ();

// If we are expected to read large message, we'll opt for zero-
// copy, i.e. we'll ask caller to fill the data directly to the
@ -83,14 +83,14 @@ class decoder_base_t : public i_decoder
// As a consequence, large messages being received won't block
// other engines running in the same I/O thread for excessive
// amounts of time.
if (to_read >= allocator.size ()) {
*data_ = read_pos;
*size_ = to_read;
if (_to_read >= _allocator.size ()) {
*data_ = _read_pos;
*size_ = _to_read;
return;
}

*data_ = buf;
*size_ = allocator.size ();
*data_ = _buf;
*size_ = _allocator.size ();
}

// Processes the data in the buffer previously allocated using
@ -108,15 +108,15 @@ class decoder_base_t : public i_decoder
// In case of zero-copy simply adjust the pointers, no copying
// is required. Also, run the state machine in case all the data
// were processed.
if (data_ == read_pos) {
zmq_assert (size_ <= to_read);
read_pos += size_;
to_read -= size_;
if (data_ == _read_pos) {
zmq_assert (size_ <= _to_read);
_read_pos += size_;
_to_read -= size_;
bytes_used_ = size_;

while (!to_read) {
while (!_to_read) {
const int rc =
(static_cast<T *> (this)->*next) (data_ + bytes_used_);
(static_cast<T *> (this)->*_next) (data_ + bytes_used_);
if (rc != 0)
return rc;
}
@ -125,22 +125,22 @@ class decoder_base_t : public i_decoder

while (bytes_used_ < size_) {
// Copy the data from buffer to the message.
const size_t to_copy = std::min (to_read, size_ - bytes_used_);
const size_t to_copy = std::min (_to_read, size_ - bytes_used_);
// Only copy when destination address is different from the
// current address in the buffer.
if (read_pos != data_ + bytes_used_) {
memcpy (read_pos, data_ + bytes_used_, to_copy);
if (_read_pos != data_ + bytes_used_) {
memcpy (_read_pos, data_ + bytes_used_, to_copy);
}

read_pos += to_copy;
to_read -= to_copy;
_read_pos += to_copy;
_to_read -= to_copy;
bytes_used_ += to_copy;
// Try to get more space in the message to fill in.
// If none is available, return.
while (to_read == 0) {
while (_to_read == 0) {
// pass current address in the buffer
const int rc =
(static_cast<T *> (this)->*next) (data_ + bytes_used_);
(static_cast<T *> (this)->*_next) (data_ + bytes_used_);
if (rc != 0)
return rc;
}
@ -151,7 +151,7 @@ class decoder_base_t : public i_decoder

virtual void resize_buffer (std::size_t new_size_)
{
allocator.resize (new_size_);
_allocator.resize (new_size_);
}

protected:
@ -163,28 +163,28 @@ class decoder_base_t : public i_decoder
// from the buffer and schedule next state machine action.
void next_step (void *read_pos_, std::size_t to_read_, step_t next_)
{
read_pos = static_cast<unsigned char *> (read_pos_);
to_read = to_read_;
next = next_;
_read_pos = static_cast<unsigned char *> (read_pos_);
_to_read = to_read_;
_next = next_;
}

A &get_allocator () { return allocator; }
A &get_allocator () { return _allocator; }

private:
// Next step. If set to NULL, it means that associated data stream
// is dead. Note that there can be still data in the process in such
// case.
step_t next;
step_t _next;

// Where to store the read data.
unsigned char *read_pos;
unsigned char *_read_pos;

// How much data to read before taking next step.
std::size_t to_read;
std::size_t _to_read;
// The buffer for data to decode.
A allocator;
unsigned char *buf;
A _allocator;
unsigned char *_buf;

decoder_base_t (const decoder_base_t &);
const decoder_base_t &operator= (const decoder_base_t &);
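decoder_base_t drives parsing through `_next`, a pointer to a member function of the derived class (a CRTP state machine): `next_step ()` arms the target buffer, the byte count, and the handler to run once those bytes have arrived. A stripped-down sketch of the dispatch mechanism, with a hypothetical frame format (one length byte followed by that many payload bytes) standing in for ZMTP:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Sketch of the CRTP member-function-pointer state machine
    // behind decoder_base_t (error handling elided).
    template <typename T> class sm_base_t
    {
      protected:
        typedef void (T::*step_t) ();

        // Arm the next state: target buffer, byte count, handler.
        void next_step (void *read_pos_, std::size_t to_read_, step_t next_)
        {
            _read_pos = static_cast<unsigned char *> (read_pos_);
            _to_read = to_read_;
            _next = next_;
        }

      public:
        // Feed raw bytes; fire the armed step whenever a field completes.
        void decode (const unsigned char *data_, std::size_t size_)
        {
            while (size_) {
                const std::size_t n = size_ < _to_read ? size_ : _to_read;
                memcpy (_read_pos, data_, n);
                _read_pos += n;
                _to_read -= n;
                data_ += n;
                size_ -= n;
                while (!_to_read)
                    (static_cast<T *> (this)->*_next) ();
            }
        }

      private:
        unsigned char *_read_pos;
        std::size_t _to_read;
        step_t _next;
    };

    // Hypothetical derived decoder: length byte, then payload.
    class frame_decoder_t : public sm_base_t<frame_decoder_t>
    {
      public:
        frame_decoder_t () { next_step (&_length, 1, &frame_decoder_t::on_length); }

      private:
        void on_length () { next_step (_payload, _length, &frame_decoder_t::on_payload); }
        void on_payload () { next_step (&_length, 1, &frame_decoder_t::on_length); }

        std::uint8_t _length;
        unsigned char _payload[255];
    };

The design choice is that no virtual dispatch happens per byte: the hot loop calls through a plain member-function pointer on the statically known derived type.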
@ -36,23 +36,23 @@

zmq::shared_message_memory_allocator::shared_message_memory_allocator (
std::size_t bufsize_) :
buf (NULL),
bufsize (0),
max_size (bufsize_),
msg_content (NULL),
maxCounters (static_cast<size_t> (
std::ceil (static_cast<double> (max_size)
_buf (NULL),
_buf_size (0),
_max_size (bufsize_),
_msg_content (NULL),
_max_counters (static_cast<size_t> (
std::ceil (static_cast<double> (_max_size)
/ static_cast<double> (msg_t::max_vsm_size))))
{
}

zmq::shared_message_memory_allocator::shared_message_memory_allocator (
std::size_t bufsize_, std::size_t max_messages_) :
buf (NULL),
bufsize (0),
max_size (bufsize_),
msg_content (NULL),
maxCounters (max_messages_)
_buf (NULL),
_buf_size (0),
_max_size (bufsize_),
_msg_content (NULL),
_max_counters (max_messages_)
{
}

@ -63,10 +63,10 @@ zmq::shared_message_memory_allocator::~shared_message_memory_allocator ()

unsigned char *zmq::shared_message_memory_allocator::allocate ()
{
if (buf) {
if (_buf) {
// release reference count to couple lifetime to messages
zmq::atomic_counter_t *c =
reinterpret_cast<zmq::atomic_counter_t *> (buf);
reinterpret_cast<zmq::atomic_counter_t *> (_buf);

// if refcnt drops to 0, there are no message using the buffer
// because either all messages have been closed or only vsm-messages
@ -79,55 +79,55 @@ unsigned char *zmq::shared_message_memory_allocator::allocate ()
}

// if buf != NULL it is not used by any message so we can re-use it for the next run
if (!buf) {
if (!_buf) {
// allocate memory for reference counters together with reception buffer
std::size_t const allocationsize =
max_size + sizeof (zmq::atomic_counter_t)
+ maxCounters * sizeof (zmq::msg_t::content_t);
_max_size + sizeof (zmq::atomic_counter_t)
+ _max_counters * sizeof (zmq::msg_t::content_t);

buf = static_cast<unsigned char *> (std::malloc (allocationsize));
alloc_assert (buf);
_buf = static_cast<unsigned char *> (std::malloc (allocationsize));
alloc_assert (_buf);

new (buf) atomic_counter_t (1);
new (_buf) atomic_counter_t (1);
} else {
// release reference count to couple lifetime to messages
zmq::atomic_counter_t *c =
reinterpret_cast<zmq::atomic_counter_t *> (buf);
reinterpret_cast<zmq::atomic_counter_t *> (_buf);
c->set (1);
}

bufsize = max_size;
msg_content = reinterpret_cast<zmq::msg_t::content_t *> (
buf + sizeof (atomic_counter_t) + max_size);
return buf + sizeof (zmq::atomic_counter_t);
_buf_size = _max_size;
_msg_content = reinterpret_cast<zmq::msg_t::content_t *> (
_buf + sizeof (atomic_counter_t) + _max_size);
return _buf + sizeof (zmq::atomic_counter_t);
}

void zmq::shared_message_memory_allocator::deallocate ()
{
zmq::atomic_counter_t *c = reinterpret_cast<zmq::atomic_counter_t *> (buf);
if (buf && !c->sub (1)) {
std::free (buf);
zmq::atomic_counter_t *c = reinterpret_cast<zmq::atomic_counter_t *> (_buf);
if (_buf && !c->sub (1)) {
std::free (_buf);
}
clear ();
}

unsigned char *zmq::shared_message_memory_allocator::release ()
{
unsigned char *b = buf;
unsigned char *b = _buf;
clear ();
return b;
}

void zmq::shared_message_memory_allocator::clear ()
{
buf = NULL;
bufsize = 0;
msg_content = NULL;
_buf = NULL;
_buf_size = 0;
_msg_content = NULL;
}

void zmq::shared_message_memory_allocator::inc_ref ()
{
(reinterpret_cast<zmq::atomic_counter_t *> (buf))->add (1);
(reinterpret_cast<zmq::atomic_counter_t *> (_buf))->add (1);
}

void zmq::shared_message_memory_allocator::call_dec_ref (void *, void *hint_)
@ -146,10 +146,10 @@ void zmq::shared_message_memory_allocator::call_dec_ref (void *, void *hint_)

std::size_t zmq::shared_message_memory_allocator::size () const
{
return bufsize;
return _buf_size;
}

unsigned char *zmq::shared_message_memory_allocator::data ()
{
return buf + sizeof (zmq::atomic_counter_t);
return _buf + sizeof (zmq::atomic_counter_t);
}
@ -44,25 +44,25 @@ class c_single_allocator
{
public:
explicit c_single_allocator (std::size_t bufsize_) :
bufsize (bufsize_),
buf (static_cast<unsigned char *> (std::malloc (bufsize)))
_buf_size (bufsize_),
_buf (static_cast<unsigned char *> (std::malloc (_buf_size)))
{
alloc_assert (buf);
alloc_assert (_buf);
}

~c_single_allocator () { std::free (buf); }
~c_single_allocator () { std::free (_buf); }

unsigned char *allocate () { return buf; }
unsigned char *allocate () { return _buf; }

void deallocate () {}

std::size_t size () const { return bufsize; }
std::size_t size () const { return _buf_size; }

void resize (std::size_t new_size_) { bufsize = new_size_; }
void resize (std::size_t new_size_) { _buf_size = new_size_; }

private:
std::size_t bufsize;
unsigned char *buf;
std::size_t _buf_size;
unsigned char *_buf;

c_single_allocator (c_single_allocator const &);
c_single_allocator &operator= (c_single_allocator const &);
@ -111,22 +111,22 @@ class shared_message_memory_allocator
unsigned char *data ();

// Return pointer to the first byte of the buffer.
unsigned char *buffer () { return buf; }
unsigned char *buffer () { return _buf; }

void resize (std::size_t new_size_) { bufsize = new_size_; }
void resize (std::size_t new_size_) { _buf_size = new_size_; }

zmq::msg_t::content_t *provide_content () { return msg_content; }
zmq::msg_t::content_t *provide_content () { return _msg_content; }

void advance_content () { msg_content++; }
void advance_content () { _msg_content++; }

private:
void clear ();

unsigned char *buf;
std::size_t bufsize;
const std::size_t max_size;
zmq::msg_t::content_t *msg_content;
std::size_t maxCounters;
unsigned char *_buf;
std::size_t _buf_size;
const std::size_t _max_size;
zmq::msg_t::content_t *_msg_content;
std::size_t _max_counters;
};
}
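shared_message_memory_allocator places an atomic_counter_t at the head of the same malloc block as the receive buffer, so every zero-copy message that references a slice of the buffer shares one count, and the last release frees the whole block. The layout trick in isolation — a sketch assuming std::atomic for the counter and ignoring alignment padding, which the real allocator also relies on malloc to provide:

    #include <atomic>
    #include <cstdlib>
    #include <new>

    // Sketch of the shared-buffer layout: [refcount][data...].
    class shared_buf_t
    {
      public:
        static unsigned char *allocate (std::size_t size_)
        {
            void *block = std::malloc (sizeof (std::atomic<int>) + size_);
            new (block) std::atomic<int> (1); // counter lives at the head
            return static_cast<unsigned char *> (block)
                   + sizeof (std::atomic<int>);
        }

        static void inc_ref (unsigned char *data_)
        {
            counter (data_)->fetch_add (1);
        }

        static void dec_ref (unsigned char *data_)
        {
            if (counter (data_)->fetch_sub (1) == 1)
                std::free (counter (data_)); // last owner frees the block
        }

      private:
        static std::atomic<int> *counter (unsigned char *data_)
        {
            return reinterpret_cast<std::atomic<int> *> (
              data_ - sizeof (std::atomic<int>));
        }
    };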
@ -38,9 +38,9 @@

zmq::dgram_t::dgram_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_),
pipe (NULL),
last_in (NULL),
more_out (false)
_pipe (NULL),
_last_in (NULL),
_more_out (false)
{
options.type = ZMQ_DGRAM;
options.raw_socket = true;
@ -48,7 +48,7 @@ zmq::dgram_t::dgram_t (class ctx_t *parent_, uint32_t tid_, int sid_) :

zmq::dgram_t::~dgram_t ()
{
zmq_assert (!pipe);
zmq_assert (!_pipe);
}

void zmq::dgram_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
@ -59,20 +59,20 @@ void zmq::dgram_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)

// ZMQ_DGRAM socket can only be connected to a single peer.
// The socket rejects any further connection requests.
if (pipe == NULL)
pipe = pipe_;
if (_pipe == NULL)
_pipe = pipe_;
else
pipe_->terminate (false);
}

void zmq::dgram_t::xpipe_terminated (pipe_t *pipe_)
{
if (pipe_ == pipe) {
if (last_in == pipe) {
saved_credential.set_deep_copy (last_in->get_credential ());
last_in = NULL;
if (pipe_ == _pipe) {
if (_last_in == _pipe) {
_saved_credential.set_deep_copy (_last_in->get_credential ());
_last_in = NULL;
}
pipe = NULL;
_pipe = NULL;
}
}

@ -91,7 +91,7 @@ void zmq::dgram_t::xwrite_activated (pipe_t *)
int zmq::dgram_t::xsend (msg_t *msg_)
{
// If there's no out pipe, just drop it.
if (!pipe) {
if (!_pipe) {
int rc = msg_->close ();
errno_assert (rc == 0);
return -1;
@ -99,14 +99,14 @@ int zmq::dgram_t::xsend (msg_t *msg_)

// If this is the first part of the message it's the ID of the
// peer to send the message to.
if (!more_out) {
if (!_more_out) {
if (!(msg_->flags () & msg_t::more)) {
errno = EINVAL;
return -1;
}

// Expect one more message frame.
more_out = true;
_more_out = true;
} else {
// dgram messages are two part only, reject part if more is set
if (msg_->flags () & msg_t::more) {
@ -115,17 +115,17 @@ int zmq::dgram_t::xsend (msg_t *msg_)
}

// This is the last part of the message.
more_out = false;
_more_out = false;
}

// Push the message into the pipe.
if (!pipe->write (msg_)) {
if (!_pipe->write (msg_)) {
errno = EAGAIN;
return -1;
}

if (!(msg_->flags () & msg_t::more))
pipe->flush ();
_pipe->flush ();

// Detach the message from the data buffer.
int rc = msg_->init ();
@ -140,7 +140,7 @@ int zmq::dgram_t::xrecv (msg_t *msg_)
int rc = msg_->close ();
errno_assert (rc == 0);

if (!pipe || !pipe->read (msg_)) {
if (!_pipe || !_pipe->read (msg_)) {
// Initialise the output parameter to be a 0-byte message.
rc = msg_->init ();
errno_assert (rc == 0);
@ -148,28 +148,28 @@ int zmq::dgram_t::xrecv (msg_t *msg_)
errno = EAGAIN;
return -1;
}
last_in = pipe;
_last_in = _pipe;

return 0;
}

bool zmq::dgram_t::xhas_in ()
{
if (!pipe)
if (!_pipe)
return false;

return pipe->check_read ();
return _pipe->check_read ();
}

bool zmq::dgram_t::xhas_out ()
{
if (!pipe)
if (!_pipe)
return false;

return pipe->check_write ();
return _pipe->check_write ();
}

const zmq::blob_t &zmq::dgram_t::get_credential () const
{
return last_in ? last_in->get_credential () : saved_credential;
return _last_in ? _last_in->get_credential () : _saved_credential;
}
@ -59,14 +59,14 @@ class dgram_t : public socket_base_t
void xpipe_terminated (zmq::pipe_t *pipe_);

private:
zmq::pipe_t *pipe;
zmq::pipe_t *_pipe;

zmq::pipe_t *last_in;
zmq::pipe_t *_last_in;

blob_t saved_credential;
blob_t _saved_credential;

// If true, more outgoing message parts are expected.
bool more_out;
bool _more_out;

dgram_t (const dgram_t &);
const dgram_t &operator= (const dgram_t &);
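The `_more_out` flag enforces the strict two-frame shape of a ZMQ_DGRAM message: the first frame (the peer address) must carry the more flag, the second (the body) must not. From the caller's side that looks roughly like this — a hypothetical helper around the public zmq_send API:

    #include <zmq.h>
    #include <cstring>

    // Hypothetical helper: send one two-part dgram message
    // (address frame with ZMQ_SNDMORE, then the body without it).
    static int send_dgram (void *socket_, const char *addr_, const char *body_)
    {
        // First part: peer address, more to follow.
        if (zmq_send (socket_, addr_, strlen (addr_), ZMQ_SNDMORE) == -1)
            return -1;
        // Second part: payload, terminates the message.
        if (zmq_send (socket_, body_, strlen (body_), 0) == -1)
            return -1;
        return 0;
    }

Sending a lone first frame without ZMQ_SNDMORE, or a third frame with it, is rejected with EINVAL by the xsend logic above.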
80 src/dish.cpp
@ -36,7 +36,7 @@

zmq::dish_t::dish_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_, true),
has_message (false)
_has_message (false)
{
options.type = ZMQ_DISH;

@ -44,13 +44,13 @@ zmq::dish_t::dish_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
// subscription commands are sent to the wire.
options.linger.store (0);

int rc = message.init ();
int rc = _message.init ();
errno_assert (rc == 0);
}

zmq::dish_t::~dish_t ()
{
int rc = message.close ();
int rc = _message.close ();
errno_assert (rc == 0);
}

@ -59,8 +59,8 @@ void zmq::dish_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
LIBZMQ_UNUSED (subscribe_to_all_);

zmq_assert (pipe_);
fq.attach (pipe_);
dist.attach (pipe_);
_fq.attach (pipe_);
_dist.attach (pipe_);

// Send all the cached subscriptions to the new upstream peer.
send_subscriptions (pipe_);
@ -68,18 +68,18 @@ void zmq::dish_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)

void zmq::dish_t::xread_activated (pipe_t *pipe_)
{
fq.activated (pipe_);
_fq.activated (pipe_);
}

void zmq::dish_t::xwrite_activated (pipe_t *pipe_)
{
dist.activated (pipe_);
_dist.activated (pipe_);
}

void zmq::dish_t::xpipe_terminated (pipe_t *pipe_)
{
fq.pipe_terminated (pipe_);
dist.pipe_terminated (pipe_);
_fq.pipe_terminated (pipe_);
_dist.pipe_terminated (pipe_);
}

void zmq::dish_t::xhiccuped (pipe_t *pipe_)
@ -97,15 +97,15 @@ int zmq::dish_t::xjoin (const char *group_)
return -1;
}

subscriptions_t::iterator it = subscriptions.find (group);
subscriptions_t::iterator it = _subscriptions.find (group);

// User cannot join same group twice
if (it != subscriptions.end ()) {
if (it != _subscriptions.end ()) {
errno = EINVAL;
return -1;
}

subscriptions.insert (group);
_subscriptions.insert (group);

msg_t msg;
int rc = msg.init_join ();
@ -115,7 +115,7 @@ int zmq::dish_t::xjoin (const char *group_)
errno_assert (rc == 0);

int err = 0;
rc = dist.send_to_all (&msg);
rc = _dist.send_to_all (&msg);
if (rc != 0)
err = errno;
int rc2 = msg.close ();
@ -135,14 +135,14 @@ int zmq::dish_t::xleave (const char *group_)
}

subscriptions_t::iterator it =
std::find (subscriptions.begin (), subscriptions.end (), group);
std::find (_subscriptions.begin (), _subscriptions.end (), group);

if (it == subscriptions.end ()) {
if (it == _subscriptions.end ()) {
errno = EINVAL;
return -1;
}

subscriptions.erase (it);
_subscriptions.erase (it);

msg_t msg;
int rc = msg.init_leave ();
@ -152,7 +152,7 @@ int zmq::dish_t::xleave (const char *group_)
errno_assert (rc == 0);

int err = 0;
rc = dist.send_to_all (&msg);
rc = _dist.send_to_all (&msg);
if (rc != 0)
err = errno;
int rc2 = msg.close ();
@ -179,16 +179,16 @@ int zmq::dish_t::xrecv (msg_t *msg_)
{
// If there's already a message prepared by a previous call to zmq_poll,
// return it straight ahead.
if (has_message) {
int rc = msg_->move (message);
if (_has_message) {
int rc = msg_->move (_message);
errno_assert (rc == 0);
has_message = false;
_has_message = false;
return 0;
}

while (true) {
// Get a message using fair queueing algorithm.
int rc = fq.recv (msg_);
int rc = _fq.recv (msg_);

// If there's no message available, return immediately.
// The same when error occurs.
@ -197,8 +197,8 @@ int zmq::dish_t::xrecv (msg_t *msg_)

// Filtering non matching messages
subscriptions_t::iterator it =
subscriptions.find (std::string (msg_->group ()));
if (it != subscriptions.end ())
_subscriptions.find (std::string (msg_->group ()));
if (it != _subscriptions.end ())
return 0;
}
}
@ -207,12 +207,12 @@ bool zmq::dish_t::xhas_in ()
{
// If there's already a message prepared by a previous call to zmq_poll,
// return straight ahead.
if (has_message)
if (_has_message)
return true;

while (true) {
// Get a message using fair queueing algorithm.
int rc = fq.recv (&message);
int rc = _fq.recv (&_message);

// If there's no message available, return immediately.
// The same when error occurs.
@ -223,9 +223,9 @@ bool zmq::dish_t::xhas_in ()

// Filtering non matching messages
subscriptions_t::iterator it =
subscriptions.find (std::string (message.group ()));
if (it != subscriptions.end ()) {
has_message = true;
_subscriptions.find (std::string (_message.group ()));
if (it != _subscriptions.end ()) {
_has_message = true;
return true;
}
}
@ -233,13 +233,13 @@ bool zmq::dish_t::xhas_in ()

const zmq::blob_t &zmq::dish_t::get_credential () const
{
return fq.get_credential ();
return _fq.get_credential ();
}

void zmq::dish_t::send_subscriptions (pipe_t *pipe_)
{
for (subscriptions_t::iterator it = subscriptions.begin ();
it != subscriptions.end (); ++it) {
for (subscriptions_t::iterator it = _subscriptions.begin ();
it != _subscriptions.end (); ++it) {
msg_t msg;
int rc = msg.init_join ();
errno_assert (rc == 0);
@ -261,7 +261,7 @@ zmq::dish_session_t::dish_session_t (io_thread_t *io_thread_,
const options_t &options_,
address_t *addr_) :
session_base_t (io_thread_, connect_, socket_, options_, addr_),
state (group)
_state (group)
{
}

@ -271,7 +271,7 @@ zmq::dish_session_t::~dish_session_t ()

int zmq::dish_session_t::push_msg (msg_t *msg_)
{
if (state == group) {
if (_state == group) {
if ((msg_->flags () & msg_t::more) != msg_t::more) {
errno = EFAULT;
return -1;
@ -282,8 +282,8 @@ int zmq::dish_session_t::push_msg (msg_t *msg_)
return -1;
}

group_msg = *msg_;
state = body;
_group_msg = *msg_;
_state = body;

int rc = msg_->init ();
errno_assert (rc == 0);
@ -295,12 +295,12 @@ int zmq::dish_session_t::push_msg (msg_t *msg_)
goto has_group;

// Set the message group
rc = msg_->set_group (static_cast<char *> (group_msg.data ()),
group_msg.size ());
rc = msg_->set_group (static_cast<char *> (_group_msg.data ()),
_group_msg.size ());
errno_assert (rc == 0);

// We set the group, so we don't need the group_msg anymore
rc = group_msg.close ();
rc = _group_msg.close ();
errno_assert (rc == 0);
has_group:
// Thread safe socket doesn't support multipart messages
@ -313,7 +313,7 @@ has_group:
rc = session_base_t::push_msg (msg_);

if (rc == 0)
state = group;
_state = group;

return rc;
}
@ -363,5 +363,5 @@ int zmq::dish_session_t::pull_msg (msg_t *msg_)
void zmq::dish_session_t::reset ()
{
session_base_t::reset ();
state = group;
_state = group;
}
14 src/dish.hpp
@ -70,19 +70,19 @@ class dish_t : public socket_base_t
void send_subscriptions (pipe_t *pipe_);

// Fair queueing object for inbound pipes.
fq_t fq;
fq_t _fq;

// Object for distributing the subscriptions upstream.
dist_t dist;
dist_t _dist;

// The repository of subscriptions.
typedef std::set<std::string> subscriptions_t;
subscriptions_t subscriptions;
subscriptions_t _subscriptions;

// If true, 'message' contains a matching message to return on the
// next recv call.
bool has_message;
msg_t message;
bool _has_message;
msg_t _message;

dish_t (const dish_t &);
const dish_t &operator= (const dish_t &);
@ -108,9 +108,9 @@ class dish_session_t : public session_base_t
{
group,
body
} state;
} _state;

msg_t group_msg;
msg_t _group_msg;

dish_session_t (const dish_session_t &);
const dish_session_t &operator= (const dish_session_t &);
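The dish socket's `_subscriptions` set is the entire filtering model: a received message is delivered only if its group string is in the set, and joining the same group twice (or leaving one never joined) is an error. The check reduces to this sketch, using std::set exactly as the diff does:

    #include <set>
    #include <string>

    // Sketch of the dish-side group filter.
    struct group_filter_t
    {
        std::set<std::string> _subscriptions;

        bool join (const std::string &group_)
        {
            // Joining the same group twice fails (EINVAL in the diff).
            return _subscriptions.insert (group_).second;
        }

        bool leave (const std::string &group_)
        {
            return _subscriptions.erase (group_) != 0;
        }

        bool matches (const char *group_) const
        {
            return _subscriptions.find (group_) != _subscriptions.end ();
        }
    };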
106 src/dist.cpp
@ -34,13 +34,17 @@
#include "msg.hpp"
#include "likely.hpp"

zmq::dist_t::dist_t () : matching (0), active (0), eligible (0), more (false)
zmq::dist_t::dist_t () :
_matching (0),
_active (0),
_eligible (0),
_more (false)
{
}

zmq::dist_t::~dist_t ()
{
zmq_assert (pipes.empty ());
zmq_assert (_pipes.empty ());
}

void zmq::dist_t::attach (pipe_t *pipe_)
@ -48,36 +52,36 @@ void zmq::dist_t::attach (pipe_t *pipe_)
// If we are in the middle of sending a message, we'll add new pipe
// into the list of eligible pipes. Otherwise we add it to the list
// of active pipes.
if (more) {
pipes.push_back (pipe_);
pipes.swap (eligible, pipes.size () - 1);
eligible++;
if (_more) {
_pipes.push_back (pipe_);
_pipes.swap (_eligible, _pipes.size () - 1);
_eligible++;
} else {
pipes.push_back (pipe_);
pipes.swap (active, pipes.size () - 1);
active++;
eligible++;
_pipes.push_back (pipe_);
_pipes.swap (_active, _pipes.size () - 1);
_active++;
_eligible++;
}
}

void zmq::dist_t::match (pipe_t *pipe_)
{
// If pipe is already matching do nothing.
if (pipes.index (pipe_) < matching)
if (_pipes.index (pipe_) < _matching)
return;

// If the pipe isn't eligible, ignore it.
if (pipes.index (pipe_) >= eligible)
if (_pipes.index (pipe_) >= _eligible)
return;

// Mark the pipe as matching.
pipes.swap (pipes.index (pipe_), matching);
matching++;
_pipes.swap (_pipes.index (pipe_), _matching);
_matching++;
}

void zmq::dist_t::reverse_match ()
{
pipes_t::size_type prev_matching = matching;
pipes_t::size_type prev_matching = _matching;

// Reset matching to 0
unmatch ();
@ -86,55 +90,55 @@ void zmq::dist_t::reverse_match ()
// To do this, push all pipes that are eligible but not
// matched - i.e. between "matching" and "eligible" -
// to the beginning of the queue.
for (pipes_t::size_type i = prev_matching; i < eligible; ++i) {
pipes.swap (i, matching++);
for (pipes_t::size_type i = prev_matching; i < _eligible; ++i) {
_pipes.swap (i, _matching++);
}
}

void zmq::dist_t::unmatch ()
{
matching = 0;
_matching = 0;
}

void zmq::dist_t::pipe_terminated (pipe_t *pipe_)
{
// Remove the pipe from the list; adjust number of matching, active and/or
// eligible pipes accordingly.
if (pipes.index (pipe_) < matching) {
pipes.swap (pipes.index (pipe_), matching - 1);
matching--;
if (_pipes.index (pipe_) < _matching) {
_pipes.swap (_pipes.index (pipe_), _matching - 1);
_matching--;
}
if (pipes.index (pipe_) < active) {
pipes.swap (pipes.index (pipe_), active - 1);
active--;
if (_pipes.index (pipe_) < _active) {
_pipes.swap (_pipes.index (pipe_), _active - 1);
_active--;
}
if (pipes.index (pipe_) < eligible) {
pipes.swap (pipes.index (pipe_), eligible - 1);
eligible--;
if (_pipes.index (pipe_) < _eligible) {
_pipes.swap (_pipes.index (pipe_), _eligible - 1);
_eligible--;
}

pipes.erase (pipe_);
_pipes.erase (pipe_);
}

void zmq::dist_t::activated (pipe_t *pipe_)
{
// Move the pipe from passive to eligible state.
if (eligible < pipes.size ()) {
pipes.swap (pipes.index (pipe_), eligible);
eligible++;
if (_eligible < _pipes.size ()) {
_pipes.swap (_pipes.index (pipe_), _eligible);
_eligible++;
}

// If there's no message being sent at the moment, move it to
// the active state.
if (!more && active < pipes.size ()) {
pipes.swap (eligible - 1, active);
active++;
if (!_more && _active < _pipes.size ()) {
_pipes.swap (_eligible - 1, _active);
_active++;
}
}

int zmq::dist_t::send_to_all (msg_t *msg_)
{
matching = active;
_matching = _active;
return send_to_matching (msg_);
}

@ -148,9 +152,9 @@ int zmq::dist_t::send_to_matching (msg_t *msg_)

// If multipart message is fully sent, activate all the eligible pipes.
if (!msg_more)
active = eligible;
_active = _eligible;

more = msg_more;
_more = msg_more;

return 0;
}
@ -158,7 +162,7 @@ int zmq::dist_t::send_to_matching (msg_t *msg_)
void zmq::dist_t::distribute (msg_t *msg_)
{
// If there are no matching pipes available, simply drop the message.
if (matching == 0) {
if (_matching == 0) {
int rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
@ -167,8 +171,8 @@ void zmq::dist_t::distribute (msg_t *msg_)
}

if (msg_->is_vsm ()) {
for (pipes_t::size_type i = 0; i < matching; ++i)
if (!write (pipes[i], msg_))
for (pipes_t::size_type i = 0; i < _matching; ++i)
if (!write (_pipes[i], msg_))
--i; // Retry last write because index will have been swapped
int rc = msg_->close ();
errno_assert (rc == 0);
@ -179,12 +183,12 @@ void zmq::dist_t::distribute (msg_t *msg_)

// Add matching-1 references to the message. We already hold one reference,
// that's why -1.
msg_->add_refs (static_cast<int> (matching) - 1);
msg_->add_refs (static_cast<int> (_matching) - 1);

// Push copy of the message to each matching pipe.
int failed = 0;
for (pipes_t::size_type i = 0; i < matching; ++i)
if (!write (pipes[i], msg_)) {
for (pipes_t::size_type i = 0; i < _matching; ++i)
if (!write (_pipes[i], msg_)) {
++failed;
--i; // Retry last write because index will have been swapped
}
@ -205,12 +209,12 @@ bool zmq::dist_t::has_out ()
bool zmq::dist_t::write (pipe_t *pipe_, msg_t *msg_)
{
if (!pipe_->write (msg_)) {
pipes.swap (pipes.index (pipe_), matching - 1);
matching--;
pipes.swap (pipes.index (pipe_), active - 1);
active--;
pipes.swap (active, eligible - 1);
eligible--;
_pipes.swap (_pipes.index (pipe_), _matching - 1);
_matching--;
_pipes.swap (_pipes.index (pipe_), _active - 1);
_active--;
_pipes.swap (_active, _eligible - 1);
_eligible--;
return false;
}
if (!(msg_->flags () & msg_t::more))
@ -220,8 +224,8 @@ bool zmq::dist_t::write (pipe_t *pipe_, msg_t *msg_)

bool zmq::dist_t::check_hwm ()
{
for (pipes_t::size_type i = 0; i < matching; ++i)
if (!pipes[i]->check_hwm ())
for (pipes_t::size_type i = 0; i < _matching; ++i)
if (!_pipes[i]->check_hwm ())
return false;

return true;
10 src/dist.hpp
@ -87,25 +87,25 @@ class dist_t

// List of outbound pipes.
typedef array_t<zmq::pipe_t, 2> pipes_t;
pipes_t pipes;
pipes_t _pipes;

// Number of all the pipes to send the next message to.
pipes_t::size_type matching;
pipes_t::size_type _matching;

// Number of active pipes. All the active pipes are located at the
// beginning of the pipes array. These are the pipes the messages
// can be sent to at the moment.
pipes_t::size_type active;
pipes_t::size_type _active;

// Number of pipes eligible for sending messages to. This includes all
// the active pipes plus all the pipes that we can in theory send
// messages to (the HWM is not yet reached), but sending a message
// to them would result in partial message being delivered, ie. message
// with initial parts missing.
pipes_t::size_type eligible;
pipes_t::size_type _eligible;

// True if we are in the middle of a multipart message.
bool more;
bool _more;

dist_t (const dist_t &);
const dist_t &operator= (const dist_t &);
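dist_t keeps all of its bookkeeping inside one array by partitioning it with three boundary indices: [0, _matching) is currently matched, [0, _active) is writable right now, [0, _eligible) is writable in principle. A pipe changes partition with a swap plus a counter bump — never an allocation or a list splice. The core move in isolation:

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Sketch of the swap-based partitioning used by dist_t: the first
    // 'boundary' elements are "in"; moving an element in or out is a
    // swap plus a boundary adjustment, O(1) and allocation-free.
    template <typename T> struct partitioned_array_t
    {
        std::vector<T> _items;
        std::size_t _boundary; // elements [0, _boundary) are in

        void move_in (std::size_t index_)
        {
            if (index_ >= _boundary)
                std::swap (_items[index_], _items[_boundary++]);
        }

        void move_out (std::size_t index_)
        {
            if (index_ < _boundary)
                std::swap (_items[index_], _items[--_boundary]);
        }
    };

The real class nests three such boundaries over the same array (matching ≤ active ≤ eligible ≤ size), which is why the array_item_t index bookkeeping shown earlier in this commit matters: a swapped pipe must always know its new position.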
@@ -55,28 +55,28 @@ template <typename T> class encoder_base_t : public i_encoder
 {
   public:
     inline encoder_base_t (size_t bufsize_) :
-        write_pos (0),
-        to_write (0),
-        next (NULL),
-        new_msg_flag (false),
-        bufsize (bufsize_),
-        buf (static_cast<unsigned char *> (malloc (bufsize_))),
+        _write_pos (0),
+        _to_write (0),
+        _next (NULL),
+        _new_msg_flag (false),
+        _buf_size (bufsize_),
+        _buf (static_cast<unsigned char *> (malloc (bufsize_))),
         in_progress (NULL)
     {
-        alloc_assert (buf);
+        alloc_assert (_buf);
     }

     // The destructor doesn't have to be virtual. It is made virtual
     // just to keep ICC and code checking tools from complaining.
-    inline virtual ~encoder_base_t () { free (buf); }
+    inline virtual ~encoder_base_t () { free (_buf); }

     // The function returns a batch of binary data. The data
     // are filled to a supplied buffer. If no buffer is supplied (data_
     // points to NULL) decoder object will provide buffer of its own.
     inline size_t encode (unsigned char **data_, size_t size_)
     {
-        unsigned char *buffer = !*data_ ? buf : *data_;
-        size_t buffersize = !*data_ ? bufsize : size_;
+        unsigned char *buffer = !*data_ ? _buf : *data_;
+        size_t buffersize = !*data_ ? _buf_size : size_;

         if (in_progress == NULL)
             return 0;
@@ -86,8 +86,8 @@ template <typename T> class encoder_base_t : public i_encoder
         // If there are no more data to return, run the state machine.
         // If there are still no data, return what we already have
         // in the buffer.
-        if (!to_write) {
-            if (new_msg_flag) {
+        if (!_to_write) {
+            if (_new_msg_flag) {
                 int rc = in_progress->close ();
                 errno_assert (rc == 0);
                 rc = in_progress->init ();
@@ -95,7 +95,7 @@ template <typename T> class encoder_base_t : public i_encoder
                     in_progress = NULL;
                     break;
                 }
-                (static_cast<T *> (this)->*next) ();
+                (static_cast<T *> (this)->*_next) ();
             }

             // If there are no data in the buffer yet and we are able to
@@ -108,20 +108,20 @@ template <typename T> class encoder_base_t : public i_encoder
             // As a consequence, large messages being sent won't block
             // other engines running in the same I/O thread for excessive
             // amounts of time.
-            if (!pos && !*data_ && to_write >= buffersize) {
-                *data_ = write_pos;
-                pos = to_write;
-                write_pos = NULL;
-                to_write = 0;
+            if (!pos && !*data_ && _to_write >= buffersize) {
+                *data_ = _write_pos;
+                pos = _to_write;
+                _write_pos = NULL;
+                _to_write = 0;
                 return pos;
             }

             // Copy data to the buffer. If the buffer is full, return.
-            size_t to_copy = std::min (to_write, buffersize - pos);
-            memcpy (buffer + pos, write_pos, to_copy);
+            size_t to_copy = std::min (_to_write, buffersize - pos);
+            memcpy (buffer + pos, _write_pos, to_copy);
             pos += to_copy;
-            write_pos += to_copy;
-            to_write -= to_copy;
+            _write_pos += to_copy;
+            _to_write -= to_copy;
         }

         *data_ = buffer;
@@ -132,7 +132,7 @@ template <typename T> class encoder_base_t : public i_encoder
     {
         zmq_assert (in_progress == NULL);
         in_progress = msg_;
-        (static_cast<T *> (this)->*next) ();
+        (static_cast<T *> (this)->*_next) ();
     }

   protected:
@@ -146,28 +146,28 @@ template <typename T> class encoder_base_t : public i_encoder
                            step_t next_,
                            bool new_msg_flag_)
     {
-        write_pos = static_cast<unsigned char *> (write_pos_);
-        to_write = to_write_;
-        next = next_;
-        new_msg_flag = new_msg_flag_;
+        _write_pos = static_cast<unsigned char *> (write_pos_);
+        _to_write = to_write_;
+        _next = next_;
+        _new_msg_flag = new_msg_flag_;
     }

   private:
     // Where to get the data to write from.
-    unsigned char *write_pos;
+    unsigned char *_write_pos;

     // How much data to write before next step should be executed.
-    size_t to_write;
+    size_t _to_write;

     // Next step. If set to NULL, it means that associated data stream
     // is dead.
-    step_t next;
+    step_t _next;

-    bool new_msg_flag;
+    bool _new_msg_flag;

     // The buffer for encoded data.
-    const size_t bufsize;
-    unsigned char *const buf;
+    const size_t _buf_size;
+    unsigned char *const _buf;

     encoder_base_t (const encoder_base_t &);
     void operator= (const encoder_base_t &);
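The `_next` member renamed above is a pointer-to-member-function that drives the encoder's state machine. A minimal sketch of that pattern (hypothetical toy_encoder_t; the real class templates the step type on the derived class T):

#include <cstdio>

// Sketch of a member-function-pointer state machine like
// encoder_base_t's `_next` step.
class toy_encoder_t
{
  public:
    typedef void (toy_encoder_t::*step_t) ();

    toy_encoder_t () : _next (&toy_encoder_t::size_ready) {}

    void run_step () { (this->*_next) (); } // dispatch the current state

  private:
    void size_ready ()
    {
        std::puts ("emit size field");
        _next = &toy_encoder_t::body_ready; // advance the machine
    }

    void body_ready ()
    {
        std::puts ("emit message body");
        _next = &toy_encoder_t::size_ready; // back to the first state
    }

    step_t _next; // next state, as in encoder_base_t::_next
};

int main ()
{
    toy_encoder_t enc;
    enc.run_step (); // prints "emit size field"
    enc.run_step (); // prints "emit message body"
}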
@@ -58,11 +58,11 @@ zmq::epoll_t::epoll_t (const zmq::thread_ctx_t &ctx_) :
     // Setting this option result in sane behaviour when exec() functions
     // are used. Old sockets are closed and don't block TCP ports, avoid
     // leaks, etc.
-    epoll_fd = epoll_create1 (EPOLL_CLOEXEC);
+    _epoll_fd = epoll_create1 (EPOLL_CLOEXEC);
 #else
-    epoll_fd = epoll_create (1);
+    _epoll_fd = epoll_create (1);
 #endif
-    errno_assert (epoll_fd != epoll_retired_fd);
+    errno_assert (_epoll_fd != epoll_retired_fd);
 }

 zmq::epoll_t::~epoll_t ()
@@ -71,11 +71,11 @@ zmq::epoll_t::~epoll_t ()
     stop_worker ();

 #ifdef ZMQ_HAVE_WINDOWS
-    epoll_close (epoll_fd);
+    epoll_close (_epoll_fd);
 #else
-    close (epoll_fd);
+    close (_epoll_fd);
 #endif
-    for (retired_t::iterator it = retired.begin (); it != retired.end ();
+    for (retired_t::iterator it = _retired.begin (); it != _retired.end ();
          ++it) {
         LIBZMQ_DELETE (*it);
     }
@@ -96,7 +96,7 @@ zmq::epoll_t::handle_t zmq::epoll_t::add_fd (fd_t fd_, i_poll_events *events_)
     pe->ev.data.ptr = pe;
     pe->events = events_;

-    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_ADD, fd_, &pe->ev);
+    int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_ADD, fd_, &pe->ev);
     errno_assert (rc != -1);

     // Increase the load metric of the thread.
@@ -109,12 +109,12 @@ void zmq::epoll_t::rm_fd (handle_t handle_)
 {
     check_thread ();
     poll_entry_t *pe = (poll_entry_t *) handle_;
-    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_DEL, pe->fd, &pe->ev);
+    int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_DEL, pe->fd, &pe->ev);
     errno_assert (rc != -1);
     pe->fd = retired_fd;
-    retired_sync.lock ();
-    retired.push_back (pe);
-    retired_sync.unlock ();
+    _retired_sync.lock ();
+    _retired.push_back (pe);
+    _retired_sync.unlock ();

     // Decrease the load metric of the thread.
     adjust_load (-1);
@@ -125,7 +125,7 @@ void zmq::epoll_t::set_pollin (handle_t handle_)
     check_thread ();
     poll_entry_t *pe = (poll_entry_t *) handle_;
     pe->ev.events |= EPOLLIN;
-    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
+    int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
     errno_assert (rc != -1);
 }

@@ -134,7 +134,7 @@ void zmq::epoll_t::reset_pollin (handle_t handle_)
     check_thread ();
     poll_entry_t *pe = (poll_entry_t *) handle_;
     pe->ev.events &= ~((short) EPOLLIN);
-    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
+    int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
     errno_assert (rc != -1);
 }

@@ -143,7 +143,7 @@ void zmq::epoll_t::set_pollout (handle_t handle_)
     check_thread ();
     poll_entry_t *pe = (poll_entry_t *) handle_;
     pe->ev.events |= EPOLLOUT;
-    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
+    int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
     errno_assert (rc != -1);
 }

@@ -152,7 +152,7 @@ void zmq::epoll_t::reset_pollout (handle_t handle_)
     check_thread ();
     poll_entry_t *pe = (poll_entry_t *) handle_;
     pe->ev.events &= ~((short) EPOLLOUT);
-    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
+    int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev);
     errno_assert (rc != -1);
 }

@@ -183,7 +183,7 @@ void zmq::epoll_t::loop ()
         }

         // Wait for events.
-        int n = epoll_wait (epoll_fd, &ev_buf[0], max_io_events,
+        int n = epoll_wait (_epoll_fd, &ev_buf[0], max_io_events,
                             timeout ? timeout : -1);
         if (n == -1) {
             errno_assert (errno == EINTR);
@@ -208,13 +208,13 @@ void zmq::epoll_t::loop ()
         }

         // Destroy retired event sources.
-        retired_sync.lock ();
-        for (retired_t::iterator it = retired.begin (); it != retired.end ();
+        _retired_sync.lock ();
+        for (retired_t::iterator it = _retired.begin (); it != _retired.end ();
              ++it) {
             LIBZMQ_DELETE (*it);
         }
-        retired.clear ();
-        retired_sync.unlock ();
+        _retired.clear ();
+        _retired_sync.unlock ();
     }
 }

@@ -90,7 +90,7 @@ class epoll_t : public worker_poller_base_t
     void loop ();

     // Main epoll file descriptor
-    epoll_fd_t epoll_fd;
+    epoll_fd_t _epoll_fd;

     struct poll_entry_t
     {
@@ -101,13 +101,13 @@ class epoll_t : public worker_poller_base_t

     // List of retired event sources.
     typedef std::vector<poll_entry_t *> retired_t;
-    retired_t retired;
+    retired_t _retired;

     // Handle of the physical thread doing the I/O work.
-    thread_t worker;
+    thread_t _worker;

     // Synchronisation of retired event sources
-    mutex_t retired_sync;
+    mutex_t _retired_sync;

     epoll_t (const epoll_t &);
     const epoll_t &operator= (const epoll_t &);
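For readers unfamiliar with the calls that `_epoll_fd` feeds, here is a minimal, self-contained sketch of the create/ctl/wait cycle the poller wraps (Linux-only; error handling reduced to asserts, stdin used as a stand-in descriptor):

#include <sys/epoll.h>
#include <unistd.h>
#include <cassert>
#include <cerrno>

int main ()
{
    // Create the epoll instance, closed automatically across exec().
    const int epoll_fd = epoll_create1 (EPOLL_CLOEXEC);
    assert (epoll_fd != -1);

    // Register stdin for readability, like add_fd () + set_pollin ().
    epoll_event ev = {};
    ev.events = EPOLLIN;
    ev.data.fd = 0;
    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_ADD, 0, &ev);
    assert (rc != -1);

    // Wait up to 100 ms for events, as the poller loop does each turn.
    epoll_event ev_buf[8];
    const int n = epoll_wait (epoll_fd, ev_buf, 8, 100);
    assert (n != -1 || errno == EINTR);

    close (epoll_fd);
    return 0;
}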
74
src/fq.cpp
@@ -33,47 +33,47 @@
 #include "err.hpp"
 #include "msg.hpp"

-zmq::fq_t::fq_t () : active (0), last_in (NULL), current (0), more (false)
+zmq::fq_t::fq_t () : _active (0), _last_in (NULL), _current (0), _more (false)
 {
 }

 zmq::fq_t::~fq_t ()
 {
-    zmq_assert (pipes.empty ());
+    zmq_assert (_pipes.empty ());
 }

 void zmq::fq_t::attach (pipe_t *pipe_)
 {
-    pipes.push_back (pipe_);
-    pipes.swap (active, pipes.size () - 1);
-    active++;
+    _pipes.push_back (pipe_);
+    _pipes.swap (_active, _pipes.size () - 1);
+    _active++;
 }

 void zmq::fq_t::pipe_terminated (pipe_t *pipe_)
 {
-    const pipes_t::size_type index = pipes.index (pipe_);
+    const pipes_t::size_type index = _pipes.index (pipe_);

     // Remove the pipe from the list; adjust number of active pipes
     // accordingly.
-    if (index < active) {
-        active--;
-        pipes.swap (index, active);
-        if (current == active)
-            current = 0;
+    if (index < _active) {
+        _active--;
+        _pipes.swap (index, _active);
+        if (_current == _active)
+            _current = 0;
     }
-    pipes.erase (pipe_);
+    _pipes.erase (pipe_);

-    if (last_in == pipe_) {
-        saved_credential.set_deep_copy (last_in->get_credential ());
-        last_in = NULL;
+    if (_last_in == pipe_) {
+        _saved_credential.set_deep_copy (_last_in->get_credential ());
+        _last_in = NULL;
     }
 }

 void zmq::fq_t::activated (pipe_t *pipe_)
 {
     // Move the pipe to the list of active pipes.
-    pipes.swap (pipes.index (pipe_), active);
-    active++;
+    _pipes.swap (_pipes.index (pipe_), _active);
+    _active++;
 }

 int zmq::fq_t::recv (msg_t *msg_)
@@ -88,21 +88,21 @@ int zmq::fq_t::recvpipe (msg_t *msg_, pipe_t **pipe_)
     errno_assert (rc == 0);

     // Round-robin over the pipes to get the next message.
-    while (active > 0) {
+    while (_active > 0) {
         // Try to fetch new message. If we've already read part of the message
         // subsequent part should be immediately available.
-        bool fetched = pipes[current]->read (msg_);
+        bool fetched = _pipes[_current]->read (msg_);

         // Note that when message is not fetched, current pipe is deactivated
         // and replaced by another active pipe. Thus we don't have to increase
         // the 'current' pointer.
         if (fetched) {
             if (pipe_)
-                *pipe_ = pipes[current];
-            more = (msg_->flags () & msg_t::more) != 0;
-            if (!more) {
-                last_in = pipes[current];
-                current = (current + 1) % active;
+                *pipe_ = _pipes[_current];
+            _more = (msg_->flags () & msg_t::more) != 0;
+            if (!_more) {
+                _last_in = _pipes[_current];
+                _current = (_current + 1) % _active;
             }
             return 0;
         }
@@ -110,12 +110,12 @@ int zmq::fq_t::recvpipe (msg_t *msg_, pipe_t **pipe_)
         // Check the atomicity of the message.
         // If we've already received the first part of the message
         // we should get the remaining parts without blocking.
-        zmq_assert (!more);
+        zmq_assert (!_more);

-        active--;
-        pipes.swap (current, active);
-        if (current == active)
-            current = 0;
+        _active--;
+        _pipes.swap (_current, _active);
+        if (_current == _active)
+            _current = 0;
     }

     // No message is available. Initialise the output parameter
@@ -129,22 +129,22 @@ int zmq::fq_t::recvpipe (msg_t *msg_, pipe_t **pipe_)
 bool zmq::fq_t::has_in ()
 {
     // There are subsequent parts of the partly-read message available.
-    if (more)
+    if (_more)
         return true;

     // Note that messing with current doesn't break the fairness of fair
     // queueing algorithm. If there are no messages available current will
     // get back to its original value. Otherwise it'll point to the first
     // pipe holding messages, skipping only pipes with no messages available.
-    while (active > 0) {
-        if (pipes[current]->check_read ())
+    while (_active > 0) {
+        if (_pipes[_current]->check_read ())
             return true;

         // Deactivate the pipe.
-        active--;
-        pipes.swap (current, active);
-        if (current == active)
-            current = 0;
+        _active--;
+        _pipes.swap (_current, _active);
+        if (_current == _active)
+            _current = 0;
     }

     return false;
@@ -152,5 +152,5 @@ bool zmq::fq_t::has_in ()

 const zmq::blob_t &zmq::fq_t::get_credential () const
 {
-    return last_in ? last_in->get_credential () : saved_credential;
+    return _last_in ? _last_in->get_credential () : _saved_credential;
 }
12
src/fq.hpp
@@ -60,26 +60,26 @@ class fq_t
   private:
     // Inbound pipes.
     typedef array_t<pipe_t, 1> pipes_t;
-    pipes_t pipes;
+    pipes_t _pipes;

     // Number of active pipes. All the active pipes are located at the
     // beginning of the pipes array.
-    pipes_t::size_type active;
+    pipes_t::size_type _active;

     // Pointer to the last pipe we received message from.
     // NULL when no message has been received or the pipe
     // has terminated.
-    pipe_t *last_in;
+    pipe_t *_last_in;

     // Index of the next bound pipe to read a message from.
-    pipes_t::size_type current;
+    pipes_t::size_type _current;

     // If true, part of a multipart message was already received, but
     // there are following parts still waiting in the current pipe.
-    bool more;
+    bool _more;

     // Holds credential after the last_active_pipe has terminated.
-    blob_t saved_credential;
+    blob_t _saved_credential;

     fq_t (const fq_t &);
     const fq_t &operator= (const fq_t &);
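The `_current`/`_active` pair renamed above implements fair queueing: reads rotate over the active pipes so no single peer starves the others. A simplified sketch of the rotation (hypothetical toy_fq_t with plain queues; the real code also deactivates empty pipes via the swap idiom):

#include <cstddef>
#include <deque>
#include <vector>

struct toy_fq_t
{
    std::vector<std::deque<int> > queues; // one inbound queue per "pipe"
    std::size_t current = 0;              // next queue to read from

    // Scan round-robin starting at `current`; on success advance the
    // cursor so the next call starts at the following peer.
    bool recv (int &msg)
    {
        for (std::size_t n = 0; n < queues.size (); ++n) {
            const std::size_t i = (current + n) % queues.size ();
            if (!queues[i].empty ()) {
                msg = queues[i].front ();
                queues[i].pop_front ();
                current = (i + 1) % queues.size (); // fairness step
                return true;
            }
        }
        return false; // nothing readable anywhere
    }
};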
@@ -49,34 +49,34 @@ void zmq::gather_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
     LIBZMQ_UNUSED (subscribe_to_all_);

     zmq_assert (pipe_);
-    fq.attach (pipe_);
+    _fq.attach (pipe_);
 }

 void zmq::gather_t::xread_activated (pipe_t *pipe_)
 {
-    fq.activated (pipe_);
+    _fq.activated (pipe_);
 }

 void zmq::gather_t::xpipe_terminated (pipe_t *pipe_)
 {
-    fq.pipe_terminated (pipe_);
+    _fq.pipe_terminated (pipe_);
 }

 int zmq::gather_t::xrecv (msg_t *msg_)
 {
-    int rc = fq.recvpipe (msg_, NULL);
+    int rc = _fq.recvpipe (msg_, NULL);

     // Drop any messages with more flag
     while (rc == 0 && msg_->flags () & msg_t::more) {
         // drop all frames of the current multi-frame message
-        rc = fq.recvpipe (msg_, NULL);
+        rc = _fq.recvpipe (msg_, NULL);

         while (rc == 0 && msg_->flags () & msg_t::more)
-            rc = fq.recvpipe (msg_, NULL);
+            rc = _fq.recvpipe (msg_, NULL);

         // get the new message
         if (rc == 0)
-            rc = fq.recvpipe (msg_, NULL);
+            rc = _fq.recvpipe (msg_, NULL);
     }

     return rc;
@@ -84,10 +84,10 @@ int zmq::gather_t::xrecv (msg_t *msg_)

 bool zmq::gather_t::xhas_in ()
 {
-    return fq.has_in ();
+    return _fq.has_in ();
 }

 const zmq::blob_t &zmq::gather_t::get_credential () const
 {
-    return fq.get_credential ();
+    return _fq.get_credential ();
 }
@@ -56,7 +56,7 @@ class gather_t : public socket_base_t

   private:
     // Fair queueing object for inbound pipes.
-    fq_t fq;
+    fq_t _fq;

     gather_t (const gather_t &);
     const gather_t &operator= (const gather_t &);
@@ -96,16 +96,16 @@ template <typename T> class generic_mtrie_t
     bool is_redundant () const;

     typedef std::set<value_t *> pipes_t;
-    pipes_t *pipes;
+    pipes_t *_pipes;

-    unsigned char min;
-    unsigned short count;
-    unsigned short live_nodes;
+    unsigned char _min;
+    unsigned short _count;
+    unsigned short _live_nodes;
     union
     {
         class generic_mtrie_t<value_t> *node;
         class generic_mtrie_t<value_t> **table;
-    } next;
+    } _next;

     generic_mtrie_t (const generic_mtrie_t<value_t> &);
     const generic_mtrie_t<value_t> &
@@ -43,25 +43,25 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.

 template <typename T>
 zmq::generic_mtrie_t<T>::generic_mtrie_t () :
-    pipes (0),
-    min (0),
-    count (0),
-    live_nodes (0)
+    _pipes (0),
+    _min (0),
+    _count (0),
+    _live_nodes (0)
 {
 }

 template <typename T> zmq::generic_mtrie_t<T>::~generic_mtrie_t ()
 {
-    LIBZMQ_DELETE (pipes);
+    LIBZMQ_DELETE (_pipes);

-    if (count == 1) {
-        zmq_assert (next.node);
-        LIBZMQ_DELETE (next.node);
-    } else if (count > 1) {
-        for (unsigned short i = 0; i != count; ++i) {
-            LIBZMQ_DELETE (next.table[i]);
+    if (_count == 1) {
+        zmq_assert (_next.node);
+        LIBZMQ_DELETE (_next.node);
+    } else if (_count > 1) {
+        for (unsigned short i = 0; i != _count; ++i) {
+            LIBZMQ_DELETE (_next.table[i]);
         }
-        free (next.table);
+        free (_next.table);
     }
 }

@@ -80,73 +80,73 @@ bool zmq::generic_mtrie_t<T>::add_helper (prefix_t prefix_,
 {
     // We are at the node corresponding to the prefix. We are done.
     if (!size_) {
-        bool result = !pipes;
-        if (!pipes) {
-            pipes = new (std::nothrow) pipes_t;
-            alloc_assert (pipes);
+        bool result = !_pipes;
+        if (!_pipes) {
+            _pipes = new (std::nothrow) pipes_t;
+            alloc_assert (_pipes);
         }
-        pipes->insert (pipe_);
+        _pipes->insert (pipe_);
         return result;
     }

     unsigned char c = *prefix_;
-    if (c < min || c >= min + count) {
+    if (c < _min || c >= _min + _count) {
         // The character is out of range of currently handled
         // characters. We have to extend the table.
-        if (!count) {
-            min = c;
-            count = 1;
-            next.node = NULL;
-        } else if (count == 1) {
-            unsigned char oldc = min;
-            generic_mtrie_t *oldp = next.node;
-            count = (min < c ? c - min : min - c) + 1;
-            next.table =
-              (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * count);
-            alloc_assert (next.table);
-            for (unsigned short i = 0; i != count; ++i)
-                next.table[i] = 0;
-            min = std::min (min, c);
-            next.table[oldc - min] = oldp;
-        } else if (min < c) {
+        if (!_count) {
+            _min = c;
+            _count = 1;
+            _next.node = NULL;
+        } else if (_count == 1) {
+            unsigned char oldc = _min;
+            generic_mtrie_t *oldp = _next.node;
+            _count = (_min < c ? c - _min : _min - c) + 1;
+            _next.table =
+              (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * _count);
+            alloc_assert (_next.table);
+            for (unsigned short i = 0; i != _count; ++i)
+                _next.table[i] = 0;
+            _min = std::min (_min, c);
+            _next.table[oldc - _min] = oldp;
+        } else if (_min < c) {
             // The new character is above the current character range.
-            unsigned short old_count = count;
-            count = c - min + 1;
-            next.table = (generic_mtrie_t **) realloc (
-              next.table, sizeof (generic_mtrie_t *) * count);
-            alloc_assert (next.table);
-            for (unsigned short i = old_count; i != count; i++)
-                next.table[i] = NULL;
+            unsigned short old_count = _count;
+            _count = c - _min + 1;
+            _next.table = (generic_mtrie_t **) realloc (
+              _next.table, sizeof (generic_mtrie_t *) * _count);
+            alloc_assert (_next.table);
+            for (unsigned short i = old_count; i != _count; i++)
+                _next.table[i] = NULL;
         } else {
             // The new character is below the current character range.
-            unsigned short old_count = count;
-            count = (min + old_count) - c;
-            next.table = (generic_mtrie_t **) realloc (
-              next.table, sizeof (generic_mtrie_t *) * count);
-            alloc_assert (next.table);
-            memmove (next.table + min - c, next.table,
+            unsigned short old_count = _count;
+            _count = (_min + old_count) - c;
+            _next.table = (generic_mtrie_t **) realloc (
+              _next.table, sizeof (generic_mtrie_t *) * _count);
+            alloc_assert (_next.table);
+            memmove (_next.table + _min - c, _next.table,
                      old_count * sizeof (generic_mtrie_t *));
-            for (unsigned short i = 0; i != min - c; i++)
-                next.table[i] = NULL;
-            min = c;
+            for (unsigned short i = 0; i != _min - c; i++)
+                _next.table[i] = NULL;
+            _min = c;
         }
     }

     // If next node does not exist, create one.
-    if (count == 1) {
-        if (!next.node) {
-            next.node = new (std::nothrow) generic_mtrie_t;
-            alloc_assert (next.node);
-            ++live_nodes;
+    if (_count == 1) {
+        if (!_next.node) {
+            _next.node = new (std::nothrow) generic_mtrie_t;
+            alloc_assert (_next.node);
+            ++_live_nodes;
         }
-        return next.node->add_helper (prefix_ + 1, size_ - 1, pipe_);
+        return _next.node->add_helper (prefix_ + 1, size_ - 1, pipe_);
     }
-    if (!next.table[c - min]) {
-        next.table[c - min] = new (std::nothrow) generic_mtrie_t;
-        alloc_assert (next.table[c - min]);
-        ++live_nodes;
+    if (!_next.table[c - _min]) {
+        _next.table[c - _min] = new (std::nothrow) generic_mtrie_t;
+        alloc_assert (_next.table[c - _min]);
+        ++_live_nodes;
     }
-    return next.table[c - min]->add_helper (prefix_ + 1, size_ - 1, pipe_);
+    return _next.table[c - _min]->add_helper (prefix_ + 1, size_ - 1, pipe_);
 }

@@ -177,13 +177,13 @@ void zmq::generic_mtrie_t<T>::rm_helper (value_t *pipe_,
                                          bool call_on_uniq_)
 {
     // Remove the subscription from this node.
-    if (pipes && pipes->erase (pipe_)) {
-        if (!call_on_uniq_ || pipes->empty ()) {
+    if (_pipes && _pipes->erase (pipe_)) {
+        if (!call_on_uniq_ || _pipes->empty ()) {
             func_ (*buff_, buffsize_, arg_);
         }

-        if (pipes->empty ()) {
-            LIBZMQ_DELETE (pipes);
+        if (_pipes->empty ()) {
+            LIBZMQ_DELETE (_pipes);
         }
     }

@@ -195,22 +195,22 @@ void zmq::generic_mtrie_t<T>::rm_helper (value_t *pipe_,
     }

     // If there are no subnodes in the trie, return.
-    if (count == 0)
+    if (_count == 0)
         return;

     // If there's one subnode (optimisation).
-    if (count == 1) {
-        (*buff_)[buffsize_] = min;
+    if (_count == 1) {
+        (*buff_)[buffsize_] = _min;
         buffsize_++;
-        next.node->rm_helper (pipe_, buff_, buffsize_, maxbuffsize_, func_,
-                              arg_, call_on_uniq_);
+        _next.node->rm_helper (pipe_, buff_, buffsize_, maxbuffsize_, func_,
+                               arg_, call_on_uniq_);

         // Prune the node if it was made redundant by the removal
-        if (next.node->is_redundant ()) {
-            LIBZMQ_DELETE (next.node);
-            count = 0;
-            --live_nodes;
-            zmq_assert (live_nodes == 0);
+        if (_next.node->is_redundant ()) {
+            LIBZMQ_DELETE (_next.node);
+            _count = 0;
+            --_live_nodes;
+            zmq_assert (_live_nodes == 0);
         }
         return;
     }
@@ -218,21 +218,22 @@ void zmq::generic_mtrie_t<T>::rm_helper (value_t *pipe_,
     // If there are multiple subnodes.
     //
     // New min non-null character in the node table after the removal
-    unsigned char new_min = min + count - 1;
+    unsigned char new_min = _min + _count - 1;
     // New max non-null character in the node table after the removal
-    unsigned char new_max = min;
-    for (unsigned short c = 0; c != count; c++) {
-        (*buff_)[buffsize_] = min + c;
-        if (next.table[c]) {
-            next.table[c]->rm_helper (pipe_, buff_, buffsize_ + 1, maxbuffsize_,
-                                      func_, arg_, call_on_uniq_);
+    unsigned char new_max = _min;
+    for (unsigned short c = 0; c != _count; c++) {
+        (*buff_)[buffsize_] = _min + c;
+        if (_next.table[c]) {
+            _next.table[c]->rm_helper (pipe_, buff_, buffsize_ + 1,
+                                       maxbuffsize_, func_, arg_,
+                                       call_on_uniq_);

             // Prune redundant nodes from the mtrie
-            if (next.table[c]->is_redundant ()) {
-                LIBZMQ_DELETE (next.table[c]);
+            if (_next.table[c]->is_redundant ()) {
+                LIBZMQ_DELETE (_next.table[c]);

-                zmq_assert (live_nodes > 0);
-                --live_nodes;
+                zmq_assert (_live_nodes > 0);
+                --_live_nodes;
             } else {
                 // The node is not redundant, so it's a candidate for being
                 // the new min/max node.
@@ -241,54 +242,54 @@ void zmq::generic_mtrie_t<T>::rm_helper (value_t *pipe_,
                 // first non-null, non-redundant node encountered is the new
                 // minimum index. Conversely, the last non-redundant, non-null
                 // node encountered is the new maximum index.
-                if (c + min < new_min)
-                    new_min = c + min;
-                if (c + min > new_max)
-                    new_max = c + min;
+                if (c + _min < new_min)
+                    new_min = c + _min;
+                if (c + _min > new_max)
+                    new_max = c + _min;
             }
         }
     }

-    zmq_assert (count > 1);
+    zmq_assert (_count > 1);

     // Free the node table if it's no longer used.
-    if (live_nodes == 0) {
-        free (next.table);
-        next.table = NULL;
-        count = 0;
+    if (_live_nodes == 0) {
+        free (_next.table);
+        _next.table = NULL;
+        _count = 0;
     }
     // Compact the node table if possible
-    else if (live_nodes == 1) {
+    else if (_live_nodes == 1) {
         // If there's only one live node in the table we can
        // switch to using the more compact single-node
        // representation
         zmq_assert (new_min == new_max);
-        zmq_assert (new_min >= min && new_min < min + count);
-        generic_mtrie_t *node = next.table[new_min - min];
+        zmq_assert (new_min >= _min && new_min < _min + _count);
+        generic_mtrie_t *node = _next.table[new_min - _min];
         zmq_assert (node);
-        free (next.table);
-        next.node = node;
-        count = 1;
-        min = new_min;
-    } else if (new_min > min || new_max < min + count - 1) {
+        free (_next.table);
+        _next.node = node;
+        _count = 1;
+        _min = new_min;
+    } else if (new_min > _min || new_max < _min + _count - 1) {
         zmq_assert (new_max - new_min + 1 > 1);

-        generic_mtrie_t **old_table = next.table;
-        zmq_assert (new_min > min || new_max < min + count - 1);
-        zmq_assert (new_min >= min);
-        zmq_assert (new_max <= min + count - 1);
-        zmq_assert (new_max - new_min + 1 < count);
+        generic_mtrie_t **old_table = _next.table;
+        zmq_assert (new_min > _min || new_max < _min + _count - 1);
+        zmq_assert (new_min >= _min);
+        zmq_assert (new_max <= _min + _count - 1);
+        zmq_assert (new_max - new_min + 1 < _count);

-        count = new_max - new_min + 1;
-        next.table =
-          (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * count);
-        alloc_assert (next.table);
+        _count = new_max - new_min + 1;
+        _next.table =
+          (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * _count);
+        alloc_assert (_next.table);

-        memmove (next.table, old_table + (new_min - min),
-                 sizeof (generic_mtrie_t *) * count);
+        memmove (_next.table, old_table + (new_min - _min),
+                 sizeof (generic_mtrie_t *) * _count);
         free (old_table);

-        min = new_min;
+        _min = new_min;
     }
 }

@@ -304,23 +305,24 @@ typename zmq::generic_mtrie_t<T>::rm_result zmq::generic_mtrie_t<T>::rm_helper (
   prefix_t prefix_, size_t size_, value_t *pipe_)
 {
     if (!size_) {
-        if (!pipes)
+        if (!_pipes)
             return not_found;

-        typename pipes_t::size_type erased = pipes->erase (pipe_);
-        if (pipes->empty ()) {
+        typename pipes_t::size_type erased = _pipes->erase (pipe_);
+        if (_pipes->empty ()) {
             zmq_assert (erased == 1);
-            LIBZMQ_DELETE (pipes);
+            LIBZMQ_DELETE (_pipes);
             return last_value_removed;
         }
         return (erased == 1) ? values_remain : not_found;
     }

     unsigned char c = *prefix_;
-    if (!count || c < min || c >= min + count)
+    if (!_count || c < _min || c >= _min + _count)
         return not_found;

-    generic_mtrie_t *next_node = count == 1 ? next.node : next.table[c - min];
+    generic_mtrie_t *next_node =
+      _count == 1 ? _next.node : _next.table[c - _min];

     if (!next_node)
         return not_found;
@@ -329,66 +331,66 @@ typename zmq::generic_mtrie_t<T>::rm_result zmq::generic_mtrie_t<T>::rm_helper (

     if (next_node->is_redundant ()) {
         LIBZMQ_DELETE (next_node);
-        zmq_assert (count > 0);
+        zmq_assert (_count > 0);

-        if (count == 1) {
-            next.node = 0;
-            count = 0;
-            --live_nodes;
-            zmq_assert (live_nodes == 0);
+        if (_count == 1) {
+            _next.node = 0;
+            _count = 0;
+            --_live_nodes;
+            zmq_assert (_live_nodes == 0);
         } else {
-            next.table[c - min] = 0;
-            zmq_assert (live_nodes > 1);
-            --live_nodes;
+            _next.table[c - _min] = 0;
+            zmq_assert (_live_nodes > 1);
+            --_live_nodes;

             // Compact the table if possible
-            if (live_nodes == 1) {
+            if (_live_nodes == 1) {
                 // If there's only one live node in the table we can
                 // switch to using the more compact single-node
                 // representation
                 unsigned short i;
-                for (i = 0; i < count; ++i)
-                    if (next.table[i])
+                for (i = 0; i < _count; ++i)
+                    if (_next.table[i])
                         break;

-                zmq_assert (i < count);
-                min += i;
-                count = 1;
-                generic_mtrie_t *oldp = next.table[i];
-                free (next.table);
-                next.node = oldp;
-            } else if (c == min) {
+                zmq_assert (i < _count);
+                _min += i;
+                _count = 1;
+                generic_mtrie_t *oldp = _next.table[i];
+                free (_next.table);
+                _next.node = oldp;
+            } else if (c == _min) {
                 // We can compact the table "from the left"
                 unsigned short i;
-                for (i = 1; i < count; ++i)
-                    if (next.table[i])
+                for (i = 1; i < _count; ++i)
+                    if (_next.table[i])
                         break;

-                zmq_assert (i < count);
-                min += i;
-                count -= i;
-                generic_mtrie_t **old_table = next.table;
-                next.table = (generic_mtrie_t **) malloc (
-                  sizeof (generic_mtrie_t *) * count);
-                alloc_assert (next.table);
-                memmove (next.table, old_table + i,
-                         sizeof (generic_mtrie_t *) * count);
+                zmq_assert (i < _count);
+                _min += i;
+                _count -= i;
+                generic_mtrie_t **old_table = _next.table;
+                _next.table = (generic_mtrie_t **) malloc (
+                  sizeof (generic_mtrie_t *) * _count);
+                alloc_assert (_next.table);
+                memmove (_next.table, old_table + i,
+                         sizeof (generic_mtrie_t *) * _count);
                 free (old_table);
-            } else if (c == min + count - 1) {
+            } else if (c == _min + _count - 1) {
                 // We can compact the table "from the right"
                 unsigned short i;
-                for (i = 1; i < count; ++i)
-                    if (next.table[count - 1 - i])
+                for (i = 1; i < _count; ++i)
+                    if (_next.table[_count - 1 - i])
                         break;

-                zmq_assert (i < count);
-                count -= i;
-                generic_mtrie_t **old_table = next.table;
-                next.table = (generic_mtrie_t **) malloc (
-                  sizeof (generic_mtrie_t *) * count);
-                alloc_assert (next.table);
-                memmove (next.table, old_table,
-                         sizeof (generic_mtrie_t *) * count);
+                zmq_assert (i < _count);
+                _count -= i;
+                generic_mtrie_t **old_table = _next.table;
+                _next.table = (generic_mtrie_t **) malloc (
+                  sizeof (generic_mtrie_t *) * _count);
+                alloc_assert (_next.table);
+                memmove (_next.table, old_table,
+                         sizeof (generic_mtrie_t *) * _count);
                 free (old_table);
             }
         }
@@ -407,9 +409,9 @@ void zmq::generic_mtrie_t<T>::match (prefix_t data_,
     generic_mtrie_t *current = this;
     while (true) {
         // Signal the pipes attached to this node.
-        if (current->pipes) {
-            for (typename pipes_t::iterator it = current->pipes->begin ();
-                 it != current->pipes->end (); ++it)
+        if (current->_pipes) {
+            for (typename pipes_t::iterator it = current->_pipes->begin ();
+                 it != current->_pipes->end (); ++it)
                 func_ (*it, arg_);
         }

@@ -418,26 +420,26 @@ void zmq::generic_mtrie_t<T>::match (prefix_t data_,
             break;

         // If there are no subnodes in the trie, return.
-        if (current->count == 0)
+        if (current->_count == 0)
             break;

         // If there's one subnode (optimisation).
-        if (current->count == 1) {
-            if (data_[0] != current->min)
+        if (current->_count == 1) {
+            if (data_[0] != current->_min)
                 break;
-            current = current->next.node;
+            current = current->_next.node;
             data_++;
             size_--;
             continue;
         }

         // If there are multiple subnodes.
-        if (data_[0] < current->min
-            || data_[0] >= current->min + current->count)
+        if (data_[0] < current->_min
+            || data_[0] >= current->_min + current->_count)
             break;
-        if (!current->next.table[data_[0] - current->min])
+        if (!current->_next.table[data_[0] - current->_min])
             break;
-        current = current->next.table[data_[0] - current->min];
+        current = current->_next.table[data_[0] - current->_min];
         data_++;
         size_--;
     }
@@ -445,7 +447,7 @@ void zmq::generic_mtrie_t<T>::match (prefix_t data_,

 template <typename T> bool zmq::generic_mtrie_t<T>::is_redundant () const
 {
-    return !pipes && live_nodes == 0;
+    return !_pipes && _live_nodes == 0;
 }
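A large share of this diff is the mtrie's `_min`/`_count`/`_next` bookkeeping. The underlying idea is a dense child table covering only the byte range actually in use; a minimal sketch of the lookup side (hypothetical toy_node_t, ignoring the single-child union optimisation and the compaction logic):

// Children for byte values in [min, min + count) live in a contiguous
// table indexed by (c - min); anything outside the range has no child.
struct toy_node_t
{
    unsigned char min = 0;    // lowest byte with an allocated slot
    unsigned short count = 0; // number of slots in the table
    toy_node_t **table = nullptr;

    toy_node_t *child (unsigned char c) const
    {
        if (c < min || c >= min + count)
            return nullptr; // byte outside the covered range
        return table[c - min];
    }
};

Keeping the table clamped to the live range is what the "compact from the left/right" branches above maintain after removals.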
@@ -32,7 +32,7 @@
 #include "io_thread.hpp"
 #include "err.hpp"

-zmq::io_object_t::io_object_t (io_thread_t *io_thread_) : poller (NULL)
+zmq::io_object_t::io_object_t (io_thread_t *io_thread_) : _poller (NULL)
 {
     if (io_thread_)
         plug (io_thread_);
@@ -45,59 +45,59 @@ zmq::io_object_t::~io_object_t ()
 void zmq::io_object_t::plug (io_thread_t *io_thread_)
 {
     zmq_assert (io_thread_);
-    zmq_assert (!poller);
+    zmq_assert (!_poller);

     // Retrieve the poller from the thread we are running in.
-    poller = io_thread_->get_poller ();
+    _poller = io_thread_->get_poller ();
 }

 void zmq::io_object_t::unplug ()
 {
-    zmq_assert (poller);
+    zmq_assert (_poller);

     // Forget about old poller in preparation to be migrated
     // to a different I/O thread.
-    poller = NULL;
+    _poller = NULL;
 }

 zmq::io_object_t::handle_t zmq::io_object_t::add_fd (fd_t fd_)
 {
-    return poller->add_fd (fd_, this);
+    return _poller->add_fd (fd_, this);
 }

 void zmq::io_object_t::rm_fd (handle_t handle_)
 {
-    poller->rm_fd (handle_);
+    _poller->rm_fd (handle_);
 }

 void zmq::io_object_t::set_pollin (handle_t handle_)
 {
-    poller->set_pollin (handle_);
+    _poller->set_pollin (handle_);
 }

 void zmq::io_object_t::reset_pollin (handle_t handle_)
 {
-    poller->reset_pollin (handle_);
+    _poller->reset_pollin (handle_);
 }

 void zmq::io_object_t::set_pollout (handle_t handle_)
 {
-    poller->set_pollout (handle_);
+    _poller->set_pollout (handle_);
 }

 void zmq::io_object_t::reset_pollout (handle_t handle_)
 {
-    poller->reset_pollout (handle_);
+    _poller->reset_pollout (handle_);
 }

 void zmq::io_object_t::add_timer (int timeout_, int id_)
 {
-    poller->add_timer (timeout_, this, id_);
+    _poller->add_timer (timeout_, this, id_);
 }

 void zmq::io_object_t::cancel_timer (int id_)
 {
-    poller->cancel_timer (this, id_);
+    _poller->cancel_timer (this, id_);
 }

 void zmq::io_object_t::in_event ()
@@ -74,7 +74,7 @@ class io_object_t : public i_poll_events
     void timer_event (int id_);

   private:
-    poller_t *poller;
+    poller_t *_poller;

     io_object_t (const io_object_t &);
     const io_object_t &operator= (const io_object_t &);
@@ -38,26 +38,26 @@

 zmq::io_thread_t::io_thread_t (ctx_t *ctx_, uint32_t tid_) :
     object_t (ctx_, tid_),
-    mailbox_handle (static_cast<poller_t::handle_t> (NULL))
+    _mailbox_handle (static_cast<poller_t::handle_t> (NULL))
 {
-    poller = new (std::nothrow) poller_t (*ctx_);
-    alloc_assert (poller);
+    _poller = new (std::nothrow) poller_t (*ctx_);
+    alloc_assert (_poller);

-    if (mailbox.get_fd () != retired_fd) {
-        mailbox_handle = poller->add_fd (mailbox.get_fd (), this);
-        poller->set_pollin (mailbox_handle);
+    if (_mailbox.get_fd () != retired_fd) {
+        _mailbox_handle = _poller->add_fd (_mailbox.get_fd (), this);
+        _poller->set_pollin (_mailbox_handle);
     }
 }

 zmq::io_thread_t::~io_thread_t ()
 {
-    LIBZMQ_DELETE (poller);
+    LIBZMQ_DELETE (_poller);
 }

 void zmq::io_thread_t::start ()
 {
     // Start the underlying I/O thread.
-    poller->start ();
+    _poller->start ();
 }

 void zmq::io_thread_t::stop ()
@@ -67,12 +67,12 @@ void zmq::io_thread_t::stop ()

 zmq::mailbox_t *zmq::io_thread_t::get_mailbox ()
 {
-    return &mailbox;
+    return &_mailbox;
 }

 int zmq::io_thread_t::get_load ()
 {
-    return poller->get_load ();
+    return _poller->get_load ();
 }

 void zmq::io_thread_t::in_event ()
@@ -81,12 +81,12 @@ void zmq::io_thread_t::in_event ()
     // process in a single go?

     command_t cmd;
-    int rc = mailbox.recv (&cmd, 0);
+    int rc = _mailbox.recv (&cmd, 0);

     while (rc == 0 || errno == EINTR) {
         if (rc == 0)
             cmd.destination->process_command (cmd);
-        rc = mailbox.recv (&cmd, 0);
+        rc = _mailbox.recv (&cmd, 0);
     }

     errno_assert (rc != 0 && errno == EAGAIN);
@@ -106,13 +106,13 @@ void zmq::io_thread_t::timer_event (int)

 zmq::poller_t *zmq::io_thread_t::get_poller ()
 {
-    zmq_assert (poller);
-    return poller;
+    zmq_assert (_poller);
+    return _poller;
 }

 void zmq::io_thread_t::process_stop ()
 {
-    zmq_assert (mailbox_handle);
-    poller->rm_fd (mailbox_handle);
-    poller->stop ();
+    zmq_assert (_mailbox_handle);
+    _poller->rm_fd (_mailbox_handle);
+    _poller->stop ();
 }
@@ -77,13 +77,13 @@ class io_thread_t : public object_t, public i_poll_events

   private:
     // I/O thread accesses incoming commands via this mailbox.
-    mailbox_t mailbox;
+    mailbox_t _mailbox;

     // Handle associated with mailbox' file descriptor.
-    poller_t::handle_t mailbox_handle;
+    poller_t::handle_t _mailbox_handle;

     // I/O multiplexing is performed using a poller object.
-    poller_t *poller;
+    poller_t *_poller;

     io_thread_t (const io_thread_t &);
     const io_thread_t &operator= (const io_thread_t &);
@@ -95,18 +95,18 @@ zmq::ip_addr_t zmq::ip_addr_t::any (int family_)
 }

 zmq::ip_resolver_options_t::ip_resolver_options_t () :
-    bindable_wanted (false),
-    nic_name_allowed (false),
-    ipv6_wanted (false),
-    port_expected (false),
-    dns_allowed (false)
+    _bindable_wanted (false),
+    _nic_name_allowed (false),
+    _ipv6_wanted (false),
+    _port_expected (false),
+    _dns_allowed (false)
 {
 }

 zmq::ip_resolver_options_t &
 zmq::ip_resolver_options_t::bindable (bool bindable_)
 {
-    bindable_wanted = bindable_;
+    _bindable_wanted = bindable_;

     return *this;
 }
@@ -114,14 +114,14 @@ zmq::ip_resolver_options_t::bindable (bool bindable_)
 zmq::ip_resolver_options_t &
 zmq::ip_resolver_options_t::allow_nic_name (bool allow_)
 {
-    nic_name_allowed = allow_;
+    _nic_name_allowed = allow_;

     return *this;
 }

 zmq::ip_resolver_options_t &zmq::ip_resolver_options_t::ipv6 (bool ipv6_)
 {
-    ipv6_wanted = ipv6_;
+    _ipv6_wanted = ipv6_;

     return *this;
 }
@@ -131,45 +131,45 @@ zmq::ip_resolver_options_t &zmq::ip_resolver_options_t::ipv6 (bool ipv6_)
 zmq::ip_resolver_options_t &
 zmq::ip_resolver_options_t::expect_port (bool expect_)
 {
-    port_expected = expect_;
+    _port_expected = expect_;

     return *this;
 }

 zmq::ip_resolver_options_t &zmq::ip_resolver_options_t::allow_dns (bool allow_)
 {
-    dns_allowed = allow_;
+    _dns_allowed = allow_;

     return *this;
 }

 bool zmq::ip_resolver_options_t::bindable ()
 {
-    return bindable_wanted;
+    return _bindable_wanted;
 }

 bool zmq::ip_resolver_options_t::allow_nic_name ()
 {
-    return nic_name_allowed;
+    return _nic_name_allowed;
 }

 bool zmq::ip_resolver_options_t::ipv6 ()
 {
-    return ipv6_wanted;
+    return _ipv6_wanted;
 }

 bool zmq::ip_resolver_options_t::expect_port ()
 {
-    return port_expected;
+    return _port_expected;
 }

 bool zmq::ip_resolver_options_t::allow_dns ()
 {
-    return dns_allowed;
+    return _dns_allowed;
 }

 zmq::ip_resolver_t::ip_resolver_t (ip_resolver_options_t opts_) :
-    options (opts_)
+    _options (opts_)
 {
 }

@@ -178,7 +178,7 @@ int zmq::ip_resolver_t::resolve (ip_addr_t *ip_addr_, const char *name_)
     std::string addr;
     uint16_t port;

-    if (options.expect_port ()) {
+    if (_options.expect_port ()) {
         // We expect 'addr:port'. It's important to use str*r*chr to only get
         // the latest colon since IPv6 addresses use colons as delemiters.
         const char *delim = strrchr (name_, ':');
@@ -192,7 +192,7 @@ int zmq::ip_resolver_t::resolve (ip_addr_t *ip_addr_, const char *name_)
         std::string port_str = std::string (delim + 1);

         if (port_str == "*") {
-            if (options.bindable ()) {
+            if (_options.bindable ()) {
                 // Resolve wildcard to 0 to allow autoselection of port
                 port = 0;
             } else {
@@ -248,13 +248,13 @@ int zmq::ip_resolver_t::resolve (ip_addr_t *ip_addr_, const char *name_)
     bool resolved = false;
     const char *addr_str = addr.c_str ();

-    if (options.bindable () && addr == "*") {
+    if (_options.bindable () && addr == "*") {
         // Return an ANY address
-        *ip_addr_ = ip_addr_t::any (options.ipv6 () ? AF_INET6 : AF_INET);
+        *ip_addr_ = ip_addr_t::any (_options.ipv6 () ? AF_INET6 : AF_INET);
         resolved = true;
     }

-    if (!resolved && options.allow_nic_name ()) {
+    if (!resolved && _options.allow_nic_name ()) {
         // Try to resolve the string as a NIC name.
         int rc = resolve_nic_name (ip_addr_, addr_str);

@@ -303,18 +303,18 @@ int zmq::ip_resolver_t::resolve_getaddrinfo (ip_addr_t *ip_addr_,

     // Choose IPv4 or IPv6 protocol family. Note that IPv6 allows for
     // IPv4-in-IPv6 addresses.
-    req.ai_family = options.ipv6 () ? AF_INET6 : AF_INET;
+    req.ai_family = _options.ipv6 () ? AF_INET6 : AF_INET;

     // Arbitrary, not used in the output, but avoids duplicate results.
     req.ai_socktype = SOCK_STREAM;

     req.ai_flags = 0;

-    if (options.bindable ()) {
+    if (_options.bindable ()) {
         req.ai_flags |= AI_PASSIVE;
     }

-    if (!options.allow_dns ()) {
+    if (!_options.allow_dns ()) {
         req.ai_flags |= AI_NUMERICHOST;
     }

@@ -355,7 +355,7 @@ int zmq::ip_resolver_t::resolve_getaddrinfo (ip_addr_t *ip_addr_,
             errno = ENOMEM;
             break;
         default:
-            if (options.bindable ()) {
+            if (_options.bindable ()) {
                 errno = ENODEV;
             } else {
                 errno = EINVAL;
@@ -444,7 +444,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_)
 {
 #if defined ZMQ_HAVE_AIX || defined ZMQ_HAVE_HPUX
     // IPv6 support not implemented for AIX or HP/UX.
-    if (options.ipv6 ()) {
+    if (_options.ipv6 ()) {
         errno = ENODEV;
         return -1;
     }
@@ -452,7 +452,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_)

     // Create a socket.
     const int sd =
-      open_socket (options.ipv6 () ? AF_INET6 : AF_INET, SOCK_DGRAM, 0);
+      open_socket (_options.ipv6 () ? AF_INET6 : AF_INET, SOCK_DGRAM, 0);
     errno_assert (sd != -1);

     struct ifreq ifr;
@@ -472,7 +472,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_)
     }

     const int family = ifr.ifr_addr.sa_family;
-    if (family == (options.ipv6 () ? AF_INET6 : AF_INET)
+    if (family == (_options.ipv6 () ? AF_INET6 : AF_INET)
         && !strcmp (nic_, ifr.ifr_name)) {
         memcpy (ip_addr_, &ifr.ifr_addr,
                 (family == AF_INET) ? sizeof (struct sockaddr_in)
@@ -524,7 +524,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_)
             continue;

         const int family = ifp->ifa_addr->sa_family;
-        if (family == (options.ipv6 () ? AF_INET6 : AF_INET)
+        if (family == (_options.ipv6 () ? AF_INET6 : AF_INET)
             && !strcmp (nic_, ifp->ifa_name)) {
             memcpy (ip_addr_, ifp->ifa_addr,
                     (family == AF_INET) ? sizeof (struct sockaddr_in)
@@ -647,7 +647,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_)
         ADDRESS_FAMILY family =
           current_unicast_address->Address.lpSockaddr->sa_family;

-        if (family == (options.ipv6 () ? AF_INET6 : AF_INET)) {
+        if (family == (_options.ipv6 () ? AF_INET6 : AF_INET)) {
             memcpy (
               ip_addr_, current_unicast_address->Address.lpSockaddr,
               (family == AF_INET) ? sizeof (struct sockaddr_in)
@@ -73,11 +73,11 @@ class ip_resolver_options_t
     bool allow_dns ();

   private:
-    bool bindable_wanted;
-    bool nic_name_allowed;
-    bool ipv6_wanted;
-    bool port_expected;
-    bool dns_allowed;
+    bool _bindable_wanted;
+    bool _nic_name_allowed;
+    bool _ipv6_wanted;
+    bool _port_expected;
+    bool _dns_allowed;
 };

 class ip_resolver_t
@@ -88,7 +88,7 @@ class ip_resolver_t
     int resolve (ip_addr_t *ip_addr_, const char *name_);

   protected:
-    ip_resolver_options_t options;
+    ip_resolver_options_t _options;

     int resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_);
     int resolve_getaddrinfo (ip_addr_t *ip_addr_, const char *addr_);
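The `ip_resolver_options_t` setters above return `*this`, giving a chainable builder. A minimal sketch of the same pattern (hypothetical resolver_options_t with just two flags):

class resolver_options_t
{
  public:
    resolver_options_t &bindable (bool value)
    {
        _bindable_wanted = value;
        return *this; // returning *this enables chaining
    }
    resolver_options_t &ipv6 (bool value)
    {
        _ipv6_wanted = value;
        return *this;
    }

    // Parameterless overloads act as getters, as in the diff above.
    bool bindable () const { return _bindable_wanted; }
    bool ipv6 () const { return _ipv6_wanted; }

  private:
    bool _bindable_wanted = false;
    bool _ipv6_wanted = false;
};

// Usage: resolver_options_t ().bindable (true).ipv6 (true)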
80
src/lb.cpp
80
src/lb.cpp
@@ -33,46 +33,46 @@
 #include "err.hpp"
 #include "msg.hpp"

-zmq::lb_t::lb_t () : active (0), current (0), more (false), dropping (false)
+zmq::lb_t::lb_t () : _active (0), _current (0), _more (false), _dropping (false)
 {
 }

 zmq::lb_t::~lb_t ()
 {
-    zmq_assert (pipes.empty ());
+    zmq_assert (_pipes.empty ());
 }

 void zmq::lb_t::attach (pipe_t *pipe_)
 {
-    pipes.push_back (pipe_);
+    _pipes.push_back (pipe_);
     activated (pipe_);
 }

 void zmq::lb_t::pipe_terminated (pipe_t *pipe_)
 {
-    pipes_t::size_type index = pipes.index (pipe_);
+    pipes_t::size_type index = _pipes.index (pipe_);

     // If we are in the middle of multipart message and current pipe
     // have disconnected, we have to drop the remainder of the message.
-    if (index == current && more)
-        dropping = true;
+    if (index == _current && _more)
+        _dropping = true;

     // Remove the pipe from the list; adjust number of active pipes
     // accordingly.
-    if (index < active) {
-        active--;
-        pipes.swap (index, active);
-        if (current == active)
-            current = 0;
+    if (index < _active) {
+        _active--;
+        _pipes.swap (index, _active);
+        if (_current == _active)
+            _current = 0;
     }
-    pipes.erase (pipe_);
+    _pipes.erase (pipe_);
 }

 void zmq::lb_t::activated (pipe_t *pipe_)
 {
     // Move the pipe to the list of active pipes.
-    pipes.swap (pipes.index (pipe_), active);
-    active++;
+    _pipes.swap (_pipes.index (pipe_), _active);
+    _active++;
 }

 int zmq::lb_t::send (msg_t *msg_)
@@ -84,9 +84,9 @@ int zmq::lb_t::sendpipe (msg_t *msg_, pipe_t **pipe_)
 {
     // Drop the message if required. If we are at the end of the message
     // switch back to non-dropping mode.
-    if (dropping) {
-        more = (msg_->flags () & msg_t::more) != 0;
-        dropping = more;
+    if (_dropping) {
+        _more = (msg_->flags () & msg_t::more) != 0;
+        _dropping = _more;

         int rc = msg_->close ();
         errno_assert (rc == 0);
@@ -95,44 +95,44 @@ int zmq::lb_t::sendpipe (msg_t *msg_, pipe_t **pipe_)
         return 0;
     }

-    while (active > 0) {
-        if (pipes[current]->write (msg_)) {
+    while (_active > 0) {
+        if (_pipes[_current]->write (msg_)) {
             if (pipe_)
-                *pipe_ = pipes[current];
+                *pipe_ = _pipes[_current];
             break;
         }

         // If send fails for multi-part msg rollback other
         // parts sent earlier and return EAGAIN.
         // Application should handle this as suitable
-        if (more) {
-            pipes[current]->rollback ();
-            more = false;
+        if (_more) {
+            _pipes[_current]->rollback ();
+            _more = false;
             errno = EAGAIN;
             return -1;
         }

-        active--;
-        if (current < active)
-            pipes.swap (current, active);
+        _active--;
+        if (_current < _active)
+            _pipes.swap (_current, _active);
         else
-            current = 0;
+            _current = 0;
     }

     // If there are no pipes we cannot send the message.
-    if (active == 0) {
+    if (_active == 0) {
         errno = EAGAIN;
         return -1;
     }

     // If it's final part of the message we can flush it downstream and
     // continue round-robining (load balance).
-    more = (msg_->flags () & msg_t::more) != 0;
-    if (!more) {
-        pipes[current]->flush ();
+    _more = (msg_->flags () & msg_t::more) != 0;
+    if (!_more) {
+        _pipes[_current]->flush ();

-        if (++current >= active)
-            current = 0;
+        if (++_current >= _active)
+            _current = 0;
     }

     // Detach the message from the data buffer.
@@ -146,19 +146,19 @@ bool zmq::lb_t::has_out ()
 {
     // If one part of the message was already written we can definitely
     // write the rest of the message.
-    if (more)
+    if (_more)
         return true;

-    while (active > 0) {
+    while (_active > 0) {
         // Check whether a pipe has room for another message.
-        if (pipes[current]->check_write ())
+        if (_pipes[_current]->check_write ())
             return true;

         // Deactivate the pipe.
-        active--;
-        pipes.swap (current, active);
-        if (current == active)
-            current = 0;
+        _active--;
+        _pipes.swap (_current, _active);
+        if (_current == _active)
+            _current = 0;
     }

     return false;
10
src/lb.hpp
@@ -63,20 +63,20 @@ class lb_t
   private:
     // List of outbound pipes.
     typedef array_t<pipe_t, 2> pipes_t;
-    pipes_t pipes;
+    pipes_t _pipes;

     // Number of active pipes. All the active pipes are located at the
     // beginning of the pipes array.
-    pipes_t::size_type active;
+    pipes_t::size_type _active;

     // Points to the last pipe that the most recent message was sent to.
-    pipes_t::size_type current;
+    pipes_t::size_type _current;

     // True if last we are in the middle of a multipart message.
-    bool more;
+    bool _more;

     // True if we are dropping current message.
-    bool dropping;
+    bool _dropping;

     lb_t (const lb_t &);
     const lb_t &operator= (const lb_t &);
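lb_t's `_more`/`_dropping` flags enforce multipart atomicity: once some frames of a message have been written to a pipe, a later failure rolls the whole message back rather than delivering a partial one. A simplified sketch of that rule (hypothetical toy_pipe_t with a tiny buffer):

#include <cstddef>
#include <vector>

struct toy_pipe_t
{
    std::vector<int> buffered;  // frames queued but not yet flushed
    std::size_t capacity = 2;

    bool write (int frame)
    {
        if (buffered.size () == capacity)
            return false; // high-water mark reached
        buffered.push_back (frame);
        return true;
    }

    void rollback () { buffered.clear (); } // drop unflushed frames
};

bool send_multipart (toy_pipe_t &pipe, const std::vector<int> &frames)
{
    for (std::size_t i = 0; i != frames.size (); ++i) {
        if (!pipe.write (frames[i])) {
            pipe.rollback (); // never leave a partial message behind
            return false;     // caller sees an EAGAIN-style failure
        }
    }
    return true;
}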
@@ -36,71 +36,71 @@ zmq::mailbox_t::mailbox_t ()
     // Get the pipe into passive state. That way, if the users starts by
     // polling on the associated file descriptor it will get woken up when
     // new command is posted.
-    const bool ok = cpipe.check_read ();
+    const bool ok = _cpipe.check_read ();
     zmq_assert (!ok);
-    active = false;
+    _active = false;
 }

 zmq::mailbox_t::~mailbox_t ()
 {
-    // TODO: Retrieve and deallocate commands inside the cpipe.
+    // TODO: Retrieve and deallocate commands inside the _cpipe.

     // Work around problem that other threads might still be in our
     // send() method, by waiting on the mutex before disappearing.
-    sync.lock ();
-    sync.unlock ();
+    _sync.lock ();
+    _sync.unlock ();
 }

 zmq::fd_t zmq::mailbox_t::get_fd () const
 {
-    return signaler.get_fd ();
+    return _signaler.get_fd ();
 }

 void zmq::mailbox_t::send (const command_t &cmd_)
 {
-    sync.lock ();
-    cpipe.write (cmd_, false);
-    const bool ok = cpipe.flush ();
-    sync.unlock ();
+    _sync.lock ();
+    _cpipe.write (cmd_, false);
+    const bool ok = _cpipe.flush ();
+    _sync.unlock ();
     if (!ok)
-        signaler.send ();
+        _signaler.send ();
 }

 int zmq::mailbox_t::recv (command_t *cmd_, int timeout_)
 {
     // Try to get the command straight away.
-    if (active) {
-        if (cpipe.read (cmd_))
+    if (_active) {
+        if (_cpipe.read (cmd_))
             return 0;

         // If there are no more commands available, switch into passive state.
-        active = false;
+        _active = false;
     }

     // Wait for signal from the command sender.
-    int rc = signaler.wait (timeout_);
+    int rc = _signaler.wait (timeout_);
     if (rc == -1) {
         errno_assert (errno == EAGAIN || errno == EINTR);
         return -1;
     }

     // Receive the signal.
-    rc = signaler.recv_failable ();
+    rc = _signaler.recv_failable ();
     if (rc == -1) {
         errno_assert (errno == EAGAIN);
         return -1;
     }

     // Switch into active state.
-    active = true;
+    _active = true;

     // Get a command.
-    const bool ok = cpipe.read (cmd_);
+    const bool ok = _cpipe.read (cmd_);
     zmq_assert (ok);
     return 0;
 }

 bool zmq::mailbox_t::valid () const
 {
-    return signaler.valid ();
+    return _signaler.valid ();
 }
@ -58,26 +58,26 @@ class mailbox_t : public i_mailbox
|
||||
// close the file descriptors in the signaller. This is used in a forked
|
||||
// child process to close the file descriptors so that they do not interfere
|
||||
// with the context in the parent process.
|
||||
void forked () { signaler.forked (); }
|
||||
void forked () { _signaler.forked (); }
|
||||
#endif
|
||||
|
||||
private:
|
||||
// The pipe to store actual commands.
|
||||
typedef ypipe_t<command_t, command_pipe_granularity> cpipe_t;
|
||||
cpipe_t cpipe;
|
||||
cpipe_t _cpipe;
|
||||
|
||||
// Signaler to pass signals from writer thread to reader thread.
|
||||
signaler_t signaler;
|
||||
signaler_t _signaler;
|
||||
|
||||
// There's only one thread receiving from the mailbox, but there
|
||||
// is arbitrary number of threads sending. Given that ypipe requires
|
||||
// synchronised access on both of its endpoints, we have to synchronise
|
||||
// the sending side.
|
||||
mutex_t sync;
|
||||
mutex_t _sync;
|
||||
|
||||
// True if the underlying pipe is active, ie. when we are allowed to
|
||||
// read commands from it.
|
||||
bool active;
|
||||
bool _active;
|
||||
|
||||
// Disable copying of mailbox_t object.
|
||||
mailbox_t (const mailbox_t &);
|
||||
|
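The mailbox combines three pieces: a single-producer/single-consumer command pipe, a mutex that collapses many senders into one producer, and a signaler that is kicked only when flush () reports the reader was passive. A sketch of that edge-triggered wake-up with standard C++ only; `toy_mailbox_t` is a hypothetical name, and unlike the real thing the reader here also takes the mutex (ypipe lets libzmq's reader stay lock-free):

// Sketch: many senders, one receiver, wake-up only on the
// empty -> non-empty edge, mirroring signaler.send () after a
// failed flush ().
#include <condition_variable>
#include <mutex>
#include <queue>

template <typename T> class toy_mailbox_t
{
  public:
    void send (const T &cmd_)
    {
        bool was_empty;
        {
            std::lock_guard<std::mutex> guard (_sync);
            was_empty = _queue.empty ();
            _queue.push (cmd_);
        }
        // Signal only on the edge; steady-state senders pay no
        // wake-up cost, just as in mailbox_t::send () above.
        if (was_empty)
            _cond.notify_one ();
    }

    T recv ()
    {
        std::unique_lock<std::mutex> guard (_sync);
        _cond.wait (guard, [this] { return !_queue.empty (); });
        T cmd = _queue.front ();
        _queue.pop ();
        return cmd;
    }

  private:
    std::mutex _sync;
    std::condition_variable _cond;
    std::queue<T> _queue;
};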
@@ -32,12 +32,12 @@
 #include "clock.hpp"
 #include "err.hpp"

-zmq::mailbox_safe_t::mailbox_safe_t (mutex_t *sync_) : sync (sync_)
+zmq::mailbox_safe_t::mailbox_safe_t (mutex_t *sync_) : _sync (sync_)
 {
     // Get the pipe into passive state. That way, if the user starts by
     // polling on the associated file descriptor it will get woken up when
     // a new command is posted.
-    const bool ok = cpipe.check_read ();
+    const bool ok = _cpipe.check_read ();
     zmq_assert (!ok);
 }

@@ -47,66 +47,66 @@ zmq::mailbox_safe_t::~mailbox_safe_t ()

     // Work around the problem that other threads might still be in our
     // send() method by waiting on the mutex before disappearing.
-    sync->lock ();
-    sync->unlock ();
+    _sync->lock ();
+    _sync->unlock ();
 }

 void zmq::mailbox_safe_t::add_signaler (signaler_t *signaler_)
 {
-    signalers.push_back (signaler_);
+    _signalers.push_back (signaler_);
 }

 void zmq::mailbox_safe_t::remove_signaler (signaler_t *signaler_)
 {
-    std::vector<signaler_t *>::iterator it = signalers.begin ();
+    std::vector<signaler_t *>::iterator it = _signalers.begin ();

     // TODO: make a copy of the array and signal outside the lock
-    for (; it != signalers.end (); ++it) {
+    for (; it != _signalers.end (); ++it) {
         if (*it == signaler_)
             break;
     }

-    if (it != signalers.end ())
-        signalers.erase (it);
+    if (it != _signalers.end ())
+        _signalers.erase (it);
 }

 void zmq::mailbox_safe_t::clear_signalers ()
 {
-    signalers.clear ();
+    _signalers.clear ();
 }

 void zmq::mailbox_safe_t::send (const command_t &cmd_)
 {
-    sync->lock ();
-    cpipe.write (cmd_, false);
-    const bool ok = cpipe.flush ();
+    _sync->lock ();
+    _cpipe.write (cmd_, false);
+    const bool ok = _cpipe.flush ();

     if (!ok) {
-        cond_var.broadcast ();
-        for (std::vector<signaler_t *>::iterator it = signalers.begin ();
-             it != signalers.end (); ++it) {
+        _cond_var.broadcast ();
+        for (std::vector<signaler_t *>::iterator it = _signalers.begin ();
+             it != _signalers.end (); ++it) {
             (*it)->send ();
         }
     }

-    sync->unlock ();
+    _sync->unlock ();
 }

 int zmq::mailbox_safe_t::recv (command_t *cmd_, int timeout_)
 {
     // Try to get the command straight away.
-    if (cpipe.read (cmd_))
+    if (_cpipe.read (cmd_))
         return 0;

     // Wait for a signal from the command sender.
-    int rc = cond_var.wait (sync, timeout_);
+    int rc = _cond_var.wait (_sync, timeout_);
     if (rc == -1) {
         errno_assert (errno == EAGAIN || errno == EINTR);
         return -1;
     }

     // Another thread may have already fetched the command.
-    const bool ok = cpipe.read (cmd_);
+    const bool ok = _cpipe.read (cmd_);

     if (!ok) {
         errno = EAGAIN;
@@ -71,15 +71,15 @@ class mailbox_safe_t : public i_mailbox
   private:
     // The pipe to store actual commands.
     typedef ypipe_t<command_t, command_pipe_granularity> cpipe_t;
-    cpipe_t cpipe;
+    cpipe_t _cpipe;

     // Condition variable to pass signals from writer thread to reader thread.
-    condition_variable_t cond_var;
+    condition_variable_t _cond_var;

     // Synchronize access to the mailbox from receivers and senders.
-    mutex_t *const sync;
+    mutex_t *const _sync;

-    std::vector<zmq::signaler_t *> signalers;
+    std::vector<zmq::signaler_t *> _signalers;

     // Disable copying of mailbox_t object.
     mailbox_safe_t (const mailbox_safe_t &);
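Because several threads may receive from a thread-safe mailbox, the condition-variable wait can succeed and yet another reader may drain the command first, in which case recv () returns -1 with errno set to EAGAIN. A caller-side sketch, not libzmq source; `mailbox` and `timeout_ms` are assumed to be in scope, and the retry count is bounded because an expired timeout is also reported as EAGAIN:

// Illustrative caller loop around mailbox_safe_t::recv ().
command_t cmd;
int rc = -1;
for (int attempt = 0; attempt != 3; attempt++) {
    rc = mailbox->recv (&cmd, timeout_ms);
    if (rc == 0)
        break; // got a command
    if (errno != EAGAIN && errno != EINTR)
        break; // hard error
    // EAGAIN: another reader won the race (or the timeout expired);
    // EINTR: interrupted by a signal. Either way it is safe to retry.
}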
@@ -48,20 +48,20 @@ zmq::mechanism_t::~mechanism_t ()
 void zmq::mechanism_t::set_peer_routing_id (const void *id_ptr_,
                                             size_t id_size_)
 {
-    routing_id.set (static_cast<const unsigned char *> (id_ptr_), id_size_);
+    _routing_id.set (static_cast<const unsigned char *> (id_ptr_), id_size_);
 }

 void zmq::mechanism_t::peer_routing_id (msg_t *msg_)
 {
-    const int rc = msg_->init_size (routing_id.size ());
+    const int rc = msg_->init_size (_routing_id.size ());
     errno_assert (rc == 0);
-    memcpy (msg_->data (), routing_id.data (), routing_id.size ());
+    memcpy (msg_->data (), _routing_id.data (), _routing_id.size ());
     msg_->set_flags (msg_t::routing_id);
 }

 void zmq::mechanism_t::set_user_id (const void *data_, size_t size_)
 {
-    user_id.set (static_cast<const unsigned char *> (data_), size_);
+    _user_id.set (static_cast<const unsigned char *> (data_), size_);
     zap_properties.ZMQ_MAP_INSERT_OR_EMPLACE (
       std::string (ZMQ_MSG_PROPERTY_USER_ID),
       std::string ((char *) data_, size_));
@@ -69,7 +69,7 @@ void zmq::mechanism_t::set_user_id (const void *data_, size_t size_)

 const zmq::blob_t &zmq::mechanism_t::get_user_id () const
 {
-    return user_id;
+    return _user_id;
 }

 const char socket_type_pair[] = "PAIR";
@@ -132,9 +132,9 @@ class mechanism_t
     const options_t options;

   private:
-    blob_t routing_id;
+    blob_t _routing_id;

-    blob_t user_id;
+    blob_t _user_id;

     // Returns true iff socket associated with the mechanism
     // is compatible with a given socket type 'type_'.
@@ -30,14 +30,14 @@
 #include "precompiled.hpp"
 #include "metadata.hpp"

-zmq::metadata_t::metadata_t (const dict_t &dict_) : ref_cnt (1), dict (dict_)
+zmq::metadata_t::metadata_t (const dict_t &dict_) : _ref_cnt (1), _dict (dict_)
 {
 }

 const char *zmq::metadata_t::get (const std::string &property_) const
 {
-    dict_t::const_iterator it = dict.find (property_);
-    if (it == dict.end ()) {
+    dict_t::const_iterator it = _dict.find (property_);
+    if (it == _dict.end ()) {
         /** \todo remove this when support for the deprecated name "Identity" is dropped */
         if (property_ == "Identity")
             return get (ZMQ_MSG_PROPERTY_ROUTING_ID);
@@ -49,10 +49,10 @@ const char *zmq::metadata_t::get (const std::string &property_) const

 void zmq::metadata_t::add_ref ()
 {
-    ref_cnt.add (1);
+    _ref_cnt.add (1);
 }

 bool zmq::metadata_t::drop_ref ()
 {
-    return !ref_cnt.sub (1);
+    return !_ref_cnt.sub (1);
 }
@@ -59,10 +59,10 @@ class metadata_t
     metadata_t &operator= (const metadata_t &);

     // Reference counter.
-    atomic_counter_t ref_cnt;
+    atomic_counter_t _ref_cnt;

     // Dictionary holding metadata.
-    const dict_t dict;
+    const dict_t _dict;
 };
 }
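metadata_t is shared between messages through an intrusive atomic reference count: add_ref () bumps it, drop_ref () returns true exactly once, when the count reaches zero, and that caller deletes the object. A sketch of the same contract with std::atomic; `shared_meta_t` is a hypothetical stand-in, not libzmq code:

// Sketch of the add_ref/drop_ref contract using std::atomic.
#include <atomic>

class shared_meta_t
{
  public:
    shared_meta_t () : _ref_cnt (1) {} // the creator holds the first reference

    void add_ref () { _ref_cnt.fetch_add (1, std::memory_order_relaxed); }

    // Returns true when the last reference is gone; the caller must delete.
    bool drop_ref ()
    {
        return _ref_cnt.fetch_sub (1, std::memory_order_acq_rel) == 1;
    }

  private:
    std::atomic<int> _ref_cnt;
};

// Usage mirrors msg_t::close () below:
//     if (meta->drop_ref ())
//         delete meta;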
305 src/msg.cpp
@@ -49,7 +49,7 @@ typedef char

 bool zmq::msg_t::check () const
 {
-    return u.base.type >= type_min && u.base.type <= type_max;
+    return _u.base.type >= type_min && _u.base.type <= type_max;
 }

 int zmq::msg_t::init (void *data_,
@@ -76,44 +76,44 @@ int zmq::msg_t::init (void *data_,

 int zmq::msg_t::init ()
 {
-    u.vsm.metadata = NULL;
-    u.vsm.type = type_vsm;
-    u.vsm.flags = 0;
-    u.vsm.size = 0;
-    u.vsm.group[0] = '\0';
-    u.vsm.routing_id = 0;
+    _u.vsm.metadata = NULL;
+    _u.vsm.type = type_vsm;
+    _u.vsm.flags = 0;
+    _u.vsm.size = 0;
+    _u.vsm.group[0] = '\0';
+    _u.vsm.routing_id = 0;
     return 0;
 }

 int zmq::msg_t::init_size (size_t size_)
 {
     if (size_ <= max_vsm_size) {
-        u.vsm.metadata = NULL;
-        u.vsm.type = type_vsm;
-        u.vsm.flags = 0;
-        u.vsm.size = static_cast<unsigned char> (size_);
-        u.vsm.group[0] = '\0';
-        u.vsm.routing_id = 0;
+        _u.vsm.metadata = NULL;
+        _u.vsm.type = type_vsm;
+        _u.vsm.flags = 0;
+        _u.vsm.size = static_cast<unsigned char> (size_);
+        _u.vsm.group[0] = '\0';
+        _u.vsm.routing_id = 0;
     } else {
-        u.lmsg.metadata = NULL;
-        u.lmsg.type = type_lmsg;
-        u.lmsg.flags = 0;
-        u.lmsg.group[0] = '\0';
-        u.lmsg.routing_id = 0;
-        u.lmsg.content = NULL;
+        _u.lmsg.metadata = NULL;
+        _u.lmsg.type = type_lmsg;
+        _u.lmsg.flags = 0;
+        _u.lmsg.group[0] = '\0';
+        _u.lmsg.routing_id = 0;
+        _u.lmsg.content = NULL;
         if (sizeof (content_t) + size_ > size_)
-            u.lmsg.content =
+            _u.lmsg.content =
               static_cast<content_t *> (malloc (sizeof (content_t) + size_));
-        if (unlikely (!u.lmsg.content)) {
+        if (unlikely (!_u.lmsg.content)) {
             errno = ENOMEM;
             return -1;
         }

-        u.lmsg.content->data = u.lmsg.content + 1;
-        u.lmsg.content->size = size_;
-        u.lmsg.content->ffn = NULL;
-        u.lmsg.content->hint = NULL;
-        new (&u.lmsg.content->refcnt) zmq::atomic_counter_t ();
+        _u.lmsg.content->data = _u.lmsg.content + 1;
+        _u.lmsg.content->size = size_;
+        _u.lmsg.content->ffn = NULL;
+        _u.lmsg.content->hint = NULL;
+        new (&_u.lmsg.content->refcnt) zmq::atomic_counter_t ();
     }
     return 0;
 }
@@ -127,18 +127,18 @@ int zmq::msg_t::init_external_storage (content_t *content_,
     zmq_assert (NULL != data_);
     zmq_assert (NULL != content_);

-    u.zclmsg.metadata = NULL;
-    u.zclmsg.type = type_zclmsg;
-    u.zclmsg.flags = 0;
-    u.zclmsg.group[0] = '\0';
-    u.zclmsg.routing_id = 0;
+    _u.zclmsg.metadata = NULL;
+    _u.zclmsg.type = type_zclmsg;
+    _u.zclmsg.flags = 0;
+    _u.zclmsg.group[0] = '\0';
+    _u.zclmsg.routing_id = 0;

-    u.zclmsg.content = content_;
-    u.zclmsg.content->data = data_;
-    u.zclmsg.content->size = size_;
-    u.zclmsg.content->ffn = ffn_;
-    u.zclmsg.content->hint = hint_;
-    new (&u.zclmsg.content->refcnt) zmq::atomic_counter_t ();
+    _u.zclmsg.content = content_;
+    _u.zclmsg.content->data = data_;
+    _u.zclmsg.content->size = size_;
+    _u.zclmsg.content->ffn = ffn_;
+    _u.zclmsg.content->hint = hint_;
+    new (&_u.zclmsg.content->refcnt) zmq::atomic_counter_t ();

     return 0;
 }
@@ -154,61 +154,62 @@ int zmq::msg_t::init_data (void *data_,

     // Initialize constant message if there's no need to deallocate
     if (ffn_ == NULL) {
-        u.cmsg.metadata = NULL;
-        u.cmsg.type = type_cmsg;
-        u.cmsg.flags = 0;
-        u.cmsg.data = data_;
-        u.cmsg.size = size_;
-        u.cmsg.group[0] = '\0';
-        u.cmsg.routing_id = 0;
+        _u.cmsg.metadata = NULL;
+        _u.cmsg.type = type_cmsg;
+        _u.cmsg.flags = 0;
+        _u.cmsg.data = data_;
+        _u.cmsg.size = size_;
+        _u.cmsg.group[0] = '\0';
+        _u.cmsg.routing_id = 0;
     } else {
-        u.lmsg.metadata = NULL;
-        u.lmsg.type = type_lmsg;
-        u.lmsg.flags = 0;
-        u.lmsg.group[0] = '\0';
-        u.lmsg.routing_id = 0;
-        u.lmsg.content = static_cast<content_t *> (malloc (sizeof (content_t)));
-        if (!u.lmsg.content) {
+        _u.lmsg.metadata = NULL;
+        _u.lmsg.type = type_lmsg;
+        _u.lmsg.flags = 0;
+        _u.lmsg.group[0] = '\0';
+        _u.lmsg.routing_id = 0;
+        _u.lmsg.content =
+          static_cast<content_t *> (malloc (sizeof (content_t)));
+        if (!_u.lmsg.content) {
             errno = ENOMEM;
             return -1;
         }

-        u.lmsg.content->data = data_;
-        u.lmsg.content->size = size_;
-        u.lmsg.content->ffn = ffn_;
-        u.lmsg.content->hint = hint_;
-        new (&u.lmsg.content->refcnt) zmq::atomic_counter_t ();
+        _u.lmsg.content->data = data_;
+        _u.lmsg.content->size = size_;
+        _u.lmsg.content->ffn = ffn_;
+        _u.lmsg.content->hint = hint_;
+        new (&_u.lmsg.content->refcnt) zmq::atomic_counter_t ();
     }
     return 0;
 }

 int zmq::msg_t::init_delimiter ()
 {
-    u.delimiter.metadata = NULL;
-    u.delimiter.type = type_delimiter;
-    u.delimiter.flags = 0;
-    u.delimiter.group[0] = '\0';
-    u.delimiter.routing_id = 0;
+    _u.delimiter.metadata = NULL;
+    _u.delimiter.type = type_delimiter;
+    _u.delimiter.flags = 0;
+    _u.delimiter.group[0] = '\0';
+    _u.delimiter.routing_id = 0;
     return 0;
 }

 int zmq::msg_t::init_join ()
 {
-    u.base.metadata = NULL;
-    u.base.type = type_join;
-    u.base.flags = 0;
-    u.base.group[0] = '\0';
-    u.base.routing_id = 0;
+    _u.base.metadata = NULL;
+    _u.base.type = type_join;
+    _u.base.flags = 0;
+    _u.base.group[0] = '\0';
+    _u.base.routing_id = 0;
     return 0;
 }

 int zmq::msg_t::init_leave ()
 {
-    u.base.metadata = NULL;
-    u.base.type = type_leave;
-    u.base.flags = 0;
-    u.base.group[0] = '\0';
-    u.base.routing_id = 0;
+    _u.base.metadata = NULL;
+    _u.base.type = type_leave;
+    _u.base.flags = 0;
+    _u.base.group[0] = '\0';
+    _u.base.routing_id = 0;
     return 0;
 }

@@ -220,47 +221,47 @@ int zmq::msg_t::close ()
         return -1;
     }

-    if (u.base.type == type_lmsg) {
+    if (_u.base.type == type_lmsg) {
         // If the content is not shared, or if it is shared and the reference
         // count has dropped to zero, deallocate it.
-        if (!(u.lmsg.flags & msg_t::shared)
-            || !u.lmsg.content->refcnt.sub (1)) {
+        if (!(_u.lmsg.flags & msg_t::shared)
+            || !_u.lmsg.content->refcnt.sub (1)) {
             // We used "placement new" operator to initialize the reference
             // counter so we call the destructor explicitly now.
-            u.lmsg.content->refcnt.~atomic_counter_t ();
+            _u.lmsg.content->refcnt.~atomic_counter_t ();

-            if (u.lmsg.content->ffn)
-                u.lmsg.content->ffn (u.lmsg.content->data,
-                                     u.lmsg.content->hint);
-            free (u.lmsg.content);
+            if (_u.lmsg.content->ffn)
+                _u.lmsg.content->ffn (_u.lmsg.content->data,
+                                      _u.lmsg.content->hint);
+            free (_u.lmsg.content);
         }
     }

     if (is_zcmsg ()) {
-        zmq_assert (u.zclmsg.content->ffn);
+        zmq_assert (_u.zclmsg.content->ffn);

         // If the content is not shared, or if it is shared and the reference
         // count has dropped to zero, deallocate it.
-        if (!(u.zclmsg.flags & msg_t::shared)
-            || !u.zclmsg.content->refcnt.sub (1)) {
+        if (!(_u.zclmsg.flags & msg_t::shared)
+            || !_u.zclmsg.content->refcnt.sub (1)) {
             // We used "placement new" operator to initialize the reference
             // counter so we call the destructor explicitly now.
-            u.zclmsg.content->refcnt.~atomic_counter_t ();
+            _u.zclmsg.content->refcnt.~atomic_counter_t ();

-            u.zclmsg.content->ffn (u.zclmsg.content->data,
-                                   u.zclmsg.content->hint);
+            _u.zclmsg.content->ffn (_u.zclmsg.content->data,
+                                    _u.zclmsg.content->hint);
         }
     }

-    if (u.base.metadata != NULL) {
-        if (u.base.metadata->drop_ref ()) {
-            LIBZMQ_DELETE (u.base.metadata);
+    if (_u.base.metadata != NULL) {
+        if (_u.base.metadata->drop_ref ()) {
+            LIBZMQ_DELETE (_u.base.metadata);
         }
-        u.base.metadata = NULL;
+        _u.base.metadata = NULL;
     }

     // Make the message invalid.
-    u.base.type = 0;
+    _u.base.type = 0;

     return 0;
 }
@@ -298,29 +299,29 @@ int zmq::msg_t::copy (msg_t &src_)
     if (unlikely (rc < 0))
         return rc;

-    if (src_.u.base.type == type_lmsg) {
+    if (src_._u.base.type == type_lmsg) {
         // One reference is added to shared messages. Non-shared messages
         // are turned into shared messages and reference count is set to 2.
-        if (src_.u.lmsg.flags & msg_t::shared)
-            src_.u.lmsg.content->refcnt.add (1);
+        if (src_._u.lmsg.flags & msg_t::shared)
+            src_._u.lmsg.content->refcnt.add (1);
         else {
-            src_.u.lmsg.flags |= msg_t::shared;
-            src_.u.lmsg.content->refcnt.set (2);
+            src_._u.lmsg.flags |= msg_t::shared;
+            src_._u.lmsg.content->refcnt.set (2);
         }
     }

     if (src_.is_zcmsg ()) {
         // One reference is added to shared messages. Non-shared messages
         // are turned into shared messages and reference count is set to 2.
-        if (src_.u.zclmsg.flags & msg_t::shared)
+        if (src_._u.zclmsg.flags & msg_t::shared)
             src_.refcnt ()->add (1);
         else {
-            src_.u.zclmsg.flags |= msg_t::shared;
+            src_._u.zclmsg.flags |= msg_t::shared;
             src_.refcnt ()->set (2);
         }
     }
-    if (src_.u.base.metadata != NULL)
-        src_.u.base.metadata->add_ref ();
+    if (src_._u.base.metadata != NULL)
+        src_._u.base.metadata->add_ref ();

     *this = src_;

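The copy-on-share rule above means the first copy of a long message flips it to shared with the count set to 2 (original plus copy); every later copy just adds one. A worked illustration against the public C API, which fronts msg_t::copy; it assumes a default build where 1024 bytes exceeds the inline VSM threshold, and error handling is elided:

// Refcount progression when copying a heap-backed message.
#include <zmq.h>

int main ()
{
    zmq_msg_t a, b, c;
    zmq_msg_init_size (&a, 1024); // lmsg: heap buffer, not yet shared

    zmq_msg_init (&b);
    zmq_msg_copy (&b, &a); // a becomes shared, refcnt set to 2

    zmq_msg_init (&c);
    zmq_msg_copy (&c, &a); // already shared: refcnt 2 -> 3

    zmq_msg_close (&a); // 3 -> 2
    zmq_msg_close (&b); // 2 -> 1
    zmq_msg_close (&c); // 1 -> 0: the buffer is freed here
    return 0;
}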
@@ -332,15 +333,15 @@ void *zmq::msg_t::data ()
     // Check the validity of the message.
     zmq_assert (check ());

-    switch (u.base.type) {
+    switch (_u.base.type) {
         case type_vsm:
-            return u.vsm.data;
+            return _u.vsm.data;
         case type_lmsg:
-            return u.lmsg.content->data;
+            return _u.lmsg.content->data;
         case type_cmsg:
-            return u.cmsg.data;
+            return _u.cmsg.data;
         case type_zclmsg:
-            return u.zclmsg.content->data;
+            return _u.zclmsg.content->data;
         default:
             zmq_assert (false);
             return NULL;
@@ -352,15 +353,15 @@ size_t zmq::msg_t::size () const
     // Check the validity of the message.
     zmq_assert (check ());

-    switch (u.base.type) {
+    switch (_u.base.type) {
         case type_vsm:
-            return u.vsm.size;
+            return _u.vsm.size;
         case type_lmsg:
-            return u.lmsg.content->size;
+            return _u.lmsg.content->size;
         case type_zclmsg:
-            return u.zclmsg.content->size;
+            return _u.zclmsg.content->size;
         case type_cmsg:
-            return u.cmsg.size;
+            return _u.cmsg.size;
         default:
             zmq_assert (false);
             return 0;
@@ -369,80 +370,80 @@ size_t zmq::msg_t::size () const

 unsigned char zmq::msg_t::flags () const
 {
-    return u.base.flags;
+    return _u.base.flags;
 }

 void zmq::msg_t::set_flags (unsigned char flags_)
 {
-    u.base.flags |= flags_;
+    _u.base.flags |= flags_;
 }

 void zmq::msg_t::reset_flags (unsigned char flags_)
 {
-    u.base.flags &= ~flags_;
+    _u.base.flags &= ~flags_;
 }

 zmq::metadata_t *zmq::msg_t::metadata () const
 {
-    return u.base.metadata;
+    return _u.base.metadata;
 }

 void zmq::msg_t::set_metadata (zmq::metadata_t *metadata_)
 {
     assert (metadata_ != NULL);
-    assert (u.base.metadata == NULL);
+    assert (_u.base.metadata == NULL);
     metadata_->add_ref ();
-    u.base.metadata = metadata_;
+    _u.base.metadata = metadata_;
 }

 void zmq::msg_t::reset_metadata ()
 {
-    if (u.base.metadata) {
-        if (u.base.metadata->drop_ref ()) {
-            LIBZMQ_DELETE (u.base.metadata);
+    if (_u.base.metadata) {
+        if (_u.base.metadata->drop_ref ()) {
+            LIBZMQ_DELETE (_u.base.metadata);
         }
-        u.base.metadata = NULL;
+        _u.base.metadata = NULL;
     }
 }

 bool zmq::msg_t::is_routing_id () const
 {
-    return (u.base.flags & routing_id) == routing_id;
+    return (_u.base.flags & routing_id) == routing_id;
 }

 bool zmq::msg_t::is_credential () const
 {
-    return (u.base.flags & credential) == credential;
+    return (_u.base.flags & credential) == credential;
 }

 bool zmq::msg_t::is_delimiter () const
 {
-    return u.base.type == type_delimiter;
+    return _u.base.type == type_delimiter;
 }

 bool zmq::msg_t::is_vsm () const
 {
-    return u.base.type == type_vsm;
+    return _u.base.type == type_vsm;
 }

 bool zmq::msg_t::is_cmsg () const
 {
-    return u.base.type == type_cmsg;
+    return _u.base.type == type_cmsg;
 }

 bool zmq::msg_t::is_zcmsg () const
 {
-    return u.base.type == type_zclmsg;
+    return _u.base.type == type_zclmsg;
 }

 bool zmq::msg_t::is_join () const
 {
-    return u.base.type == type_join;
+    return _u.base.type == type_join;
 }

 bool zmq::msg_t::is_leave () const
 {
-    return u.base.type == type_leave;
+    return _u.base.type == type_leave;
 }

 void zmq::msg_t::add_refs (int refs_)
@@ -450,7 +451,7 @@ void zmq::msg_t::add_refs (int refs_)
     zmq_assert (refs_ >= 0);

     // Operation not supported for messages with metadata.
-    zmq_assert (u.base.metadata == NULL);
+    zmq_assert (_u.base.metadata == NULL);

     // No copies required.
     if (!refs_)
@@ -458,12 +459,12 @@ void zmq::msg_t::add_refs (int refs_)

     // VSMs, CMSGs and delimiters can be copied straight away. The only
     // message types that need special care are long messages.
-    if (u.base.type == type_lmsg || is_zcmsg ()) {
-        if (u.base.flags & msg_t::shared)
+    if (_u.base.type == type_lmsg || is_zcmsg ()) {
+        if (_u.base.flags & msg_t::shared)
             refcnt ()->add (refs_);
         else {
             refcnt ()->set (refs_ + 1);
-            u.base.flags |= msg_t::shared;
+            _u.base.flags |= msg_t::shared;
         }
     }
 }
@@ -473,37 +474,37 @@ bool zmq::msg_t::rm_refs (int refs_)
     zmq_assert (refs_ >= 0);

     // Operation not supported for messages with metadata.
-    zmq_assert (u.base.metadata == NULL);
+    zmq_assert (_u.base.metadata == NULL);

     // No copies required.
     if (!refs_)
         return true;

     // If there's only one reference close the message.
-    if ((u.base.type != type_zclmsg && u.base.type != type_lmsg)
-        || !(u.base.flags & msg_t::shared)) {
+    if ((_u.base.type != type_zclmsg && _u.base.type != type_lmsg)
+        || !(_u.base.flags & msg_t::shared)) {
         close ();
         return false;
     }

     // The only message types that need special care are long and zcopy messages.
-    if (u.base.type == type_lmsg && !u.lmsg.content->refcnt.sub (refs_)) {
+    if (_u.base.type == type_lmsg && !_u.lmsg.content->refcnt.sub (refs_)) {
         // We used "placement new" operator to initialize the reference
         // counter so we call the destructor explicitly now.
-        u.lmsg.content->refcnt.~atomic_counter_t ();
+        _u.lmsg.content->refcnt.~atomic_counter_t ();

-        if (u.lmsg.content->ffn)
-            u.lmsg.content->ffn (u.lmsg.content->data, u.lmsg.content->hint);
-        free (u.lmsg.content);
+        if (_u.lmsg.content->ffn)
+            _u.lmsg.content->ffn (_u.lmsg.content->data, _u.lmsg.content->hint);
+        free (_u.lmsg.content);

         return false;
     }

-    if (is_zcmsg () && !u.zclmsg.content->refcnt.sub (refs_)) {
+    if (is_zcmsg () && !_u.zclmsg.content->refcnt.sub (refs_)) {
         // storage for the refcnt is provided externally
-        if (u.zclmsg.content->ffn) {
-            u.zclmsg.content->ffn (u.zclmsg.content->data,
-                                   u.zclmsg.content->hint);
+        if (_u.zclmsg.content->ffn) {
+            _u.zclmsg.content->ffn (_u.zclmsg.content->data,
+                                    _u.zclmsg.content->hint);
         }

         return false;
@@ -514,13 +515,13 @@ bool zmq::msg_t::rm_refs (int refs_)

 uint32_t zmq::msg_t::get_routing_id ()
 {
-    return u.base.routing_id;
+    return _u.base.routing_id;
 }

 int zmq::msg_t::set_routing_id (uint32_t routing_id_)
 {
     if (routing_id_) {
-        u.base.routing_id = routing_id_;
+        _u.base.routing_id = routing_id_;
         return 0;
     }
     errno = EINVAL;
@@ -529,13 +530,13 @@ int zmq::msg_t::set_routing_id (uint32_t routing_id_)

 int zmq::msg_t::reset_routing_id ()
 {
-    u.base.routing_id = 0;
+    _u.base.routing_id = 0;
     return 0;
 }

 const char *zmq::msg_t::group ()
 {
-    return u.base.group;
+    return _u.base.group;
 }

 int zmq::msg_t::set_group (const char *group_)
@@ -550,19 +551,19 @@ int zmq::msg_t::set_group (const char *group_, size_t length_)
         return -1;
     }

-    strncpy (u.base.group, group_, length_);
-    u.base.group[length_] = '\0';
+    strncpy (_u.base.group, group_, length_);
+    _u.base.group[length_] = '\0';

     return 0;
 }

 zmq::atomic_counter_t *zmq::msg_t::refcnt ()
 {
-    switch (u.base.type) {
+    switch (_u.base.type) {
         case type_lmsg:
-            return &u.lmsg.content->refcnt;
+            return &_u.lmsg.content->refcnt;
         case type_zclmsg:
-            return &u.zclmsg.content->refcnt;
+            return &_u.zclmsg.content->refcnt;
         default:
             zmq_assert (false);
             return NULL;
@@ -249,7 +249,7 @@ class msg_t
             char group[16];
             uint32_t routing_id;
         } delimiter;
-    } u;
+    } _u;
 };

 inline int close_and_return (zmq::msg_t *msg_, int echo_)
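Every msg_t variant starts with the same members, which is why code throughout msg.cpp can dispatch on `_u.base.type` no matter which variant was initialized. A stripped-down sketch of that tagged-union layout; the field set is abbreviated and ordering simplified, so treat it as an illustration rather than the real src/msg.hpp layout (the type_vsm/type_lmsg values do mirror it):

// Stripped-down sketch of the msg_t tagged union.
struct toy_msg_t
{
    enum
    {
        type_vsm = 101, // very small message, payload stored inline
        type_lmsg = 102 // long message, payload in a heap-allocated block
    };

    union
    {
        struct
        {
            unsigned char type;
            unsigned char flags;
        } base; // common prefix, safe to inspect for any variant

        struct
        {
            unsigned char type;
            unsigned char flags;
            unsigned char size;
            unsigned char data[33]; // inline payload, no allocation
        } vsm;

        struct
        {
            unsigned char type;
            unsigned char flags;
            void *content; // heap block holding data, size and refcount
        } lmsg;
    } _u;
};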
@@ -43,23 +43,23 @@ namespace zmq
 class mutex_t
 {
   public:
-    inline mutex_t () { InitializeCriticalSection (&cs); }
+    inline mutex_t () { InitializeCriticalSection (&_cs); }

-    inline ~mutex_t () { DeleteCriticalSection (&cs); }
+    inline ~mutex_t () { DeleteCriticalSection (&_cs); }

-    inline void lock () { EnterCriticalSection (&cs); }
+    inline void lock () { EnterCriticalSection (&_cs); }

     inline bool try_lock ()
     {
-        return (TryEnterCriticalSection (&cs)) ? true : false;
+        return (TryEnterCriticalSection (&_cs)) ? true : false;
     }

-    inline void unlock () { LeaveCriticalSection (&cs); }
+    inline void unlock () { LeaveCriticalSection (&_cs); }

-    inline CRITICAL_SECTION *get_cs () { return &cs; }
+    inline CRITICAL_SECTION *get_cs () { return &_cs; }

   private:
-    CRITICAL_SECTION cs;
+    CRITICAL_SECTION _cs;

     // Disable copy construction and assignment.
     mutex_t (const mutex_t &);
@@ -79,26 +79,26 @@ class mutex_t
   public:
     inline mutex_t ()
     {
-        m_semId =
+        _semId =
           semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
     }

-    inline ~mutex_t () { semDelete (m_semId); }
+    inline ~mutex_t () { semDelete (_semId); }

-    inline void lock () { semTake (m_semId, WAIT_FOREVER); }
+    inline void lock () { semTake (_semId, WAIT_FOREVER); }

     inline bool try_lock ()
     {
-        if (semTake (m_semId, NO_WAIT) == OK) {
+        if (semTake (_semId, NO_WAIT) == OK) {
             return true;
         }
         return false;
     }

-    inline void unlock () { semGive (m_semId); }
+    inline void unlock () { semGive (_semId); }

   private:
-    SEM_ID m_semId;
+    SEM_ID _semId;

     // Disable copy construction and assignment.
     mutex_t (const mutex_t &);
@@ -117,34 +117,34 @@ class mutex_t
   public:
     inline mutex_t ()
     {
-        int rc = pthread_mutexattr_init (&attr);
+        int rc = pthread_mutexattr_init (&_attr);
         posix_assert (rc);

-        rc = pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
+        rc = pthread_mutexattr_settype (&_attr, PTHREAD_MUTEX_RECURSIVE);
         posix_assert (rc);

-        rc = pthread_mutex_init (&mutex, &attr);
+        rc = pthread_mutex_init (&_mutex, &_attr);
         posix_assert (rc);
     }

     inline ~mutex_t ()
     {
-        int rc = pthread_mutex_destroy (&mutex);
+        int rc = pthread_mutex_destroy (&_mutex);
         posix_assert (rc);

-        rc = pthread_mutexattr_destroy (&attr);
+        rc = pthread_mutexattr_destroy (&_attr);
         posix_assert (rc);
     }

     inline void lock ()
     {
-        int rc = pthread_mutex_lock (&mutex);
+        int rc = pthread_mutex_lock (&_mutex);
         posix_assert (rc);
     }

     inline bool try_lock ()
     {
-        int rc = pthread_mutex_trylock (&mutex);
+        int rc = pthread_mutex_trylock (&_mutex);
         if (rc == EBUSY)
             return false;

@@ -154,15 +154,15 @@ class mutex_t

     inline void unlock ()
     {
-        int rc = pthread_mutex_unlock (&mutex);
+        int rc = pthread_mutex_unlock (&_mutex);
         posix_assert (rc);
     }

-    inline pthread_mutex_t *get_mutex () { return &mutex; }
+    inline pthread_mutex_t *get_mutex () { return &_mutex; }

   private:
-    pthread_mutex_t mutex;
-    pthread_mutexattr_t attr;
+    pthread_mutex_t _mutex;
+    pthread_mutexattr_t _attr;

     // Disable copy construction and assignment.
     mutex_t (const mutex_t &);
@@ -177,12 +177,12 @@ namespace zmq
 {
 struct scoped_lock_t
 {
-    scoped_lock_t (mutex_t &mutex_) : mutex (mutex_) { mutex.lock (); }
+    scoped_lock_t (mutex_t &mutex_) : _mutex (mutex_) { _mutex.lock (); }

-    ~scoped_lock_t () { mutex.unlock (); }
+    ~scoped_lock_t () { _mutex.unlock (); }

   private:
-    mutex_t &mutex;
+    mutex_t &_mutex;

     // Disable copy construction and assignment.
     scoped_lock_t (const scoped_lock_t &);
@@ -192,20 +192,20 @@ struct scoped_lock_t

 struct scoped_optional_lock_t
 {
-    scoped_optional_lock_t (mutex_t *mutex_) : mutex (mutex_)
+    scoped_optional_lock_t (mutex_t *mutex_) : _mutex (mutex_)
     {
-        if (mutex != NULL)
-            mutex->lock ();
+        if (_mutex != NULL)
+            _mutex->lock ();
     }

     ~scoped_optional_lock_t ()
     {
-        if (mutex != NULL)
-            mutex->unlock ();
+        if (_mutex != NULL)
+            _mutex->unlock ();
     }

   private:
-    mutex_t *mutex;
+    mutex_t *_mutex;

     // Disable copy construction and assignment.
     scoped_optional_lock_t (const scoped_lock_t &);
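The scoped_lock_t constructor is a good illustration of the problem this commit fixes: `scoped_lock_t (mutex_t &mutex_) : mutex (mutex_)` had the member, the parameter and the type all spelled nearly the same; with `_mutex` and `mutex_` each role reads unambiguously. A usage sketch for the RAII wrappers; do_work () and the mutexes are hypothetical stand-ins:

// Usage sketch for the RAII wrappers above.
#include "mutex.hpp"

static zmq::mutex_t some_shared_mutex;

void do_work (zmq::mutex_t *optional_sync_)
{
    {
        zmq::scoped_lock_t locker (some_shared_mutex);
        // ... critical section; unlocks when locker leaves scope,
        // on every return path and on exceptions.
    }

    // scoped_optional_lock_t tolerates a NULL mutex: locking becomes a
    // no-op, letting thread-safe and plain code paths share one body.
    zmq::scoped_optional_lock_t maybe_locker (optional_sync_);
    // ... work that is synchronised only when optional_sync_ != NULL
}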
@@ -43,12 +43,12 @@ zmq::null_mechanism_t::null_mechanism_t (session_base_t *session_,
                                          const options_t &options_) :
     mechanism_base_t (session_, options_),
     zap_client_t (session_, peer_address_, options_),
-    ready_command_sent (false),
-    error_command_sent (false),
-    ready_command_received (false),
-    error_command_received (false),
-    zap_request_sent (false),
-    zap_reply_received (false)
+    _ready_command_sent (false),
+    _error_command_sent (false),
+    _ready_command_received (false),
+    _error_command_received (false),
+    _zap_request_sent (false),
+    _zap_reply_received (false)
 {
 }

@@ -58,13 +58,13 @@ zmq::null_mechanism_t::~null_mechanism_t ()

 int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_)
 {
-    if (ready_command_sent || error_command_sent) {
+    if (_ready_command_sent || _error_command_sent) {
         errno = EAGAIN;
         return -1;
     }

-    if (zap_required () && !zap_reply_received) {
-        if (zap_request_sent) {
+    if (zap_required () && !_zap_reply_received) {
+        if (_zap_request_sent) {
             errno = EAGAIN;
             return -1;
         }
@@ -78,7 +78,7 @@ int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_)
         }
         if (rc == 0) {
             send_zap_request ();
-            zap_request_sent = true;
+            _zap_request_sent = true;

             // TODO actually, it is quite unlikely that we can read the ZAP
             // reply already, but removing this has some strange side-effect
@@ -88,12 +88,12 @@ int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_)
             if (rc != 0)
                 return -1;

-            zap_reply_received = true;
+            _zap_reply_received = true;
         }
     }

-    if (zap_reply_received && status_code != "200") {
-        error_command_sent = true;
+    if (_zap_reply_received && status_code != "200") {
+        _error_command_sent = true;
         if (status_code != "300") {
             const size_t status_code_len = 3;
             const int rc = msg_->init_size (6 + 1 + status_code_len);
@@ -111,14 +111,14 @@ int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_)

     make_command_with_basic_properties (msg_, "\5READY", 6);

-    ready_command_sent = true;
+    _ready_command_sent = true;

     return 0;
 }

 int zmq::null_mechanism_t::process_handshake_command (msg_t *msg_)
 {
-    if (ready_command_received || error_command_received) {
+    if (_ready_command_received || _error_command_received) {
         session->get_socket ()->event_handshake_failed_protocol (
           session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
         errno = EPROTO;
@@ -153,7 +153,7 @@ int zmq::null_mechanism_t::process_handshake_command (msg_t *msg_)
 int zmq::null_mechanism_t::process_ready_command (
   const unsigned char *cmd_data_, size_t data_size_)
 {
-    ready_command_received = true;
+    _ready_command_received = true;
     return parse_metadata (cmd_data_ + 6, data_size_ - 6);
 }

@@ -179,29 +179,29 @@ int zmq::null_mechanism_t::process_error_command (
     }
     const char *error_reason = reinterpret_cast<const char *> (cmd_data_) + 7;
     handle_error_reason (error_reason, error_reason_len);
-    error_command_received = true;
+    _error_command_received = true;
     return 0;
 }

 int zmq::null_mechanism_t::zap_msg_available ()
 {
-    if (zap_reply_received) {
+    if (_zap_reply_received) {
         errno = EFSM;
         return -1;
     }
     const int rc = receive_and_process_zap_reply ();
     if (rc == 0)
-        zap_reply_received = true;
+        _zap_reply_received = true;
     return rc == -1 ? -1 : 0;
 }

 zmq::mechanism_t::status_t zmq::null_mechanism_t::status () const
 {
-    const bool command_sent = ready_command_sent || error_command_sent;
+    const bool command_sent = _ready_command_sent || _error_command_sent;
     const bool command_received =
-      ready_command_received || error_command_received;
+      _ready_command_received || _error_command_received;

-    if (ready_command_sent && ready_command_received)
+    if (_ready_command_sent && _ready_command_received)
         return mechanism_t::ready;
     if (command_sent && command_received)
         return error;
@@ -54,12 +54,12 @@ class null_mechanism_t : public zap_client_t
     virtual status_t status () const;

   private:
-    bool ready_command_sent;
-    bool error_command_sent;
-    bool ready_command_received;
-    bool error_command_received;
-    bool zap_request_sent;
-    bool zap_reply_received;
+    bool _ready_command_sent;
+    bool _error_command_sent;
+    bool _ready_command_received;
+    bool _error_command_received;
+    bool _zap_request_sent;
+    bool _zap_reply_received;

     int process_ready_command (const unsigned char *cmd_data_,
                                size_t data_size_);
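The six booleans encode a small handshake state machine, and status () derives the connection state from them instead of storing it separately. A condensed, illustrative restatement of that derivation (the enum values mirror mechanism_t::status_t; the free function is not libzmq code):

// status () as a pure function of the handshake flags.
enum status_t
{
    handshaking,
    ready,
    error
};

status_t derive_status (bool ready_sent_,
                        bool error_sent_,
                        bool ready_received_,
                        bool error_received_)
{
    if (ready_sent_ && ready_received_)
        return ready; // both peers completed the READY exchange
    if ((ready_sent_ || error_sent_) && (ready_received_ || error_received_))
        return error; // exchange finished, but at least one side errored
    return handshaking; // still mid-handshake
}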
@@ -39,13 +39,13 @@
 #include "session_base.hpp"
 #include "socket_base.hpp"

-zmq::object_t::object_t (ctx_t *ctx_, uint32_t tid_) : ctx (ctx_), tid (tid_)
+zmq::object_t::object_t (ctx_t *ctx_, uint32_t tid_) : _ctx (ctx_), _tid (tid_)
 {
 }

 zmq::object_t::object_t (object_t *parent_) :
-    ctx (parent_->ctx),
-    tid (parent_->tid)
+    _ctx (parent_->_ctx),
+    _tid (parent_->_tid)
 {
 }

@@ -55,17 +55,17 @@ zmq::object_t::~object_t ()

 uint32_t zmq::object_t::get_tid ()
 {
-    return tid;
+    return _tid;
 }

 void zmq::object_t::set_tid (uint32_t id_)
 {
-    tid = id_;
+    _tid = id_;
 }

 zmq::ctx_t *zmq::object_t::get_ctx ()
 {
-    return ctx;
+    return _ctx;
 }

 void zmq::object_t::process_command (command_t &cmd_)
@@ -157,46 +157,46 @@ void zmq::object_t::process_command (command_t &cmd_)
 int zmq::object_t::register_endpoint (const char *addr_,
                                       const endpoint_t &endpoint_)
 {
-    return ctx->register_endpoint (addr_, endpoint_);
+    return _ctx->register_endpoint (addr_, endpoint_);
 }

 int zmq::object_t::unregister_endpoint (const std::string &addr_,
                                         socket_base_t *socket_)
 {
-    return ctx->unregister_endpoint (addr_, socket_);
+    return _ctx->unregister_endpoint (addr_, socket_);
 }

 void zmq::object_t::unregister_endpoints (socket_base_t *socket_)
 {
-    return ctx->unregister_endpoints (socket_);
+    return _ctx->unregister_endpoints (socket_);
 }

 zmq::endpoint_t zmq::object_t::find_endpoint (const char *addr_)
 {
-    return ctx->find_endpoint (addr_);
+    return _ctx->find_endpoint (addr_);
 }

 void zmq::object_t::pend_connection (const std::string &addr_,
                                      const endpoint_t &endpoint_,
                                      pipe_t **pipes_)
 {
-    ctx->pend_connection (addr_, endpoint_, pipes_);
+    _ctx->pend_connection (addr_, endpoint_, pipes_);
 }

 void zmq::object_t::connect_pending (const char *addr_,
                                      zmq::socket_base_t *bind_socket_)
 {
-    return ctx->connect_pending (addr_, bind_socket_);
+    return _ctx->connect_pending (addr_, bind_socket_);
 }

 void zmq::object_t::destroy_socket (socket_base_t *socket_)
 {
-    ctx->destroy_socket (socket_);
+    _ctx->destroy_socket (socket_);
 }

 zmq::io_thread_t *zmq::object_t::choose_io_thread (uint64_t affinity_)
 {
-    return ctx->choose_io_thread (affinity_);
+    return _ctx->choose_io_thread (affinity_);
 }

 void zmq::object_t::send_stop ()
@@ -206,7 +206,7 @@ void zmq::object_t::send_stop ()
     command_t cmd;
     cmd.destination = this;
     cmd.type = command_t::stop;
-    ctx->send_command (tid, cmd);
+    _ctx->send_command (_tid, cmd);
 }

 void zmq::object_t::send_plug (own_t *destination_, bool inc_seqnum_)
@@ -352,7 +352,7 @@ void zmq::object_t::send_term_endpoint (own_t *destination_,
 void zmq::object_t::send_reap (class socket_base_t *socket_)
 {
     command_t cmd;
-    cmd.destination = ctx->get_reaper ();
+    cmd.destination = _ctx->get_reaper ();
     cmd.type = command_t::reap;
     cmd.args.reap.socket = socket_;
     send_command (cmd);
@@ -361,7 +361,7 @@ void zmq::object_t::send_reap (class socket_base_t *socket_)
 void zmq::object_t::send_reaped ()
 {
     command_t cmd;
-    cmd.destination = ctx->get_reaper ();
+    cmd.destination = _ctx->get_reaper ();
     cmd.type = command_t::reaped;
     send_command (cmd);
 }
@@ -379,7 +379,7 @@ void zmq::object_t::send_done ()
     command_t cmd;
     cmd.destination = NULL;
     cmd.type = command_t::done;
-    ctx->send_command (ctx_t::term_tid, cmd);
+    _ctx->send_command (ctx_t::term_tid, cmd);
 }

 void zmq::object_t::process_stop ()
@@ -474,5 +474,5 @@ void zmq::object_t::process_seqnum ()

 void zmq::object_t::send_command (command_t &cmd_)
 {
-    ctx->send_command (cmd_.destination->get_tid (), cmd_);
+    _ctx->send_command (cmd_.destination->get_tid (), cmd_);
 }
@@ -134,10 +134,10 @@ class object_t

   private:
     // Context provides access to the global state.
-    zmq::ctx_t *const ctx;
+    zmq::ctx_t *const _ctx;

     // Thread ID of the thread the object belongs to.
-    uint32_t tid;
+    uint32_t _tid;

     void send_command (command_t &cmd_);

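Every object carries the context pointer and the id of the thread it lives on, so all the send_* helpers above reduce to one pattern: fill a command_t, then let the context route it to the mailbox of the destination's thread. A condensed illustration of that pattern; the free function is not libzmq code, and the command_t field usage is taken from object.cpp above:

// Illustrative restatement of the send_* pattern.
void send_noop_command (zmq::ctx_t *ctx_, zmq::object_t *destination_)
{
    zmq::command_t cmd;
    cmd.destination = destination_;
    cmd.type = zmq::command_t::done; // any command type; 'done' as a stand-in
    // The destination's thread id selects the mailbox; the command
    // itself is what gets queued there.
    ctx_->send_command (destination_->get_tid (), cmd);
}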
74 src/own.cpp
@@ -34,22 +34,22 @@

 zmq::own_t::own_t (class ctx_t *parent_, uint32_t tid_) :
     object_t (parent_, tid_),
-    terminating (false),
-    sent_seqnum (0),
-    processed_seqnum (0),
-    owner (NULL),
-    term_acks (0)
+    _terminating (false),
+    _sent_seqnum (0),
+    _processed_seqnum (0),
+    _owner (NULL),
+    _term_acks (0)
 {
 }

 zmq::own_t::own_t (io_thread_t *io_thread_, const options_t &options_) :
     object_t (io_thread_),
     options (options_),
-    terminating (false),
-    sent_seqnum (0),
-    processed_seqnum (0),
-    owner (NULL),
-    term_acks (0)
+    _terminating (false),
+    _sent_seqnum (0),
+    _processed_seqnum (0),
+    _owner (NULL),
+    _term_acks (0)
 {
 }

@@ -59,20 +59,20 @@ zmq::own_t::~own_t ()

 void zmq::own_t::set_owner (own_t *owner_)
 {
-    zmq_assert (!owner);
-    owner = owner_;
+    zmq_assert (!_owner);
+    _owner = owner_;
 }

 void zmq::own_t::inc_seqnum ()
 {
     // This function may be called from a different thread!
-    sent_seqnum.add (1);
+    _sent_seqnum.add (1);
 }

 void zmq::own_t::process_seqnum ()
 {
     // Catch up with counter of processed commands.
-    processed_seqnum++;
+    _processed_seqnum++;

     // We may have caught up and still have pending term acks.
     check_term_acks ();
@@ -99,18 +99,18 @@ void zmq::own_t::process_term_req (own_t *object_)
 {
     // When shutting down we can ignore termination requests from owned
     // objects. The termination request was already sent to the object.
-    if (terminating)
+    if (_terminating)
         return;

     // If the I/O object is well and alive let's ask it to terminate.
-    owned_t::iterator it = std::find (owned.begin (), owned.end (), object_);
+    owned_t::iterator it = std::find (_owned.begin (), _owned.end (), object_);

     // If not found, we assume that termination request was already sent to
     // the object so we can safely ignore the request.
-    if (it == owned.end ())
+    if (it == _owned.end ())
         return;

-    owned.erase (it);
+    _owned.erase (it);
     register_term_acks (1);

     // Note that this object is the root of the (partial shutdown) thus, its
@@ -122,65 +122,65 @@ void zmq::own_t::process_own (own_t *object_)
 {
     // If the object is already being shut down, new owned objects are
     // immediately asked to terminate. Note that linger is set to zero.
-    if (terminating) {
+    if (_terminating) {
         register_term_acks (1);
         send_term (object_, 0);
         return;
     }

     // Store the reference to the owned object.
-    owned.insert (object_);
+    _owned.insert (object_);
 }

 void zmq::own_t::terminate ()
 {
     // If termination is already underway, there's no point
     // in starting it anew.
-    if (terminating)
+    if (_terminating)
         return;

     // As for the root of the ownership tree, there's no one to terminate it,
     // so it has to terminate itself.
-    if (!owner) {
+    if (!_owner) {
         process_term (options.linger.load ());
         return;
     }

     // If I am an owned object, I'll ask my owner to terminate me.
-    send_term_req (owner, this);
+    send_term_req (_owner, this);
 }

 bool zmq::own_t::is_terminating ()
 {
-    return terminating;
+    return _terminating;
 }

 void zmq::own_t::process_term (int linger_)
 {
     // Double termination should never happen.
-    zmq_assert (!terminating);
+    zmq_assert (!_terminating);

     // Send termination request to all owned objects.
-    for (owned_t::iterator it = owned.begin (); it != owned.end (); ++it)
+    for (owned_t::iterator it = _owned.begin (); it != _owned.end (); ++it)
         send_term (*it, linger_);
-    register_term_acks (static_cast<int> (owned.size ()));
-    owned.clear ();
+    register_term_acks (static_cast<int> (_owned.size ()));
+    _owned.clear ();

     // Start termination process and check whether by chance we cannot
     // terminate immediately.
-    terminating = true;
+    _terminating = true;
     check_term_acks ();
 }

 void zmq::own_t::register_term_acks (int count_)
 {
-    term_acks += count_;
+    _term_acks += count_;
 }

 void zmq::own_t::unregister_term_ack ()
 {
-    zmq_assert (term_acks > 0);
-    term_acks--;
+    zmq_assert (_term_acks > 0);
+    _term_acks--;

     // This may be the last ack we are waiting for before termination...
     check_term_acks ();
@@ -193,15 +193,15 @@ void zmq::own_t::process_term_ack ()

 void zmq::own_t::check_term_acks ()
 {
-    if (terminating && processed_seqnum == sent_seqnum.get ()
-        && term_acks == 0) {
+    if (_terminating && _processed_seqnum == _sent_seqnum.get ()
+        && _term_acks == 0) {
         // Sanity check. There should be no active children at this point.
-        zmq_assert (owned.empty ());
+        zmq_assert (_owned.empty ());

         // The root object has nobody to confirm the termination to.
         // Other nodes will confirm the termination to the owner.
-        if (owner)
-            send_term_ack (owner);
+        if (_owner)
+            send_term_ack (_owner);

         // Deallocate the resources.
         process_destroy ();
12 src/own.hpp
@@ -120,25 +120,25 @@ class own_t : public object_t

     // True if termination was already initiated. If so, we can destroy
     // the object if there are no more child objects or pending term acks.
-    bool terminating;
+    bool _terminating;

     // Sequence number of the last command sent to this object.
-    atomic_counter_t sent_seqnum;
+    atomic_counter_t _sent_seqnum;

     // Sequence number of the last command processed by this object.
-    uint64_t processed_seqnum;
+    uint64_t _processed_seqnum;

     // Socket owning this object. It's responsible for shutting down
     // this object.
-    own_t *owner;
+    own_t *_owner;

     // List of all objects owned by this socket. We are responsible
     // for deallocating them before we quit.
     typedef std::set<own_t *> owned_t;
-    owned_t owned;
+    owned_t _owned;

     // Number of events we have to get before we can destroy the object.
-    int term_acks;
+    int _term_acks;

     own_t (const own_t &);
     const own_t &operator= (const own_t &);
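Safe teardown in own_t requires three things at once: termination was requested, every command sent to the object has been processed (`_processed_seqnum == _sent_seqnum`), and every owned child has acknowledged its own termination (`_term_acks == 0`). An illustrative restatement of check_term_acks ()'s predicate; the helper itself is not libzmq code:

// An object may be destroyed only when all three conditions hold.
#include <stdint.h>

bool may_destroy (bool terminating_,
                  uint64_t processed_seqnum_,
                  uint64_t sent_seqnum_,
                  int term_acks_)
{
    return terminating_                         // shutdown was initiated
           && processed_seqnum_ == sent_seqnum_ // no command still in flight
           && term_acks_ == 0;                  // all children acked their term
}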
38 src/pair.cpp
@@ -36,15 +36,15 @@

 zmq::pair_t::pair_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
     socket_base_t (parent_, tid_, sid_),
-    pipe (NULL),
-    last_in (NULL)
+    _pipe (NULL),
+    _last_in (NULL)
 {
     options.type = ZMQ_PAIR;
 }

 zmq::pair_t::~pair_t ()
 {
-    zmq_assert (!pipe);
+    zmq_assert (!_pipe);
 }

 void zmq::pair_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
@@ -55,20 +55,20 @@ void zmq::pair_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)

     // ZMQ_PAIR socket can only be connected to a single peer.
     // The socket rejects any further connection requests.
-    if (pipe == NULL)
-        pipe = pipe_;
+    if (_pipe == NULL)
+        _pipe = pipe_;
     else
         pipe_->terminate (false);
 }

 void zmq::pair_t::xpipe_terminated (pipe_t *pipe_)
 {
-    if (pipe_ == pipe) {
-        if (last_in == pipe) {
-            saved_credential.set_deep_copy (last_in->get_credential ());
-            last_in = NULL;
+    if (pipe_ == _pipe) {
+        if (_last_in == _pipe) {
+            _saved_credential.set_deep_copy (_last_in->get_credential ());
+            _last_in = NULL;
         }
-        pipe = NULL;
+        _pipe = NULL;
     }
 }

@@ -86,13 +86,13 @@ void zmq::pair_t::xwrite_activated (pipe_t *)

 int zmq::pair_t::xsend (msg_t *msg_)
 {
-    if (!pipe || !pipe->write (msg_)) {
+    if (!_pipe || !_pipe->write (msg_)) {
         errno = EAGAIN;
         return -1;
     }

     if (!(msg_->flags () & msg_t::more))
-        pipe->flush ();
+        _pipe->flush ();

     // Detach the original message from the data buffer.
     int rc = msg_->init ();
@@ -107,7 +107,7 @@ int zmq::pair_t::xrecv (msg_t *msg_)
     int rc = msg_->close ();
     errno_assert (rc == 0);

-    if (!pipe || !pipe->read (msg_)) {
+    if (!_pipe || !_pipe->read (msg_)) {
         // Initialise the output parameter to be a 0-byte message.
         rc = msg_->init ();
         errno_assert (rc == 0);
@@ -115,27 +115,27 @@ int zmq::pair_t::xrecv (msg_t *msg_)
         errno = EAGAIN;
         return -1;
     }
-    last_in = pipe;
+    _last_in = _pipe;
     return 0;
 }

 bool zmq::pair_t::xhas_in ()
 {
-    if (!pipe)
+    if (!_pipe)
         return false;

-    return pipe->check_read ();
+    return _pipe->check_read ();
 }

 bool zmq::pair_t::xhas_out ()
 {
-    if (!pipe)
+    if (!_pipe)
         return false;

-    return pipe->check_write ();
+    return _pipe->check_write ();
 }

 const zmq::blob_t &zmq::pair_t::get_credential () const
 {
-    return last_in ? last_in->get_credential () : saved_credential;
+    return _last_in ? _last_in->get_credential () : _saved_credential;
 }
@@ -59,11 +59,11 @@ class pair_t : public socket_base_t
     void xpipe_terminated (zmq::pipe_t *pipe_);

   private:
-    zmq::pipe_t *pipe;
+    zmq::pipe_t *_pipe;

-    zmq::pipe_t *last_in;
+    zmq::pipe_t *_last_in;

-    blob_t saved_credential;
+    blob_t _saved_credential;

     pair_t (const pair_t &);
     const pair_t &operator= (const pair_t &);
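At the API level, the single `_pipe` slot in xattach_pipe is what gives ZMQ_PAIR its exclusive 1:1 semantics: a second peer's pipe is terminated immediately on attach. A minimal round trip through the public C API; the inproc endpoint name is arbitrary and error handling is elided:

// Minimal ZMQ_PAIR round trip.
#include <zmq.h>
#include <cassert>
#include <cstring>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *a = zmq_socket (ctx, ZMQ_PAIR);
    void *b = zmq_socket (ctx, ZMQ_PAIR);
    zmq_bind (a, "inproc://pair-demo");
    zmq_connect (b, "inproc://pair-demo");

    zmq_send (b, "ping", 4, 0);

    char buf[16];
    const int n = zmq_recv (a, buf, sizeof buf, 0);
    assert (n == 4 && memcmp (buf, "ping", 4) == 0);

    zmq_close (b);
    zmq_close (a);
    zmq_ctx_term (ctx);
    return 0;
}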
265 src/pipe.cpp
@@ -83,23 +83,23 @@ zmq::pipe_t::pipe_t (object_t *parent_,
                      int outhwm_,
                      bool conflate_) :
     object_t (parent_),
-    inpipe (inpipe_),
-    outpipe (outpipe_),
-    in_active (true),
-    out_active (true),
-    hwm (outhwm_),
-    lwm (compute_lwm (inhwm_)),
-    inhwmboost (-1),
-    outhwmboost (-1),
-    msgs_read (0),
-    msgs_written (0),
-    peers_msgs_read (0),
-    peer (NULL),
-    sink (NULL),
-    state (active),
-    delay (true),
-    server_socket_routing_id (0),
-    conflate (conflate_)
+    _in_pipe (inpipe_),
+    _out_pipe (outpipe_),
+    _in_active (true),
+    _out_active (true),
+    _hwm (outhwm_),
+    _lwm (compute_lwm (inhwm_)),
+    _in_hwm_boost (-1),
+    _out_hwm_boost (-1),
+    _msgs_read (0),
+    _msgs_written (0),
+    _peers_msgs_read (0),
+    _peer (NULL),
+    _sink (NULL),
+    _state (active),
+    _delay (true),
+    _server_socket_routing_id (0),
+    _conflate (conflate_)
 {
 }

@@ -110,62 +110,62 @@ zmq::pipe_t::~pipe_t ()
 void zmq::pipe_t::set_peer (pipe_t *peer_)
 {
     // Peer can be set once only.
-    zmq_assert (!peer);
-    peer = peer_;
+    zmq_assert (!_peer);
+    _peer = peer_;
 }

 void zmq::pipe_t::set_event_sink (i_pipe_events *sink_)
 {
     // Sink can be set once only.
-    zmq_assert (!sink);
-    sink = sink_;
+    zmq_assert (!_sink);
+    _sink = sink_;
 }

 void zmq::pipe_t::set_server_socket_routing_id (
   uint32_t server_socket_routing_id_)
 {
-    server_socket_routing_id = server_socket_routing_id_;
+    _server_socket_routing_id = server_socket_routing_id_;
 }

 uint32_t zmq::pipe_t::get_server_socket_routing_id ()
 {
-    return server_socket_routing_id;
+    return _server_socket_routing_id;
 }

 void zmq::pipe_t::set_router_socket_routing_id (
   const blob_t &router_socket_routing_id_)
 {
-    router_socket_routing_id.set_deep_copy (router_socket_routing_id_);
+    _router_socket_routing_id.set_deep_copy (router_socket_routing_id_);
 }

 const zmq::blob_t &zmq::pipe_t::get_routing_id ()
 {
-    return router_socket_routing_id;
+    return _router_socket_routing_id;
 }

 const zmq::blob_t &zmq::pipe_t::get_credential () const
 {
-    return credential;
+    return _credential;
 }

 bool zmq::pipe_t::check_read ()
 {
-    if (unlikely (!in_active))
+    if (unlikely (!_in_active))
         return false;
-    if (unlikely (state != active && state != waiting_for_delimiter))
+    if (unlikely (_state != active && _state != waiting_for_delimiter))
         return false;

     // Check if there's an item in the pipe.
-    if (!inpipe->check_read ()) {
-        in_active = false;
+    if (!_in_pipe->check_read ()) {
+        _in_active = false;
         return false;
     }

     // If the next item in the pipe is message delimiter,
     // initiate termination process.
-    if (inpipe->probe (is_delimiter)) {
+    if (_in_pipe->probe (is_delimiter)) {
         msg_t msg;
-        bool ok = inpipe->read (&msg);
+        bool ok = _in_pipe->read (&msg);
         zmq_assert (ok);
         process_delimiter ();
         return false;
@@ -176,14 +176,14 @@ bool zmq::pipe_t::check_read ()

 bool zmq::pipe_t::read (msg_t *msg_)
 {
-    if (unlikely (!in_active))
+    if (unlikely (!_in_active))
         return false;
-    if (unlikely (state != active && state != waiting_for_delimiter))
+    if (unlikely (_state != active && _state != waiting_for_delimiter))
         return false;

     for (bool payload_read = false; !payload_read;) {
-        if (!inpipe->read (msg_)) {
-            in_active = false;
+        if (!_in_pipe->read (msg_)) {
+            _in_active = false;
             return false;
         }

@@ -191,7 +191,7 @@ bool zmq::pipe_t::read (msg_t *msg_)
         if (unlikely (msg_->is_credential ())) {
             const unsigned char *data =
               static_cast<const unsigned char *> (msg_->data ());
-            credential.set (data, msg_->size ());
+            _credential.set (data, msg_->size ());
             const int rc = msg_->close ();
             zmq_assert (rc == 0);
         } else
@@ -205,23 +205,23 @@ bool zmq::pipe_t::read (msg_t *msg_)
     }

     if (!(msg_->flags () & msg_t::more) && !msg_->is_routing_id ())
-        msgs_read++;
+        _msgs_read++;

-    if (lwm > 0 && msgs_read % lwm == 0)
-        send_activate_write (peer, msgs_read);
+    if (_lwm > 0 && _msgs_read % _lwm == 0)
+        send_activate_write (_peer, _msgs_read);

     return true;
 }

 bool zmq::pipe_t::check_write ()
 {
-    if (unlikely (!out_active || state != active))
+    if (unlikely (!_out_active || _state != active))
         return false;

     bool full = !check_hwm ();

     if (unlikely (full)) {
-        out_active = false;
+        _out_active = false;
         return false;
     }

@ -235,9 +235,9 @@ bool zmq::pipe_t::write (msg_t *msg_)
|
||||
|
||||
bool more = (msg_->flags () & msg_t::more) != 0;
|
||||
const bool is_routing_id = msg_->is_routing_id ();
|
||||
outpipe->write (*msg_, more);
|
||||
_out_pipe->write (*msg_, more);
|
||||
if (!more && !is_routing_id)
|
||||
msgs_written++;
|
||||
_msgs_written++;
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -246,8 +246,8 @@ void zmq::pipe_t::rollback ()
|
||||
{
|
||||
// Remove incomplete message from the outbound pipe.
|
||||
msg_t msg;
|
||||
if (outpipe) {
|
||||
while (outpipe->unwrite (&msg)) {
|
||||
if (_out_pipe) {
|
||||
while (_out_pipe->unwrite (&msg)) {
|
||||
zmq_assert (msg.flags () & msg_t::more);
|
||||
int rc = msg.close ();
|
||||
errno_assert (rc == 0);
|
||||
@ -258,29 +258,29 @@ void zmq::pipe_t::rollback ()
|
||||
void zmq::pipe_t::flush ()
|
||||
{
|
||||
// The peer does not exist anymore at this point.
|
||||
if (state == term_ack_sent)
|
||||
if (_state == term_ack_sent)
|
||||
return;
|
||||
|
||||
if (outpipe && !outpipe->flush ())
|
||||
send_activate_read (peer);
|
||||
if (_out_pipe && !_out_pipe->flush ())
|
||||
send_activate_read (_peer);
|
||||
}
|
||||
|
||||
void zmq::pipe_t::process_activate_read ()
|
||||
{
|
||||
if (!in_active && (state == active || state == waiting_for_delimiter)) {
|
||||
in_active = true;
|
||||
sink->read_activated (this);
|
||||
if (!_in_active && (_state == active || _state == waiting_for_delimiter)) {
|
||||
_in_active = true;
|
||||
_sink->read_activated (this);
|
||||
}
|
||||
}
|
||||
|
||||
void zmq::pipe_t::process_activate_write (uint64_t msgs_read_)
|
||||
{
|
||||
// Remember the peer's message sequence number.
|
||||
peers_msgs_read = msgs_read_;
|
||||
_peers_msgs_read = msgs_read_;
|
||||
|
||||
if (!out_active && state == active) {
|
||||
out_active = true;
|
||||
sink->write_activated (this);
|
||||
if (!_out_active && _state == active) {
|
||||
_out_active = true;
|
||||
_sink->write_activated (this);
|
||||
}
|
||||
}
|
||||
|
||||
@ -288,80 +288,80 @@ void zmq::pipe_t::process_hiccup (void *pipe_)
|
||||
{
|
||||
// Destroy old outpipe. Note that the read end of the pipe was already
|
||||
// migrated to this thread.
|
||||
zmq_assert (outpipe);
|
||||
outpipe->flush ();
|
||||
zmq_assert (_out_pipe);
|
||||
_out_pipe->flush ();
|
||||
msg_t msg;
|
||||
while (outpipe->read (&msg)) {
|
||||
while (_out_pipe->read (&msg)) {
|
||||
if (!(msg.flags () & msg_t::more))
|
||||
msgs_written--;
|
||||
_msgs_written--;
|
||||
int rc = msg.close ();
|
||||
errno_assert (rc == 0);
|
||||
}
|
||||
LIBZMQ_DELETE (outpipe);
|
||||
LIBZMQ_DELETE (_out_pipe);
|
||||
|
||||
// Plug in the new outpipe.
|
||||
zmq_assert (pipe_);
|
||||
outpipe = static_cast<upipe_t *> (pipe_);
|
||||
out_active = true;
|
||||
_out_pipe = static_cast<upipe_t *> (pipe_);
|
||||
_out_active = true;
|
||||
|
||||
// If appropriate, notify the user about the hiccup.
|
||||
if (state == active)
|
||||
sink->hiccuped (this);
|
||||
if (_state == active)
|
||||
_sink->hiccuped (this);
|
||||
}
|
||||
|
||||
void zmq::pipe_t::process_pipe_term ()
|
||||
{
|
||||
zmq_assert (state == active || state == delimiter_received
|
||||
|| state == term_req_sent1);
|
||||
zmq_assert (_state == active || _state == delimiter_received
|
||||
|| _state == term_req_sent1);
|
||||
|
||||
// This is the simple case of peer-induced termination. If there are no
|
||||
// more pending messages to read, or if the pipe was configured to drop
|
||||
// pending messages, we can move directly to the term_ack_sent state.
|
||||
// Otherwise we'll hang up in waiting_for_delimiter state till all
|
||||
// pending messages are read.
|
||||
if (state == active) {
|
||||
if (delay)
|
||||
state = waiting_for_delimiter;
|
||||
if (_state == active) {
|
||||
if (_delay)
|
||||
_state = waiting_for_delimiter;
|
||||
else {
|
||||
state = term_ack_sent;
|
||||
outpipe = NULL;
|
||||
send_pipe_term_ack (peer);
|
||||
_state = term_ack_sent;
|
||||
_out_pipe = NULL;
|
||||
send_pipe_term_ack (_peer);
|
||||
}
|
||||
}
|
||||
|
||||
// Delimiter happened to arrive before the term command. Now we have the
|
||||
// term command as well, so we can move straight to term_ack_sent state.
|
||||
else if (state == delimiter_received) {
|
||||
state = term_ack_sent;
|
||||
outpipe = NULL;
|
||||
send_pipe_term_ack (peer);
|
||||
else if (_state == delimiter_received) {
|
||||
_state = term_ack_sent;
|
||||
_out_pipe = NULL;
|
||||
send_pipe_term_ack (_peer);
|
||||
}
|
||||
|
||||
// This is the case where both ends of the pipe are closed in parallel.
|
||||
// We simply reply to the request by ack and continue waiting for our
|
||||
// own ack.
|
||||
else if (state == term_req_sent1) {
|
||||
state = term_req_sent2;
|
||||
outpipe = NULL;
|
||||
send_pipe_term_ack (peer);
|
||||
else if (_state == term_req_sent1) {
|
||||
_state = term_req_sent2;
|
||||
_out_pipe = NULL;
|
||||
send_pipe_term_ack (_peer);
|
||||
}
|
||||
}
|
||||
|
||||
void zmq::pipe_t::process_pipe_term_ack ()
|
||||
{
|
||||
// Notify the user that all the references to the pipe should be dropped.
|
||||
zmq_assert (sink);
|
||||
sink->pipe_terminated (this);
|
||||
zmq_assert (_sink);
|
||||
_sink->pipe_terminated (this);
|
||||
|
||||
// In term_ack_sent and term_req_sent2 states there's nothing to do.
|
||||
// Simply deallocate the pipe. In term_req_sent1 state we have to ack
|
||||
// the peer before deallocating this side of the pipe.
|
||||
// All the other states are invalid.
|
||||
if (state == term_req_sent1) {
|
||||
outpipe = NULL;
|
||||
send_pipe_term_ack (peer);
|
||||
if (_state == term_req_sent1) {
|
||||
_out_pipe = NULL;
|
||||
send_pipe_term_ack (_peer);
|
||||
} else
|
||||
zmq_assert (state == term_ack_sent || state == term_req_sent2);
|
||||
zmq_assert (_state == term_ack_sent || _state == term_req_sent2);
|
||||
|
||||
// We'll deallocate the inbound pipe, the peer will deallocate the outbound
|
||||
// pipe (which is an inbound pipe from its point of view).
|
||||
@ -369,15 +369,15 @@ void zmq::pipe_t::process_pipe_term_ack ()
|
||||
// hand because msg_t doesn't have automatic destructor. Then deallocate
|
||||
// the ypipe itself.
|
||||
|
||||
if (!conflate) {
|
||||
if (!_conflate) {
|
||||
msg_t msg;
|
||||
while (inpipe->read (&msg)) {
|
||||
while (_in_pipe->read (&msg)) {
|
||||
int rc = msg.close ();
|
||||
errno_assert (rc == 0);
|
||||
}
|
||||
}
|
||||
|
||||
LIBZMQ_DELETE (inpipe);
|
||||
LIBZMQ_DELETE (_in_pipe);
|
||||
|
||||
// Deallocate the pipe object
|
||||
delete this;
|
||||
@ -390,47 +390,47 @@ void zmq::pipe_t::process_pipe_hwm (int inhwm_, int outhwm_)
|
||||
|
||||
void zmq::pipe_t::set_nodelay ()
|
||||
{
|
||||
this->delay = false;
|
||||
this->_delay = false;
|
||||
}
|
||||
|
||||
void zmq::pipe_t::terminate (bool delay_)
|
||||
{
|
||||
// Overload the value specified at pipe creation.
|
||||
delay = delay_;
|
||||
_delay = delay_;
|
||||
|
||||
// If terminate was already called, we can ignore the duplicate invocation.
|
||||
if (state == term_req_sent1 || state == term_req_sent2) {
|
||||
if (_state == term_req_sent1 || _state == term_req_sent2) {
|
||||
return;
|
||||
}
|
||||
// If the pipe is in the final phase of async termination, it's going to
|
||||
// closed anyway. No need to do anything special here.
|
||||
if (state == term_ack_sent) {
|
||||
if (_state == term_ack_sent) {
|
||||
return;
|
||||
}
|
||||
// The simple sync termination case. Ask the peer to terminate and wait
|
||||
// for the ack.
|
||||
else if (state == active) {
|
||||
send_pipe_term (peer);
|
||||
state = term_req_sent1;
|
||||
else if (_state == active) {
|
||||
send_pipe_term (_peer);
|
||||
_state = term_req_sent1;
|
||||
}
|
||||
// There are still pending messages available, but the user calls
|
||||
// 'terminate'. We can act as if all the pending messages were read.
|
||||
else if (state == waiting_for_delimiter && !delay) {
|
||||
else if (_state == waiting_for_delimiter && !_delay) {
|
||||
// Drop any unfinished outbound messages.
|
||||
rollback ();
|
||||
outpipe = NULL;
|
||||
send_pipe_term_ack (peer);
|
||||
state = term_ack_sent;
|
||||
_out_pipe = NULL;
|
||||
send_pipe_term_ack (_peer);
|
||||
_state = term_ack_sent;
|
||||
}
|
||||
// If there are pending messages still available, do nothing.
|
||||
else if (state == waiting_for_delimiter) {
|
||||
else if (_state == waiting_for_delimiter) {
|
||||
}
|
||||
// We've already got delimiter, but not term command yet. We can ignore
|
||||
// the delimiter and ack synchronously terminate as if we were in
|
||||
// active state.
|
||||
else if (state == delimiter_received) {
|
||||
send_pipe_term (peer);
|
||||
state = term_req_sent1;
|
||||
else if (_state == delimiter_received) {
|
||||
send_pipe_term (_peer);
|
||||
_state = term_req_sent1;
|
||||
}
|
||||
// There are no other states.
|
||||
else {
|
||||
@ -438,9 +438,9 @@ void zmq::pipe_t::terminate (bool delay_)
|
||||
}
|
||||
|
||||
// Stop outbound flow of messages.
|
||||
out_active = false;
|
||||
_out_active = false;
|
||||
|
||||
if (outpipe) {
|
||||
if (_out_pipe) {
|
||||
// Drop any unfinished outbound messages.
|
||||
rollback ();
|
||||
|
||||
@ -448,7 +448,7 @@ void zmq::pipe_t::terminate (bool delay_)
|
||||
// checked; thus the delimiter can be written even when the pipe is full.
|
||||
msg_t msg;
|
||||
msg.init_delimiter ();
|
||||
outpipe->write (msg, false);
|
||||
_out_pipe->write (msg, false);
|
||||
flush ();
|
||||
}
|
||||
}
|
||||
@ -483,69 +483,70 @@ int zmq::pipe_t::compute_lwm (int hwm_)
|
||||
|
||||
void zmq::pipe_t::process_delimiter ()
|
||||
{
|
||||
zmq_assert (state == active || state == waiting_for_delimiter);
|
||||
zmq_assert (_state == active || _state == waiting_for_delimiter);
|
||||
|
||||
if (state == active)
|
||||
state = delimiter_received;
|
||||
if (_state == active)
|
||||
_state = delimiter_received;
|
||||
else {
|
||||
outpipe = NULL;
|
||||
send_pipe_term_ack (peer);
|
||||
state = term_ack_sent;
|
||||
_out_pipe = NULL;
|
||||
send_pipe_term_ack (_peer);
|
||||
_state = term_ack_sent;
|
||||
}
|
||||
}
|
||||
|
||||
void zmq::pipe_t::hiccup ()
|
||||
{
|
||||
// If termination is already under way do nothing.
|
||||
if (state != active)
|
||||
if (_state != active)
|
||||
return;
|
||||
|
||||
// We'll drop the pointer to the inpipe. From now on, the peer is
|
||||
// responsible for deallocating it.
|
||||
inpipe = NULL;
|
||||
_in_pipe = NULL;
|
||||
|
||||
// Create new inpipe.
|
||||
if (conflate)
|
||||
inpipe = new (std::nothrow) ypipe_conflate_t<msg_t> ();
|
||||
if (_conflate)
|
||||
_in_pipe = new (std::nothrow) ypipe_conflate_t<msg_t> ();
|
||||
else
|
||||
inpipe = new (std::nothrow) ypipe_t<msg_t, message_pipe_granularity> ();
|
||||
_in_pipe =
|
||||
new (std::nothrow) ypipe_t<msg_t, message_pipe_granularity> ();
|
||||
|
||||
alloc_assert (inpipe);
|
||||
in_active = true;
|
||||
alloc_assert (_in_pipe);
|
||||
_in_active = true;
|
||||
|
||||
// Notify the peer about the hiccup.
|
||||
send_hiccup (peer, (void *) inpipe);
|
||||
send_hiccup (_peer, (void *) _in_pipe);
|
||||
}
|
||||
|
||||
void zmq::pipe_t::set_hwms (int inhwm_, int outhwm_)
|
||||
{
|
||||
int in = inhwm_ + (inhwmboost > 0 ? inhwmboost : 0);
|
||||
int out = outhwm_ + (outhwmboost > 0 ? outhwmboost : 0);
|
||||
int in = inhwm_ + (_in_hwm_boost > 0 ? _in_hwm_boost : 0);
|
||||
int out = outhwm_ + (_out_hwm_boost > 0 ? _out_hwm_boost : 0);
|
||||
|
||||
// if either send or recv side has hwm <= 0 it means infinite so we should set hwms infinite
|
||||
if (inhwm_ <= 0 || inhwmboost == 0)
|
||||
if (inhwm_ <= 0 || _in_hwm_boost == 0)
|
||||
in = 0;
|
||||
|
||||
if (outhwm_ <= 0 || outhwmboost == 0)
|
||||
if (outhwm_ <= 0 || _out_hwm_boost == 0)
|
||||
out = 0;
|
||||
|
||||
lwm = compute_lwm (in);
|
||||
hwm = out;
|
||||
_lwm = compute_lwm (in);
|
||||
_hwm = out;
|
||||
}
|
||||
|
||||
void zmq::pipe_t::set_hwms_boost (int inhwmboost_, int outhwmboost_)
|
||||
{
|
||||
inhwmboost = inhwmboost_;
|
||||
outhwmboost = outhwmboost_;
|
||||
_in_hwm_boost = inhwmboost_;
|
||||
_out_hwm_boost = outhwmboost_;
|
||||
}
|
||||
|
||||
bool zmq::pipe_t::check_hwm () const
|
||||
{
|
||||
bool full = hwm > 0 && msgs_written - peers_msgs_read >= uint64_t (hwm);
|
||||
bool full = _hwm > 0 && _msgs_written - _peers_msgs_read >= uint64_t (_hwm);
|
||||
return (!full);
|
||||
}
|
||||
|
||||
void zmq::pipe_t::send_hwms_to_peer (int inhwm_, int outhwm_)
|
||||
{
|
||||
send_pipe_hwm (peer, inhwm_, outhwm_);
|
||||
send_pipe_hwm (_peer, inhwm_, outhwm_);
|
||||
}
|
||||
|
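
The renamed `_hwm`/`_lwm` pair implements a simple credit scheme: the writer counts `_msgs_written` locally and only learns the reader's progress through the periodic `activate_write` command carrying `msgs_read_`, so `check_hwm ()` compares the two counters against the high watermark. A minimal standalone sketch of that check (illustrative only, with made-up counter values; the real members live inside `pipe_t`):

#include <cassert>
#include <cstdint>

// Mirrors the logic of zmq::pipe_t::check_hwm () shown above: the pipe is
// writable while fewer than `hwm` messages are unacknowledged by the peer.
static bool check_hwm (int hwm, uint64_t msgs_written, uint64_t peers_msgs_read)
{
    const bool full =
      hwm > 0 && msgs_written - peers_msgs_read >= static_cast<uint64_t> (hwm);
    return !full;
}

int main ()
{
    assert (check_hwm (0, 1000, 0));   // hwm <= 0 means "infinite", never full
    assert (check_hwm (100, 99, 0));   // 99 in flight, still writable
    assert (!check_hwm (100, 100, 0)); // 100 in flight, writer must block
    assert (check_hwm (100, 150, 60)); // peer acked 60, only 90 in flight
    return 0;
}
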
38
src/pipe.hpp
@ -174,36 +174,36 @@ class pipe_t : public object_t,
    ~pipe_t ();

    // Underlying pipes for both directions.
    upipe_t *inpipe;
    upipe_t *outpipe;
    upipe_t *_in_pipe;
    upipe_t *_out_pipe;

    // Can the pipe be read from / written to?
    bool in_active;
    bool out_active;
    bool _in_active;
    bool _out_active;

    // High watermark for the outbound pipe.
    int hwm;
    int _hwm;

    // Low watermark for the inbound pipe.
    int lwm;
    int _lwm;

    // boosts for high and low watermarks, used with inproc sockets so hwm are sum of send and recv hmws on each side of pipe
    int inhwmboost;
    int outhwmboost;
    int _in_hwm_boost;
    int _out_hwm_boost;

    // Number of messages read and written so far.
    uint64_t msgs_read;
    uint64_t msgs_written;
    uint64_t _msgs_read;
    uint64_t _msgs_written;

    // Last received peer's msgs_read. The actual number in the peer
    // can be higher at the moment.
    uint64_t peers_msgs_read;
    uint64_t _peers_msgs_read;

    // The pipe object on the other side of the pipepair.
    pipe_t *peer;
    pipe_t *_peer;

    // Sink to send events to.
    i_pipe_events *sink;
    i_pipe_events *_sink;

    // States of the pipe endpoint:
    // active: common state before any termination begins,

@ -224,21 +224,21 @@ class pipe_t : public object_t,
        term_ack_sent,
        term_req_sent1,
        term_req_sent2
    } state;
    } _state;

    // If true, we receive all the pending inbound messages before
    // terminating. If false, we terminate immediately when the peer
    // asks us to.
    bool delay;
    bool _delay;

    // Routing id of the writer. Used uniquely by the reader side.
    blob_t router_socket_routing_id;
    blob_t _router_socket_routing_id;

    // Routing id of the writer. Used uniquely by the reader side.
    int server_socket_routing_id;
    int _server_socket_routing_id;

    // Pipe's credential.
    blob_t credential;
    blob_t _credential;

    // Returns true if the message is delimiter; false otherwise.
    static bool is_delimiter (const msg_t &msg_);

@ -246,7 +246,7 @@ class pipe_t : public object_t,
    // Computes appropriate low watermark from the given high watermark.
    static int compute_lwm (int hwm_);

    const bool conflate;
    const bool _conflate;

    // Disable copying.
    pipe_t (const pipe_t &);
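
The `_state` member renamed above drives the two-sided termination handshake described in the comments (`term_req_sent1`, `term_req_sent2`, `term_ack_sent`). A compressed, purely illustrative reduction of the happy-path transitions, independent of the real class (the names mirror the enum above; the messaging calls are stand-ins):

#include <cassert>

// Illustrative reduction of pipe_t's termination FSM: the side that calls
// terminate() sends `pipe_term` and waits; the peer answers `pipe_term_ack`.
enum state_t { active, term_req_sent1, term_ack_sent, terminated };

struct toy_pipe_t
{
    state_t state = active;

    void terminate () // local side initiates
    {
        if (state == active)
            state = term_req_sent1; // send_pipe_term (peer) in the real code
    }
    void process_pipe_term () // peer asked us to terminate
    {
        if (state == active)
            state = term_ack_sent; // send_pipe_term_ack (peer)
    }
    void process_pipe_term_ack () // peer acknowledged our request
    {
        if (state == term_req_sent1)
            state = terminated; // the real code deallocates the pipe here
    }
};

int main ()
{
    toy_pipe_t a, b;
    a.terminate ();             // a: active -> term_req_sent1
    b.process_pipe_term ();     // b: active -> term_ack_sent
    a.process_pipe_term_ack (); // a: term_req_sent1 -> terminated
    assert (a.state == terminated && b.state == term_ack_sent);
    return 0;
}
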
@ -40,7 +40,7 @@
zmq::plain_client_t::plain_client_t (session_base_t *const session_,
                                     const options_t &options_) :
    mechanism_base_t (session_, options_),
    state (sending_hello)
    _state (sending_hello)
{
}

@ -52,16 +52,16 @@ int zmq::plain_client_t::next_handshake_command (msg_t *msg_)
{
    int rc = 0;

    switch (state) {
    switch (_state) {
        case sending_hello:
            rc = produce_hello (msg_);
            if (rc == 0)
                state = waiting_for_welcome;
                _state = waiting_for_welcome;
            break;
        case sending_initiate:
            rc = produce_initiate (msg_);
            if (rc == 0)
                state = waiting_for_ready;
                _state = waiting_for_ready;
            break;
        default:
            errno = EAGAIN;

@ -102,9 +102,9 @@ int zmq::plain_client_t::process_handshake_command (msg_t *msg_)
zmq::mechanism_t::status_t zmq::plain_client_t::status () const
{
    if (state == ready)
    if (_state == ready)
        return mechanism_t::ready;
    if (state == error_command_received)
    if (_state == error_command_received)
        return mechanism_t::error;
    else
        return mechanism_t::handshaking;

@ -143,7 +143,7 @@ int zmq::plain_client_t::process_welcome (const unsigned char *cmd_data_,
{
    LIBZMQ_UNUSED (cmd_data_);

    if (state != waiting_for_welcome) {
    if (_state != waiting_for_welcome) {
        session->get_socket ()->event_handshake_failed_protocol (
          session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
        errno = EPROTO;

@ -156,7 +156,7 @@ int zmq::plain_client_t::process_welcome (const unsigned char *cmd_data_,
        errno = EPROTO;
        return -1;
    }
    state = sending_initiate;
    _state = sending_initiate;
    return 0;
}

@ -170,7 +170,7 @@ int zmq::plain_client_t::produce_initiate (msg_t *msg_) const
int zmq::plain_client_t::process_ready (const unsigned char *cmd_data_,
                                        size_t data_size_)
{
    if (state != waiting_for_ready) {
    if (_state != waiting_for_ready) {
        session->get_socket ()->event_handshake_failed_protocol (
          session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
        errno = EPROTO;

@ -178,7 +178,7 @@ int zmq::plain_client_t::process_ready (const unsigned char *cmd_data_,
    }
    const int rc = parse_metadata (cmd_data_ + 6, data_size_ - 6);
    if (rc == 0)
        state = ready;
        _state = ready;
    else
        session->get_socket ()->event_handshake_failed_protocol (
          session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA);

@ -189,7 +189,7 @@ int zmq::plain_client_t::process_ready (const unsigned char *cmd_data_,
int zmq::plain_client_t::process_error (const unsigned char *cmd_data_,
                                        size_t data_size_)
{
    if (state != waiting_for_welcome && state != waiting_for_ready) {
    if (_state != waiting_for_welcome && _state != waiting_for_ready) {
        session->get_socket ()->event_handshake_failed_protocol (
          session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND);
        errno = EPROTO;

@ -212,6 +212,6 @@ int zmq::plain_client_t::process_error (const unsigned char *cmd_data_,
    }
    const char *error_reason = reinterpret_cast<const char *> (cmd_data_) + 7;
    handle_error_reason (error_reason, error_reason_len);
    state = error_command_received;
    _state = error_command_received;
    return 0;
}

@ -59,7 +59,7 @@ class plain_client_t : public mechanism_base_t
        ready
    };

    state_t state;
    state_t _state;

    int produce_hello (msg_t *msg_) const;
    int produce_initiate (msg_t *msg_) const;
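
The `_state` machine above is only exercised when a socket is configured for the PLAIN mechanism. For context, a minimal client-side setup through the public libzmq C API (the endpoint and credentials are placeholders, not taken from this commit):

#include <zmq.h>
#include <string.h>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *req = zmq_socket (ctx, ZMQ_REQ);

    // Selecting PLAIN on the client side is done by setting a username and
    // password; plain_client_t then walks sending_hello -> ... -> ready.
    const char *user = "admin";  // placeholder credentials
    const char *pass = "secret";
    zmq_setsockopt (req, ZMQ_PLAIN_USERNAME, user, strlen (user));
    zmq_setsockopt (req, ZMQ_PLAIN_PASSWORD, pass, strlen (pass));

    zmq_connect (req, "tcp://127.0.0.1:5555"); // placeholder endpoint

    zmq_close (req);
    zmq_ctx_term (ctx);
    return 0;
}
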
@ -44,30 +44,30 @@ zmq::poller_base_t::~poller_base_t ()
int zmq::poller_base_t::get_load () const
{
    return load.get ();
    return _load.get ();
}

void zmq::poller_base_t::adjust_load (int amount_)
{
    if (amount_ > 0)
        load.add (amount_);
        _load.add (amount_);
    else if (amount_ < 0)
        load.sub (-amount_);
        _load.sub (-amount_);
}

void zmq::poller_base_t::add_timer (int timeout_, i_poll_events *sink_, int id_)
{
    uint64_t expiration = clock.now_ms () + timeout_;
    uint64_t expiration = _clock.now_ms () + timeout_;
    timer_info_t info = {sink_, id_};
    timers.insert (timers_t::value_type (expiration, info));
    _timers.insert (timers_t::value_type (expiration, info));
}

void zmq::poller_base_t::cancel_timer (i_poll_events *sink_, int id_)
{
    // Complexity of this operation is O(n). We assume it is rarely used.
    for (timers_t::iterator it = timers.begin (); it != timers.end (); ++it)
    for (timers_t::iterator it = _timers.begin (); it != _timers.end (); ++it)
        if (it->second.sink == sink_ && it->second.id == id_) {
            timers.erase (it);
            _timers.erase (it);
            return;
        }

@ -78,15 +78,15 @@ void zmq::poller_base_t::cancel_timer (i_poll_events *sink_, int id_)
uint64_t zmq::poller_base_t::execute_timers ()
{
    // Fast track.
    if (timers.empty ())
    if (_timers.empty ())
        return 0;

    // Get the current time.
    uint64_t current = clock.now_ms ();
    uint64_t current = _clock.now_ms ();

    // Execute the timers that are already due.
    timers_t::iterator it = timers.begin ();
    while (it != timers.end ()) {
    timers_t::iterator it = _timers.begin ();
    while (it != _timers.end ()) {
        // If we have to wait to execute the item, same will be true about
        // all the following items (multimap is sorted). Thus we can stop
        // checking the subsequent timers and return the time to wait for

@ -100,7 +100,7 @@ uint64_t zmq::poller_base_t::execute_timers ()
        // Remove it from the list of active timers.
        timers_t::iterator o = it;
        ++it;
        timers.erase (o);
        _timers.erase (o);
    }

    // There are no more timers.

@ -108,25 +108,25 @@ uint64_t zmq::poller_base_t::execute_timers ()
}

zmq::worker_poller_base_t::worker_poller_base_t (const thread_ctx_t &ctx_) :
    ctx (ctx_)
    _ctx (ctx_)
{
}

void zmq::worker_poller_base_t::stop_worker ()
{
    worker.stop ();
    _worker.stop ();
}

void zmq::worker_poller_base_t::start ()
{
    zmq_assert (get_load () > 0);
    ctx.start_thread (worker, worker_routine, this);
    _ctx.start_thread (_worker, worker_routine, this);
}

void zmq::worker_poller_base_t::check_thread ()
{
#ifdef _DEBUG
    zmq_assert (!worker.get_started () || worker.is_current_thread ());
    zmq_assert (!_worker.get_started () || _worker.is_current_thread ());
#endif
}

@ -140,7 +140,7 @@ class poller_base_t
  private:
    // Clock instance private to this I/O thread.
    clock_t clock;
    clock_t _clock;

    // List of active timers.
    struct timer_info_t

@ -149,11 +149,11 @@ class poller_base_t
        int id;
    };
    typedef std::multimap<uint64_t, timer_info_t> timers_t;
    timers_t timers;
    timers_t _timers;

    // Load of the poller. Currently the number of file descriptors
    // registered.
    atomic_counter_t load;
    atomic_counter_t _load;

    poller_base_t (const poller_base_t &);
    const poller_base_t &operator= (const poller_base_t &);

@ -186,10 +186,10 @@ class worker_poller_base_t : public poller_base_t
    virtual void loop () = 0;

    // Reference to ZMQ context.
    const thread_ctx_t &ctx;
    const thread_ctx_t &_ctx;

    // Handle of the physical thread doing the I/O work.
    thread_t worker;
    thread_t _worker;
};
}
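
The `_timers` multimap renamed above is what makes `execute_timers ()` cheap: the keys are absolute expiry times in milliseconds, so the map iterates in due order and the scan can stop at the first non-expired entry. A self-contained sketch of the same pattern (simplified; the real code stores a sink/id pair rather than a callback):

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>

typedef std::multimap<uint64_t, std::function<void ()> > timers_t;

// Returns 0 if no timers remain, otherwise the delay until the next one,
// mirroring the contract of poller_base_t::execute_timers ().
static uint64_t execute_timers (timers_t &timers, uint64_t now)
{
    timers_t::iterator it = timers.begin ();
    while (it != timers.end ()) {
        if (it->first > now)
            return it->first - now; // multimap is sorted: nothing else is due
        it->second ();
        timers.erase (it++); // erase the executed timer, keep iterating
    }
    return 0;
}

int main ()
{
    timers_t timers;
    timers.insert (std::make_pair (100, [] { std::cout << "t=100\n"; }));
    timers.insert (std::make_pair (250, [] { std::cout << "t=250\n"; }));
    uint64_t wait = execute_timers (timers, 120);      // fires the first timer
    std::cout << "next timer in " << wait << " ms\n";  // prints 130
    return 0;
}
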
12
src/pull.cpp
@ -49,30 +49,30 @@ void zmq::pull_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
    LIBZMQ_UNUSED (subscribe_to_all_);

    zmq_assert (pipe_);
    fq.attach (pipe_);
    _fq.attach (pipe_);
}

void zmq::pull_t::xread_activated (pipe_t *pipe_)
{
    fq.activated (pipe_);
    _fq.activated (pipe_);
}

void zmq::pull_t::xpipe_terminated (pipe_t *pipe_)
{
    fq.pipe_terminated (pipe_);
    _fq.pipe_terminated (pipe_);
}

int zmq::pull_t::xrecv (msg_t *msg_)
{
    return fq.recv (msg_);
    return _fq.recv (msg_);
}

bool zmq::pull_t::xhas_in ()
{
    return fq.has_in ();
    return _fq.has_in ();
}

const zmq::blob_t &zmq::pull_t::get_credential () const
{
    return fq.get_credential ();
    return _fq.get_credential ();
}

@ -58,7 +58,7 @@ class pull_t : public socket_base_t
  private:
    // Fair queueing object for inbound pipes.
    fq_t fq;
    fq_t _fq;

    pull_t (const pull_t &);
    const pull_t &operator= (const pull_t &);
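
The `_fq` fair-queueing object above is invisible at the API level; from the outside it simply means a PULL socket interleaves reads across its connected peers. A minimal pipeline pair using the public C API (the inproc endpoint name is a placeholder):

#include <zmq.h>
#include <assert.h>
#include <string.h>

int main ()
{
    void *ctx = zmq_ctx_new ();

    void *push = zmq_socket (ctx, ZMQ_PUSH); // sends round-robin via lb_t
    void *pull = zmq_socket (ctx, ZMQ_PULL); // receives fair-queued via fq_t
    zmq_bind (pull, "inproc://pipeline");    // inproc: bind before connect
    zmq_connect (push, "inproc://pipeline");

    zmq_send (push, "work", 4, 0);

    char buf[16];
    int n = zmq_recv (pull, buf, sizeof buf, 0);
    assert (n == 4 && memcmp (buf, "work", 4) == 0);

    zmq_close (push);
    zmq_close (pull);
    zmq_ctx_term (ctx);
    return 0;
}
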
10
src/push.cpp
@ -53,25 +53,25 @@ void zmq::push_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
    pipe_->set_nodelay ();

    zmq_assert (pipe_);
    lb.attach (pipe_);
    _lb.attach (pipe_);
}

void zmq::push_t::xwrite_activated (pipe_t *pipe_)
{
    lb.activated (pipe_);
    _lb.activated (pipe_);
}

void zmq::push_t::xpipe_terminated (pipe_t *pipe_)
{
    lb.pipe_terminated (pipe_);
    _lb.pipe_terminated (pipe_);
}

int zmq::push_t::xsend (msg_t *msg_)
{
    return lb.send (msg_);
    return _lb.send (msg_);
}

bool zmq::push_t::xhas_out ()
{
    return lb.has_out ();
    return _lb.has_out ();
}

@ -57,7 +57,7 @@ class push_t : public socket_base_t
  private:
    // Load balancer managing the outbound pipes.
    lb_t lb;
    lb_t _lb;

    push_t (const push_t &);
    const push_t &operator= (const push_t &);
@ -38,7 +38,7 @@
zmq::radio_t::radio_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
    socket_base_t (parent_, tid_, sid_, true),
    lossy (true)
    _lossy (true)
{
    options.type = ZMQ_RADIO;
}

@ -57,10 +57,10 @@ void zmq::radio_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
    // to receive the delimiter.
    pipe_->set_nodelay ();

    dist.attach (pipe_);
    _dist.attach (pipe_);

    if (subscribe_to_all_)
        udp_pipes.push_back (pipe_);
        _udp_pipes.push_back (pipe_);
    // The pipe is active when attached. Let's read the subscriptions from
    // it, if any.
    else

@ -77,16 +77,16 @@ void zmq::radio_t::xread_activated (pipe_t *pipe_)
        std::string group = std::string (msg.group ());

        if (msg.is_join ())
            subscriptions.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (group),
                                                     pipe_);
            _subscriptions.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (group),
                                                      pipe_);
        else {
            std::pair<subscriptions_t::iterator, subscriptions_t::iterator>
              range = subscriptions.equal_range (group);
              range = _subscriptions.equal_range (group);

            for (subscriptions_t::iterator it = range.first;
                 it != range.second; ++it) {
                if (it->second == pipe_) {
                    subscriptions.erase (it);
                    _subscriptions.erase (it);
                    break;
                }
            }

@ -98,7 +98,7 @@ void zmq::radio_t::xread_activated (pipe_t *pipe_)
void zmq::radio_t::xwrite_activated (pipe_t *pipe_)
{
    dist.activated (pipe_);
    _dist.activated (pipe_);
}
int zmq::radio_t::xsetsockopt (int option_,
                               const void *optval_,

@ -109,7 +109,7 @@ int zmq::radio_t::xsetsockopt (int option_,
        return -1;
    }
    if (option_ == ZMQ_XPUB_NODROP)
        lossy = (*static_cast<const int *> (optval_) == 0);
        _lossy = (*static_cast<const int *> (optval_) == 0);
    else {
        errno = EINVAL;
        return -1;

@ -121,21 +121,21 @@ void zmq::radio_t::xpipe_terminated (pipe_t *pipe_)
{
    // NOTE: erase invalidates an iterator, and that's why it's not incrementing in post-loop
    // read-after-free caught by Valgrind, see https://github.com/zeromq/libzmq/pull/1771
    for (subscriptions_t::iterator it = subscriptions.begin ();
         it != subscriptions.end ();) {
    for (subscriptions_t::iterator it = _subscriptions.begin ();
         it != _subscriptions.end ();) {
        if (it->second == pipe_) {
            subscriptions.erase (it++);
            _subscriptions.erase (it++);
        } else {
            ++it;
        }
    }

    udp_pipes_t::iterator it =
      std::find (udp_pipes.begin (), udp_pipes.end (), pipe_);
    if (it != udp_pipes.end ())
        udp_pipes.erase (it);
      std::find (_udp_pipes.begin (), _udp_pipes.end (), pipe_);
    if (it != _udp_pipes.end ())
        _udp_pipes.erase (it);

    dist.pipe_terminated (pipe_);
    _dist.pipe_terminated (pipe_);
}

int zmq::radio_t::xsend (msg_t *msg_)

@ -146,21 +146,21 @@ int zmq::radio_t::xsend (msg_t *msg_)
        return -1;
    }

    dist.unmatch ();
    _dist.unmatch ();

    std::pair<subscriptions_t::iterator, subscriptions_t::iterator> range =
      subscriptions.equal_range (std::string (msg_->group ()));
      _subscriptions.equal_range (std::string (msg_->group ()));

    for (subscriptions_t::iterator it = range.first; it != range.second; ++it)
        dist.match (it->second);
        _dist.match (it->second);

    for (udp_pipes_t::iterator it = udp_pipes.begin (); it != udp_pipes.end ();
         ++it)
        dist.match (*it);
    for (udp_pipes_t::iterator it = _udp_pipes.begin ();
         it != _udp_pipes.end (); ++it)
        _dist.match (*it);

    int rc = -1;
    if (lossy || dist.check_hwm ()) {
        if (dist.send_to_matching (msg_) == 0) {
    if (_lossy || _dist.check_hwm ()) {
        if (_dist.send_to_matching (msg_) == 0) {
            rc = 0; // Yay, sent successfully
        }
    } else

@ -171,7 +171,7 @@ int zmq::radio_t::xsend (msg_t *msg_)
bool zmq::radio_t::xhas_out ()
{
    return dist.has_out ();
    return _dist.has_out ();
}

int zmq::radio_t::xrecv (msg_t *msg_)

@ -193,7 +193,7 @@ zmq::radio_session_t::radio_session_t (io_thread_t *io_thread_,
                                       const options_t &options_,
                                       address_t *addr_) :
    session_base_t (io_thread_, connect_, socket_, options_, addr_),
    state (group)
    _state (group)
{
}

@ -246,12 +246,12 @@ int zmq::radio_session_t::push_msg (msg_t *msg_)
int zmq::radio_session_t::pull_msg (msg_t *msg_)
{
    if (state == group) {
        int rc = session_base_t::pull_msg (&pending_msg);
    if (_state == group) {
        int rc = session_base_t::pull_msg (&_pending_msg);
        if (rc != 0)
            return rc;

        const char *group = pending_msg.group ();
        const char *group = _pending_msg.group ();
        int length = static_cast<int> (strlen (group));

        // First frame is the group

@ -261,16 +261,16 @@ int zmq::radio_session_t::pull_msg (msg_t *msg_)
        memcpy (msg_->data (), group, length);

        // Next status is the body
        state = body;
        _state = body;
        return 0;
    }
    *msg_ = pending_msg;
    state = group;
    *msg_ = _pending_msg;
    _state = group;
    return 0;
}

void zmq::radio_session_t::reset ()
{
    session_base_t::reset ();
    state = group;
    _state = group;
}

@ -65,17 +65,17 @@ class radio_t : public socket_base_t
  private:
    // List of all subscriptions mapped to corresponding pipes.
    typedef std::multimap<std::string, pipe_t *> subscriptions_t;
    subscriptions_t subscriptions;
    subscriptions_t _subscriptions;

    // List of udp pipes
    typedef std::vector<pipe_t *> udp_pipes_t;
    udp_pipes_t udp_pipes;
    udp_pipes_t _udp_pipes;

    // Distributor of messages holding the list of outbound pipes.
    dist_t dist;
    dist_t _dist;

    // Drop messages if HWM reached, otherwise return with EAGAIN
    bool lossy;
    bool _lossy;

    radio_t (const radio_t &);
    const radio_t &operator= (const radio_t &);

@ -101,9 +101,9 @@ class radio_session_t : public session_base_t
    {
        group,
        body
    } state;
    } _state;

    msg_t pending_msg;
    msg_t _pending_msg;

    radio_session_t (const radio_session_t &);
    const radio_session_t &operator= (const radio_session_t &);
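
The `_subscriptions` multimap above lets several pipes join the same group; a join inserts one (group, pipe) pair and a leave removes exactly one matching pair via `equal_range`, as in `xread_activated` above. The same pattern in isolation (a sketch with plain ints standing in for `pipe_t *`):

#include <cassert>
#include <map>
#include <string>

typedef std::multimap<std::string, int> subscriptions_t; // int stands in for pipe_t *

// Remove one (group, pipe) pair, the way radio_t handles a leave message.
static void leave (subscriptions_t &subs, const std::string &group, int pipe)
{
    std::pair<subscriptions_t::iterator, subscriptions_t::iterator> range =
      subs.equal_range (group);
    for (subscriptions_t::iterator it = range.first; it != range.second; ++it)
        if (it->second == pipe) {
            subs.erase (it); // erase only this member, not the whole group
            break;
        }
}

int main ()
{
    subscriptions_t subs;
    subs.insert (std::make_pair ("news", 1));
    subs.insert (std::make_pair ("news", 2)); // two pipes joined "news"
    leave (subs, "news", 1);
    assert (subs.count ("news") == 1); // pipe 2 is still subscribed
    return 0;
}
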
@ -34,22 +34,22 @@
#include "raw_decoder.hpp"
#include "err.hpp"

zmq::raw_decoder_t::raw_decoder_t (size_t bufsize_) : allocator (bufsize_, 1)
zmq::raw_decoder_t::raw_decoder_t (size_t bufsize_) : _allocator (bufsize_, 1)
{
    int rc = in_progress.init ();
    int rc = _in_progress.init ();
    errno_assert (rc == 0);
}

zmq::raw_decoder_t::~raw_decoder_t ()
{
    int rc = in_progress.close ();
    int rc = _in_progress.close ();
    errno_assert (rc == 0);
}

void zmq::raw_decoder_t::get_buffer (unsigned char **data_, size_t *size_)
{
    *data_ = allocator.allocate ();
    *size_ = allocator.size ();
    *data_ = _allocator.allocate ();
    *size_ = _allocator.size ();
}

int zmq::raw_decoder_t::decode (const uint8_t *data_,

@ -57,15 +57,15 @@ int zmq::raw_decoder_t::decode (const uint8_t *data_,
                                size_t &bytes_used_)
{
    int rc =
      in_progress.init (const_cast<unsigned char *> (data_), size_,
                        shared_message_memory_allocator::call_dec_ref,
                        allocator.buffer (), allocator.provide_content ());
      _in_progress.init (const_cast<unsigned char *> (data_), size_,
                         shared_message_memory_allocator::call_dec_ref,
                         _allocator.buffer (), _allocator.provide_content ());

    // if the buffer serves as memory for a zero-copy message, release it
    // and allocate a new buffer in get_buffer for the next decode
    if (in_progress.is_zcmsg ()) {
        allocator.advance_content ();
        allocator.release ();
    if (_in_progress.is_zcmsg ()) {
        _allocator.advance_content ();
        _allocator.release ();
    }

    errno_assert (rc != -1);

@ -52,14 +52,14 @@ class raw_decoder_t : public i_decoder
    virtual int
    decode (const unsigned char *data_, size_t size_, size_t &processed_);

    virtual msg_t *msg () { return &in_progress; }
    virtual msg_t *msg () { return &_in_progress; }

    virtual void resize_buffer (size_t) {}

  private:
    msg_t in_progress;
    msg_t _in_progress;

    shared_message_memory_allocator allocator;
    shared_message_memory_allocator _allocator;

    raw_decoder_t (const raw_decoder_t &);
    void operator= (const raw_decoder_t &);
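
The decoder's zero-copy path relies on `msg_t` adopting an external buffer together with a deallocation callback, the same idea the public API exposes as `zmq_msg_init_data`. A small illustration with a heap buffer and a free function (not from this commit, just a parallel at the API level):

#include <zmq.h>
#include <cstdlib>
#include <cstring>

// Called by libzmq once the last reference to the message is dropped,
// analogous to shared_message_memory_allocator::call_dec_ref above.
static void my_free (void *data_, void *hint_)
{
    (void) hint_;
    free (data_);
}

int main ()
{
    const char payload[] = "zero-copy";
    char *buf = static_cast<char *> (malloc (sizeof payload));
    memcpy (buf, payload, sizeof payload);

    // The message takes ownership of buf; no copy is made.
    zmq_msg_t msg;
    int rc = zmq_msg_init_data (&msg, buf, sizeof payload, my_free, NULL);
    if (rc != 0)
        return 1;

    zmq_msg_close (&msg); // triggers my_free
    return 0;
}
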
@ -35,43 +35,43 @@
zmq::reaper_t::reaper_t (class ctx_t *ctx_, uint32_t tid_) :
    object_t (ctx_, tid_),
    mailbox_handle (static_cast<poller_t::handle_t> (NULL)),
    poller (NULL),
    sockets (0),
    terminating (false)
    _mailbox_handle (static_cast<poller_t::handle_t> (NULL)),
    _poller (NULL),
    _sockets (0),
    _terminating (false)
{
    if (!mailbox.valid ())
    if (!_mailbox.valid ())
        return;

    poller = new (std::nothrow) poller_t (*ctx_);
    alloc_assert (poller);
    _poller = new (std::nothrow) poller_t (*ctx_);
    alloc_assert (_poller);

    if (mailbox.get_fd () != retired_fd) {
        mailbox_handle = poller->add_fd (mailbox.get_fd (), this);
        poller->set_pollin (mailbox_handle);
    if (_mailbox.get_fd () != retired_fd) {
        _mailbox_handle = _poller->add_fd (_mailbox.get_fd (), this);
        _poller->set_pollin (_mailbox_handle);
    }

#ifdef HAVE_FORK
    pid = getpid ();
    _pid = getpid ();
#endif
}

zmq::reaper_t::~reaper_t ()
{
    LIBZMQ_DELETE (poller);
    LIBZMQ_DELETE (_poller);
}

zmq::mailbox_t *zmq::reaper_t::get_mailbox ()
{
    return &mailbox;
    return &_mailbox;
}

void zmq::reaper_t::start ()
{
    zmq_assert (mailbox.valid ());
    zmq_assert (_mailbox.valid ());

    // Start the thread.
    poller->start ();
    _poller->start ();
}

void zmq::reaper_t::stop ()

@ -85,7 +85,7 @@ void zmq::reaper_t::in_event ()
{
    while (true) {
#ifdef HAVE_FORK
        if (unlikely (pid != getpid ())) {
        if (unlikely (_pid != getpid ())) {
            //printf("zmq::reaper_t::in_event return in child process %d\n", (int)getpid());
            return;
        }

@ -93,7 +93,7 @@ void zmq::reaper_t::in_event ()
        // Get the next command. If there is none, exit.
        command_t cmd;
        int rc = mailbox.recv (&cmd, 0);
        int rc = _mailbox.recv (&cmd, 0);
        if (rc != 0 && errno == EINTR)
            continue;
        if (rc != 0 && errno == EAGAIN)

@ -117,33 +117,33 @@ void zmq::reaper_t::timer_event (int)
void zmq::reaper_t::process_stop ()
{
    terminating = true;
    _terminating = true;

    // If there are no sockets being reaped finish immediately.
    if (!sockets) {
    if (!_sockets) {
        send_done ();
        poller->rm_fd (mailbox_handle);
        poller->stop ();
        _poller->rm_fd (_mailbox_handle);
        _poller->stop ();
    }
}

void zmq::reaper_t::process_reap (socket_base_t *socket_)
{
    // Add the socket to the poller.
    socket_->start_reaping (poller);
    socket_->start_reaping (_poller);

    ++sockets;
    ++_sockets;
}

void zmq::reaper_t::process_reaped ()
{
    --sockets;
    --_sockets;

    // If reaped was already asked to terminate and there are no more sockets,
    // finish immediately.
    if (!sockets && terminating) {
    if (!_sockets && _terminating) {
        send_done ();
        poller->rm_fd (mailbox_handle);
        poller->stop ();
        _poller->rm_fd (_mailbox_handle);
        _poller->stop ();
    }
}

@ -63,26 +63,26 @@ class reaper_t : public object_t, public i_poll_events
    void process_reaped ();

    // Reaper thread accesses incoming commands via this mailbox.
    mailbox_t mailbox;
    mailbox_t _mailbox;

    // Handle associated with mailbox' file descriptor.
    poller_t::handle_t mailbox_handle;
    poller_t::handle_t _mailbox_handle;

    // I/O multiplexing is performed using a poller object.
    poller_t *poller;
    poller_t *_poller;

    // Number of sockets being reaped at the moment.
    int sockets;
    int _sockets;

    // If true, we were already asked to terminate.
    bool terminating;
    bool _terminating;

    reaper_t (const reaper_t &);
    const reaper_t &operator= (const reaper_t &);

#ifdef HAVE_FORK
    // the process that created this context. Used to detect forking.
    pid_t pid;
    pid_t _pid;
#endif
};
}
22
src/rep.cpp
@ -34,8 +34,8 @@
zmq::rep_t::rep_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
    router_t (parent_, tid_, sid_),
    sending_reply (false),
    request_begins (true)
    _sending_reply (false),
    _request_begins (true)
{
    options.type = ZMQ_REP;
}

@ -47,7 +47,7 @@ zmq::rep_t::~rep_t ()
int zmq::rep_t::xsend (msg_t *msg_)
{
    // If we are in the middle of receiving a request, we cannot send reply.
    if (!sending_reply) {
    if (!_sending_reply) {
        errno = EFSM;
        return -1;
    }

@ -61,7 +61,7 @@ int zmq::rep_t::xsend (msg_t *msg_)
    // If the reply is complete flip the FSM back to request receiving state.
    if (!more)
        sending_reply = false;
        _sending_reply = false;

    return 0;
}

@ -69,14 +69,14 @@ int zmq::rep_t::xsend (msg_t *msg_)
int zmq::rep_t::xrecv (msg_t *msg_)
{
    // If we are in middle of sending a reply, we cannot receive next request.
    if (sending_reply) {
    if (_sending_reply) {
        errno = EFSM;
        return -1;
    }

    // First thing to do when receiving a request is to copy all the labels
    // to the reply pipe.
    if (request_begins) {
    if (_request_begins) {
        while (true) {
            int rc = router_t::xrecv (msg_);
            if (rc != 0)

@ -99,7 +99,7 @@ int zmq::rep_t::xrecv (msg_t *msg_)
                errno_assert (rc == 0);
            }
        }
        request_begins = false;
        _request_begins = false;
    }

    // Get next message part to return to the user.

@ -109,8 +109,8 @@ int zmq::rep_t::xrecv (msg_t *msg_)
    // If whole request is read, flip the FSM to reply-sending state.
    if (!(msg_->flags () & msg_t::more)) {
        sending_reply = true;
        request_begins = true;
        _sending_reply = true;
        _request_begins = true;
    }

    return 0;

@ -118,7 +118,7 @@ int zmq::rep_t::xrecv (msg_t *msg_)
bool zmq::rep_t::xhas_in ()
{
    if (sending_reply)
    if (_sending_reply)
        return false;

    return router_t::xhas_in ();

@ -126,7 +126,7 @@ bool zmq::rep_t::xhas_in ()
bool zmq::rep_t::xhas_out ()
{
    if (!sending_reply)
    if (!_sending_reply)
        return false;

    return router_t::xhas_out ();

@ -54,11 +54,11 @@ class rep_t : public router_t
  private:
    // If true, we are in process of sending the reply. If false we are
    // in process of receiving a request.
    bool sending_reply;
    bool _sending_reply;

    // If true, we are starting to receive a request. The beginning
    // of the request is the backtrace stack.
    bool request_begins;
    bool _request_begins;

    rep_t (const rep_t &);
    const rep_t &operator= (const rep_t &);
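
The `_sending_reply` flag above is what surfaces to applications as the EFSM error: a REP socket must strictly alternate receive and send. Observable through the public API (the inproc endpoint name is a placeholder):

#include <zmq.h>
#include <assert.h>
#include <errno.h>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *rep = zmq_socket (ctx, ZMQ_REP);
    zmq_bind (rep, "inproc://fsm-demo");

    // Sending before a request has been received violates the REP state
    // machine (xsend fails while !_sending_reply), so EFSM is returned.
    int rc = zmq_send (rep, "early", 5, ZMQ_DONTWAIT);
    assert (rc == -1 && zmq_errno () == EFSM);

    zmq_close (rep);
    zmq_ctx_term (ctx);
    return 0;
}
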
86
src/req.cpp
@ -46,12 +46,12 @@ static void free_id (void *data_, void *hint_)
zmq::req_t::req_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
    dealer_t (parent_, tid_, sid_),
    receiving_reply (false),
    message_begins (true),
    reply_pipe (NULL),
    request_id_frames_enabled (false),
    request_id (generate_random ()),
    strict (true)
    _receiving_reply (false),
    _message_begins (true),
    _reply_pipe (NULL),
    _request_id_frames_enabled (false),
    _request_id (generate_random ()),
    _strict (true)
{
    options.type = ZMQ_REQ;
}

@ -64,29 +64,29 @@ int zmq::req_t::xsend (msg_t *msg_)
{
    // If we've sent a request and we still haven't got the reply,
    // we can't send another request unless the strict option is disabled.
    if (receiving_reply) {
        if (strict) {
    if (_receiving_reply) {
        if (_strict) {
            errno = EFSM;
            return -1;
        }

        receiving_reply = false;
        message_begins = true;
        _receiving_reply = false;
        _message_begins = true;
    }

    // First part of the request is the request routing id.
    if (message_begins) {
        reply_pipe = NULL;
    if (_message_begins) {
        _reply_pipe = NULL;

        if (request_id_frames_enabled) {
            request_id++;
        if (_request_id_frames_enabled) {
            _request_id++;

            // Copy request id before sending (see issue #1695 for details).
            uint32_t *request_id_copy =
              static_cast<uint32_t *> (malloc (sizeof (uint32_t)));
            zmq_assert (request_id_copy);

            *request_id_copy = request_id;
            *request_id_copy = _request_id;

            msg_t id;
            int rc =

@ -94,7 +94,7 @@ int zmq::req_t::xsend (msg_t *msg_)
            errno_assert (rc == 0);
            id.set_flags (msg_t::more);

            rc = dealer_t::sendpipe (&id, &reply_pipe);
            rc = dealer_t::sendpipe (&id, &_reply_pipe);
            if (rc != 0)
                return -1;
        }

@ -104,12 +104,12 @@ int zmq::req_t::xsend (msg_t *msg_)
        errno_assert (rc == 0);
        bottom.set_flags (msg_t::more);

        rc = dealer_t::sendpipe (&bottom, &reply_pipe);
        rc = dealer_t::sendpipe (&bottom, &_reply_pipe);
        if (rc != 0)
            return -1;
        zmq_assert (reply_pipe);
        zmq_assert (_reply_pipe);

        message_begins = false;
        _message_begins = false;

        // Eat all currently available messages before the request is fully
        // sent. This is done to avoid:

@ -135,8 +135,8 @@ int zmq::req_t::xsend (msg_t *msg_)
    // If the request was fully sent, flip the FSM into reply-receiving state.
    if (!more) {
        receiving_reply = true;
        message_begins = true;
        _receiving_reply = true;
        _message_begins = true;
    }

    return 0;

@ -145,23 +145,23 @@ int zmq::req_t::xsend (msg_t *msg_)
int zmq::req_t::xrecv (msg_t *msg_)
{
    // If request wasn't send, we can't wait for reply.
    if (!receiving_reply) {
    if (!_receiving_reply) {
        errno = EFSM;
        return -1;
    }

    // Skip messages until one with the right first frames is found.
    while (message_begins) {
    while (_message_begins) {
        // If enabled, the first frame must have the correct request_id.
        if (request_id_frames_enabled) {
        if (_request_id_frames_enabled) {
            int rc = recv_reply_pipe (msg_);
            if (rc != 0)
                return rc;

            if (unlikely (!(msg_->flags () & msg_t::more)
                          || msg_->size () != sizeof (request_id)
                          || msg_->size () != sizeof (_request_id)
                          || *static_cast<uint32_t *> (msg_->data ())
                               != request_id)) {
                               != _request_id)) {
                // Skip the remaining frames and try the next message
                while (msg_->flags () & msg_t::more) {
                    rc = recv_reply_pipe (msg_);

@ -186,7 +186,7 @@ int zmq::req_t::xrecv (msg_t *msg_)
            continue;
        }

        message_begins = false;
        _message_begins = false;
    }

    int rc = recv_reply_pipe (msg_);

@ -195,8 +195,8 @@ int zmq::req_t::xrecv (msg_t *msg_)
    // If the reply is fully received, flip the FSM into request-sending state.
    if (!(msg_->flags () & msg_t::more)) {
        receiving_reply = false;
        message_begins = true;
        _receiving_reply = false;
        _message_begins = true;
    }

    return 0;

@ -206,7 +206,7 @@ bool zmq::req_t::xhas_in ()
{
    // TODO: Duplicates should be removed here.

    if (!receiving_reply)
    if (!_receiving_reply)
        return false;

    return dealer_t::xhas_in ();

@ -214,7 +214,7 @@ bool zmq::req_t::xhas_in ()
bool zmq::req_t::xhas_out ()
{
    if (receiving_reply && strict)
    if (_receiving_reply && _strict)
        return false;

    return dealer_t::xhas_out ();

@ -232,14 +232,14 @@ int zmq::req_t::xsetsockopt (int option_,
    switch (option_) {
        case ZMQ_REQ_CORRELATE:
            if (is_int && value >= 0) {
                request_id_frames_enabled = (value != 0);
                _request_id_frames_enabled = (value != 0);
                return 0;
            }
            break;

        case ZMQ_REQ_RELAXED:
            if (is_int && value >= 0) {
                strict = (value == 0);
                _strict = (value == 0);
                return 0;
            }
            break;

@ -253,8 +253,8 @@ int zmq::req_t::xsetsockopt (int option_,
void zmq::req_t::xpipe_terminated (pipe_t *pipe_)
{
    if (reply_pipe == pipe_)
        reply_pipe = NULL;
    if (_reply_pipe == pipe_)
        _reply_pipe = NULL;
    dealer_t::xpipe_terminated (pipe_);
}

@ -265,7 +265,7 @@ int zmq::req_t::recv_reply_pipe (msg_t *msg_)
        int rc = dealer_t::recvpipe (msg_, &pipe);
        if (rc != 0)
            return rc;
        if (!reply_pipe || pipe == reply_pipe)
        if (!_reply_pipe || pipe == _reply_pipe)
            return 0;
    }
}

@ -276,7 +276,7 @@ zmq::req_session_t::req_session_t (io_thread_t *io_thread_,
                                   const options_t &options_,
                                   address_t *addr_) :
    session_base_t (io_thread_, connect_, socket_, options_, addr_),
    state (bottom)
    _state (bottom)
{
}

@ -291,25 +291,25 @@ int zmq::req_session_t::push_msg (msg_t *msg_)
    if (unlikely (msg_->flags () & msg_t::command))
        return 0;

    switch (state) {
    switch (_state) {
        case bottom:
            if (msg_->flags () == msg_t::more) {
                // In case option ZMQ_CORRELATE is on, allow request_id to be
                // transfered as first frame (would be too cumbersome to check
                // whether the option is actually on or not).
                if (msg_->size () == sizeof (uint32_t)) {
                    state = request_id;
                    _state = request_id;
                    return session_base_t::push_msg (msg_);
                }
                if (msg_->size () == 0) {
                    state = body;
                    _state = body;
                    return session_base_t::push_msg (msg_);
                }
            }
            break;
        case request_id:
            if (msg_->flags () == msg_t::more && msg_->size () == 0) {
                state = body;
                _state = body;
                return session_base_t::push_msg (msg_);
            }
            break;

@ -317,7 +317,7 @@ int zmq::req_session_t::push_msg (msg_t *msg_)
            if (msg_->flags () == msg_t::more)
                return session_base_t::push_msg (msg_);
            if (msg_->flags () == 0) {
                state = bottom;
                _state = bottom;
                return session_base_t::push_msg (msg_);
            }
            break;

@ -329,5 +329,5 @@ int zmq::req_session_t::push_msg (msg_t *msg_)
void zmq::req_session_t::reset ()
{
    session_base_t::reset ();
    state = bottom;
    _state = bottom;
}
14
src/req.hpp
@ -62,26 +62,26 @@ class req_t : public dealer_t
  private:
    // If true, request was already sent and reply wasn't received yet or
    // was received partially.
    bool receiving_reply;
    bool _receiving_reply;

    // If true, we are starting to send/recv a message. The first part
    // of the message must be empty message part (backtrace stack bottom).
    bool message_begins;
    bool _message_begins;

    // The pipe the request was sent to and where the reply is expected.
    zmq::pipe_t *reply_pipe;
    zmq::pipe_t *_reply_pipe;

    // Whether request id frames shall be sent and expected.
    bool request_id_frames_enabled;
    bool _request_id_frames_enabled;

    // The current request id. It is incremented every time before a new
    // request is sent.
    uint32_t request_id;
    uint32_t _request_id;

    // If false, send() will reset its internal state and terminate the
    // reply_pipe's connection instead of failing if a previous request is
    // still pending.
    bool strict;
    bool _strict;

    req_t (const req_t &);
    const req_t &operator= (const req_t &);

@ -107,7 +107,7 @@ class req_session_t : public session_base_t
        bottom,
        request_id,
        body
    } state;
    } _state;

    req_session_t (const req_session_t &);
    const req_session_t &operator= (const req_session_t &);
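
Both flags renamed above map directly onto socket options: `ZMQ_REQ_CORRELATE` turns on `_request_id_frames_enabled` and `ZMQ_REQ_RELAXED` clears `_strict`. Typical client setup through the public API (the endpoint is a placeholder):

#include <zmq.h>

int main ()
{
    void *ctx = zmq_ctx_new ();
    void *req = zmq_socket (ctx, ZMQ_REQ);

    int on = 1;
    // Prefix every request with a correlation-id frame so stale replies
    // can be skipped (drives _request_id_frames_enabled above).
    zmq_setsockopt (req, ZMQ_REQ_CORRELATE, &on, sizeof on);
    // Allow a new request even if the previous reply never arrived
    // (clears _strict, so xsend no longer fails with EFSM).
    zmq_setsockopt (req, ZMQ_REQ_RELAXED, &on, sizeof on);

    zmq_connect (req, "tcp://127.0.0.1:5555"); // placeholder endpoint

    zmq_close (req);
    zmq_ctx_term (ctx);
    return 0;
}
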
260
src/router.cpp
@ -38,35 +38,35 @@
zmq::router_t::router_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
    socket_base_t (parent_, tid_, sid_),
    prefetched (false),
    routing_id_sent (false),
    current_in (NULL),
    terminate_current_in (false),
    more_in (false),
    current_out (NULL),
    more_out (false),
    next_integral_routing_id (generate_random ()),
    mandatory (false),
    _prefetched (false),
    _routing_id_sent (false),
    _current_in (NULL),
    _terminate_current_in (false),
    _more_in (false),
    _current_out (NULL),
    _more_out (false),
    _next_integral_routing_id (generate_random ()),
    _mandatory (false),
    // raw_socket functionality in ROUTER is deprecated
    raw_socket (false),
    probe_router (false),
    handover (false)
    _raw_socket (false),
    _probe_router (false),
    _handover (false)
{
    options.type = ZMQ_ROUTER;
    options.recv_routing_id = true;
    options.raw_socket = false;

    prefetched_id.init ();
    prefetched_msg.init ();
    _prefetched_id.init ();
    _prefetched_msg.init ();
}

zmq::router_t::~router_t ()
{
    zmq_assert (anonymous_pipes.empty ());
    zmq_assert (_anonymous_pipes.empty ());
    ;
    zmq_assert (outpipes.empty ());
    prefetched_id.close ();
    prefetched_msg.close ();
    zmq_assert (_out_pipes.empty ());
    _prefetched_id.close ();
    _prefetched_msg.close ();
}

void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)

@ -75,7 +75,7 @@ void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
    zmq_assert (pipe_);

    if (probe_router) {
    if (_probe_router) {
        msg_t probe_msg;
        int rc = probe_msg.init ();
        errno_assert (rc == 0);

@ -90,9 +90,9 @@ void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
    bool routing_id_ok = identify_peer (pipe_);
    if (routing_id_ok)
        fq.attach (pipe_);
        _fq.attach (pipe_);
    else
        anonymous_pipes.insert (pipe_);
        _anonymous_pipes.insert (pipe_);
}

int zmq::router_t::xsetsockopt (int option_,

@ -116,8 +116,8 @@ int zmq::router_t::xsetsockopt (int option_,
        case ZMQ_ROUTER_RAW:
            if (is_int && value >= 0) {
                raw_socket = (value != 0);
                if (raw_socket) {
                _raw_socket = (value != 0);
                if (_raw_socket) {
                    options.recv_routing_id = false;
                    options.raw_socket = true;
                }

@ -127,21 +127,21 @@ int zmq::router_t::xsetsockopt (int option_,
        case ZMQ_ROUTER_MANDATORY:
            if (is_int && value >= 0) {
                mandatory = (value != 0);
                _mandatory = (value != 0);
                return 0;
            }
            break;

        case ZMQ_PROBE_ROUTER:
            if (is_int && value >= 0) {
                probe_router = (value != 0);
                _probe_router = (value != 0);
                return 0;
            }
            break;

        case ZMQ_ROUTER_HANDOVER:
            if (is_int && value >= 0) {
                handover = (value != 0);
                _handover = (value != 0);
                return 0;
            }
            break;

@ -156,30 +156,30 @@ int zmq::router_t::xsetsockopt (int option_,
void zmq::router_t::xpipe_terminated (pipe_t *pipe_)
{
    std::set<pipe_t *>::iterator it = anonymous_pipes.find (pipe_);
    if (it != anonymous_pipes.end ())
        anonymous_pipes.erase (it);
    std::set<pipe_t *>::iterator it = _anonymous_pipes.find (pipe_);
    if (it != _anonymous_pipes.end ())
        _anonymous_pipes.erase (it);
    else {
        outpipes_t::iterator iter = outpipes.find (pipe_->get_routing_id ());
        zmq_assert (iter != outpipes.end ());
        outpipes.erase (iter);
        fq.pipe_terminated (pipe_);
        outpipes_t::iterator iter = _out_pipes.find (pipe_->get_routing_id ());
        zmq_assert (iter != _out_pipes.end ());
        _out_pipes.erase (iter);
        _fq.pipe_terminated (pipe_);
        pipe_->rollback ();
        if (pipe_ == current_out)
            current_out = NULL;
        if (pipe_ == _current_out)
            _current_out = NULL;
    }
}

void zmq::router_t::xread_activated (pipe_t *pipe_)
{
    std::set<pipe_t *>::iterator it = anonymous_pipes.find (pipe_);
    if (it == anonymous_pipes.end ())
        fq.activated (pipe_);
    std::set<pipe_t *>::iterator it = _anonymous_pipes.find (pipe_);
    if (it == _anonymous_pipes.end ())
        _fq.activated (pipe_);
    else {
        bool routing_id_ok = identify_peer (pipe_);
        if (routing_id_ok) {
            anonymous_pipes.erase (it);
            fq.attach (pipe_);
            _anonymous_pipes.erase (it);
            _fq.attach (pipe_);
        }
    }
}

@ -187,11 +187,11 @@ void zmq::router_t::xread_activated (pipe_t *pipe_)
void zmq::router_t::xwrite_activated (pipe_t *pipe_)
{
    outpipes_t::iterator it;
    for (it = outpipes.begin (); it != outpipes.end (); ++it)
    for (it = _out_pipes.begin (); it != _out_pipes.end (); ++it)
        if (it->second.pipe == pipe_)
            break;

    zmq_assert (it != outpipes.end ());
    zmq_assert (it != _out_pipes.end ());
    zmq_assert (!it->second.active);
    it->second.active = true;
}

@ -200,34 +200,34 @@ int zmq::router_t::xsend (msg_t *msg_)
{
    // If this is the first part of the message it's the ID of the
    // peer to send the message to.
    if (!more_out) {
        zmq_assert (!current_out);
    if (!_more_out) {
        zmq_assert (!_current_out);

        // If we have malformed message (prefix with no subsequent message)
        // then just silently ignore it.
        // TODO: The connections should be killed instead.
        if (msg_->flags () & msg_t::more) {
            more_out = true;
            _more_out = true;

            // Find the pipe associated with the routing id stored in the prefix.
            // If there's no such pipe just silently ignore the message, unless
            // router_mandatory is set.
            blob_t routing_id (static_cast<unsigned char *> (msg_->data ()),
                               msg_->size (), zmq::reference_tag_t ());
            outpipes_t::iterator it = outpipes.find (routing_id);
            outpipes_t::iterator it = _out_pipes.find (routing_id);

            if (it != outpipes.end ()) {
                current_out = it->second.pipe;
            if (it != _out_pipes.end ()) {
                _current_out = it->second.pipe;

                // Check whether pipe is closed or not
                if (!current_out->check_write ()) {
                if (!_current_out->check_write ()) {
                    // Check whether pipe is full or not
                    bool pipe_full = !current_out->check_hwm ();
                    bool pipe_full = !_current_out->check_hwm ();
                    it->second.active = false;
                    current_out = NULL;
                    _current_out = NULL;

                    if (mandatory) {
                        more_out = false;
                    if (_mandatory) {
                        _more_out = false;
                        if (pipe_full)
                            errno = EAGAIN;
                        else

@ -235,8 +235,8 @@ int zmq::router_t::xsend (msg_t *msg_)
                        return -1;
                    }
                }
            } else if (mandatory) {
                more_out = false;
            } else if (_mandatory) {
                _more_out = false;
                errno = EHOSTUNREACH;
|
||||
return -1;
|
||||
}
|
||||
@ -254,36 +254,36 @@ int zmq::router_t::xsend (msg_t *msg_)
|
||||
msg_->reset_flags (msg_t::more);
|
||||
|
||||
// Check whether this is the last part of the message.
|
||||
more_out = (msg_->flags () & msg_t::more) != 0;
|
||||
_more_out = (msg_->flags () & msg_t::more) != 0;
|
||||
|
||||
// Push the message into the pipe. If there's no out pipe, just drop it.
|
||||
if (current_out) {
|
||||
if (_current_out) {
|
||||
// Close the remote connection if user has asked to do so
|
||||
// by sending zero length message.
|
||||
// Pending messages in the pipe will be dropped (on receiving term- ack)
|
||||
if (raw_socket && msg_->size () == 0) {
|
||||
current_out->terminate (false);
|
||||
if (_raw_socket && msg_->size () == 0) {
|
||||
_current_out->terminate (false);
|
||||
int rc = msg_->close ();
|
||||
errno_assert (rc == 0);
|
||||
rc = msg_->init ();
|
||||
errno_assert (rc == 0);
|
||||
current_out = NULL;
|
||||
_current_out = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool ok = current_out->write (msg_);
|
||||
bool ok = _current_out->write (msg_);
|
||||
if (unlikely (!ok)) {
|
||||
// Message failed to send - we must close it ourselves.
|
||||
int rc = msg_->close ();
|
||||
errno_assert (rc == 0);
|
||||
// HWM was checked before, so the pipe must be gone. Roll back
|
||||
// messages that were piped, for example REP labels.
|
||||
current_out->rollback ();
|
||||
current_out = NULL;
|
||||
_current_out->rollback ();
|
||||
_current_out = NULL;
|
||||
} else {
|
||||
if (!more_out) {
|
||||
current_out->flush ();
|
||||
current_out = NULL;
|
||||
if (!_more_out) {
|
||||
_current_out->flush ();
|
||||
_current_out = NULL;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -300,36 +300,36 @@ int zmq::router_t::xsend (msg_t *msg_)
|
||||
|
||||
int zmq::router_t::xrecv (msg_t *msg_)
|
||||
{
|
||||
if (prefetched) {
|
||||
if (!routing_id_sent) {
|
||||
int rc = msg_->move (prefetched_id);
|
||||
if (_prefetched) {
|
||||
if (!_routing_id_sent) {
|
||||
int rc = msg_->move (_prefetched_id);
|
||||
errno_assert (rc == 0);
|
||||
routing_id_sent = true;
|
||||
_routing_id_sent = true;
|
||||
} else {
|
||||
int rc = msg_->move (prefetched_msg);
|
||||
int rc = msg_->move (_prefetched_msg);
|
||||
errno_assert (rc == 0);
|
||||
prefetched = false;
|
||||
_prefetched = false;
|
||||
}
|
||||
more_in = (msg_->flags () & msg_t::more) != 0;
|
||||
_more_in = (msg_->flags () & msg_t::more) != 0;
|
||||
|
||||
if (!more_in) {
|
||||
if (terminate_current_in) {
|
||||
current_in->terminate (true);
|
||||
terminate_current_in = false;
|
||||
if (!_more_in) {
|
||||
if (_terminate_current_in) {
|
||||
_current_in->terminate (true);
|
||||
_terminate_current_in = false;
|
||||
}
|
||||
current_in = NULL;
|
||||
_current_in = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
pipe_t *pipe = NULL;
|
||||
int rc = fq.recvpipe (msg_, &pipe);
|
||||
int rc = _fq.recvpipe (msg_, &pipe);
|
||||
|
||||
// It's possible that we receive peer's routing id. That happens
|
||||
// after reconnection. The current implementation assumes that
|
||||
// the peer always uses the same routing id.
|
||||
while (rc == 0 && msg_->is_routing_id ())
|
||||
rc = fq.recvpipe (msg_, &pipe);
|
||||
rc = _fq.recvpipe (msg_, &pipe);
|
||||
|
||||
if (rc != 0)
|
||||
return -1;
|
||||
@ -337,33 +337,33 @@ int zmq::router_t::xrecv (msg_t *msg_)
|
||||
zmq_assert (pipe != NULL);
|
||||
|
||||
// If we are in the middle of reading a message, just return the next part.
|
||||
if (more_in) {
|
||||
more_in = (msg_->flags () & msg_t::more) != 0;
|
||||
if (_more_in) {
|
||||
_more_in = (msg_->flags () & msg_t::more) != 0;
|
||||
|
||||
if (!more_in) {
|
||||
if (terminate_current_in) {
|
||||
current_in->terminate (true);
|
||||
terminate_current_in = false;
|
||||
if (!_more_in) {
|
||||
if (_terminate_current_in) {
|
||||
_current_in->terminate (true);
|
||||
_terminate_current_in = false;
|
||||
}
|
||||
current_in = NULL;
|
||||
_current_in = NULL;
|
||||
}
|
||||
} else {
|
||||
// We are at the beginning of a message.
|
||||
// Keep the message part we have in the prefetch buffer
|
||||
// and return the ID of the peer instead.
|
||||
rc = prefetched_msg.move (*msg_);
|
||||
rc = _prefetched_msg.move (*msg_);
|
||||
errno_assert (rc == 0);
|
||||
prefetched = true;
|
||||
current_in = pipe;
|
||||
_prefetched = true;
|
||||
_current_in = pipe;
|
||||
|
||||
const blob_t &routing_id = pipe->get_routing_id ();
|
||||
rc = msg_->init_size (routing_id.size ());
|
||||
errno_assert (rc == 0);
|
||||
memcpy (msg_->data (), routing_id.data (), routing_id.size ());
|
||||
msg_->set_flags (msg_t::more);
|
||||
if (prefetched_msg.metadata ())
|
||||
msg_->set_metadata (prefetched_msg.metadata ());
|
||||
routing_id_sent = true;
|
||||
if (_prefetched_msg.metadata ())
|
||||
msg_->set_metadata (_prefetched_msg.metadata ());
|
||||
_routing_id_sent = true;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -371,10 +371,10 @@ int zmq::router_t::xrecv (msg_t *msg_)
|
||||
|
||||
int zmq::router_t::rollback ()
|
||||
{
|
||||
if (current_out) {
|
||||
current_out->rollback ();
|
||||
current_out = NULL;
|
||||
more_out = false;
|
||||
if (_current_out) {
|
||||
_current_out->rollback ();
|
||||
_current_out = NULL;
|
||||
_more_out = false;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -383,24 +383,24 @@ bool zmq::router_t::xhas_in ()
|
||||
{
|
||||
// If we are in the middle of reading the messages, there are
|
||||
// definitely more parts available.
|
||||
if (more_in)
|
||||
if (_more_in)
|
||||
return true;
|
||||
|
||||
// We may already have a message pre-fetched.
|
||||
if (prefetched)
|
||||
if (_prefetched)
|
||||
return true;
|
||||
|
||||
// Try to read the next message.
|
||||
// The message, if read, is kept in the pre-fetch buffer.
|
||||
pipe_t *pipe = NULL;
|
||||
int rc = fq.recvpipe (&prefetched_msg, &pipe);
|
||||
int rc = _fq.recvpipe (&_prefetched_msg, &pipe);
|
||||
|
||||
// It's possible that we receive peer's routing id. That happens
|
||||
// after reconnection. The current implementation assumes that
|
||||
// the peer always uses the same routing id.
|
||||
// TODO: handle the situation when the peer changes its routing id.
|
||||
while (rc == 0 && prefetched_msg.is_routing_id ())
|
||||
rc = fq.recvpipe (&prefetched_msg, &pipe);
|
||||
while (rc == 0 && _prefetched_msg.is_routing_id ())
|
||||
rc = _fq.recvpipe (&_prefetched_msg, &pipe);
|
||||
|
||||
if (rc != 0)
|
||||
return false;
|
||||
@ -408,14 +408,14 @@ bool zmq::router_t::xhas_in ()
|
||||
zmq_assert (pipe != NULL);
|
||||
|
||||
const blob_t &routing_id = pipe->get_routing_id ();
|
||||
rc = prefetched_id.init_size (routing_id.size ());
|
||||
rc = _prefetched_id.init_size (routing_id.size ());
|
||||
errno_assert (rc == 0);
|
||||
memcpy (prefetched_id.data (), routing_id.data (), routing_id.size ());
|
||||
prefetched_id.set_flags (msg_t::more);
|
||||
memcpy (_prefetched_id.data (), routing_id.data (), routing_id.size ());
|
||||
_prefetched_id.set_flags (msg_t::more);
|
||||
|
||||
prefetched = true;
|
||||
routing_id_sent = false;
|
||||
current_in = pipe;
|
||||
_prefetched = true;
|
||||
_routing_id_sent = false;
|
||||
_current_in = pipe;
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -426,12 +426,12 @@ bool zmq::router_t::xhas_out ()
|
||||
// MANDATORY is set). Whether actual attempt to write succeeds depends
|
||||
// on whitch pipe the message is going to be routed to.
|
||||
|
||||
if (!mandatory)
|
||||
if (!_mandatory)
|
||||
return true;
|
||||
|
||||
bool has_out = false;
|
||||
outpipes_t::iterator it;
|
||||
for (it = outpipes.begin (); it != outpipes.end (); ++it)
|
||||
for (it = _out_pipes.begin (); it != _out_pipes.end (); ++it)
|
||||
has_out |= it->second.pipe->check_hwm ();
|
||||
|
||||
return has_out;
|
||||
@ -439,7 +439,7 @@ bool zmq::router_t::xhas_out ()
|
||||
|
||||
const zmq::blob_t &zmq::router_t::get_credential () const
|
||||
{
|
||||
return fq.get_credential ();
|
||||
return _fq.get_credential ();
|
||||
}
|
||||
|
||||
int zmq::router_t::get_peer_state (const void *routing_id_,
|
||||
@ -448,13 +448,13 @@ int zmq::router_t::get_peer_state (const void *routing_id_,
|
||||
int res = 0;
|
||||
|
||||
blob_t routing_id_blob ((unsigned char *) routing_id_, routing_id_size_);
|
||||
outpipes_t::const_iterator it = outpipes.find (routing_id_blob);
|
||||
if (it == outpipes.end ()) {
|
||||
outpipes_t::const_iterator it = _out_pipes.find (routing_id_blob);
|
||||
if (it == _out_pipes.end ()) {
|
||||
errno = EHOSTUNREACH;
|
||||
return -1;
|
||||
}
|
||||
|
||||
const outpipe_t &outpipe = it->second;
|
||||
const out_pipe_t &outpipe = it->second;
|
||||
if (outpipe.pipe->check_hwm ())
|
||||
res |= ZMQ_POLLOUT;
|
||||
|
||||
@ -473,15 +473,15 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_)
|
||||
routing_id.set ((unsigned char *) connect_routing_id.c_str (),
|
||||
connect_routing_id.length ());
|
||||
connect_routing_id.clear ();
|
||||
outpipes_t::iterator it = outpipes.find (routing_id);
|
||||
if (it != outpipes.end ())
|
||||
outpipes_t::iterator it = _out_pipes.find (routing_id);
|
||||
if (it != _out_pipes.end ())
|
||||
zmq_assert (false); // Not allowed to duplicate an existing rid
|
||||
} else if (
|
||||
options
|
||||
.raw_socket) { // Always assign an integral routing id for raw-socket
|
||||
unsigned char buf[5];
|
||||
buf[0] = 0;
|
||||
put_uint32 (buf + 1, next_integral_routing_id++);
|
||||
put_uint32 (buf + 1, _next_integral_routing_id++);
|
||||
routing_id.set (buf, sizeof buf);
|
||||
} else if (!options.raw_socket) {
|
||||
// Pick up handshake cases and also case where next integral routing id is set
|
||||
@ -494,17 +494,17 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_)
|
||||
// Fall back on the auto-generation
|
||||
unsigned char buf[5];
|
||||
buf[0] = 0;
|
||||
put_uint32 (buf + 1, next_integral_routing_id++);
|
||||
put_uint32 (buf + 1, _next_integral_routing_id++);
|
||||
routing_id.set (buf, sizeof buf);
|
||||
msg.close ();
|
||||
} else {
|
||||
routing_id.set (static_cast<unsigned char *> (msg.data ()),
|
||||
msg.size ());
|
||||
outpipes_t::iterator it = outpipes.find (routing_id);
|
||||
outpipes_t::iterator it = _out_pipes.find (routing_id);
|
||||
msg.close ();
|
||||
|
||||
if (it != outpipes.end ()) {
|
||||
if (!handover)
|
||||
if (it != _out_pipes.end ()) {
|
||||
if (!_handover)
|
||||
// Ignore peers with duplicate ID
|
||||
return false;
|
||||
|
||||
@ -513,14 +513,14 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_)
|
||||
// existing pipe so we can terminate it asynchronously.
|
||||
unsigned char buf[5];
|
||||
buf[0] = 0;
|
||||
put_uint32 (buf + 1, next_integral_routing_id++);
|
||||
put_uint32 (buf + 1, _next_integral_routing_id++);
|
||||
blob_t new_routing_id (buf, sizeof buf);
|
||||
|
||||
it->second.pipe->set_router_socket_routing_id (new_routing_id);
|
||||
outpipe_t existing_outpipe = {it->second.pipe,
|
||||
it->second.active};
|
||||
out_pipe_t existing_outpipe = {it->second.pipe,
|
||||
it->second.active};
|
||||
|
||||
ok = outpipes
|
||||
ok = _out_pipes
|
||||
.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (new_routing_id),
|
||||
existing_outpipe)
|
||||
.second;
|
||||
@ -528,10 +528,10 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_)
|
||||
|
||||
// Remove the existing routing id entry to allow the new
|
||||
// connection to take the routing id.
|
||||
outpipes.erase (it);
|
||||
_out_pipes.erase (it);
|
||||
|
||||
if (existing_outpipe.pipe == current_in)
|
||||
terminate_current_in = true;
|
||||
if (existing_outpipe.pipe == _current_in)
|
||||
_terminate_current_in = true;
|
||||
else
|
||||
existing_outpipe.pipe->terminate (true);
|
||||
}
|
||||
@ -540,8 +540,8 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_)
|
||||
|
||||
pipe_->set_router_socket_routing_id (routing_id);
|
||||
// Add the record into output pipes lookup table
|
||||
outpipe_t outpipe = {pipe_, true};
|
||||
ok = outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe)
|
||||
out_pipe_t outpipe = {pipe_, true};
|
||||
ok = _out_pipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe)
|
||||
.second;
|
||||
zmq_assert (ok);
|
||||
|
||||
|
549
src/router.cpp~RF40cad05.TMP
Normal file
549
src/router.cpp~RF40cad05.TMP
Normal file
@ -0,0 +1,549 @@
/*
Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file

This file is part of libzmq, the ZeroMQ core engine in C++.

libzmq is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License (LGPL) as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

As a special exception, the Contributors give you permission to link
this library with independent modules to produce an executable,
regardless of the license terms of these independent modules, and to
copy and distribute the resulting executable under terms of your choice,
provided that you also meet, for each linked independent module, the
terms and conditions of the license of that module. An independent
module is a module which is not derived from or based on this library.
If you modify this library, you must extend this exception to your
version of the library.

libzmq is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "precompiled.hpp"
#include "macros.hpp"
#include "router.hpp"
#include "pipe.hpp"
#include "wire.hpp"
#include "random.hpp"
#include "likely.hpp"
#include "err.hpp"

zmq::router_t::router_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_),
prefetched (false),
routing_id_sent (false),
current_in (NULL),
terminate_current_in (false),
more_in (false),
current_out (NULL),
more_out (false),
next_integral_routing_id (generate_random ()),
mandatory (false),
// raw_socket functionality in ROUTER is deprecated
raw_socket (false),
probe_router (false),
handover (false)
{
options.type = ZMQ_ROUTER;
options.recv_routing_id = true;
options.raw_socket = false;

prefetched_id.init ();
prefetched_msg.init ();
}

zmq::router_t::~router_t ()
{
zmq_assert (anonymous_pipes.empty ());
;
zmq_assert (outpipes.empty ());
prefetched_id.close ();
prefetched_msg.close ();
}

void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
{
LIBZMQ_UNUSED (subscribe_to_all_);

zmq_assert (pipe_);

if (probe_router) {
msg_t probe_msg;
int rc = probe_msg.init ();
errno_assert (rc == 0);

rc = pipe_->write (&probe_msg);
// zmq_assert (rc) is not applicable here, since it is not a bug.
pipe_->flush ();

rc = probe_msg.close ();
errno_assert (rc == 0);
}

bool routing_id_ok = identify_peer (pipe_);
if (routing_id_ok)
fq.attach (pipe_);
else
anonymous_pipes.insert (pipe_);
}

int zmq::router_t::xsetsockopt (int option_,
const void *optval_,
size_t optvallen_)
{
bool is_int = (optvallen_ == sizeof (int));
int value = 0;
if (is_int)
memcpy (&value, optval_, sizeof (int));

switch (option_) {
case ZMQ_CONNECT_ROUTING_ID:
// TODO why isn't it possible to set an empty connect_routing_id
// (which is the default value)
if (optval_ && optvallen_) {
connect_routing_id.assign ((char *) optval_, optvallen_);
return 0;
}
break;

case ZMQ_ROUTER_RAW:
if (is_int && value >= 0) {
raw_socket = (value != 0);
if (raw_socket) {
options.recv_routing_id = false;
options.raw_socket = true;
}
return 0;
}
break;

case ZMQ_ROUTER_MANDATORY:
if (is_int && value >= 0) {
mandatory = (value != 0);
return 0;
}
break;

case ZMQ_PROBE_ROUTER:
if (is_int && value >= 0) {
probe_router = (value != 0);
return 0;
}
break;

case ZMQ_ROUTER_HANDOVER:
if (is_int && value >= 0) {
handover = (value != 0);
return 0;
}
break;

default:
break;
}
errno = EINVAL;
return -1;
}


void zmq::router_t::xpipe_terminated (pipe_t *pipe_)
{
std::set<pipe_t *>::iterator it = anonymous_pipes.find (pipe_);
if (it != anonymous_pipes.end ())
anonymous_pipes.erase (it);
else {
outpipes_t::iterator iter = outpipes.find (pipe_->get_routing_id ());
zmq_assert (iter != outpipes.end ());
outpipes.erase (iter);
fq.pipe_terminated (pipe_);
pipe_->rollback ();
if (pipe_ == current_out)
current_out = NULL;
}
}

void zmq::router_t::xread_activated (pipe_t *pipe_)
{
std::set<pipe_t *>::iterator it = anonymous_pipes.find (pipe_);
if (it == anonymous_pipes.end ())
fq.activated (pipe_);
else {
bool routing_id_ok = identify_peer (pipe_);
if (routing_id_ok) {
anonymous_pipes.erase (it);
fq.attach (pipe_);
}
}
}

void zmq::router_t::xwrite_activated (pipe_t *pipe_)
{
outpipes_t::iterator it;
for (it = outpipes.begin (); it != outpipes.end (); ++it)
if (it->second.pipe == pipe_)
break;

zmq_assert (it != outpipes.end ());
zmq_assert (!it->second.active);
it->second.active = true;
}

int zmq::router_t::xsend (msg_t *msg_)
{
// If this is the first part of the message it's the ID of the
// peer to send the message to.
if (!more_out) {
zmq_assert (!current_out);

// If we have a malformed message (prefix with no subsequent message)
// then just silently ignore it.
// TODO: The connections should be killed instead.
if (msg_->flags () & msg_t::more) {
more_out = true;

// Find the pipe associated with the routing id stored in the prefix.
// If there's no such pipe just silently ignore the message, unless
// router_mandatory is set.
blob_t routing_id (static_cast<unsigned char *> (msg_->data ()),
msg_->size (), zmq::reference_tag_t ());
outpipes_t::iterator it = outpipes.find (routing_id);

if (it != outpipes.end ()) {
current_out = it->second.pipe;

// Check whether pipe is closed or not
if (!current_out->check_write ()) {
// Check whether pipe is full or not
bool pipe_full = !current_out->check_hwm ();
it->second.active = false;
current_out = NULL;

if (mandatory) {
more_out = false;
if (pipe_full)
errno = EAGAIN;
else
errno = EHOSTUNREACH;
return -1;
}
}
} else if (mandatory) {
more_out = false;
errno = EHOSTUNREACH;
return -1;
}
}

int rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
return 0;
}

// Ignore the MORE flag for raw-sock or assert?
if (options.raw_socket)
msg_->reset_flags (msg_t::more);

// Check whether this is the last part of the message.
more_out = (msg_->flags () & msg_t::more) != 0;

// Push the message into the pipe. If there's no out pipe, just drop it.
if (current_out) {
// Close the remote connection if user has asked to do so
// by sending zero length message.
// Pending messages in the pipe will be dropped (on receiving term-ack)
if (raw_socket && msg_->size () == 0) {
current_out->terminate (false);
int rc = msg_->close ();
errno_assert (rc == 0);
rc = msg_->init ();
errno_assert (rc == 0);
current_out = NULL;
return 0;
}

bool ok = current_out->write (msg_);
if (unlikely (!ok)) {
// Message failed to send - we must close it ourselves.
int rc = msg_->close ();
errno_assert (rc == 0);
// HWM was checked before, so the pipe must be gone. Roll back
// messages that were piped, for example REP labels.
current_out->rollback ();
current_out = NULL;
} else {
if (!more_out) {
current_out->flush ();
current_out = NULL;
}
}
} else {
int rc = msg_->close ();
errno_assert (rc == 0);
}

// Detach the message from the data buffer.
int rc = msg_->init ();
errno_assert (rc == 0);

return 0;
}

int zmq::router_t::xrecv (msg_t *msg_)
{
if (prefetched) {
if (!routing_id_sent) {
int rc = msg_->move (prefetched_id);
errno_assert (rc == 0);
routing_id_sent = true;
} else {
int rc = msg_->move (prefetched_msg);
errno_assert (rc == 0);
prefetched = false;
}
more_in = (msg_->flags () & msg_t::more) != 0;

if (!more_in) {
if (terminate_current_in) {
current_in->terminate (true);
terminate_current_in = false;
}
current_in = NULL;
}
return 0;
}

pipe_t *pipe = NULL;
int rc = fq.recvpipe (msg_, &pipe);

// It's possible that we receive peer's routing id. That happens
// after reconnection. The current implementation assumes that
// the peer always uses the same routing id.
while (rc == 0 && msg_->is_routing_id ())
rc = fq.recvpipe (msg_, &pipe);

if (rc != 0)
return -1;

zmq_assert (pipe != NULL);

// If we are in the middle of reading a message, just return the next part.
if (more_in) {
more_in = (msg_->flags () & msg_t::more) != 0;

if (!more_in) {
if (terminate_current_in) {
current_in->terminate (true);
terminate_current_in = false;
}
current_in = NULL;
}
} else {
// We are at the beginning of a message.
// Keep the message part we have in the prefetch buffer
// and return the ID of the peer instead.
rc = prefetched_msg.move (*msg_);
errno_assert (rc == 0);
prefetched = true;
current_in = pipe;

const blob_t &routing_id = pipe->get_routing_id ();
rc = msg_->init_size (routing_id.size ());
errno_assert (rc == 0);
memcpy (msg_->data (), routing_id.data (), routing_id.size ());
msg_->set_flags (msg_t::more);
if (prefetched_msg.metadata ())
msg_->set_metadata (prefetched_msg.metadata ());
routing_id_sent = true;
}

return 0;
}

int zmq::router_t::rollback ()
{
if (current_out) {
current_out->rollback ();
current_out = NULL;
more_out = false;
}
return 0;
}

bool zmq::router_t::xhas_in ()
{
// If we are in the middle of reading the messages, there are
// definitely more parts available.
if (more_in)
return true;

// We may already have a message pre-fetched.
if (prefetched)
return true;

// Try to read the next message.
// The message, if read, is kept in the pre-fetch buffer.
pipe_t *pipe = NULL;
int rc = fq.recvpipe (&prefetched_msg, &pipe);

// It's possible that we receive peer's routing id. That happens
// after reconnection. The current implementation assumes that
// the peer always uses the same routing id.
// TODO: handle the situation when the peer changes its routing id.
while (rc == 0 && prefetched_msg.is_routing_id ())
rc = fq.recvpipe (&prefetched_msg, &pipe);

if (rc != 0)
return false;

zmq_assert (pipe != NULL);

const blob_t &routing_id = pipe->get_routing_id ();
rc = prefetched_id.init_size (routing_id.size ());
errno_assert (rc == 0);
memcpy (prefetched_id.data (), routing_id.data (), routing_id.size ());
prefetched_id.set_flags (msg_t::more);

prefetched = true;
routing_id_sent = false;
current_in = pipe;

return true;
}

bool zmq::router_t::xhas_out ()
{
// In theory, ROUTER socket is always ready for writing (except when
// MANDATORY is set). Whether actual attempt to write succeeds depends
// on which pipe the message is going to be routed to.

if (!mandatory)
return true;

bool has_out = false;
outpipes_t::iterator it;
for (it = outpipes.begin (); it != outpipes.end (); ++it)
has_out |= it->second.pipe->check_hwm ();

return has_out;
}

const zmq::blob_t &zmq::router_t::get_credential () const
{
return fq.get_credential ();
}

int zmq::router_t::get_peer_state (const void *routing_id_,
size_t routing_id_size_) const
{
int res = 0;

blob_t routing_id_blob ((unsigned char *) routing_id_, routing_id_size_);
outpipes_t::const_iterator it = outpipes.find (routing_id_blob);
if (it == outpipes.end ()) {
errno = EHOSTUNREACH;
return -1;
}

const outpipe_t &outpipe = it->second;
if (outpipe.pipe->check_hwm ())
res |= ZMQ_POLLOUT;

/** \todo does it make any sense to check the inpipe as well? */

return res;
}

bool zmq::router_t::identify_peer (pipe_t *pipe_)
{
msg_t msg;
bool ok;
blob_t routing_id;

if (connect_routing_id.length ()) {
routing_id.set ((unsigned char *) connect_routing_id.c_str (),
connect_routing_id.length ());
connect_routing_id.clear ();
outpipes_t::iterator it = outpipes.find (routing_id);
if (it != outpipes.end ())
zmq_assert (false); // Not allowed to duplicate an existing rid
} else if (
options
.raw_socket) { // Always assign an integral routing id for raw-socket
unsigned char buf[5];
buf[0] = 0;
put_uint32 (buf + 1, next_integral_routing_id++);
routing_id.set (buf, sizeof buf);
} else if (!options.raw_socket) {
// Pick up handshake cases and also case where next integral routing id is set
msg.init ();
ok = pipe_->read (&msg);
if (!ok)
return false;

if (msg.size () == 0) {
// Fall back on the auto-generation
unsigned char buf[5];
buf[0] = 0;
put_uint32 (buf + 1, next_integral_routing_id++);
routing_id.set (buf, sizeof buf);
msg.close ();
} else {
routing_id.set (static_cast<unsigned char *> (msg.data ()),
msg.size ());
outpipes_t::iterator it = outpipes.find (routing_id);
msg.close ();

if (it != outpipes.end ()) {
if (!handover)
// Ignore peers with duplicate ID
return false;

// We will allow the new connection to take over this
// routing id. Temporarily assign a new routing id to the
// existing pipe so we can terminate it asynchronously.
unsigned char buf[5];
buf[0] = 0;
put_uint32 (buf + 1, next_integral_routing_id++);
blob_t new_routing_id (buf, sizeof buf);

it->second.pipe->set_router_socket_routing_id (new_routing_id);
outpipe_t existing_outpipe = {it->second.pipe,
it->second.active};

ok = outpipes
.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (new_routing_id),
existing_outpipe)
.second;
zmq_assert (ok);

// Remove the existing routing id entry to allow the new
// connection to take the routing id.
outpipes.erase (it);

if (existing_outpipe.pipe == current_in)
terminate_current_in = true;
else
existing_outpipe.pipe->terminate (true);
}
}
}

pipe_->set_router_socket_routing_id (routing_id);
// Add the record into output pipes lookup table
outpipe_t outpipe = {pipe_, true};
ok = outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe)
.second;
zmq_assert (ok);

return true;
}
@ -73,65 +73,65 @@ class router_t : public socket_base_t
bool identify_peer (pipe_t *pipe_);

// Fair queueing object for inbound pipes.
fq_t fq;
fq_t _fq;

// True iff there is a message held in the pre-fetch buffer.
bool prefetched;
bool _prefetched;

// If true, the receiver got the message part with
// the peer's identity.
bool routing_id_sent;
bool _routing_id_sent;

// Holds the prefetched identity.
msg_t prefetched_id;
msg_t _prefetched_id;

// Holds the prefetched message.
msg_t prefetched_msg;
msg_t _prefetched_msg;

// The pipe we are currently reading from
zmq::pipe_t *current_in;
zmq::pipe_t *_current_in;

// Should current_in be terminated after all parts are received?
bool terminate_current_in;
bool _terminate_current_in;

// If true, more incoming message parts are expected.
bool more_in;
bool _more_in;

struct outpipe_t
struct out_pipe_t
{
zmq::pipe_t *pipe;
bool active;
};

// We keep a set of pipes that have not been identified yet.
std::set<pipe_t *> anonymous_pipes;
std::set<pipe_t *> _anonymous_pipes;

// Outbound pipes indexed by the peer IDs.
typedef std::map<blob_t, outpipe_t> outpipes_t;
outpipes_t outpipes;
typedef std::map<blob_t, out_pipe_t> outpipes_t;
outpipes_t _out_pipes;

// The pipe we are currently writing to.
zmq::pipe_t *current_out;
zmq::pipe_t *_current_out;

// If true, more outgoing message parts are expected.
bool more_out;
bool _more_out;

// Routing IDs are generated. It's a simple increment and wrap-over
// algorithm. This value is the next ID to use (if not used already).
uint32_t next_integral_routing_id;
uint32_t _next_integral_routing_id;

// If true, report EAGAIN to the caller instead of silently dropping
// the message targeting an unknown peer.
bool mandatory;
bool raw_socket;
bool _mandatory;
bool _raw_socket;

// if true, send an empty message to every connected router peer
bool probe_router;
bool _probe_router;

// If true, the router will reassign an identity upon encountering a
// name collision. The new pipe will take the identity, the old pipe
// will be terminated.
bool handover;
bool _handover;

router_t (const router_t &);
const router_t &operator= (const router_t &);

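Side note on the renamed _mandatory member above: it is what switches xsend from silently dropping messages for unknown peers to reporting EHOSTUNREACH (or EAGAIN when the peer's pipe is full). A minimal usage sketch through the public libzmq C API, shown here only for orientation and not part of this commit:

// Illustrative only: enabling the behaviour guarded by _mandatory.
#include <zmq.h>
#include <assert.h>

int main (void)
{
    void *ctx = zmq_ctx_new ();
    void *router = zmq_socket (ctx, ZMQ_ROUTER);
    int on = 1;
    //  With ZMQ_ROUTER_MANDATORY set, sending to an unknown routing id
    //  fails with EHOSTUNREACH, and EAGAIN is reported when the peer's
    //  pipe is full, matching the _mandatory branches in xsend above.
    int rc = zmq_setsockopt (router, ZMQ_ROUTER_MANDATORY, &on, sizeof on);
    assert (rc == 0);
    zmq_close (router);
    zmq_ctx_term (ctx);
    return 0;
}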
@ -53,17 +53,17 @@ void zmq::scatter_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
pipe_->set_nodelay ();

zmq_assert (pipe_);
lb.attach (pipe_);
_lb.attach (pipe_);
}

void zmq::scatter_t::xwrite_activated (pipe_t *pipe_)
{
lb.activated (pipe_);
_lb.activated (pipe_);
}

void zmq::scatter_t::xpipe_terminated (pipe_t *pipe_)
{
lb.pipe_terminated (pipe_);
_lb.pipe_terminated (pipe_);
}

int zmq::scatter_t::xsend (msg_t *msg_)
@ -74,10 +74,10 @@ int zmq::scatter_t::xsend (msg_t *msg_)
return -1;
}

return lb.send (msg_);
return _lb.send (msg_);
}

bool zmq::scatter_t::xhas_out ()
{
return lb.has_out ();
return _lb.has_out ();
}

@ -57,7 +57,7 @@ class scatter_t : public socket_base_t

private:
// Load balancer managing the outbound pipes.
lb_t lb;
lb_t _lb;

scatter_t (const scatter_t &);
const scatter_t &operator= (const scatter_t &);

@ -59,14 +59,14 @@ zmq::select_t::select_t (const zmq::thread_ctx_t &ctx_) :
worker_poller_base_t (ctx_),
#if defined ZMQ_HAVE_WINDOWS
// Fine as long as map is not cleared.
current_family_entry_it (family_entries.end ())
_current_family_entry_it (_family_entries.end ())
#else
maxfd (retired_fd)
_max_fd (retired_fd)
#endif
{
#if defined ZMQ_HAVE_WINDOWS
for (size_t i = 0; i < fd_family_cache_size; ++i)
fd_family_cache[i] = std::make_pair (retired_fd, 0);
_fd_family_cache[i] = std::make_pair (retired_fd, 0);
#endif
}

@ -87,14 +87,16 @@ zmq::select_t::handle_t zmq::select_t::add_fd (fd_t fd_, i_poll_events *events_)
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (fd_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = family_entries[family];
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
family_entry.fd_entries.push_back (fd_entry);
FD_SET (fd_, &family_entry.fds_set.error);

#if !defined ZMQ_HAVE_WINDOWS
if (fd_ > maxfd)
maxfd = fd_;
if (fd_ > _max_fd)
_max_fd = fd_;
#endif

adjust_load (1);
@ -171,7 +173,7 @@ int zmq::select_t::try_retire_fd_entry (
fd_entry_t &fd_entry = *fd_entry_it;
zmq_assert (fd_entry.fd != retired_fd);

if (family_entry_it_ != current_family_entry_it) {
if (family_entry_it_ != _current_family_entry_it) {
// Family is not currently being iterated and can be safely
// modified in-place. So later it can be skipped without
// re-verifying its content.
@ -195,16 +197,16 @@ void zmq::select_t::rm_fd (handle_t handle_)
u_short family = get_fd_family (handle_);
if (family != AF_UNSPEC) {
family_entries_t::iterator family_entry_it =
family_entries.find (family);
_family_entries.find (family);

retired += try_retire_fd_entry (family_entry_it, handle_);
} else {
// get_fd_family may fail and return AF_UNSPEC if the socket was not
// successfully connected. In that case, we need to look for the
// socket in all family_entries.
family_entries_t::iterator end = family_entries.end ();
family_entries_t::iterator end = _family_entries.end ();
for (family_entries_t::iterator family_entry_it =
family_entries.begin ();
_family_entries.begin ();
family_entry_it != end; ++family_entry_it) {
if (retired += try_retire_fd_entry (family_entry_it, handle_)) {
break;
@ -213,24 +215,24 @@ void zmq::select_t::rm_fd (handle_t handle_)
}
#else
fd_entries_t::iterator fd_entry_it =
find_fd_entry_by_handle (family_entry.fd_entries, handle_);
assert (fd_entry_it != family_entry.fd_entries.end ());
find_fd_entry_by_handle (_family_entry.fd_entries, handle_);
assert (fd_entry_it != _family_entry.fd_entries.end ());

zmq_assert (fd_entry_it->fd != retired_fd);
fd_entry_it->fd = retired_fd;
family_entry.fds_set.remove_fd (handle_);
_family_entry.fds_set.remove_fd (handle_);

++retired;

if (handle_ == maxfd) {
maxfd = retired_fd;
for (fd_entry_it = family_entry.fd_entries.begin ();
fd_entry_it != family_entry.fd_entries.end (); ++fd_entry_it)
if (fd_entry_it->fd > maxfd)
maxfd = fd_entry_it->fd;
if (handle_ == _max_fd) {
_max_fd = retired_fd;
for (fd_entry_it = _family_entry.fd_entries.begin ();
fd_entry_it != _family_entry.fd_entries.end (); ++fd_entry_it)
if (fd_entry_it->fd > _max_fd)
_max_fd = fd_entry_it->fd;
}

family_entry.has_retired = true;
_family_entry.has_retired = true;
#endif
zmq_assert (retired == 1);
adjust_load (-1);
@ -242,7 +244,9 @@ void zmq::select_t::set_pollin (handle_t handle_)
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = family_entries[family];
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_SET (handle_, &family_entry.fds_set.read);
}
@ -253,7 +257,9 @@ void zmq::select_t::reset_pollin (handle_t handle_)
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = family_entries[family];
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_CLR (handle_, &family_entry.fds_set.read);
}
@ -264,7 +270,9 @@ void zmq::select_t::set_pollout (handle_t handle_)
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = family_entries[family];
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_SET (handle_, &family_entry.fds_set.write);
}
@ -275,7 +283,9 @@ void zmq::select_t::reset_pollout (handle_t handle_)
#if defined ZMQ_HAVE_WINDOWS
u_short family = get_fd_family (handle_);
wsa_assert (family != AF_UNSPEC);
family_entry_t &family_entry = family_entries[family];
family_entry_t &family_entry = _family_entries[family];
#else
family_entry_t &family_entry = _family_entry;
#endif
FD_CLR (handle_, &family_entry.fds_set.write);
}
@ -300,9 +310,9 @@ void zmq::select_t::loop ()
cleanup_retired ();

#ifdef _WIN32
if (family_entries.empty ()) {
if (_family_entries.empty ()) {
#else
if (family_entry.fd_entries.empty ()) {
if (_family_entry.fd_entries.empty ()) {
#endif
zmq_assert (get_load () == 0);

@ -338,7 +348,7 @@ void zmq::select_t::loop ()

// If there is just one family, there is no reason to use WSA events.
int rc = 0;
const bool use_wsa_events = family_entries.size () > 1;
const bool use_wsa_events = _family_entries.size () > 1;
if (use_wsa_events) {
// TODO: I don't really understand why we are doing this. If any of
// the events was signaled, we will call select for each fd_family
@ -350,8 +360,8 @@ void zmq::select_t::loop ()
wsa_events_t wsa_events;

for (family_entries_t::iterator family_entry_it =
family_entries.begin ();
family_entry_it != family_entries.end (); ++family_entry_it) {
_family_entries.begin ();
family_entry_it != _family_entries.end (); ++family_entry_it) {
family_entry_t &family_entry = family_entry_it->second;

for (fd_entries_t::iterator fd_entry_it =
@ -392,10 +402,10 @@ void zmq::select_t::loop ()
continue;
}

for (current_family_entry_it = family_entries.begin ();
current_family_entry_it != family_entries.end ();
++current_family_entry_it) {
family_entry_t &family_entry = current_family_entry_it->second;
for (_current_family_entry_it = _family_entries.begin ();
_current_family_entry_it != _family_entries.end ();
++_current_family_entry_it) {
family_entry_t &family_entry = _current_family_entry_it->second;


if (use_wsa_events) {
@ -408,7 +418,7 @@ void zmq::select_t::loop ()
}
}
#else
select_family_entry (family_entry, maxfd + 1, timeout > 0, tv);
select_family_entry (_family_entry, _max_fd + 1, timeout > 0, tv);
#endif
}
}
@ -518,15 +528,15 @@ bool zmq::select_t::cleanup_retired (family_entry_t &family_entry_)
void zmq::select_t::cleanup_retired ()
{
#ifdef _WIN32
for (family_entries_t::iterator it = family_entries.begin ();
it != family_entries.end ();) {
for (family_entries_t::iterator it = _family_entries.begin ();
it != _family_entries.end ();) {
if (cleanup_retired (it->second))
it = family_entries.erase (it);
it = _family_entries.erase (it);
else
++it;
}
#else
cleanup_retired (family_entry);
cleanup_retired (_family_entry);
#endif
}

@ -547,7 +557,7 @@ u_short zmq::select_t::get_fd_family (fd_t fd_)
// for the same sockets, and determine_fd_family is expensive
size_t i;
for (i = 0; i < fd_family_cache_size; ++i) {
const std::pair<fd_t, u_short> &entry = fd_family_cache[i];
const std::pair<fd_t, u_short> &entry = _fd_family_cache[i];
if (entry.first == fd_) {
return entry.second;
}
@ -558,11 +568,11 @@ u_short zmq::select_t::get_fd_family (fd_t fd_)
std::pair<fd_t, u_short> res =
std::make_pair (fd_, determine_fd_family (fd_));
if (i < fd_family_cache_size) {
fd_family_cache[i] = res;
_fd_family_cache[i] = res;
} else {
// just overwrite a random entry
// could be optimized by some LRU strategy
fd_family_cache[rand () % fd_family_cache_size] = res;
_fd_family_cache[rand () % fd_family_cache_size] = res;
}

return res.second;
@ -131,15 +131,15 @@ class select_t : public worker_poller_base_t
WSAEVENT events[4];
};

family_entries_t family_entries;
family_entries_t _family_entries;
// See loop for details.
family_entries_t::iterator current_family_entry_it;
family_entries_t::iterator _current_family_entry_it;

int try_retire_fd_entry (family_entries_t::iterator family_entry_it_,
zmq::fd_t &handle_);

static const size_t fd_family_cache_size = 8;
std::pair<fd_t, u_short> fd_family_cache[fd_family_cache_size];
std::pair<fd_t, u_short> _fd_family_cache[fd_family_cache_size];

u_short get_fd_family (fd_t fd_);

@ -147,8 +147,8 @@ class select_t : public worker_poller_base_t
static u_short determine_fd_family (fd_t fd_);
#else
// on non-Windows, we can treat all fds as one family
family_entry_t family_entry;
fd_t maxfd;
family_entry_t _family_entry;
fd_t _max_fd;
#endif

void cleanup_retired ();
@ -38,14 +38,14 @@

zmq::server_t::server_t (class ctx_t *parent_, uint32_t tid_, int sid_) :
socket_base_t (parent_, tid_, sid_, true),
next_routing_id (generate_random ())
_next_routing_id (generate_random ())
{
options.type = ZMQ_SERVER;
}

zmq::server_t::~server_t ()
{
zmq_assert (outpipes.empty ());
zmq_assert (_out_pipes.empty ());
}

void zmq::server_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
@ -54,41 +54,41 @@ void zmq::server_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_)

zmq_assert (pipe_);

uint32_t routing_id = next_routing_id++;
uint32_t routing_id = _next_routing_id++;
if (!routing_id)
routing_id = next_routing_id++; // Never use Routing ID zero
routing_id = _next_routing_id++; // Never use Routing ID zero

pipe_->set_server_socket_routing_id (routing_id);
// Add the record into output pipes lookup table
outpipe_t outpipe = {pipe_, true};
bool ok = outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (routing_id, outpipe).second;
bool ok = _out_pipes.ZMQ_MAP_INSERT_OR_EMPLACE (routing_id, outpipe).second;
zmq_assert (ok);

fq.attach (pipe_);
_fq.attach (pipe_);
}

void zmq::server_t::xpipe_terminated (pipe_t *pipe_)
{
outpipes_t::iterator it =
outpipes.find (pipe_->get_server_socket_routing_id ());
zmq_assert (it != outpipes.end ());
outpipes.erase (it);
fq.pipe_terminated (pipe_);
out_pipes_t::iterator it =
_out_pipes.find (pipe_->get_server_socket_routing_id ());
zmq_assert (it != _out_pipes.end ());
_out_pipes.erase (it);
_fq.pipe_terminated (pipe_);
}

void zmq::server_t::xread_activated (pipe_t *pipe_)
{
fq.activated (pipe_);
_fq.activated (pipe_);
}

void zmq::server_t::xwrite_activated (pipe_t *pipe_)
{
outpipes_t::iterator it;
for (it = outpipes.begin (); it != outpipes.end (); ++it)
out_pipes_t::iterator it;
for (it = _out_pipes.begin (); it != _out_pipes.end (); ++it)
if (it->second.pipe == pipe_)
break;

zmq_assert (it != outpipes.end ());
zmq_assert (it != _out_pipes.end ());
zmq_assert (!it->second.active);
it->second.active = true;
}
@ -102,9 +102,9 @@ int zmq::server_t::xsend (msg_t *msg_)
}
// Find the pipe associated with the routing stored in the message.
uint32_t routing_id = msg_->get_routing_id ();
outpipes_t::iterator it = outpipes.find (routing_id);
out_pipes_t::iterator it = _out_pipes.find (routing_id);

if (it != outpipes.end ()) {
if (it != _out_pipes.end ()) {
if (!it->second.pipe->check_write ()) {
it->second.active = false;
errno = EAGAIN;
@ -137,19 +137,19 @@ int zmq::server_t::xsend (msg_t *msg_)
int zmq::server_t::xrecv (msg_t *msg_)
{
pipe_t *pipe = NULL;
int rc = fq.recvpipe (msg_, &pipe);
int rc = _fq.recvpipe (msg_, &pipe);

// Drop any messages with more flag
while (rc == 0 && msg_->flags () & msg_t::more) {
// drop all frames of the current multi-frame message
rc = fq.recvpipe (msg_, NULL);
rc = _fq.recvpipe (msg_, NULL);

while (rc == 0 && msg_->flags () & msg_t::more)
rc = fq.recvpipe (msg_, NULL);
rc = _fq.recvpipe (msg_, NULL);

// get the new message
if (rc == 0)
rc = fq.recvpipe (msg_, &pipe);
rc = _fq.recvpipe (msg_, &pipe);
}

if (rc != 0)
@ -165,7 +165,7 @@ int zmq::server_t::xrecv (msg_t *msg_)

bool zmq::server_t::xhas_in ()
{
return fq.has_in ();
return _fq.has_in ();
}

bool zmq::server_t::xhas_out ()
@ -178,5 +178,5 @@ bool zmq::server_t::xhas_out ()

const zmq::blob_t &zmq::server_t::get_credential () const
{
return fq.get_credential ();
return _fq.get_credential ();
}

@ -66,7 +66,7 @@ class server_t : public socket_base_t

private:
// Fair queueing object for inbound pipes.
fq_t fq;
fq_t _fq;

struct outpipe_t
{
@ -75,12 +75,12 @@ class server_t : public socket_base_t
};

// Outbound pipes indexed by the peer IDs.
typedef std::map<uint32_t, outpipe_t> outpipes_t;
outpipes_t outpipes;
typedef std::map<uint32_t, outpipe_t> out_pipes_t;
out_pipes_t _out_pipes;

// Routing IDs are generated. It's a simple increment and wrap-over
// algorithm. This value is the next ID to use (if not used already).
uint32_t next_routing_id;
uint32_t _next_routing_id;

server_t (const server_t &);
const server_t &operator= (const server_t &);

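The comment above on _next_routing_id describes a simple increment-and-wrap scheme, visible in xattach_pipe earlier in this diff. A minimal sketch of that scheme, included only to make the comment concrete (the seed constant stands in for generate_random (); nothing below is code from this commit):

// Illustrative sketch of the increment-and-wrap routing-id scheme.
#include <stdint.h>

static uint32_t next_routing_id = 0x9e3779b9u; // stand-in for generate_random ()

static uint32_t assign_routing_id (void)
{
    uint32_t routing_id = next_routing_id++; // unsigned wrap at 2^32 is well defined
    if (!routing_id)
        routing_id = next_routing_id++; // routing id zero is reserved, so skip it
    return routing_id;
}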
@ -104,59 +104,59 @@ zmq::session_base_t::session_base_t (class io_thread_t *io_thread_,
|
||||
address_t *addr_) :
|
||||
own_t (io_thread_, options_),
|
||||
io_object_t (io_thread_),
|
||||
active (active_),
|
||||
pipe (NULL),
|
||||
zap_pipe (NULL),
|
||||
incomplete_in (false),
|
||||
pending (false),
|
||||
engine (NULL),
|
||||
socket (socket_),
|
||||
io_thread (io_thread_),
|
||||
has_linger_timer (false),
|
||||
addr (addr_)
|
||||
_active (active_),
|
||||
_pipe (NULL),
|
||||
_zap_pipe (NULL),
|
||||
_incomplete_in (false),
|
||||
_pending (false),
|
||||
_engine (NULL),
|
||||
_socket (socket_),
|
||||
_io_thread (io_thread_),
|
||||
_has_linger_timer (false),
|
||||
_addr (addr_)
|
||||
{
|
||||
}
|
||||
|
||||
const char *zmq::session_base_t::get_endpoint () const
|
||||
{
|
||||
return engine->get_endpoint ();
|
||||
return _engine->get_endpoint ();
|
||||
}
|
||||
|
||||
zmq::session_base_t::~session_base_t ()
|
||||
{
|
||||
zmq_assert (!pipe);
|
||||
zmq_assert (!zap_pipe);
|
||||
zmq_assert (!_pipe);
|
||||
zmq_assert (!_zap_pipe);
|
||||
|
||||
// If there's still a pending linger timer, remove it.
|
||||
if (has_linger_timer) {
|
||||
if (_has_linger_timer) {
|
||||
cancel_timer (linger_timer_id);
|
||||
has_linger_timer = false;
|
||||
_has_linger_timer = false;
|
||||
}
|
||||
|
||||
// Close the engine.
|
||||
if (engine)
|
||||
engine->terminate ();
|
||||
if (_engine)
|
||||
_engine->terminate ();
|
||||
|
||||
LIBZMQ_DELETE (addr);
|
||||
LIBZMQ_DELETE (_addr);
|
||||
}
|
||||
|
||||
void zmq::session_base_t::attach_pipe (pipe_t *pipe_)
|
||||
{
|
||||
zmq_assert (!is_terminating ());
|
||||
zmq_assert (!pipe);
|
||||
zmq_assert (!_pipe);
|
||||
zmq_assert (pipe_);
|
||||
pipe = pipe_;
|
||||
pipe->set_event_sink (this);
|
||||
_pipe = pipe_;
|
||||
_pipe->set_event_sink (this);
|
||||
}
|
||||
|
||||
int zmq::session_base_t::pull_msg (msg_t *msg_)
|
||||
{
|
||||
if (!pipe || !pipe->read (msg_)) {
|
||||
if (!_pipe || !_pipe->read (msg_)) {
|
||||
errno = EAGAIN;
|
||||
return -1;
|
||||
}
|
||||
|
||||
incomplete_in = (msg_->flags () & msg_t::more) != 0;
|
||||
_incomplete_in = (msg_->flags () & msg_t::more) != 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -165,7 +165,7 @@ int zmq::session_base_t::push_msg (msg_t *msg_)
{
    if (msg_->flags () & msg_t::command)
        return 0;
    if (pipe && pipe->write (msg_)) {
    if (_pipe && _pipe->write (msg_)) {
        int rc = msg_->init ();
        errno_assert (rc == 0);
        return 0;
@ -177,12 +177,12 @@ int zmq::session_base_t::push_msg (msg_t *msg_)

int zmq::session_base_t::read_zap_msg (msg_t *msg_)
{
    if (zap_pipe == NULL) {
    if (_zap_pipe == NULL) {
        errno = ENOTCONN;
        return -1;
    }

    if (!zap_pipe->read (msg_)) {
    if (!_zap_pipe->read (msg_)) {
        errno = EAGAIN;
        return -1;
    }
@ -192,13 +192,13 @@ int zmq::session_base_t::read_zap_msg (msg_t *msg_)

int zmq::session_base_t::write_zap_msg (msg_t *msg_)
{
    if (zap_pipe == NULL || !zap_pipe->write (msg_)) {
    if (_zap_pipe == NULL || !_zap_pipe->write (msg_)) {
        errno = ENOTCONN;
        return -1;
    }

    if ((msg_->flags () & msg_t::more) == 0)
        zap_pipe->flush ();
        _zap_pipe->flush ();

    const int rc = msg_->init ();
    errno_assert (rc == 0);
@ -211,21 +211,21 @@ void zmq::session_base_t::reset ()

void zmq::session_base_t::flush ()
{
    if (pipe)
        pipe->flush ();
    if (_pipe)
        _pipe->flush ();
}

void zmq::session_base_t::clean_pipes ()
{
    zmq_assert (pipe != NULL);
    zmq_assert (_pipe != NULL);

    // Get rid of half-processed messages in the out pipe. Flush any
    // unflushed messages upstream.
    pipe->rollback ();
    pipe->flush ();
    _pipe->rollback ();
    _pipe->flush ();

    // Remove any half-read message from the in pipe.
    while (incomplete_in) {
    while (_incomplete_in) {
        msg_t msg;
        int rc = msg.init ();
        errno_assert (rc == 0);
@ -239,26 +239,26 @@ void zmq::session_base_t::clean_pipes ()
void zmq::session_base_t::pipe_terminated (pipe_t *pipe_)
{
    // Drop the reference to the deallocated pipe if required.
    zmq_assert (pipe_ == pipe || pipe_ == zap_pipe
                || terminating_pipes.count (pipe_) == 1);
    zmq_assert (pipe_ == _pipe || pipe_ == _zap_pipe
                || _terminating_pipes.count (pipe_) == 1);

    if (pipe_ == pipe) {
    if (pipe_ == _pipe) {
        // If this is our current pipe, remove it
        pipe = NULL;
        if (has_linger_timer) {
        _pipe = NULL;
        if (_has_linger_timer) {
            cancel_timer (linger_timer_id);
            has_linger_timer = false;
            _has_linger_timer = false;
        }
    } else if (pipe_ == zap_pipe)
        zap_pipe = NULL;
    } else if (pipe_ == _zap_pipe)
        _zap_pipe = NULL;
    else
        // Remove the pipe from the detached pipes set
        terminating_pipes.erase (pipe_);
        _terminating_pipes.erase (pipe_);

    if (!is_terminating () && options.raw_socket) {
        if (engine) {
            engine->terminate ();
            engine = NULL;
        if (_engine) {
            _engine->terminate ();
            _engine = NULL;
        }
        terminate ();
    }
@ -266,8 +266,8 @@ void zmq::session_base_t::pipe_terminated (pipe_t *pipe_)
    // If we are waiting for pending messages to be sent, at this point
    // we are sure that there will be no more messages and we can proceed
    // with termination safely.
    if (pending && !pipe && !zap_pipe && terminating_pipes.empty ()) {
        pending = false;
    if (_pending && !_pipe && !_zap_pipe && _terminating_pipes.empty ()) {
        _pending = false;
        own_t::process_term (0);
    }
}
@ -275,34 +275,34 @@ void zmq::session_base_t::pipe_terminated (pipe_t *pipe_)
void zmq::session_base_t::read_activated (pipe_t *pipe_)
{
    // Skip activating if we're detaching this pipe
    if (unlikely (pipe_ != pipe && pipe_ != zap_pipe)) {
        zmq_assert (terminating_pipes.count (pipe_) == 1);
    if (unlikely (pipe_ != _pipe && pipe_ != _zap_pipe)) {
        zmq_assert (_terminating_pipes.count (pipe_) == 1);
        return;
    }

    if (unlikely (engine == NULL)) {
        pipe->check_read ();
    if (unlikely (_engine == NULL)) {
        _pipe->check_read ();
        return;
    }

    if (likely (pipe_ == pipe))
        engine->restart_output ();
    if (likely (pipe_ == _pipe))
        _engine->restart_output ();
    else {
        // i.e. pipe_ == zap_pipe
        engine->zap_msg_available ();
        _engine->zap_msg_available ();
    }
}

void zmq::session_base_t::write_activated (pipe_t *pipe_)
{
    // Skip activating if we're detaching this pipe
    if (pipe != pipe_) {
        zmq_assert (terminating_pipes.count (pipe_) == 1);
    if (_pipe != pipe_) {
        zmq_assert (_terminating_pipes.count (pipe_) == 1);
        return;
    }

    if (engine)
        engine->restart_input ();
    if (_engine)
        _engine->restart_input ();
}

void zmq::session_base_t::hiccuped (pipe_t *)
@ -314,12 +314,12 @@ void zmq::session_base_t::hiccuped (pipe_t *)

zmq::socket_base_t *zmq::session_base_t::get_socket ()
{
    return socket;
    return _socket;
}

void zmq::session_base_t::process_plug ()
{
    if (active)
    if (_active)
        start_connecting (false);
}

@ -331,7 +331,7 @@ void zmq::session_base_t::process_plug ()
// security flaw.
int zmq::session_base_t::zap_connect ()
{
    if (zap_pipe != NULL)
    if (_zap_pipe != NULL)
        return 0;

    endpoint_t peer = find_endpoint ("inproc://zeromq.zap.01");
@ -352,9 +352,9 @@ int zmq::session_base_t::zap_connect ()
    errno_assert (rc == 0);

    // Attach local end of the pipe to this socket object.
    zap_pipe = new_pipes[0];
    zap_pipe->set_nodelay ();
    zap_pipe->set_event_sink (this);
    _zap_pipe = new_pipes[0];
    _zap_pipe->set_nodelay ();
    _zap_pipe->set_event_sink (this);

    send_bind (peer.socket, new_pipes[1], false);

@ -364,9 +364,9 @@ int zmq::session_base_t::zap_connect ()
        rc = id.init ();
        errno_assert (rc == 0);
        id.set_flags (msg_t::routing_id);
        bool ok = zap_pipe->write (&id);
        bool ok = _zap_pipe->write (&id);
        zmq_assert (ok);
        zap_pipe->flush ();
        _zap_pipe->flush ();
    }

    return 0;
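
zap_connect () only wires the session to whatever is bound at inproc://zeromq.zap.01; the handler itself lives in user code. A hedged sketch of a minimal allow-everything ZAP handler, with the frame layout following the ZAP spec (rfc.zeromq.org/spec/27); `ctx` is assumed to be the same context the sockets use, and error handling is elided:

#include <zmq.h>

// Accept every connection request arriving on the ZAP endpoint.
static void zap_allow_all (void *ctx)
{
    void *handler = zmq_socket (ctx, ZMQ_REP);
    zmq_bind (handler, "inproc://zeromq.zap.01");
    while (1) {
        char version[16], request_id[256], scratch[256];
        int vsize = zmq_recv (handler, version, sizeof version, 0);
        int rsize = zmq_recv (handler, request_id, sizeof request_id, 0);
        // Drain the rest of the request (domain, address, identity,
        // mechanism, credentials...).
        int more;
        size_t more_size = sizeof more;
        zmq_getsockopt (handler, ZMQ_RCVMORE, &more, &more_size);
        while (more) {
            zmq_recv (handler, scratch, sizeof scratch, 0);
            zmq_getsockopt (handler, ZMQ_RCVMORE, &more, &more_size);
        }
        // Reply: version, request id, status code, status text,
        // user id, metadata.
        zmq_send (handler, version, vsize, ZMQ_SNDMORE);
        zmq_send (handler, request_id, rsize, ZMQ_SNDMORE);
        zmq_send (handler, "200", 3, ZMQ_SNDMORE);
        zmq_send (handler, "OK", 2, ZMQ_SNDMORE);
        zmq_send (handler, "", 0, ZMQ_SNDMORE);
        zmq_send (handler, "", 0, 0);
    }
}
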
@ -382,8 +382,8 @@ void zmq::session_base_t::process_attach (i_engine *engine_)
    zmq_assert (engine_ != NULL);

    // Create the pipe if it does not exist yet.
    if (!pipe && !is_terminating ()) {
        object_t *parents[2] = {this, socket};
    if (!_pipe && !is_terminating ()) {
        object_t *parents[2] = {this, _socket};
        pipe_t *pipes[2] = {NULL, NULL};

        bool conflate =
@ -402,27 +402,27 @@ void zmq::session_base_t::process_attach (i_engine *engine_)
        pipes[0]->set_event_sink (this);

        // Remember the local end of the pipe.
        zmq_assert (!pipe);
        pipe = pipes[0];
        zmq_assert (!_pipe);
        _pipe = pipes[0];

        // Ask socket to plug into the remote end of the pipe.
        send_bind (socket, pipes[1]);
        send_bind (_socket, pipes[1]);
    }

    // Plug in the engine.
    zmq_assert (!engine);
    engine = engine_;
    engine->plug (io_thread, this);
    zmq_assert (!_engine);
    _engine = engine_;
    _engine->plug (_io_thread, this);
}

void zmq::session_base_t::engine_error (
  zmq::stream_engine_t::error_reason_t reason_)
{
    // Engine is dead. Let's forget about it.
    engine = NULL;
    _engine = NULL;

    // Remove any half-done messages from the pipes.
    if (pipe)
    if (_pipe)
        clean_pipes ();

    zmq_assert (reason_ == stream_engine_t::connection_error
@ -433,17 +433,17 @@ void zmq::session_base_t::engine_error (
        case stream_engine_t::timeout_error:
            /* FALLTHROUGH */
        case stream_engine_t::connection_error:
            if (active) {
            if (_active) {
                reconnect ();
                break;
            }
            /* FALLTHROUGH */
        case stream_engine_t::protocol_error:
            if (pending) {
                if (pipe)
                    pipe->terminate (false);
                if (zap_pipe)
                    zap_pipe->terminate (false);
            if (_pending) {
                if (_pipe)
                    _pipe->terminate (false);
                if (_zap_pipe)
                    _zap_pipe->terminate (false);
            } else {
                terminate ();
            }
@ -451,50 +451,50 @@ void zmq::session_base_t::engine_error (
    }

    // Just in case there's only a delimiter in the pipe.
    if (pipe)
        pipe->check_read ();
    if (_pipe)
        _pipe->check_read ();

    if (zap_pipe)
        zap_pipe->check_read ();
    if (_zap_pipe)
        _zap_pipe->check_read ();
}

void zmq::session_base_t::process_term (int linger_)
{
    zmq_assert (!pending);
    zmq_assert (!_pending);

    // If the termination of the pipe happens before the term command is
    // delivered there's nothing much to do. We can proceed with the
    // standard termination immediately.
    if (!pipe && !zap_pipe && terminating_pipes.empty ()) {
    if (!_pipe && !_zap_pipe && _terminating_pipes.empty ()) {
        own_t::process_term (0);
        return;
    }

    pending = true;
    _pending = true;

    if (pipe != NULL) {
    if (_pipe != NULL) {
        // If there's finite linger value, delay the termination.
        // If linger is infinite (negative) we don't even have to set
        // the timer.
        if (linger_ > 0) {
            zmq_assert (!has_linger_timer);
            zmq_assert (!_has_linger_timer);
            add_timer (linger_, linger_timer_id);
            has_linger_timer = true;
            _has_linger_timer = true;
        }

        // Start pipe termination process. Delay the termination till all messages
        // are processed in case the linger time is non-zero.
        pipe->terminate (linger_ != 0);
        _pipe->terminate (linger_ != 0);

        // TODO: Should this go into pipe_t::terminate ?
        // In case there's no engine and there's only delimiter in the
        // pipe it wouldn't be ever read. Thus we check for it explicitly.
        if (!engine)
            pipe->check_read ();
        if (!_engine)
            _pipe->check_read ();
    }

    if (zap_pipe != NULL)
        zap_pipe->terminate (false);
    if (_zap_pipe != NULL)
        _zap_pipe->terminate (false);
}
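
The linger branch above is driven entirely by the socket's linger option. A sketch of how it is typically set from the API side, with a hypothetical socket `s` and error checks elided:

#include <zmq.h>

static void close_with_linger (void *s)
{
    int linger = 1000;  // > 0: keep flushing for up to 1 s, then the
                        // linger timer above fires
    // int linger = 0;  // discard pending messages at once
    // int linger = -1; // infinite: no linger timer is ever set
    zmq_setsockopt (s, ZMQ_LINGER, &linger, sizeof linger);
    zmq_close (s);
}
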
void zmq::session_base_t::timer_event (int id_)
@ -502,28 +502,28 @@ void zmq::session_base_t::timer_event (int id_)
    // Linger period expired. We can proceed with termination even though
    // there are still pending messages to be sent.
    zmq_assert (id_ == linger_timer_id);
    has_linger_timer = false;
    _has_linger_timer = false;

    // Ask pipe to terminate even though there may be pending messages in it.
    zmq_assert (pipe);
    pipe->terminate (false);
    zmq_assert (_pipe);
    _pipe->terminate (false);
}

void zmq::session_base_t::reconnect ()
{
    // For delayed connect situations, terminate the pipe
    // and reestablish later on
    if (pipe && options.immediate == 1 && addr->protocol != "pgm"
        && addr->protocol != "epgm" && addr->protocol != "norm"
        && addr->protocol != "udp") {
        pipe->hiccup ();
        pipe->terminate (false);
        terminating_pipes.insert (pipe);
        pipe = NULL;
    if (_pipe && options.immediate == 1 && _addr->protocol != "pgm"
        && _addr->protocol != "epgm" && _addr->protocol != "norm"
        && _addr->protocol != "udp") {
        _pipe->hiccup ();
        _pipe->terminate (false);
        _terminating_pipes.insert (_pipe);
        _pipe = NULL;

        if (has_linger_timer) {
        if (_has_linger_timer) {
            cancel_timer (linger_timer_id);
            has_linger_timer = false;
            _has_linger_timer = false;
        }
    }

@ -534,21 +534,21 @@ void zmq::session_base_t::reconnect ()
        start_connecting (true);
    else {
        std::string *ep = new (std::string);
        addr->to_string (*ep);
        send_term_endpoint (socket, ep);
        _addr->to_string (*ep);
        send_term_endpoint (_socket, ep);
    }

    // For subscriber sockets we hiccup the inbound pipe, which will cause
    // the socket object to resend all the subscriptions.
    if (pipe
    if (_pipe
        && (options.type == ZMQ_SUB || options.type == ZMQ_XSUB
            || options.type == ZMQ_DISH))
        pipe->hiccup ();
        _pipe->hiccup ();
}

void zmq::session_base_t::start_connecting (bool wait_)
{
    zmq_assert (active);
    zmq_assert (_active);

    // Choose I/O thread to run connecter in. Given that we are already
    // running in an I/O thread, there must be at least one available.
@ -557,19 +557,19 @@ void zmq::session_base_t::start_connecting (bool wait_)

    // Create the connecter object.

    if (addr->protocol == "tcp") {
    if (_addr->protocol == "tcp") {
        if (!options.socks_proxy_address.empty ()) {
            address_t *proxy_address = new (std::nothrow)
              address_t ("tcp", options.socks_proxy_address, this->get_ctx ());
            alloc_assert (proxy_address);
            socks_connecter_t *connecter =
              new (std::nothrow) socks_connecter_t (io_thread, this, options,
                                                    addr, proxy_address, wait_);
            socks_connecter_t *connecter = new (std::nothrow)
              socks_connecter_t (io_thread, this, options, _addr, proxy_address,
                                 wait_);
            alloc_assert (connecter);
            launch_child (connecter);
        } else {
            tcp_connecter_t *connecter = new (std::nothrow)
              tcp_connecter_t (io_thread, this, options, addr, wait_);
              tcp_connecter_t (io_thread, this, options, _addr, wait_);
            alloc_assert (connecter);
            launch_child (connecter);
        }
@ -578,25 +578,25 @@ void zmq::session_base_t::start_connecting (bool wait_)

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS \
  && !defined ZMQ_HAVE_VXWORKS
    if (addr->protocol == "ipc") {
    if (_addr->protocol == "ipc") {
        ipc_connecter_t *connecter = new (std::nothrow)
          ipc_connecter_t (io_thread, this, options, addr, wait_);
          ipc_connecter_t (io_thread, this, options, _addr, wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
    }
#endif
#if defined ZMQ_HAVE_TIPC
    if (addr->protocol == "tipc") {
    if (_addr->protocol == "tipc") {
        tipc_connecter_t *connecter = new (std::nothrow)
          tipc_connecter_t (io_thread, this, options, addr, wait_);
          tipc_connecter_t (io_thread, this, options, _addr, wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
    }
#endif

    if (addr->protocol == "udp") {
    if (_addr->protocol == "udp") {
        zmq_assert (options.type == ZMQ_DISH || options.type == ZMQ_RADIO
                    || options.type == ZMQ_DGRAM);

@ -617,7 +617,7 @@ void zmq::session_base_t::start_connecting (bool wait_)
            recv = true;
        }

        int rc = engine->init (addr, send, recv);
        int rc = engine->init (_addr, send, recv);
        errno_assert (rc == 0);

        send_attach (this, engine);
@ -628,12 +628,12 @@ void zmq::session_base_t::start_connecting (bool wait_)
#ifdef ZMQ_HAVE_OPENPGM

    // Both PGM and EPGM transports are using the same infrastructure.
    if (addr->protocol == "pgm" || addr->protocol == "epgm") {
    if (_addr->protocol == "pgm" || _addr->protocol == "epgm") {
        zmq_assert (options.type == ZMQ_PUB || options.type == ZMQ_XPUB
                    || options.type == ZMQ_SUB || options.type == ZMQ_XSUB);

        // For the EPGM transport, UDP encapsulation of PGM is used.
        bool const udp_encapsulation = addr->protocol == "epgm";
        bool const udp_encapsulation = _addr->protocol == "epgm";

        // At this point we'll create message pipes to the session straight
        // away. There's no point in delaying it as no concept of 'connect'
@ -645,7 +645,7 @@ void zmq::session_base_t::start_connecting (bool wait_)
            alloc_assert (pgm_sender);

            int rc =
              pgm_sender->init (udp_encapsulation, addr->address.c_str ());
              pgm_sender->init (udp_encapsulation, _addr->address.c_str ());
            errno_assert (rc == 0);

            send_attach (this, pgm_sender);
@ -656,7 +656,7 @@ void zmq::session_base_t::start_connecting (bool wait_)
            alloc_assert (pgm_receiver);

            int rc =
              pgm_receiver->init (udp_encapsulation, addr->address.c_str ());
              pgm_receiver->init (udp_encapsulation, _addr->address.c_str ());
            errno_assert (rc == 0);

            send_attach (this, pgm_receiver);
@ -667,7 +667,7 @@ void zmq::session_base_t::start_connecting (bool wait_)
#endif

#ifdef ZMQ_HAVE_NORM
    if (addr->protocol == "norm") {
    if (_addr->protocol == "norm") {
        // At this point we'll create message pipes to the session straight
        // away. There's no point in delaying it as no concept of 'connect'
        // exists with NORM anyway.
@ -677,7 +677,7 @@ void zmq::session_base_t::start_connecting (bool wait_)
          new (std::nothrow) norm_engine_t (io_thread, options);
        alloc_assert (norm_sender);

        int rc = norm_sender->init (addr->address.c_str (), true, false);
        int rc = norm_sender->init (_addr->address.c_str (), true, false);
        errno_assert (rc == 0);

        send_attach (this, norm_sender);
@ -688,7 +688,7 @@ void zmq::session_base_t::start_connecting (bool wait_)
          new (std::nothrow) norm_engine_t (io_thread, options);
        alloc_assert (norm_receiver);

        int rc = norm_receiver->init (addr->address.c_str (), false, true);
        int rc = norm_receiver->init (_addr->address.c_str (), false, true);
        errno_assert (rc == 0);

        send_attach (this, norm_receiver);
@ -698,9 +698,9 @@ void zmq::session_base_t::start_connecting (bool wait_)
#endif // ZMQ_HAVE_NORM

#if defined ZMQ_HAVE_VMCI
    if (addr->protocol == "vmci") {
    if (_addr->protocol == "vmci") {
        vmci_connecter_t *connecter = new (std::nothrow)
          vmci_connecter_t (io_thread, this, options, addr, wait_);
          vmci_connecter_t (io_thread, this, options, _addr, wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
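
reconnect () is governed by two socket options referenced above. A sketch of setting them through the public API (hypothetical socket `s`, error checks elided):

#include <zmq.h>

static void configure_reconnect (void *s)
{
    int ivl = 250;      // base reconnect interval, in milliseconds
    zmq_setsockopt (s, ZMQ_RECONNECT_IVL, &ivl, sizeof ivl);
    // A value of -1 disables reconnection entirely; the session then
    // sends a term-endpoint command instead of calling start_connecting.

    int immediate = 1;  // matches the options.immediate == 1 branch above
    zmq_setsockopt (s, ZMQ_IMMEDIATE, &immediate, sizeof immediate);
}
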
711
src/session_base.cpp~RF4069b78.TMP
Normal file
@ -120,34 +120,34 @@ class session_base_t : public own_t, public io_object_t, public i_pipe_events

    // If true, this session (re)connects to the peer. Otherwise, it's
    // a transient session created by the listener.
    const bool active;
    const bool _active;

    // Pipe connecting the session to its socket.
    zmq::pipe_t *pipe;
    zmq::pipe_t *_pipe;

    // Pipe used to exchange messages with ZAP socket.
    zmq::pipe_t *zap_pipe;
    zmq::pipe_t *_zap_pipe;

    // The pipes we are disconnecting but whose termination has not yet completed.
    std::set<pipe_t *> terminating_pipes;
    std::set<pipe_t *> _terminating_pipes;

    // This flag is true if the remainder of the message being processed
    // is still in the in pipe.
    bool incomplete_in;
    bool _incomplete_in;

    // True if termination has been suspended to push the pending
    // messages to the network.
    bool pending;
    bool _pending;

    // The protocol I/O engine connected to the session.
    zmq::i_engine *engine;
    zmq::i_engine *_engine;

    // The socket the session belongs to.
    zmq::socket_base_t *socket;
    zmq::socket_base_t *_socket;

    // I/O thread the session is living in. It will be used to plug
    // the engines into the same thread.
    zmq::io_thread_t *io_thread;
    zmq::io_thread_t *_io_thread;

    // ID of the linger timer
    enum
@ -156,10 +156,10 @@ class session_base_t : public own_t, public io_object_t, public i_pipe_events
    };

    // True if the linger timer is running.
    bool has_linger_timer;
    bool _has_linger_timer;

    // Protocol and address to use when connecting.
    address_t *addr;
    address_t *_addr;

    session_base_t (const session_base_t &);
    const session_base_t &operator= (const session_base_t &);
@ -121,9 +121,9 @@ static int close_wait_ms (int fd_, unsigned int max_ms_ = 2000)
zmq::signaler_t::signaler_t ()
{
    // Create the socketpair for signaling.
    if (make_fdpair (&r, &w) == 0) {
        unblock_socket (w);
        unblock_socket (r);
    if (make_fdpair (&_r, &_w) == 0) {
        unblock_socket (_w);
        unblock_socket (_r);
    }
#ifdef HAVE_FORK
    pid = getpid ();
@ -131,38 +131,38 @@ zmq::signaler_t::signaler_t ()
}

// This might get run after some part of construction failed, leaving one or
// both of r and w retired_fd.
// both of _r and _w retired_fd.
zmq::signaler_t::~signaler_t ()
{
#if defined ZMQ_HAVE_EVENTFD
    if (r == retired_fd)
    if (_r == retired_fd)
        return;
    int rc = close_wait_ms (r);
    int rc = close_wait_ms (_r);
    errno_assert (rc == 0);
#elif defined ZMQ_HAVE_WINDOWS
    if (w != retired_fd) {
    if (_w != retired_fd) {
        const struct linger so_linger = {1, 0};
        int rc = setsockopt (w, SOL_SOCKET, SO_LINGER,
        int rc = setsockopt (_w, SOL_SOCKET, SO_LINGER,
                             reinterpret_cast<const char *> (&so_linger),
                             sizeof so_linger);
        // Only check shutdown if WSASTARTUP was previously done
        if (rc == 0 || WSAGetLastError () != WSANOTINITIALISED) {
            wsa_assert (rc != SOCKET_ERROR);
            rc = closesocket (w);
            rc = closesocket (_w);
            wsa_assert (rc != SOCKET_ERROR);
            if (r == retired_fd)
            if (_r == retired_fd)
                return;
            rc = closesocket (r);
            rc = closesocket (_r);
            wsa_assert (rc != SOCKET_ERROR);
        }
    }
#else
    if (w != retired_fd) {
        int rc = close_wait_ms (w);
    if (_w != retired_fd) {
        int rc = close_wait_ms (_w);
        errno_assert (rc == 0);
    }
    if (r != retired_fd) {
        int rc = close_wait_ms (r);
    if (_r != retired_fd) {
        int rc = close_wait_ms (_r);
        errno_assert (rc == 0);
    }
#endif
@ -170,7 +170,7 @@ zmq::signaler_t::~signaler_t ()

zmq::fd_t zmq::signaler_t::get_fd () const
{
    return r;
    return _r;
}

void zmq::signaler_t::send ()
@ -183,13 +183,13 @@ void zmq::signaler_t::send ()
#endif
#if defined ZMQ_HAVE_EVENTFD
    const uint64_t inc = 1;
    ssize_t sz = write (w, &inc, sizeof (inc));
    ssize_t sz = write (_w, &inc, sizeof (inc));
    errno_assert (sz == sizeof (inc));
#elif defined ZMQ_HAVE_WINDOWS
    unsigned char dummy = 0;
    while (true) {
        int nbytes =
          ::send (w, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
          ::send (_w, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
        wsa_assert (nbytes != SOCKET_ERROR);
        if (unlikely (nbytes == SOCKET_ERROR))
            continue;
@ -199,7 +199,7 @@ void zmq::signaler_t::send ()
#elif defined ZMQ_HAVE_VXWORKS
    unsigned char dummy = 0;
    while (true) {
        ssize_t nbytes = ::send (w, (char *) &dummy, sizeof (dummy), 0);
        ssize_t nbytes = ::send (_w, (char *) &dummy, sizeof (dummy), 0);
        if (unlikely (nbytes == -1 && errno == EINTR))
            continue;
#if defined(HAVE_FORK)
@ -215,7 +215,7 @@ void zmq::signaler_t::send ()
#else
    unsigned char dummy = 0;
    while (true) {
        ssize_t nbytes = ::send (w, &dummy, sizeof (dummy), 0);
        ssize_t nbytes = ::send (_w, &dummy, sizeof (dummy), 0);
        if (unlikely (nbytes == -1 && errno == EINTR))
            continue;
#if defined(HAVE_FORK)
@ -245,7 +245,7 @@ int zmq::signaler_t::wait (int timeout_)

#ifdef ZMQ_POLL_BASED_ON_POLL
    struct pollfd pfd;
    pfd.fd = r;
    pfd.fd = _r;
    pfd.events = POLLIN;
    int rc = poll (&pfd, 1, timeout_);
    if (unlikely (rc < 0)) {
@ -272,7 +272,7 @@ int zmq::signaler_t::wait (int timeout_)

    fd_set fds;
    FD_ZERO (&fds);
    FD_SET (r, &fds);
    FD_SET (_r, &fds);
    struct timeval timeout;
    if (timeout_ >= 0) {
        timeout.tv_sec = timeout_ / 1000;
@ -282,7 +282,7 @@ int zmq::signaler_t::wait (int timeout_)
    int rc = select (0, &fds, NULL, NULL, timeout_ >= 0 ? &timeout : NULL);
    wsa_assert (rc != SOCKET_ERROR);
#else
    int rc = select (r + 1, &fds, NULL, NULL, timeout_ >= 0 ? &timeout : NULL);
    int rc = select (_r + 1, &fds, NULL, NULL, timeout_ >= 0 ? &timeout : NULL);
    if (unlikely (rc < 0)) {
        errno_assert (errno == EINTR);
        return -1;
@ -305,14 +305,14 @@ void zmq::signaler_t::recv ()
    // Attempt to read a signal.
#if defined ZMQ_HAVE_EVENTFD
    uint64_t dummy;
    ssize_t sz = read (r, &dummy, sizeof (dummy));
    ssize_t sz = read (_r, &dummy, sizeof (dummy));
    errno_assert (sz == sizeof (dummy));

    // If we accidentally grabbed the next signal(s) along with the current
    // one, return it back to the eventfd object.
    if (unlikely (dummy > 1)) {
        const uint64_t inc = dummy - 1;
        ssize_t sz2 = write (w, &inc, sizeof (inc));
        ssize_t sz2 = write (_w, &inc, sizeof (inc));
        errno_assert (sz2 == sizeof (inc));
        return;
    }
@ -322,13 +322,13 @@ void zmq::signaler_t::recv ()
    unsigned char dummy;
#if defined ZMQ_HAVE_WINDOWS
    int nbytes =
      ::recv (r, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
      ::recv (_r, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
    wsa_assert (nbytes != SOCKET_ERROR);
#elif defined ZMQ_HAVE_VXWORKS
    ssize_t nbytes = ::recv (r, (char *) &dummy, sizeof (dummy), 0);
    ssize_t nbytes = ::recv (_r, (char *) &dummy, sizeof (dummy), 0);
    errno_assert (nbytes >= 0);
#else
    ssize_t nbytes = ::recv (r, &dummy, sizeof (dummy), 0);
    ssize_t nbytes = ::recv (_r, &dummy, sizeof (dummy), 0);
    errno_assert (nbytes >= 0);
#endif
    zmq_assert (nbytes == sizeof (dummy));
@ -341,7 +341,7 @@ int zmq::signaler_t::recv_failable ()
    // Attempt to read a signal.
#if defined ZMQ_HAVE_EVENTFD
    uint64_t dummy;
    ssize_t sz = read (r, &dummy, sizeof (dummy));
    ssize_t sz = read (_r, &dummy, sizeof (dummy));
    if (sz == -1) {
        errno_assert (errno == EAGAIN);
        return -1;
@ -352,7 +352,7 @@ int zmq::signaler_t::recv_failable ()
    // one, return it back to the eventfd object.
    if (unlikely (dummy > 1)) {
        const uint64_t inc = dummy - 1;
        ssize_t sz2 = write (w, &inc, sizeof (inc));
        ssize_t sz2 = write (_w, &inc, sizeof (inc));
        errno_assert (sz2 == sizeof (inc));
        return 0;
    }
@ -363,7 +363,7 @@ int zmq::signaler_t::recv_failable ()
    unsigned char dummy;
#if defined ZMQ_HAVE_WINDOWS
    int nbytes =
      ::recv (r, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
      ::recv (_r, reinterpret_cast<char *> (&dummy), sizeof (dummy), 0);
    if (nbytes == SOCKET_ERROR) {
        const int last_error = WSAGetLastError ();
        if (last_error == WSAEWOULDBLOCK) {
@ -373,7 +373,7 @@ int zmq::signaler_t::recv_failable ()
        wsa_assert (last_error == WSAEWOULDBLOCK);
    }
#elif defined ZMQ_HAVE_VXWORKS
    ssize_t nbytes = ::recv (r, (char *) &dummy, sizeof (dummy), 0);
    ssize_t nbytes = ::recv (_r, (char *) &dummy, sizeof (dummy), 0);
    if (nbytes == -1) {
        if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
            errno = EAGAIN;
@ -383,7 +383,7 @@ int zmq::signaler_t::recv_failable ()
                      || errno == EINTR);
    }
#else
    ssize_t nbytes = ::recv (r, &dummy, sizeof (dummy), 0);
    ssize_t nbytes = ::recv (_r, &dummy, sizeof (dummy), 0);
    if (nbytes == -1) {
        if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
            errno = EAGAIN;
@ -401,15 +401,15 @@ int zmq::signaler_t::recv_failable ()

bool zmq::signaler_t::valid () const
{
    return w != retired_fd;
    return _w != retired_fd;
}

#ifdef HAVE_FORK
void zmq::signaler_t::forked ()
{
    // Close file descriptors created in the parent and create new pair
    close (r);
    close (w);
    make_fdpair (&r, &w);
    close (_r);
    close (_w);
    make_fdpair (&_r, &_w);
}
#endif
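
For reference, the eventfd flavour of this signaler can be sketched standalone. A rough equivalent assuming Linux eventfd(2), with error checks elided and names purely illustrative:

#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

static int make_signaler ()
{
    return eventfd (0, EFD_NONBLOCK);  // one fd plays both _w and _r
}

static void signal_one (int fd)
{
    const uint64_t inc = 1;
    write (fd, &inc, sizeof inc);      // kernel counter += 1
}

static void drain_one (int fd)
{
    uint64_t count;
    read (fd, &count, sizeof count);   // reads and zeroes the counter
    if (count > 1) {                   // grabbed extra signals: put back
        const uint64_t back = count - 1;
        write (fd, &back, sizeof back);
    }
}
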
@ -69,8 +69,8 @@ class signaler_t

    // Underlying write & read file descriptor
    // Will be -1 if an error occurred during initialization, e.g. we
    // exceeded the number of available handles
    fd_t w;
    fd_t r;
    fd_t _w;
    fd_t _r;

    // Disable copying of signaler_t object.
    signaler_t (const signaler_t &);
@ -99,12 +99,12 @@

bool zmq::socket_base_t::check_tag ()
{
    return tag == 0xbaddecaf;
    return _tag == 0xbaddecaf;
}

bool zmq::socket_base_t::is_thread_safe () const
{
    return thread_safe;
    return _thread_safe;
}

zmq::socket_base_t *zmq::socket_base_t::create (int type_,
@ -178,8 +178,8 @@ zmq::socket_base_t *zmq::socket_base_t::create (int type_,

    alloc_assert (s);

    if (s->mailbox == NULL) {
        s->destroyed = true;
    if (s->_mailbox == NULL) {
        s->_destroyed = true;
        LIBZMQ_DELETE (s);
        return NULL;
    }
@ -192,38 +192,38 @@ zmq::socket_base_t::socket_base_t (ctx_t *parent_,
                                   int sid_,
                                   bool thread_safe_) :
    own_t (parent_, tid_),
    tag (0xbaddecaf),
    ctx_terminated (false),
    destroyed (false),
    poller (NULL),
    handle (static_cast<poller_t::handle_t> (NULL)),
    last_tsc (0),
    ticks (0),
    rcvmore (false),
    monitor_socket (NULL),
    monitor_events (0),
    thread_safe (thread_safe_),
    reaper_signaler (NULL),
    sync (),
    monitor_sync ()
    _tag (0xbaddecaf),
    _ctx_terminated (false),
    _destroyed (false),
    _poller (NULL),
    _handle (static_cast<poller_t::handle_t> (NULL)),
    _last_tsc (0),
    _ticks (0),
    _rcvmore (false),
    _monitor_socket (NULL),
    _monitor_events (0),
    _thread_safe (thread_safe_),
    _reaper_signaler (NULL),
    _sync (),
    _monitor_sync ()
{
    options.socket_id = sid_;
    options.ipv6 = (parent_->get (ZMQ_IPV6) != 0);
    options.linger.store (parent_->get (ZMQ_BLOCKY) ? -1 : 0);
    options.zero_copy = parent_->get (ZMQ_ZERO_COPY_RECV) != 0;

    if (thread_safe) {
        mailbox = new (std::nothrow) mailbox_safe_t (&sync);
        zmq_assert (mailbox);
    if (_thread_safe) {
        _mailbox = new (std::nothrow) mailbox_safe_t (&_sync);
        zmq_assert (_mailbox);
    } else {
        mailbox_t *m = new (std::nothrow) mailbox_t ();
        zmq_assert (m);

        if (m->get_fd () != retired_fd)
            mailbox = m;
            _mailbox = m;
        else {
            LIBZMQ_DELETE (m);
            mailbox = NULL;
            _mailbox = NULL;
        }
    }
}
@ -241,21 +241,21 @@ int zmq::socket_base_t::get_peer_state (const void *routing_id_,

zmq::socket_base_t::~socket_base_t ()
{
    if (mailbox)
        LIBZMQ_DELETE (mailbox);
    if (_mailbox)
        LIBZMQ_DELETE (_mailbox);

    if (reaper_signaler)
        LIBZMQ_DELETE (reaper_signaler);
    if (_reaper_signaler)
        LIBZMQ_DELETE (_reaper_signaler);

    scoped_lock_t lock (monitor_sync);
    scoped_lock_t lock (_monitor_sync);
    stop_monitor ();

    zmq_assert (destroyed);
    zmq_assert (_destroyed);
}

zmq::i_mailbox *zmq::socket_base_t::get_mailbox ()
{
    return mailbox;
    return _mailbox;
}

void zmq::socket_base_t::stop ()
@ -345,7 +345,7 @@ void zmq::socket_base_t::attach_pipe (pipe_t *pipe_, bool subscribe_to_all_)
{
    // First, register the pipe so that we can terminate it later on.
    pipe_->set_event_sink (this);
    pipes.push_back (pipe_);
    _pipes.push_back (pipe_);

    // Let the derived socket type know about new pipe.
    xattach_pipe (pipe_, subscribe_to_all_);
@ -362,14 +362,14 @@ int zmq::socket_base_t::setsockopt (int option_,
                                    const void *optval_,
                                    size_t optvallen_)
{
    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
    scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);

    if (!options.is_valid (option_)) {
        errno = EINVAL;
        return -1;
    }

    if (unlikely (ctx_terminated)) {
    if (unlikely (_ctx_terminated)) {
        errno = ETERM;
        return -1;
    }
@ -392,26 +392,27 @@ int zmq::socket_base_t::getsockopt (int option_,
                                    void *optval_,
                                    size_t *optvallen_)
{
    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
    scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);

    if (unlikely (ctx_terminated)) {
    if (unlikely (_ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    if (option_ == ZMQ_RCVMORE) {
        return do_getsockopt<int> (optval_, optvallen_, rcvmore ? 1 : 0);
        return do_getsockopt<int> (optval_, optvallen_, _rcvmore ? 1 : 0);
    }

    if (option_ == ZMQ_FD) {
        if (thread_safe) {
        if (_thread_safe) {
            // thread safe socket doesn't provide file descriptor
            errno = EINVAL;
            return -1;
        }

        return do_getsockopt<fd_t> (
          optval_, optvallen_, (static_cast<mailbox_t *> (mailbox))->get_fd ());
          optval_, optvallen_,
          (static_cast<mailbox_t *> (_mailbox))->get_fd ());
    }

    if (option_ == ZMQ_EVENTS) {
@ -427,11 +428,11 @@ int zmq::socket_base_t::getsockopt (int option_,
    }

    if (option_ == ZMQ_LAST_ENDPOINT) {
        return do_getsockopt (optval_, optvallen_, last_endpoint);
        return do_getsockopt (optval_, optvallen_, _last_endpoint);
    }

    if (option_ == ZMQ_THREAD_SAFE) {
        return do_getsockopt<int> (optval_, optvallen_, thread_safe ? 1 : 0);
        return do_getsockopt<int> (optval_, optvallen_, _thread_safe ? 1 : 0);
    }

    return options.getsockopt (option_, optval_, optvallen_);
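
The ZMQ_FD and ZMQ_EVENTS branches above are meant to be used together: the mailbox descriptor is edge-triggered, so after it signals, readiness must be confirmed via ZMQ_EVENTS. A sketch of the documented pattern (POSIX, error checks elided; on Windows the descriptor is a SOCKET):

#include <poll.h>
#include <zmq.h>

static bool socket_readable (void *s)
{
    int fd;
    size_t fd_size = sizeof fd;
    zmq_getsockopt (s, ZMQ_FD, &fd, &fd_size);  // EINVAL on thread-safe sockets

    struct pollfd pfd = {fd, POLLIN, 0};
    poll (&pfd, 1, -1);                         // wait for mailbox activity

    int events;
    size_t events_size = sizeof events;
    zmq_getsockopt (s, ZMQ_EVENTS, &events, &events_size);
    return (events & ZMQ_POLLIN) != 0;          // confirm actual readiness
}
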
@ -439,7 +440,7 @@ int zmq::socket_base_t::getsockopt (int option_,
|
||||
|
||||
int zmq::socket_base_t::join (const char *group_)
|
||||
{
|
||||
scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
|
||||
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
|
||||
|
||||
int rc = xjoin (group_);
|
||||
|
||||
@ -449,7 +450,7 @@ int zmq::socket_base_t::join (const char *group_)
|
||||
|
||||
int zmq::socket_base_t::leave (const char *group_)
|
||||
{
|
||||
scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
|
||||
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
|
||||
|
||||
int rc = xleave (group_);
|
||||
|
||||
@ -459,25 +460,25 @@ int zmq::socket_base_t::leave (const char *group_)
|
||||
|
||||
void zmq::socket_base_t::add_signaler (signaler_t *s_)
|
||||
{
|
||||
zmq_assert (thread_safe);
|
||||
zmq_assert (_thread_safe);
|
||||
|
||||
scoped_lock_t sync_lock (sync);
|
||||
(static_cast<mailbox_safe_t *> (mailbox))->add_signaler (s_);
|
||||
scoped_lock_t sync_lock (_sync);
|
||||
(static_cast<mailbox_safe_t *> (_mailbox))->add_signaler (s_);
|
||||
}
|
||||
|
||||
void zmq::socket_base_t::remove_signaler (signaler_t *s_)
|
||||
{
|
||||
zmq_assert (thread_safe);
|
||||
zmq_assert (_thread_safe);
|
||||
|
||||
scoped_lock_t sync_lock (sync);
|
||||
(static_cast<mailbox_safe_t *> (mailbox))->remove_signaler (s_);
|
||||
scoped_lock_t sync_lock (_sync);
|
||||
(static_cast<mailbox_safe_t *> (_mailbox))->remove_signaler (s_);
|
||||
}
|
||||
|
||||
int zmq::socket_base_t::bind (const char *addr_)
|
||||
{
|
||||
scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
|
||||
scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
|
||||
|
||||
if (unlikely (ctx_terminated)) {
|
||||
if (unlikely (_ctx_terminated)) {
|
||||
errno = ETERM;
|
||||
return -1;
|
||||
}
|
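
Note: the recurring `scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL)` idiom takes the mutex only for thread-safe sockets; passing NULL makes the guard a no-op. A minimal RAII sketch of that idea, templated so it is self-contained and works with any type exposing lock ()/unlock () (e.g. std::mutex); the name is illustrative, not libzmq's exact class:

    // Sketch: lock on construction if a mutex was supplied, unlock on
    // destruction; NULL means the socket is not thread safe.
    template <typename Mutex> struct scoped_optional_lock_sketch_t
    {
        explicit scoped_optional_lock_sketch_t (Mutex *mutex_) : _mutex (mutex_)
        {
            if (_mutex)
                _mutex->lock ();
        }
        ~scoped_optional_lock_sketch_t ()
        {
            if (_mutex)
                _mutex->unlock ();
        }

      private:
        Mutex *const _mutex;
    };
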
@@ -500,7 +501,7 @@ int zmq::socket_base_t::bind (const char *addr_)
         rc = register_endpoint (addr_, endpoint);
         if (rc == 0) {
             connect_pending (addr_, this);
-            last_endpoint.assign (addr_);
+            _last_endpoint.assign (addr_);
             options.connected = true;
         }
         return rc;
@@ -564,7 +565,7 @@ int zmq::socket_base_t::bind (const char *addr_)
         session->attach_pipe (new_pipes[1]);
 
         // Save last endpoint URI
-        paddr->to_string (last_endpoint);
+        paddr->to_string (_last_endpoint);
 
         add_endpoint (addr_, (own_t *) session, newpipe);
 
@@ -591,9 +592,9 @@ int zmq::socket_base_t::bind (const char *addr_)
         }
 
         // Save last endpoint URI
-        listener->get_address (last_endpoint);
+        listener->get_address (_last_endpoint);
 
-        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
+        add_endpoint (_last_endpoint.c_str (), (own_t *) listener, NULL);
         options.connected = true;
         return 0;
     }
@@ -612,9 +613,9 @@ int zmq::socket_base_t::bind (const char *addr_)
         }
 
         // Save last endpoint URI
-        listener->get_address (last_endpoint);
+        listener->get_address (_last_endpoint);
 
-        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
+        add_endpoint (_last_endpoint.c_str (), (own_t *) listener, NULL);
         options.connected = true;
         return 0;
     }
@@ -632,7 +633,7 @@ int zmq::socket_base_t::bind (const char *addr_)
         }
 
         // Save last endpoint URI
-        listener->get_address (last_endpoint);
+        listener->get_address (_last_endpoint);
 
         add_endpoint (addr_, (own_t *) listener, NULL);
         options.connected = true;
@@ -651,9 +652,9 @@ int zmq::socket_base_t::bind (const char *addr_)
            return -1;
        }
 
-        listener->get_address (last_endpoint);
+        listener->get_address (_last_endpoint);
 
-        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
+        add_endpoint (_last_endpoint.c_str (), (own_t *) listener, NULL);
         options.connected = true;
         return 0;
     }
@@ -665,9 +666,9 @@ int zmq::socket_base_t::bind (const char *addr_)
 
 int zmq::socket_base_t::connect (const char *addr_)
 {
-    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
+    scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
 
-    if (unlikely (ctx_terminated)) {
+    if (unlikely (_ctx_terminated)) {
         errno = ETERM;
         return -1;
     }
@@ -780,10 +781,10 @@ int zmq::socket_base_t::connect (const char *addr_)
         attach_pipe (new_pipes[0]);
 
         // Save last endpoint URI
-        last_endpoint.assign (addr_);
+        _last_endpoint.assign (addr_);
 
         // remember inproc connections for disconnect
-        inprocs.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), new_pipes[0]);
+        _inprocs.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), new_pipes[0]);
 
         options.connected = true;
         return 0;
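
Note: `ZMQ_MAP_INSERT_OR_EMPLACE` shields call sites like the one above from the pre-C++11 map interface. A plausible sketch of such a shim (the real macro lives elsewhere in the tree and may differ in detail):

    // Sketch: use emplace () where C++11 is available, otherwise fall
    // back to insert (std::make_pair (...)).
    #if __cplusplus >= 201103L
    #define ZMQ_MAP_INSERT_OR_EMPLACE(k, v) emplace ((k), (v))
    #else
    #define ZMQ_MAP_INSERT_OR_EMPLACE(k, v) insert (std::make_pair ((k), (v)))
    #endif
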
@@ -792,8 +793,8 @@ int zmq::socket_base_t::connect (const char *addr_)
       (options.type == ZMQ_DEALER || options.type == ZMQ_SUB
       || options.type == ZMQ_PUB || options.type == ZMQ_REQ);
     if (unlikely (is_single_connect)) {
-        const endpoints_t::iterator it = endpoints.find (addr_);
-        if (it != endpoints.end ()) {
+        const endpoints_t::iterator it = _endpoints.find (addr_);
+        if (it != _endpoints.end ()) {
            // There is no valid use for multiple connects for SUB-PUB nor
            // DEALER-ROUTER nor REQ-REP. Multiple connects produces
            // nonsensical results.
@@ -970,7 +971,7 @@ int zmq::socket_base_t::connect (const char *addr_)
     }
 
     // Save last endpoint URI
-    paddr->to_string (last_endpoint);
+    paddr->to_string (_last_endpoint);
 
     add_endpoint (addr_, (own_t *) session, newpipe);
     return 0;
@@ -982,16 +983,16 @@ void zmq::socket_base_t::add_endpoint (const char *addr_,
 {
     // Activate the session. Make it a child of this socket.
     launch_child (endpoint_);
-    endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_),
-                                         endpoint_pipe_t (endpoint_, pipe_));
+    _endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_),
+                                          endpoint_pipe_t (endpoint_, pipe_));
 }
 
 int zmq::socket_base_t::term_endpoint (const char *addr_)
 {
-    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
+    scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
 
     // Check whether the library haven't been shut down yet.
-    if (unlikely (ctx_terminated)) {
+    if (unlikely (_ctx_terminated)) {
         errno = ETERM;
         return -1;
     }
@@ -1024,7 +1025,7 @@ int zmq::socket_base_t::term_endpoint (const char *addr_)
            return 0;
        }
        std::pair<inprocs_t::iterator, inprocs_t::iterator> range =
-          inprocs.equal_range (addr_str);
+          _inprocs.equal_range (addr_str);
        if (range.first == range.second) {
            errno = ENOENT;
            return -1;
@@ -1032,7 +1033,7 @@ int zmq::socket_base_t::term_endpoint (const char *addr_)
 
        for (inprocs_t::iterator it = range.first; it != range.second; ++it)
            it->second->terminate (true);
-        inprocs.erase (range.first, range.second);
+        _inprocs.erase (range.first, range.second);
        return 0;
     }
 
@@ -1044,14 +1045,14 @@ int zmq::socket_base_t::term_endpoint (const char *addr_)
     // resolve before giving up. Given at this stage we don't know whether a
     // socket is connected or bound, try with both.
     if (protocol == "tcp") {
-        if (endpoints.find (resolved_addr) == endpoints.end ()) {
+        if (_endpoints.find (resolved_addr) == _endpoints.end ()) {
            tcp_address_t *tcp_addr = new (std::nothrow) tcp_address_t ();
            alloc_assert (tcp_addr);
            rc = tcp_addr->resolve (address.c_str (), false, options.ipv6);
 
            if (rc == 0) {
                tcp_addr->to_string (resolved_addr);
-                if (endpoints.find (resolved_addr) == endpoints.end ()) {
+                if (_endpoints.find (resolved_addr) == _endpoints.end ()) {
                    rc =
                      tcp_addr->resolve (address.c_str (), true, options.ipv6);
                    if (rc == 0) {
@@ -1065,7 +1066,7 @@ int zmq::socket_base_t::term_endpoint (const char *addr_)
 
     // Find the endpoints range (if any) corresponding to the addr_ string.
     const std::pair<endpoints_t::iterator, endpoints_t::iterator> range =
-      endpoints.equal_range (resolved_addr);
+      _endpoints.equal_range (resolved_addr);
     if (range.first == range.second) {
         errno = ENOENT;
         return -1;
@@ -1077,16 +1078,16 @@ int zmq::socket_base_t::term_endpoint (const char *addr_)
         it->second.second->terminate (false);
         term_child (it->second.first);
     }
-    endpoints.erase (range.first, range.second);
+    _endpoints.erase (range.first, range.second);
     return 0;
 }
 
 int zmq::socket_base_t::send (msg_t *msg_, int flags_)
 {
-    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
+    scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
 
     // Check whether the library haven't been shut down yet.
-    if (unlikely (ctx_terminated)) {
+    if (unlikely (_ctx_terminated)) {
         errno = ETERM;
         return -1;
     }
@@ -1130,7 +1131,7 @@ int zmq::socket_base_t::send (msg_t *msg_, int flags_)
     // Compute the time when the timeout should occur.
     // If the timeout is infinite, don't care.
     int timeout = options.sndtimeo;
-    uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);
+    uint64_t end = timeout < 0 ? 0 : (_clock.now_ms () + timeout);
 
     // Oops, we couldn't send the message. Wait for the next
     // command, process it and try to send the message again.
@@ -1146,7 +1147,7 @@ int zmq::socket_base_t::send (msg_t *msg_, int flags_)
            return -1;
        }
        if (timeout > 0) {
-            timeout = static_cast<int> (end - clock.now_ms ());
+            timeout = static_cast<int> (end - _clock.now_ms ());
            if (timeout <= 0) {
                errno = EAGAIN;
                return -1;
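
Note: the send path above computes an absolute deadline once and re-derives the remaining timeout after each retry, so repeated wake-ups cannot extend the overall wait. The pattern in isolation, as a self-contained sketch (helper names are hypothetical; only the deadline arithmetic mirrors the code above):

    #include <cerrno>
    #include <chrono>
    #include <cstdint>

    // Hypothetical millisecond clock standing in for clock_t::now_ms ().
    static uint64_t now_ms ()
    {
        using namespace std::chrono;
        return duration_cast<milliseconds> (
                 steady_clock::now ().time_since_epoch ())
          .count ();
    }

    // Retry op_ until it succeeds or timeout_ms_ elapses; a negative
    // timeout blocks indefinitely, mirroring ZMQ_SNDTIMEO semantics.
    template <typename Op> static int retry_with_timeout (Op op_, int timeout_ms_)
    {
        const uint64_t end = timeout_ms_ < 0 ? 0 : now_ms () + timeout_ms_;
        while (!op_ ()) {
            if (timeout_ms_ > 0) {
                timeout_ms_ = static_cast<int> (end - now_ms ());
                if (timeout_ms_ <= 0) {
                    errno = EAGAIN; // deadline passed without success
                    return -1;
                }
            }
        }
        return 0;
    }
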
@@ -1159,10 +1160,10 @@ int zmq::socket_base_t::send (msg_t *msg_, int flags_)
 
 int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
 {
-    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
+    scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
 
     // Check whether the library haven't been shut down yet.
-    if (unlikely (ctx_terminated)) {
+    if (unlikely (_ctx_terminated)) {
         errno = ETERM;
         return -1;
     }
@@ -1181,11 +1182,11 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
     // Note that 'recv' uses different command throttling algorithm (the one
     // described above) from the one used by 'send'. This is because counting
     // ticks is more efficient than doing RDTSC all the time.
-    if (++ticks == inbound_poll_rate) {
+    if (++_ticks == inbound_poll_rate) {
        if (unlikely (process_commands (0, false) != 0)) {
            return -1;
        }
-        ticks = 0;
+        _ticks = 0;
     }
 
     // Get the message.
@@ -1208,7 +1209,7 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
        if (unlikely (process_commands (0, false) != 0)) {
            return -1;
        }
-        ticks = 0;
+        _ticks = 0;
 
        rc = xrecv (msg_);
        if (rc < 0) {
@@ -1222,18 +1223,18 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
     // Compute the time when the timeout should occur.
     // If the timeout is infinite, don't care.
     int timeout = options.rcvtimeo;
-    uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);
+    uint64_t end = timeout < 0 ? 0 : (_clock.now_ms () + timeout);
 
     // In blocking scenario, commands are processed over and over again until
     // we are able to fetch a message.
-    bool block = (ticks != 0);
+    bool block = (_ticks != 0);
     while (true) {
        if (unlikely (process_commands (block ? timeout : 0, false) != 0)) {
            return -1;
        }
        rc = xrecv (msg_);
        if (rc == 0) {
-            ticks = 0;
+            _ticks = 0;
            break;
        }
        if (unlikely (errno != EAGAIN)) {
@@ -1241,7 +1242,7 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
        }
        block = true;
        if (timeout > 0) {
-            timeout = static_cast<int> (end - clock.now_ms ());
+            timeout = static_cast<int> (end - _clock.now_ms ());
            if (timeout <= 0) {
                errno = EAGAIN;
                return -1;
@@ -1255,14 +1256,14 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
 
 int zmq::socket_base_t::close ()
 {
-    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
+    scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
 
     // Remove all existing signalers for thread safe sockets
-    if (thread_safe)
-        (static_cast<mailbox_safe_t *> (mailbox))->clear_signalers ();
+    if (_thread_safe)
+        (static_cast<mailbox_safe_t *> (_mailbox))->clear_signalers ();
 
     // Mark the socket as dead
-    tag = 0xdeadbeef;
+    _tag = 0xdeadbeef;
 
 
     // Transfer the ownership of the socket from this application thread
@@ -1286,29 +1287,29 @@ bool zmq::socket_base_t::has_out ()
 void zmq::socket_base_t::start_reaping (poller_t *poller_)
 {
     // Plug the socket to the reaper thread.
-    poller = poller_;
+    _poller = poller_;
 
     fd_t fd;
 
-    if (!thread_safe)
-        fd = (static_cast<mailbox_t *> (mailbox))->get_fd ();
+    if (!_thread_safe)
+        fd = (static_cast<mailbox_t *> (_mailbox))->get_fd ();
     else {
-        scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
+        scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
 
-        reaper_signaler = new (std::nothrow) signaler_t ();
-        zmq_assert (reaper_signaler);
+        _reaper_signaler = new (std::nothrow) signaler_t ();
+        zmq_assert (_reaper_signaler);
 
        // Add signaler to the safe mailbox
-        fd = reaper_signaler->get_fd ();
-        (static_cast<mailbox_safe_t *> (mailbox))
-          ->add_signaler (reaper_signaler);
+        fd = _reaper_signaler->get_fd ();
+        (static_cast<mailbox_safe_t *> (_mailbox))
+          ->add_signaler (_reaper_signaler);
 
        // Send a signal to make sure reaper handle existing commands
-        reaper_signaler->send ();
+        _reaper_signaler->send ();
     }
 
-    handle = poller->add_fd (fd, this);
-    poller->set_pollin (handle);
+    _handle = _poller->add_fd (fd, this);
+    _poller->set_pollin (_handle);
 
     // Initialise the termination and check whether it can be deallocated
     // immediately.
@@ -1322,7 +1323,7 @@ int zmq::socket_base_t::process_commands (int timeout_, bool throttle_)
     command_t cmd;
     if (timeout_ != 0) {
        // If we are asked to wait, simply ask mailbox to wait.
-        rc = mailbox->recv (&cmd, timeout_);
+        rc = _mailbox->recv (&cmd, timeout_);
     } else {
        // If we are asked not to wait, check whether we haven't processed
        // commands recently, so that we can throttle the new commands.
@@ -1340,19 +1341,19 @@ int zmq::socket_base_t::process_commands (int timeout_, bool throttle_)
            // Check whether TSC haven't jumped backwards (in case of migration
            // between CPU cores) and whether certain time have elapsed since
            // last command processing. If it didn't do nothing.
-            if (tsc >= last_tsc && tsc - last_tsc <= max_command_delay)
+            if (tsc >= _last_tsc && tsc - _last_tsc <= max_command_delay)
                return 0;
-            last_tsc = tsc;
+            _last_tsc = tsc;
        }
 
        // Check whether there are any commands pending for this thread.
-        rc = mailbox->recv (&cmd, 0);
+        rc = _mailbox->recv (&cmd, 0);
     }
 
     // Process all available commands.
     while (rc == 0) {
        cmd.destination->process_command (cmd);
-        rc = mailbox->recv (&cmd, 0);
+        rc = _mailbox->recv (&cmd, 0);
     }
 
     if (errno == EINTR)
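
Note: the throttling branch above skips command processing unless enough TSC ticks have elapsed since the last pass, and always processes if the counter jumped backwards (e.g. after migration between CPU cores). Reduced to its essentials, as a sketch (the tick budget below is a hypothetical constant, not libzmq's configured value):

    #include <cstdint>

    static uint64_t last_tsc = 0;
    static const uint64_t max_delay_ticks = 3000000; // hypothetical budget

    // Sketch of the TSC-based throttle: return true at most once per
    // max_delay_ticks, and always after a backwards TSC jump.
    static bool should_process_commands (uint64_t tsc_)
    {
        if (tsc_ >= last_tsc && tsc_ - last_tsc <= max_delay_ticks)
            return false; // throttled: processed recently enough
        last_tsc = tsc_;
        return true;
    }
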
@@ -1360,7 +1361,7 @@ int zmq::socket_base_t::process_commands (int timeout_, bool throttle_)
 
     zmq_assert (errno == EAGAIN);
 
-    if (ctx_terminated) {
+    if (_ctx_terminated) {
         errno = ETERM;
         return -1;
     }
@@ -1374,10 +1375,10 @@ void zmq::socket_base_t::process_stop ()
     // We'll remember the fact so that any blocking call is interrupted and any
     // further attempt to use the socket will return ETERM. The user is still
     // responsible for calling zmq_close on the socket though!
-    scoped_lock_t lock (monitor_sync);
+    scoped_lock_t lock (_monitor_sync);
     stop_monitor ();
 
-    ctx_terminated = true;
+    _ctx_terminated = true;
 }
 
 void zmq::socket_base_t::process_bind (pipe_t *pipe_)
@@ -1393,9 +1394,9 @@ void zmq::socket_base_t::process_term (int linger_)
     unregister_endpoints (this);
 
     // Ask all attached pipes to terminate.
-    for (pipes_t::size_type i = 0; i != pipes.size (); ++i)
-        pipes[i]->terminate (false);
-    register_term_acks (static_cast<int> (pipes.size ()));
+    for (pipes_t::size_type i = 0; i != _pipes.size (); ++i)
+        _pipes[i]->terminate (false);
+    register_term_acks (static_cast<int> (_pipes.size ()));
 
     // Continue the termination process immediately.
     own_t::process_term (linger_);
@@ -1410,16 +1411,16 @@ void zmq::socket_base_t::process_term_endpoint (std::string *endpoint_)
 void zmq::socket_base_t::update_pipe_options (int option_)
 {
     if (option_ == ZMQ_SNDHWM || option_ == ZMQ_RCVHWM) {
-        for (pipes_t::size_type i = 0; i != pipes.size (); ++i) {
-            pipes[i]->set_hwms (options.rcvhwm, options.sndhwm);
-            pipes[i]->send_hwms_to_peer (options.sndhwm, options.rcvhwm);
+        for (pipes_t::size_type i = 0; i != _pipes.size (); ++i) {
+            _pipes[i]->set_hwms (options.rcvhwm, options.sndhwm);
+            _pipes[i]->send_hwms_to_peer (options.sndhwm, options.rcvhwm);
        }
     }
 }
 
 void zmq::socket_base_t::process_destroy ()
 {
-    destroyed = true;
+    _destroyed = true;
 }
 
 int zmq::socket_base_t::xsetsockopt (int, const void *, size_t)
@@ -1492,11 +1493,11 @@ void zmq::socket_base_t::in_event ()
     // that may be available at the moment. Ultimately, the socket will
     // be destroyed.
     {
-        scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);
+        scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL);
 
        // If the socket is thread safe we need to unsignal the reaper signaler
-        if (thread_safe)
-            reaper_signaler->recv ();
+        if (_thread_safe)
+            _reaper_signaler->recv ();
 
        process_commands (0, false);
     }
@@ -1516,9 +1517,9 @@ void zmq::socket_base_t::timer_event (int)
 void zmq::socket_base_t::check_destroy ()
 {
     // If the object was already marked as destroyed, finish the deallocation.
-    if (destroyed) {
+    if (_destroyed) {
        // Remove the socket from the reaper's poller.
-        poller->rm_fd (handle);
+        _poller->rm_fd (_handle);
 
        // Remove the socket from the context.
        destroy_socket (this);
@@ -1556,15 +1557,16 @@ void zmq::socket_base_t::pipe_terminated (pipe_t *pipe_)
     xpipe_terminated (pipe_);
 
     // Remove pipe from inproc pipes
-    for (inprocs_t::iterator it = inprocs.begin (); it != inprocs.end (); ++it)
+    for (inprocs_t::iterator it = _inprocs.begin (); it != _inprocs.end ();
+         ++it)
        if (it->second == pipe_) {
-            inprocs.erase (it);
+            _inprocs.erase (it);
            break;
        }
 
     // Remove the pipe from the list of attached pipes and confirm its
     // termination if we are already shutting down.
-    pipes.erase (pipe_);
+    _pipes.erase (pipe_);
     if (is_terminating ())
        unregister_term_ack ();
 }
@@ -1576,14 +1578,14 @@ void zmq::socket_base_t::extract_flags (msg_t *msg_)
     zmq_assert (options.recv_routing_id);
 
     // Remove MORE flag.
-    rcvmore = (msg_->flags () & msg_t::more) != 0;
+    _rcvmore = (msg_->flags () & msg_t::more) != 0;
 }
 
 int zmq::socket_base_t::monitor (const char *addr_, int events_)
 {
-    scoped_lock_t lock (monitor_sync);
+    scoped_lock_t lock (_monitor_sync);
 
-    if (unlikely (ctx_terminated)) {
+    if (unlikely (_ctx_terminated)) {
         errno = ETERM;
         return -1;
     }
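
Note: `_rcvmore`, set in extract_flags () above, is what ZMQ_RCVMORE reports through getsockopt (). A typical consumer drains a multi-frame message like this (a sketch against the public zmq.h API; `socket_` is a caller-supplied handle):

    #include <zmq.h>

    // Sketch: receive every frame of one logical message, using
    // ZMQ_RCVMORE to detect whether more frames follow.
    static int drain_message (void *socket_)
    {
        int more = 1;
        while (more) {
            zmq_msg_t frame;
            zmq_msg_init (&frame);
            if (zmq_msg_recv (&frame, socket_, 0) == -1) {
                zmq_msg_close (&frame);
                return -1; // further error handling elided in this sketch
            }
            size_t more_size = sizeof (more);
            zmq_getsockopt (socket_, ZMQ_RCVMORE, &more, &more_size);
            zmq_msg_close (&frame);
        }
        return 0;
    }
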
@@ -1605,24 +1607,24 @@ int zmq::socket_base_t::monitor (const char *addr_, int events_)
         return -1;
     }
     // already monitoring. Stop previous monitor before starting new one.
-    if (monitor_socket != NULL) {
+    if (_monitor_socket != NULL) {
         stop_monitor (true);
     }
     // Register events to monitor
-    monitor_events = events_;
-    monitor_socket = zmq_socket (get_ctx (), ZMQ_PAIR);
-    if (monitor_socket == NULL)
+    _monitor_events = events_;
+    _monitor_socket = zmq_socket (get_ctx (), ZMQ_PAIR);
+    if (_monitor_socket == NULL)
         return -1;
 
     // Never block context termination on pending event messages
     int linger = 0;
     int rc =
-      zmq_setsockopt (monitor_socket, ZMQ_LINGER, &linger, sizeof (linger));
+      zmq_setsockopt (_monitor_socket, ZMQ_LINGER, &linger, sizeof (linger));
     if (rc == -1)
         stop_monitor (false);
 
     // Spawn the monitor socket endpoint
-    rc = zmq_bind (monitor_socket, addr_);
+    rc = zmq_bind (_monitor_socket, addr_);
     if (rc == -1)
         stop_monitor (false);
     return rc;
@@ -1713,8 +1715,8 @@ void zmq::socket_base_t::event (const std::string &addr_,
                                intptr_t value_,
                                int type_)
 {
-    scoped_lock_t lock (monitor_sync);
-    if (monitor_events & type_) {
+    scoped_lock_t lock (_monitor_sync);
+    if (_monitor_events & type_) {
         monitor_event (type_, value_, addr_);
     }
 }
@@ -1727,7 +1729,7 @@ void zmq::socket_base_t::monitor_event (int event_,
     // this is a private method which is only called from
     // contexts where the mutex has been locked before
 
-    if (monitor_socket) {
+    if (_monitor_socket) {
         // Send event in first frame
         zmq_msg_t msg;
         zmq_msg_init_size (&msg, 6);
@@ -1737,12 +1739,12 @@ void zmq::socket_base_t::monitor_event (int event_,
         uint32_t value = static_cast<uint32_t> (value_);
         memcpy (data + 0, &event, sizeof (event));
         memcpy (data + 2, &value, sizeof (value));
-        zmq_sendmsg (monitor_socket, &msg, ZMQ_SNDMORE);
+        zmq_sendmsg (_monitor_socket, &msg, ZMQ_SNDMORE);
 
         // Send address in second frame
         zmq_msg_init_size (&msg, addr_.size ());
         memcpy (zmq_msg_data (&msg), addr_.c_str (), addr_.size ());
-        zmq_sendmsg (monitor_socket, &msg, 0);
+        zmq_sendmsg (_monitor_socket, &msg, 0);
     }
 }
 
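
Note: monitor_event () above emits each event as a two-frame message over the ZMQ_PAIR monitor socket: a 6-byte first frame (uint16 event id at offset 0, uint32 value at offset 2) followed by the endpoint address. A reader connected to the monitor endpoint would unpack it roughly like this (a sketch against the public zmq.h API; the function name is illustrative):

    #include <zmq.h>
    #include <stdint.h>
    #include <string.h>
    #include <string>

    // Sketch: read one monitor event from a PAIR socket connected to
    // the monitor endpoint. Frame 1: event + value; frame 2: address.
    static int read_monitor_event (void *monitor_, uint16_t *event_,
                                   uint32_t *value_, std::string *addr_)
    {
        zmq_msg_t msg;
        zmq_msg_init (&msg);
        if (zmq_msg_recv (&msg, monitor_, 0) == -1)
            return -1;
        const uint8_t *data =
          static_cast<const uint8_t *> (zmq_msg_data (&msg));
        memcpy (event_, data + 0, sizeof (*event_));
        memcpy (value_, data + 2, sizeof (*value_));
        zmq_msg_close (&msg);

        zmq_msg_init (&msg);
        if (zmq_msg_recv (&msg, monitor_, 0) == -1)
            return -1;
        addr_->assign (static_cast<char *> (zmq_msg_data (&msg)),
                       zmq_msg_size (&msg));
        zmq_msg_close (&msg);
        return 0;
    }
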
@@ -1751,12 +1753,12 @@ void zmq::socket_base_t::stop_monitor (bool send_monitor_stopped_event_)
     // this is a private method which is only called from
     // contexts where the mutex has been locked before
 
-    if (monitor_socket) {
-        if ((monitor_events & ZMQ_EVENT_MONITOR_STOPPED)
+    if (_monitor_socket) {
+        if ((_monitor_events & ZMQ_EVENT_MONITOR_STOPPED)
            && send_monitor_stopped_event_)
            monitor_event (ZMQ_EVENT_MONITOR_STOPPED, 0, "");
-        zmq_close (monitor_socket);
-        monitor_socket = NULL;
-        monitor_events = 0;
+        zmq_close (_monitor_socket);
+        _monitor_socket = NULL;
+        _monitor_events = 0;
     }
 }

src/socket_base.hpp

@@ -203,11 +203,11 @@ class socket_base_t : public own_t,
     // Map of open endpoints.
     typedef std::pair<own_t *, pipe_t *> endpoint_pipe_t;
     typedef std::multimap<std::string, endpoint_pipe_t> endpoints_t;
-    endpoints_t endpoints;
+    endpoints_t _endpoints;
 
     // Map of open inproc endpoints.
     typedef std::multimap<std::string, pipe_t *> inprocs_t;
-    inprocs_t inprocs;
+    inprocs_t _inprocs;
 
     // To be called after processing commands or invoking any command
     // handlers explicitly. If required, it will deallocate the socket.
@@ -218,15 +218,15 @@ class socket_base_t : public own_t,
     void extract_flags (msg_t *msg_);
 
     // Used to check whether the object is a socket.
-    uint32_t tag;
+    uint32_t _tag;
 
     // If true, associated context was already terminated.
-    bool ctx_terminated;
+    bool _ctx_terminated;
 
     // If true, object should have been already destroyed. However,
     // destruction is delayed while we unwind the stack to the point
     // where it doesn't intersect the object being destroyed.
-    bool destroyed;
+    bool _destroyed;
 
     // Parse URI string.
     int
@@ -254,48 +254,48 @@ class socket_base_t : public own_t,
     void update_pipe_options (int option_);
 
     // Socket's mailbox object.
-    i_mailbox *mailbox;
+    i_mailbox *_mailbox;
 
     // List of attached pipes.
     typedef array_t<pipe_t, 3> pipes_t;
-    pipes_t pipes;
+    pipes_t _pipes;
 
     // Reaper's poller and handle of this socket within it.
-    poller_t *poller;
-    poller_t::handle_t handle;
+    poller_t *_poller;
+    poller_t::handle_t _handle;
 
     // Timestamp of when commands were processed the last time.
-    uint64_t last_tsc;
+    uint64_t _last_tsc;
 
     // Number of messages received since last command processing.
-    int ticks;
+    int _ticks;
 
     // True if the last message received had MORE flag set.
-    bool rcvmore;
+    bool _rcvmore;
 
     // Improves efficiency of time measurement.
-    clock_t clock;
+    clock_t _clock;
 
     // Monitor socket;
-    void *monitor_socket;
+    void *_monitor_socket;
 
     // Bitmask of events being monitored
-    int monitor_events;
+    int _monitor_events;
 
     // Last socket endpoint resolved URI
-    std::string last_endpoint;
+    std::string _last_endpoint;
 
     // Indicate if the socket is thread safe
-    const bool thread_safe;
+    const bool _thread_safe;
 
     // Signaler to be used in the reaping stage
-    signaler_t *reaper_signaler;
+    signaler_t *_reaper_signaler;
 
     // Mutex for synchronize access to the socket in thread safe mode
-    mutex_t sync;
+    mutex_t _sync;
 
     // Mutex to synchronize access to the monitor Pair socket
-    mutex_t monitor_sync;
+    mutex_t _monitor_sync;
 
     socket_base_t (const socket_base_t &);
     const socket_base_t &operator= (const socket_base_t &);
Some files were not shown because too many files have changed in this diff.